1 1.127.10.2 martin /* $NetBSD: rf_reconstruct.c,v 1.127.10.2 2024/04/28 12:09:08 martin Exp $ */
2 1.1 oster /*
3 1.1 oster * Copyright (c) 1995 Carnegie-Mellon University.
4 1.1 oster * All rights reserved.
5 1.1 oster *
6 1.1 oster * Author: Mark Holland
7 1.1 oster *
8 1.1 oster * Permission to use, copy, modify and distribute this software and
9 1.1 oster * its documentation is hereby granted, provided that both the copyright
10 1.1 oster * notice and this permission notice appear in all copies of the
11 1.1 oster * software, derivative works or modified versions, and any portions
12 1.1 oster * thereof, and that both notices appear in supporting documentation.
13 1.1 oster *
14 1.1 oster * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15 1.1 oster * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16 1.1 oster * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17 1.1 oster *
18 1.1 oster * Carnegie Mellon requests users of this software to return to
19 1.1 oster *
20 1.1 oster * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
21 1.1 oster * School of Computer Science
22 1.1 oster * Carnegie Mellon University
23 1.1 oster * Pittsburgh PA 15213-3890
24 1.1 oster *
25 1.1 oster * any improvements or extensions that they make and grant Carnegie the
26 1.1 oster * rights to redistribute these changes.
27 1.1 oster */
28 1.1 oster
29 1.1 oster /************************************************************
30 1.1 oster *
31 1.1 oster * rf_reconstruct.c -- code to perform on-line reconstruction
32 1.1 oster *
33 1.1 oster ************************************************************/
34 1.31 lukem
35 1.31 lukem #include <sys/cdefs.h>
36 1.127.10.2 martin __KERNEL_RCSID(0, "$NetBSD: rf_reconstruct.c,v 1.127.10.2 2024/04/28 12:09:08 martin Exp $");
37 1.1 oster
38 1.97 ad #include <sys/param.h>
39 1.1 oster #include <sys/time.h>
40 1.1 oster #include <sys/buf.h>
41 1.1 oster #include <sys/errno.h>
42 1.5 oster #include <sys/systm.h>
43 1.5 oster #include <sys/proc.h>
44 1.5 oster #include <sys/ioctl.h>
45 1.5 oster #include <sys/fcntl.h>
46 1.5 oster #include <sys/vnode.h>
47 1.110 dholland #include <sys/namei.h> /* for pathbuf */
48 1.30 oster #include <dev/raidframe/raidframevar.h>
49 1.5 oster
50 1.120 hannken #include <miscfs/specfs/specdev.h> /* for v_rdev */
51 1.120 hannken
52 1.1 oster #include "rf_raid.h"
53 1.1 oster #include "rf_reconutil.h"
54 1.1 oster #include "rf_revent.h"
55 1.1 oster #include "rf_reconbuffer.h"
56 1.1 oster #include "rf_acctrace.h"
57 1.1 oster #include "rf_etimer.h"
58 1.1 oster #include "rf_dag.h"
59 1.1 oster #include "rf_desc.h"
60 1.36 oster #include "rf_debugprint.h"
61 1.1 oster #include "rf_general.h"
62 1.1 oster #include "rf_driver.h"
63 1.1 oster #include "rf_utils.h"
64 1.1 oster #include "rf_shutdown.h"
65 1.1 oster
66 1.1 oster #include "rf_kintf.h"
67 1.1 oster
68 1.1 oster /* setting these to -1 causes them to be set to their default values if not set by debug options */
69 1.1 oster
70 1.41 oster #if RF_DEBUG_RECON
71 1.1 oster #define Dprintf(s) if (rf_reconDebug) rf_debug_printf(s,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
72 1.1 oster #define Dprintf1(s,a) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL)
73 1.1 oster #define Dprintf2(s,a,b) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL)
74 1.1 oster #define Dprintf3(s,a,b,c) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),NULL,NULL,NULL,NULL,NULL)
75 1.1 oster #define Dprintf4(s,a,b,c,d) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),NULL,NULL,NULL,NULL)
76 1.1 oster #define Dprintf5(s,a,b,c,d,e) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),NULL,NULL,NULL)
77 1.1 oster #define Dprintf6(s,a,b,c,d,e,f) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),(void *)((unsigned long)f),NULL,NULL)
78 1.1 oster #define Dprintf7(s,a,b,c,d,e,f,g) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),(void *)((unsigned long)f),(void *)((unsigned long)g),NULL)
79 1.1 oster
80 1.1 oster #define DDprintf1(s,a) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL)
81 1.1 oster #define DDprintf2(s,a,b) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL)
82 1.33 oster
83 1.41 oster #else /* RF_DEBUG_RECON */
84 1.33 oster
85 1.33 oster #define Dprintf(s) {}
86 1.33 oster #define Dprintf1(s,a) {}
87 1.33 oster #define Dprintf2(s,a,b) {}
88 1.33 oster #define Dprintf3(s,a,b,c) {}
89 1.33 oster #define Dprintf4(s,a,b,c,d) {}
90 1.33 oster #define Dprintf5(s,a,b,c,d,e) {}
91 1.33 oster #define Dprintf6(s,a,b,c,d,e,f) {}
92 1.33 oster #define Dprintf7(s,a,b,c,d,e,f,g) {}
93 1.33 oster
94 1.33 oster #define DDprintf1(s,a) {}
95 1.33 oster #define DDprintf2(s,a,b) {}
96 1.33 oster
97 1.41 oster #endif /* RF_DEBUG_RECON */
98 1.33 oster
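/*
 * Status codes returned by ProcessReconEvent() to the main loop in
 * rf_ContinueReconstructFailedDisk():
 */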
99 1.82 oster #define RF_RECON_DONE_READS 1
100 1.82 oster #define RF_RECON_READ_ERROR 2
101 1.82 oster #define RF_RECON_WRITE_ERROR 3
102 1.82 oster #define RF_RECON_READ_STOPPED 4
103 1.104 oster #define RF_RECON_WRITE_DONE 5
104 1.82 oster
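/*
 * Limits on the free reconstruction buffers kept in the per-array pool
 * created by rf_ConfigureReconstruction() below.
 */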
105 1.73 oster #define RF_MAX_FREE_RECONBUFFER 32
106 1.73 oster #define RF_MIN_FREE_RECONBUFFER 16
107 1.1 oster
108 1.69 oster static RF_RaidReconDesc_t *AllocRaidReconDesc(RF_Raid_t *, RF_RowCol_t,
109 1.69 oster RF_RaidDisk_t *, int, RF_RowCol_t);
110 1.69 oster static void FreeReconDesc(RF_RaidReconDesc_t *);
111 1.69 oster static int ProcessReconEvent(RF_Raid_t *, RF_ReconEvent_t *);
112 1.69 oster static int IssueNextReadRequest(RF_Raid_t *, RF_RowCol_t);
113 1.69 oster static int TryToRead(RF_Raid_t *, RF_RowCol_t);
114 1.87 perry static int ComputePSDiskOffsets(RF_Raid_t *, RF_StripeNum_t, RF_RowCol_t,
115 1.69 oster RF_SectorNum_t *, RF_SectorNum_t *, RF_RowCol_t *,
116 1.69 oster RF_SectorNum_t *);
117 1.69 oster static int IssueNextWriteRequest(RF_Raid_t *);
118 1.123 christos static void ReconReadDoneProc(void *, int);
119 1.123 christos static void ReconWriteDoneProc(void *, int);
120 1.69 oster static void CheckForNewMinHeadSep(RF_Raid_t *, RF_HeadSepLimit_t);
121 1.69 oster static int CheckHeadSeparation(RF_Raid_t *, RF_PerDiskReconCtrl_t *,
122 1.69 oster RF_RowCol_t, RF_HeadSepLimit_t,
123 1.69 oster RF_ReconUnitNum_t);
124 1.69 oster static int CheckForcedOrBlockedReconstruction(RF_Raid_t *,
125 1.69 oster RF_ReconParityStripeStatus_t *,
126 1.69 oster RF_PerDiskReconCtrl_t *,
127 1.69 oster RF_RowCol_t, RF_StripeNum_t,
128 1.69 oster RF_ReconUnitNum_t);
129 1.69 oster static void ForceReconReadDoneProc(void *, int);
130 1.1 oster static void rf_ShutdownReconstruction(void *);
131 1.1 oster
132 1.1 oster struct RF_ReconDoneProc_s {
133 1.4 oster void (*proc) (RF_Raid_t *, void *);
134 1.4 oster void *arg;
135 1.4 oster RF_ReconDoneProc_t *next;
136 1.1 oster };
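/*
 * RF_ReconDoneProc_s above is a node in a singly-linked list of
 * (callback, argument) pairs, presumably invoked when reconstruction
 * completes; the list itself is maintained elsewhere.
 */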
137 1.1 oster
138 1.13 oster /**************************************************************************
139 1.1 oster *
140 1.1 oster * Sets up the parameters that will be used by the reconstruction process.
141 1.1 oster * Currently there are none, except for those that the layout-specific
142 1.1 oster * configuration (e.g. rf_ConfigureDeclustered) routine sets up.
143 1.1 oster *
144 1.1 oster * in the kernel, we fire off the recon thread.
145 1.1 oster *
146 1.13 oster **************************************************************************/
147 1.87 perry static void
148 1.126 oster rf_ShutdownReconstruction(void *arg)
149 1.4 oster {
150 1.126 oster RF_Raid_t *raidPtr;
151 1.126 oster
152 1.126 oster raidPtr = (RF_Raid_t *) arg;
153 1.126 oster
154 1.126 oster pool_destroy(&raidPtr->pools.reconbuffer);
155 1.4 oster }
156 1.4 oster
157 1.87 perry int
158 1.126 oster rf_ConfigureReconstruction(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
159 1.126 oster RF_Config_t *cfgPtr)
160 1.4 oster {
161 1.4 oster
162 1.126 oster rf_pool_init(raidPtr, raidPtr->poolNames.reconbuffer, &raidPtr->pools.reconbuffer, sizeof(RF_ReconBuffer_t),
163 1.126 oster "reconbuf", RF_MIN_FREE_RECONBUFFER, RF_MAX_FREE_RECONBUFFER);
164 1.126 oster rf_ShutdownCreate(listp, rf_ShutdownReconstruction, raidPtr);
165 1.66 oster
166 1.4 oster return (0);
167 1.4 oster }
168 1.4 oster
169 1.4 oster static RF_RaidReconDesc_t *
170 1.87 perry AllocRaidReconDesc(RF_Raid_t *raidPtr, RF_RowCol_t col,
171 1.60 oster RF_RaidDisk_t *spareDiskPtr, int numDisksDone,
172 1.60 oster RF_RowCol_t scol)
173 1.1 oster {
174 1.1 oster
175 1.4 oster RF_RaidReconDesc_t *reconDesc;
176 1.4 oster
177 1.122 christos reconDesc = RF_Malloc(sizeof(*reconDesc));
178 1.4 oster reconDesc->raidPtr = raidPtr;
179 1.4 oster reconDesc->col = col;
180 1.4 oster reconDesc->spareDiskPtr = spareDiskPtr;
181 1.4 oster reconDesc->numDisksDone = numDisksDone;
182 1.4 oster reconDesc->scol = scol;
183 1.4 oster reconDesc->next = NULL;
184 1.1 oster
185 1.4 oster return (reconDesc);
186 1.1 oster }
187 1.1 oster
188 1.87 perry static void
189 1.60 oster FreeReconDesc(RF_RaidReconDesc_t *reconDesc)
190 1.1 oster {
191 1.1 oster #if RF_RECON_STATS > 0
192 1.50 oster printf("raid%d: %lu recon event waits, %lu recon delays\n",
193 1.50 oster reconDesc->raidPtr->raidid,
194 1.87 perry (long) reconDesc->numReconEventWaits,
195 1.50 oster (long) reconDesc->numReconExecDelays);
196 1.4 oster #endif /* RF_RECON_STATS > 0 */
197 1.50 oster printf("raid%d: %lu max exec ticks\n",
198 1.50 oster reconDesc->raidPtr->raidid,
199 1.50 oster (long) reconDesc->maxReconExecTicks);
200 1.80 oster RF_Free(reconDesc, sizeof(RF_RaidReconDesc_t));
201 1.1 oster }
202 1.1 oster
203 1.1 oster
204 1.13 oster /*****************************************************************************
205 1.1 oster *
206 1.1 oster * primary routine to reconstruct a failed disk. This should be called from
207 1.1 oster * within its own thread. It won't return until reconstruction completes,
208 1.1 oster * fails, or is aborted.
209 1.13 oster *****************************************************************************/
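/*
 * Returns 0 on success, EIO if the layout provides no SubmitReconBuffer
 * method, or whatever error rf_ReconstructFailedDiskBasic() reports.
 */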
210 1.87 perry int
211 1.60 oster rf_ReconstructFailedDisk(RF_Raid_t *raidPtr, RF_RowCol_t col)
212 1.4 oster {
213 1.52 jdolecek const RF_LayoutSW_t *lp;
214 1.4 oster int rc;
215 1.4 oster
216 1.4 oster lp = raidPtr->Layout.map;
217 1.4 oster if (lp->SubmitReconBuffer) {
218 1.4 oster /*
219 1.4 oster * The current infrastructure only supports reconstructing one
220 1.4 oster * disk at a time for each array.
221 1.4 oster */
222 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
223 1.4 oster while (raidPtr->reconInProgress) {
224 1.113 mrg rf_wait_cond2(raidPtr->waitForReconCond, raidPtr->mutex);
225 1.4 oster }
226 1.4 oster raidPtr->reconInProgress++;
227 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
228 1.57 oster rc = rf_ReconstructFailedDiskBasic(raidPtr, col);
229 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
230 1.6 oster raidPtr->reconInProgress--;
231 1.4 oster } else {
232 1.4 oster RF_ERRORMSG1("RECON: no way to reconstruct failed disk for arch %c\n",
233 1.4 oster lp->parityConfig);
234 1.4 oster rc = EIO;
235 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
236 1.4 oster }
237 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
238 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
239 1.4 oster return (rc);
240 1.4 oster }
241 1.4 oster
242 1.87 perry int
243 1.60 oster rf_ReconstructFailedDiskBasic(RF_Raid_t *raidPtr, RF_RowCol_t col)
244 1.4 oster {
245 1.108 jld RF_ComponentLabel_t *c_label;
246 1.4 oster RF_RaidDisk_t *spareDiskPtr = NULL;
247 1.4 oster RF_RaidReconDesc_t *reconDesc;
248 1.57 oster RF_RowCol_t scol;
249 1.4 oster int numDisksDone = 0, rc;
250 1.4 oster
251 1.4 oster /* first look for a spare drive onto which to reconstruct the data */
252 1.4 oster /* spare disk descriptors are stored in row 0. This may have to
253 1.4 oster * change eventually */
254 1.4 oster
255 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
256 1.57 oster RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed);
257 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
258 1.4 oster if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
259 1.57 oster if (raidPtr->status != rf_rs_degraded) {
260 1.57 oster RF_ERRORMSG1("Unable to reconstruct disk at col %d because status not degraded\n", col);
261 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
262 1.4 oster return (EINVAL);
263 1.4 oster }
264 1.4 oster scol = (-1);
265 1.4 oster } else {
266 1.72 oster #endif
267 1.4 oster for (scol = raidPtr->numCol; scol < raidPtr->numCol + raidPtr->numSpare; scol++) {
268 1.57 oster if (raidPtr->Disks[scol].status == rf_ds_spare) {
269 1.57 oster spareDiskPtr = &raidPtr->Disks[scol];
270 1.121 oster spareDiskPtr->status = rf_ds_rebuilding_spare;
271 1.4 oster break;
272 1.4 oster }
273 1.4 oster }
274 1.4 oster if (!spareDiskPtr) {
275 1.57 oster RF_ERRORMSG1("Unable to reconstruct disk at col %d because no spares are available\n", col);
276 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
277 1.4 oster return (ENOSPC);
278 1.4 oster }
279 1.57 oster printf("RECON: initiating reconstruction on col %d -> spare at col %d\n", col, scol);
280 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
281 1.4 oster }
282 1.72 oster #endif
283 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
284 1.1 oster
285 1.57 oster reconDesc = AllocRaidReconDesc((void *) raidPtr, col, spareDiskPtr, numDisksDone, scol);
286 1.4 oster raidPtr->reconDesc = (void *) reconDesc;
287 1.1 oster #if RF_RECON_STATS > 0
288 1.4 oster reconDesc->hsStallCount = 0;
289 1.4 oster reconDesc->numReconExecDelays = 0;
290 1.4 oster reconDesc->numReconEventWaits = 0;
291 1.4 oster #endif /* RF_RECON_STATS > 0 */
292 1.4 oster reconDesc->reconExecTimerRunning = 0;
293 1.4 oster reconDesc->reconExecTicks = 0;
294 1.4 oster reconDesc->maxReconExecTicks = 0;
295 1.4 oster rc = rf_ContinueReconstructFailedDisk(reconDesc);
296 1.5 oster
297 1.5 oster if (!rc) {
298 1.127.10.2 martin /* fix up the component label. Note that at this point col and scol have swapped places. */
299 1.127.10.2 martin /* We need to read from the *spared* disk, but use that label for the real component */
300 1.127.10.2 martin
301 1.127.10.2 martin c_label = raidget_component_label(raidPtr, col);
302 1.108 jld
303 1.108 jld raid_init_component_label(raidPtr, c_label);
304 1.108 jld c_label->row = 0;
305 1.108 jld c_label->column = col;
306 1.108 jld c_label->clean = RF_RAID_DIRTY;
307 1.108 jld c_label->status = rf_ds_optimal;
308 1.111 enami rf_component_label_set_partitionsize(c_label,
309 1.127.10.2 martin raidPtr->Disks[col].partitionSize);
310 1.15 oster
311 1.28 oster /* We've just done a rebuild based on all the other
312 1.28 oster disks, so at this point the parity is known to be
313 1.28 oster clean, even if it wasn't before. */
314 1.28 oster
315 1.28 oster /* XXX doesn't hold for RAID 6!!*/
316 1.28 oster
317 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
318 1.121 oster /* The failed disk has already been marked as rf_ds_spared
319 1.121 oster (or rf_ds_dist_spared) in
320 1.121 oster rf_ContinueReconstructFailedDisk()
321 1.121 oster so we just update the spare disk as being a used spare
322 1.121 oster */
323 1.121 oster
324 1.28 oster raidPtr->parity_good = RF_RAID_CLEAN;
325 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
326 1.28 oster
327 1.15 oster /* XXXX MORE NEEDED HERE */
328 1.127.10.2 martin raidflush_component_label(raidPtr, col);
329 1.82 oster } else {
330 1.82 oster /* Reconstruct failed. */
331 1.82 oster
332 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
333 1.82 oster /* Failed disk goes back to "failed" status */
334 1.82 oster raidPtr->Disks[col].status = rf_ds_failed;
335 1.82 oster
336 1.82 oster /* Spare disk goes back to "spare" status. */
337 1.82 oster spareDiskPtr->status = rf_ds_spare;
338 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
339 1.84 oster
340 1.5 oster }
341 1.84 oster rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
342 1.5 oster return (rc);
343 1.5 oster }
344 1.5 oster
345 1.87 perry /*
346 1.5 oster
347 1.5 oster Allow reconstructing a disk in-place -- i.e. component /dev/sd2e goes AWOL,
348 1.87 perry and you don't get a spare until the next Monday. With this function
349 1.87 perry (and hot-swappable drives) you can now put your new disk containing
350 1.5 oster /dev/sd2e on the bus, scsictl it alive, and then use raidctl(8) to
351 1.5 oster rebuild the data "on the spot".
352 1.5 oster
353 1.5 oster */
354 1.5 oster
355 1.5 oster int
356 1.60 oster rf_ReconstructInPlace(RF_Raid_t *raidPtr, RF_RowCol_t col)
357 1.5 oster {
358 1.5 oster RF_RaidDisk_t *spareDiskPtr = NULL;
359 1.5 oster RF_RaidReconDesc_t *reconDesc;
360 1.52 jdolecek const RF_LayoutSW_t *lp;
361 1.108 jld RF_ComponentLabel_t *c_label;
362 1.5 oster int numDisksDone = 0, rc;
363 1.116 oster uint64_t numsec;
364 1.116 oster unsigned int secsize;
365 1.110 dholland struct pathbuf *pb;
366 1.5 oster struct vnode *vp;
367 1.5 oster int retcode;
368 1.21 oster int ac;
369 1.5 oster
370 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
371 1.5 oster lp = raidPtr->Layout.map;
372 1.61 oster if (!lp->SubmitReconBuffer) {
373 1.61 oster RF_ERRORMSG1("RECON: no way to reconstruct failed disk for arch %c\n",
374 1.61 oster lp->parityConfig);
375 1.61 oster /* wakeup anyone who might be waiting to do a reconstruct */
376 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
377 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
378 1.61 oster return(EIO);
379 1.62 oster }
380 1.5 oster
381 1.62 oster /*
382 1.62 oster * The current infrastructure only supports reconstructing one
383 1.62 oster * disk at a time for each array.
384 1.62 oster */
385 1.5 oster
386 1.62 oster if (raidPtr->Disks[col].status != rf_ds_failed) {
387 1.62 oster /* "It's gone..." */
388 1.62 oster raidPtr->numFailures++;
389 1.62 oster raidPtr->Disks[col].status = rf_ds_failed;
390 1.62 oster raidPtr->status = rf_rs_degraded;
391 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
392 1.62 oster rf_update_component_labels(raidPtr,
393 1.62 oster RF_NORMAL_COMPONENT_UPDATE);
394 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
395 1.62 oster }
396 1.87 perry
397 1.62 oster while (raidPtr->reconInProgress) {
398 1.113 mrg rf_wait_cond2(raidPtr->waitForReconCond, raidPtr->mutex);
399 1.62 oster }
400 1.87 perry
401 1.62 oster raidPtr->reconInProgress++;
402 1.87 perry
403 1.62 oster /* first look for a spare drive onto which to reconstruct the
404 1.62 oster data. spare disk descriptors are stored in row 0. This
405 1.62 oster may have to change eventually */
406 1.87 perry
407 1.62 oster /* Actually, we don't care if it's failed or not... On a RAID
408 1.62 oster set with correct parity, this function should be callable
409 1.99 oster on any component without ill effects. */
410 1.62 oster /* RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed); */
411 1.87 perry
412 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
413 1.62 oster if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
414 1.62 oster RF_ERRORMSG1("Unable to reconstruct to disk at col %d: operation not supported for RF_DISTRIBUTE_SPARE\n", col);
415 1.87 perry
416 1.62 oster raidPtr->reconInProgress--;
417 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
418 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
419 1.62 oster return (EINVAL);
420 1.87 perry }
421 1.72 oster #endif
422 1.87 perry
423 1.87 perry /* This device may have been opened successfully the
424 1.62 oster first time. Close it before trying to open it again. */
425 1.87 perry
426 1.62 oster if (raidPtr->raid_cinfo[col].ci_vp != NULL) {
427 1.37 oster #if 0
428 1.62 oster printf("Closed the open device: %s\n",
429 1.62 oster raidPtr->Disks[col].devname);
430 1.37 oster #endif
431 1.62 oster vp = raidPtr->raid_cinfo[col].ci_vp;
432 1.62 oster ac = raidPtr->Disks[col].auto_configured;
433 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
434 1.62 oster rf_close_component(raidPtr, vp, ac);
435 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
436 1.62 oster raidPtr->raid_cinfo[col].ci_vp = NULL;
437 1.62 oster }
438 1.62 oster /* note that this disk was *not* auto_configured (any longer)*/
439 1.62 oster raidPtr->Disks[col].auto_configured = 0;
440 1.87 perry
441 1.37 oster #if 0
442 1.62 oster printf("About to (re-)open the device for rebuilding: %s\n",
443 1.62 oster raidPtr->Disks[col].devname);
444 1.37 oster #endif
445 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
446 1.110 dholland pb = pathbuf_create(raidPtr->Disks[col].devname);
447 1.110 dholland if (pb == NULL) {
448 1.110 dholland retcode = ENOMEM;
449 1.110 dholland } else {
450 1.124 mlelstv retcode = vn_bdev_openpath(pb, &vp, curlwp);
451 1.110 dholland pathbuf_destroy(pb);
452 1.110 dholland }
453 1.87 perry
454 1.62 oster if (retcode) {
455 1.124 mlelstv printf("raid%d: rebuilding: open device: %s failed: %d!\n",raidPtr->raidid,
456 1.62 oster raidPtr->Disks[col].devname, retcode);
457 1.87 perry
458 1.87 perry /* the component isn't responding properly...
459 1.62 oster must be still dead :-( */
460 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
461 1.62 oster raidPtr->reconInProgress--;
462 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
463 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
464 1.62 oster return(retcode);
465 1.63 oster }
466 1.63 oster
467 1.87 perry /* Ok, so we can at least do a lookup...
468 1.63 oster How about actually getting a vp for it? */
469 1.87 perry
470 1.116 oster retcode = getdisksize(vp, &numsec, &secsize);
471 1.63 oster if (retcode) {
472 1.115 yamt vn_close(vp, FREAD | FWRITE, kauth_cred_get());
473 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
474 1.63 oster raidPtr->reconInProgress--;
475 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
476 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
477 1.63 oster return(retcode);
478 1.62 oster }
479 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
480 1.116 oster raidPtr->Disks[col].blockSize = secsize;
481 1.116 oster raidPtr->Disks[col].numBlocks = numsec - rf_protectedSectors;
482 1.87 perry
483 1.63 oster raidPtr->raid_cinfo[col].ci_vp = vp;
484 1.120 hannken raidPtr->raid_cinfo[col].ci_dev = vp->v_rdev;
485 1.87 perry
486 1.120 hannken raidPtr->Disks[col].dev = vp->v_rdev;
487 1.87 perry
488 1.63 oster /* we allow the user to specify that only a fraction
489 1.63 oster of the disks should be used. This is just for debug:
490 1.63 oster it speeds up the parity scan */
491 1.63 oster raidPtr->Disks[col].numBlocks = raidPtr->Disks[col].numBlocks *
492 1.63 oster rf_sizePercentage / 100;
493 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
494 1.87 perry
495 1.62 oster spareDiskPtr = &raidPtr->Disks[col];
496 1.121 oster spareDiskPtr->status = rf_ds_rebuilding_spare;
497 1.87 perry
498 1.87 perry printf("raid%d: initiating in-place reconstruction on column %d\n",
499 1.62 oster raidPtr->raidid, col);
500 1.5 oster
501 1.87 perry reconDesc = AllocRaidReconDesc((void *) raidPtr, col, spareDiskPtr,
502 1.62 oster numDisksDone, col);
503 1.62 oster raidPtr->reconDesc = (void *) reconDesc;
504 1.5 oster #if RF_RECON_STATS > 0
505 1.62 oster reconDesc->hsStallCount = 0;
506 1.62 oster reconDesc->numReconExecDelays = 0;
507 1.62 oster reconDesc->numReconEventWaits = 0;
508 1.5 oster #endif /* RF_RECON_STATS > 0 */
509 1.62 oster reconDesc->reconExecTimerRunning = 0;
510 1.62 oster reconDesc->reconExecTicks = 0;
511 1.62 oster reconDesc->maxReconExecTicks = 0;
512 1.62 oster rc = rf_ContinueReconstructFailedDisk(reconDesc);
513 1.87 perry
514 1.5 oster if (!rc) {
515 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
516 1.5 oster /* Need to set these here, as at this point it'll be claiming
517 1.5 oster that the disk is in rf_ds_spared! But we know better :-) */
518 1.87 perry
519 1.57 oster raidPtr->Disks[col].status = rf_ds_optimal;
520 1.57 oster raidPtr->status = rf_rs_optimal;
521 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
522 1.87 perry
523 1.5 oster /* fix up the component label */
524 1.108 jld c_label = raidget_component_label(raidPtr, col);
525 1.16 oster
526 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
527 1.108 jld raid_init_component_label(raidPtr, c_label);
528 1.16 oster
529 1.108 jld c_label->row = 0;
530 1.108 jld c_label->column = col;
531 1.28 oster
532 1.28 oster /* We've just done a rebuild based on all the other
533 1.28 oster disks, so at this point the parity is known to be
534 1.28 oster clean, even if it wasn't before. */
535 1.28 oster
536 1.28 oster /* XXX doesn't hold for RAID 6!!*/
537 1.28 oster
538 1.28 oster raidPtr->parity_good = RF_RAID_CLEAN;
539 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
540 1.87 perry
541 1.108 jld raidflush_component_label(raidPtr, col);
542 1.82 oster } else {
543 1.82 oster /* Reconstruct-in-place failed. Disk goes back to
544 1.82 oster "failed" status, regardless of what it was before. */
545 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
546 1.82 oster raidPtr->Disks[col].status = rf_ds_failed;
547 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
548 1.82 oster }
549 1.5 oster
550 1.84 oster rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
551 1.84 oster
552 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
553 1.82 oster raidPtr->reconInProgress--;
554 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
555 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
556 1.87 perry
557 1.4 oster return (rc);
558 1.4 oster }
559 1.4 oster
560 1.4 oster
561 1.87 perry int
562 1.60 oster rf_ContinueReconstructFailedDisk(RF_RaidReconDesc_t *reconDesc)
563 1.4 oster {
564 1.4 oster RF_Raid_t *raidPtr = reconDesc->raidPtr;
565 1.4 oster RF_RowCol_t col = reconDesc->col;
566 1.4 oster RF_RowCol_t scol = reconDesc->scol;
567 1.4 oster RF_ReconMap_t *mapPtr;
568 1.46 oster RF_ReconCtrl_t *tmp_reconctrl;
569 1.4 oster RF_ReconEvent_t *event;
570 1.104 oster RF_StripeCount_t incPSID,lastPSID,num_writes,pending_writes,prev;
571 1.118 oster #if RF_INCLUDE_RAID5_RS > 0
572 1.118 oster RF_StripeCount_t startPSID,endPSID,aPSID,bPSID,offPSID;
573 1.118 oster #endif
574 1.104 oster RF_ReconUnitCount_t RUsPerPU;
575 1.4 oster struct timeval etime, elpsd;
576 1.4 oster unsigned long xor_s, xor_resid_us;
577 1.54 simonb int i, ds;
578 1.104 oster int status, done;
579 1.82 oster int recon_error, write_error;
580 1.4 oster
581 1.78 oster raidPtr->accumXorTimeUs = 0;
582 1.67 oster #if RF_ACC_TRACE > 0
583 1.78 oster /* create one trace record per physical disk */
584 1.122 christos raidPtr->recon_tracerecs =
585 1.122 christos RF_Malloc(raidPtr->numCol * sizeof(*raidPtr->recon_tracerecs));
586 1.67 oster #endif
587 1.87 perry
588 1.78 oster /* quiesce the array prior to starting recon. this is needed
589 1.78 oster * to assure no nasty interactions with pending user writes.
590 1.78 oster * We need to do this before we change the disk or row status. */
591 1.87 perry
592 1.78 oster Dprintf("RECON: begin request suspend\n");
593 1.78 oster rf_SuspendNewRequestsAndWait(raidPtr);
594 1.78 oster Dprintf("RECON: end request suspend\n");
595 1.87 perry
596 1.78 oster /* allocate our RF_ReconCtrl_t before we protect raidPtr->reconControl */
597 1.78 oster tmp_reconctrl = rf_MakeReconControl(reconDesc, col, scol);
598 1.87 perry
599 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
600 1.87 perry
601 1.78 oster /* create the reconstruction control pointer and install it in
602 1.78 oster * the right slot */
603 1.78 oster raidPtr->reconControl = tmp_reconctrl;
604 1.78 oster mapPtr = raidPtr->reconControl->reconMap;
605 1.88 oster raidPtr->reconControl->numRUsTotal = mapPtr->totalRUs;
606 1.88 oster raidPtr->reconControl->numRUsComplete = 0;
607 1.78 oster raidPtr->status = rf_rs_reconstructing;
608 1.78 oster raidPtr->Disks[col].status = rf_ds_reconstructing;
609 1.78 oster raidPtr->Disks[col].spareCol = scol;
610 1.87 perry
611 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
612 1.87 perry
613 1.78 oster RF_GETTIME(raidPtr->reconControl->starttime);
614 1.87 perry
615 1.78 oster Dprintf("RECON: resume requests\n");
616 1.78 oster rf_ResumeNewRequests(raidPtr);
617 1.87 perry
618 1.4 oster
619 1.78 oster mapPtr = raidPtr->reconControl->reconMap;
620 1.104 oster
621 1.104 oster incPSID = RF_RECONMAP_SIZE;
622 1.125 oster lastPSID = raidPtr->Layout.numStripe / raidPtr->Layout.SUsPerPU - 1;
623 1.104 oster RUsPerPU = raidPtr->Layout.SUsPerPU / raidPtr->Layout.SUsPerRU;
624 1.82 oster recon_error = 0;
625 1.82 oster write_error = 0;
626 1.104 oster pending_writes = incPSID;
627 1.118 oster raidPtr->reconControl->lastPSID = incPSID - 1;
628 1.118 oster
629 1.118 oster /* bounds check raidPtr->reconControl->lastPSID and
630 1.118 oster pending_writes so that we don't attempt to wait for more IO
631 1.118 oster than can possibly happen */
632 1.118 oster
633 1.118 oster if (raidPtr->reconControl->lastPSID > lastPSID)
634 1.118 oster raidPtr->reconControl->lastPSID = lastPSID;
635 1.118 oster
636 1.118 oster if (pending_writes > lastPSID)
637 1.125 oster pending_writes = lastPSID + 1;
638 1.104 oster
639 1.104 oster /* start the actual reconstruction */
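/*
 * Reconstruction proceeds in chunks of up to RF_RECONMAP_SIZE parity
 * stripes: each pass issues a read on every surviving column, processes
 * events until the reads finish, waits for the corresponding writes to
 * complete, and then advances lastPSID to the next chunk.
 */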
640 1.82 oster
641 1.104 oster done = 0;
642 1.104 oster while (!done) {
643 1.104 oster
644 1.127.10.2 martin if (raidPtr->waitShutdown ||
645 1.127.10.2 martin raidPtr->abortRecon[col]) {
646 1.127.10.2 martin /*
647 1.127.10.2 martin * someone is unconfiguring this array
648 1.127.10.2 martin * or a component has failed...
649 1.127.10.2 martin * bail on the reconstruct.
650 1.127.10.2 martin */
651 1.106 oster recon_error = 1;
652 1.127.10.2 martin raidPtr->abortRecon[col] = 0;
653 1.106 oster break;
654 1.106 oster }
655 1.106 oster
656 1.104 oster num_writes = 0;
657 1.118 oster
658 1.118 oster #if RF_INCLUDE_RAID5_RS > 0
659 1.118 oster /* For RAID5 with Rotated Spares we will be 'short'
660 1.118 oster some number of writes since no writes will get
661 1.118 oster issued for stripes where the spare is on the
662 1.118 oster component being rebuilt. Account for the shortage
663 1.118 oster here so that we don't hang indefinitely below
664 1.118 oster waiting for writes to complete that were never
665 1.118 oster scheduled.
666 1.118 oster
667 1.118 oster XXX: Should be fixed for PARITY_DECLUSTERING and
668 1.118 oster others too!
669 1.118 oster
670 1.118 oster */
671 1.118 oster
672 1.118 oster if (raidPtr->Layout.numDataCol <
673 1.118 oster raidPtr->numCol - raidPtr->Layout.numParityCol) {
674 1.118 oster /* numDataCol is at least 2 less than numCol, so
675 1.118 oster should be RAID 5 with Rotated Spares */
676 1.118 oster
677 1.118 oster /* XXX need to update for RAID 6 */
678 1.118 oster
679 1.118 oster startPSID = raidPtr->reconControl->lastPSID - pending_writes + 1;
680 1.118 oster endPSID = raidPtr->reconControl->lastPSID;
681 1.118 oster
682 1.118 oster offPSID = raidPtr->numCol - col - 1;
683 1.118 oster
684 1.118 oster aPSID = startPSID - startPSID % raidPtr->numCol + offPSID;
685 1.118 oster if (aPSID < startPSID) {
686 1.118 oster aPSID += raidPtr->numCol;
687 1.118 oster }
688 1.118 oster
689 1.118 oster bPSID = endPSID - ((endPSID - offPSID) % raidPtr->numCol);
690 1.118 oster
691 1.118 oster if (aPSID < endPSID) {
692 1.118 oster num_writes = ((bPSID - aPSID) / raidPtr->numCol) + 1;
693 1.118 oster }
694 1.118 oster
695 1.118 oster if ((aPSID == endPSID) && (bPSID == endPSID)) {
696 1.118 oster num_writes++;
697 1.118 oster }
698 1.118 oster }
699 1.118 oster #endif
700 1.104 oster
701 1.104 oster /* issue a read for each surviving disk */
702 1.104 oster
703 1.104 oster reconDesc->numDisksDone = 0;
704 1.104 oster for (i = 0; i < raidPtr->numCol; i++) {
705 1.104 oster if (i != col) {
706 1.104 oster /* find and issue the next I/O on the
707 1.104 oster * indicated disk */
708 1.104 oster if (IssueNextReadRequest(raidPtr, i)) {
709 1.104 oster Dprintf1("RECON: done issuing for c%d\n", i);
710 1.104 oster reconDesc->numDisksDone++;
711 1.104 oster }
712 1.104 oster }
713 1.104 oster }
714 1.87 perry
715 1.104 oster /* process reconstruction events until all disks report that
716 1.104 oster * they've completed all work */
717 1.82 oster
718 1.104 oster while (reconDesc->numDisksDone < raidPtr->numCol - 1) {
719 1.82 oster
720 1.104 oster event = rf_GetNextReconEvent(reconDesc);
721 1.104 oster status = ProcessReconEvent(raidPtr, event);
722 1.104 oster
723 1.104 oster /* the normal case is that a read completes, and all is well. */
724 1.104 oster if (status == RF_RECON_DONE_READS) {
725 1.104 oster reconDesc->numDisksDone++;
726 1.104 oster } else if ((status == RF_RECON_READ_ERROR) ||
727 1.104 oster (status == RF_RECON_WRITE_ERROR)) {
728 1.104 oster /* an error was encountered while reconstructing...
729 1.104 oster Pretend we've finished this disk.
730 1.104 oster */
731 1.104 oster recon_error = 1;
732 1.104 oster raidPtr->reconControl->error = 1;
733 1.104 oster
734 1.104 oster /* bump the numDisksDone count for reads,
735 1.104 oster but not for writes */
736 1.104 oster if (status == RF_RECON_READ_ERROR)
737 1.104 oster reconDesc->numDisksDone++;
738 1.104 oster
739 1.104 oster /* write errors are special -- when we are
740 1.104 oster done dealing with the reads that are
741 1.104 oster finished, we don't want to wait for any
742 1.104 oster writes */
743 1.107 oster if (status == RF_RECON_WRITE_ERROR) {
744 1.104 oster write_error = 1;
745 1.107 oster num_writes++;
746 1.107 oster }
747 1.104 oster
748 1.104 oster } else if (status == RF_RECON_READ_STOPPED) {
749 1.104 oster /* count this component as being "done" */
750 1.82 oster reconDesc->numDisksDone++;
751 1.104 oster } else if (status == RF_RECON_WRITE_DONE) {
752 1.104 oster num_writes++;
753 1.104 oster }
754 1.104 oster
755 1.104 oster if (recon_error) {
756 1.104 oster /* make sure any stragglers are woken up so that
757 1.104 oster their threads will complete, and we can get out
758 1.104 oster of here with all IO processed */
759 1.104 oster
760 1.104 oster rf_WakeupHeadSepCBWaiters(raidPtr);
761 1.104 oster }
762 1.104 oster
763 1.104 oster raidPtr->reconControl->numRUsTotal =
764 1.104 oster mapPtr->totalRUs;
765 1.104 oster raidPtr->reconControl->numRUsComplete =
766 1.104 oster mapPtr->totalRUs -
767 1.104 oster rf_UnitsLeftToReconstruct(mapPtr);
768 1.82 oster
769 1.104 oster #if RF_DEBUG_RECON
770 1.104 oster raidPtr->reconControl->percentComplete =
771 1.104 oster (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
772 1.104 oster if (rf_prReconSched) {
773 1.104 oster rf_PrintReconSchedule(raidPtr->reconControl->reconMap, &(raidPtr->reconControl->starttime));
774 1.82 oster }
775 1.104 oster #endif
776 1.82 oster }
777 1.82 oster
778 1.118 oster /* reads done, wakeup any waiters, and then wait for writes */
779 1.82 oster
780 1.104 oster rf_WakeupHeadSepCBWaiters(raidPtr);
781 1.104 oster
782 1.104 oster while (!recon_error && (num_writes < pending_writes)) {
783 1.104 oster event = rf_GetNextReconEvent(reconDesc);
784 1.104 oster status = ProcessReconEvent(raidPtr, event);
785 1.104 oster
786 1.104 oster if (status == RF_RECON_WRITE_ERROR) {
787 1.107 oster num_writes++;
788 1.104 oster recon_error = 1;
789 1.104 oster raidPtr->reconControl->error = 1;
790 1.104 oster /* an error was encountered at the very end... bail */
791 1.104 oster } else if (status == RF_RECON_WRITE_DONE) {
792 1.104 oster num_writes++;
793 1.107 oster } /* else it's something else, and we don't care */
794 1.104 oster }
795 1.104 oster if (recon_error ||
796 1.104 oster (raidPtr->reconControl->lastPSID == lastPSID)) {
797 1.104 oster done = 1;
798 1.104 oster break;
799 1.104 oster }
800 1.104 oster
801 1.104 oster prev = raidPtr->reconControl->lastPSID;
802 1.104 oster raidPtr->reconControl->lastPSID += incPSID;
803 1.104 oster
804 1.104 oster if (raidPtr->reconControl->lastPSID > lastPSID) {
805 1.104 oster pending_writes = lastPSID - prev;
806 1.104 oster raidPtr->reconControl->lastPSID = lastPSID;
807 1.104 oster }
808 1.104 oster /* back down curPSID to get ready for the next round... */
809 1.104 oster for (i = 0; i < raidPtr->numCol; i++) {
810 1.104 oster if (i != col) {
811 1.104 oster raidPtr->reconControl->perDiskInfo[i].curPSID--;
812 1.104 oster raidPtr->reconControl->perDiskInfo[i].ru_count = RUsPerPU - 1;
813 1.104 oster }
814 1.78 oster }
815 1.78 oster }
816 1.87 perry
817 1.78 oster mapPtr = raidPtr->reconControl->reconMap;
818 1.78 oster if (rf_reconDebug) {
819 1.78 oster printf("RECON: all reads completed\n");
820 1.78 oster }
821 1.78 oster /* at this point all the reads have completed. We now wait
822 1.78 oster * for any pending writes to complete, and then we're done */
823 1.82 oster
824 1.82 oster while (!recon_error && rf_UnitsLeftToReconstruct(raidPtr->reconControl->reconMap) > 0) {
825 1.87 perry
826 1.78 oster event = rf_GetNextReconEvent(reconDesc);
827 1.83 oster status = ProcessReconEvent(raidPtr, event);
828 1.82 oster
829 1.82 oster if (status == RF_RECON_WRITE_ERROR) {
830 1.82 oster recon_error = 1;
831 1.87 perry raidPtr->reconControl->error = 1;
832 1.82 oster /* an error was encountered at the very end... bail */
833 1.82 oster } else {
834 1.82 oster #if RF_DEBUG_RECON
835 1.82 oster raidPtr->reconControl->percentComplete = 100 - (rf_UnitsLeftToReconstruct(mapPtr) * 100 / mapPtr->totalRUs);
836 1.82 oster if (rf_prReconSched) {
837 1.82 oster rf_PrintReconSchedule(raidPtr->reconControl->reconMap, &(raidPtr->reconControl->starttime));
838 1.82 oster }
839 1.82 oster #endif
840 1.82 oster }
841 1.82 oster }
842 1.82 oster
843 1.82 oster if (recon_error) {
844 1.82 oster /* we've encountered an error in reconstructing. */
845 1.82 oster printf("raid%d: reconstruction failed.\n", raidPtr->raidid);
846 1.87 perry
847 1.82 oster /* we start by blocking IO to the RAID set. */
848 1.82 oster rf_SuspendNewRequestsAndWait(raidPtr);
849 1.87 perry
850 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
851 1.82 oster /* mark set as being degraded, rather than
852 1.82 oster rf_rs_reconstructing as we were before the problem.
853 1.82 oster After this is done we can update status of the
854 1.82 oster component disks without worrying about someone
855 1.82 oster trying to read from a failed component.
856 1.82 oster */
857 1.82 oster raidPtr->status = rf_rs_degraded;
858 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
859 1.87 perry
860 1.82 oster /* resume IO */
861 1.87 perry rf_ResumeNewRequests(raidPtr);
862 1.87 perry
863 1.82 oster /* At this point there are two cases:
864 1.82 oster 1) If we've experienced a read error, then we've
865 1.82 oster already waited for all the reads we're going to get,
866 1.82 oster and we just need to wait for the writes.
867 1.82 oster
868 1.82 oster 2) If we've experienced a write error, we've also
869 1.82 oster already waited for all the reads to complete,
870 1.82 oster but there is little point in waiting for the writes --
871 1.82 oster when they do complete, they will just be ignored.
872 1.82 oster
873 1.87 perry So we just wait for writes to complete if we didn't have a
874 1.82 oster write error.
875 1.82 oster */
876 1.82 oster
877 1.82 oster if (!write_error) {
878 1.82 oster /* wait for writes to complete */
879 1.82 oster while (raidPtr->reconControl->pending_writes > 0) {
880 1.83 oster
881 1.82 oster event = rf_GetNextReconEvent(reconDesc);
882 1.82 oster status = ProcessReconEvent(raidPtr, event);
883 1.82 oster
884 1.82 oster if (status == RF_RECON_WRITE_ERROR) {
885 1.87 perry raidPtr->reconControl->error = 1;
886 1.82 oster /* an error was encountered at the very end... bail.
887 1.82 oster This will be very bad news for the user, since
888 1.82 oster at this point there will have been a read error
889 1.82 oster on one component, and a write error on another!
890 1.82 oster */
891 1.82 oster break;
892 1.82 oster }
893 1.82 oster }
894 1.4 oster }
895 1.82 oster
896 1.87 perry
897 1.82 oster /* cleanup */
898 1.82 oster
899 1.82 oster /* drain the event queue - after waiting for the writes above,
900 1.82 oster there shouldn't be much (if anything!) left in the queue. */
901 1.82 oster
902 1.82 oster rf_DrainReconEventQueue(reconDesc);
903 1.87 perry
904 1.82 oster rf_FreeReconControl(raidPtr);
905 1.127.10.2 martin
906 1.82 oster #if RF_ACC_TRACE > 0
907 1.82 oster RF_Free(raidPtr->recon_tracerecs, raidPtr->numCol * sizeof(RF_AccTraceEntry_t));
908 1.41 oster #endif
909 1.82 oster FreeReconDesc(reconDesc);
910 1.82 oster
911 1.82 oster return (1);
912 1.78 oster }
913 1.14 oster
914 1.78 oster /* Success: mark the dead disk as reconstructed. We quiesce
915 1.78 oster * the array here to assure no nasty interactions with pending
916 1.78 oster * user accesses when we free up the psstatus structure as
917 1.78 oster * part of FreeReconControl() */
918 1.87 perry
919 1.78 oster rf_SuspendNewRequestsAndWait(raidPtr);
920 1.87 perry
921 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
922 1.78 oster raidPtr->numFailures--;
923 1.78 oster ds = (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE);
924 1.78 oster raidPtr->Disks[col].status = (ds) ? rf_ds_dist_spared : rf_ds_spared;
925 1.78 oster raidPtr->status = (ds) ? rf_rs_reconfigured : rf_rs_optimal;
926 1.127.10.2 martin
927 1.127.10.2 martin if (col != scol) {
928 1.127.10.2 martin /* swap the names, raid_cinfo. queues stay where they are. */
929 1.127.10.2 martin rf_swap_components(raidPtr, col, scol);
930 1.127.10.2 martin
931 1.127.10.2 martin /* mark the new spare as good */
932 1.127.10.2 martin raidPtr->Disks[col].status = rf_ds_optimal;
933 1.127.10.2 martin
934 1.127.10.2 martin for (i = scol; i < raidPtr->numCol+raidPtr->numSpare-1; i++) {
935 1.127.10.2 martin /* now we work our way up the array, swapping as we go. */
936 1.127.10.2 martin /* swap with the one at the next position, which must be there */
937 1.127.10.2 martin rf_swap_components(raidPtr, i, i+1);
938 1.127.10.2 martin }
939 1.127.10.2 martin raidPtr->numSpare--;
940 1.127.10.2 martin }
941 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
942 1.78 oster RF_GETTIME(etime);
943 1.78 oster RF_TIMEVAL_DIFF(&(raidPtr->reconControl->starttime), &etime, &elpsd);
944 1.87 perry
945 1.78 oster rf_ResumeNewRequests(raidPtr);
946 1.87 perry
947 1.87 perry printf("raid%d: Reconstruction of disk at col %d completed\n",
948 1.78 oster raidPtr->raidid, col);
949 1.78 oster xor_s = raidPtr->accumXorTimeUs / 1000000;
950 1.78 oster xor_resid_us = raidPtr->accumXorTimeUs % 1000000;
951 1.78 oster printf("raid%d: Recon time was %d.%06d seconds, accumulated XOR time was %ld us (%ld.%06ld)\n",
952 1.87 perry raidPtr->raidid,
953 1.87 perry (int) elpsd.tv_sec, (int) elpsd.tv_usec,
954 1.78 oster raidPtr->accumXorTimeUs, xor_s, xor_resid_us);
955 1.78 oster printf("raid%d: (start time %d sec %d usec, end time %d sec %d usec)\n",
956 1.78 oster raidPtr->raidid,
957 1.78 oster (int) raidPtr->reconControl->starttime.tv_sec,
958 1.78 oster (int) raidPtr->reconControl->starttime.tv_usec,
959 1.78 oster (int) etime.tv_sec, (int) etime.tv_usec);
960 1.1 oster #if RF_RECON_STATS > 0
961 1.78 oster printf("raid%d: Total head-sep stall count was %d\n",
962 1.78 oster raidPtr->raidid, (int) reconDesc->hsStallCount);
963 1.4 oster #endif /* RF_RECON_STATS > 0 */
964 1.78 oster rf_FreeReconControl(raidPtr);
965 1.67 oster #if RF_ACC_TRACE > 0
966 1.78 oster RF_Free(raidPtr->recon_tracerecs, raidPtr->numCol * sizeof(RF_AccTraceEntry_t));
967 1.67 oster #endif
968 1.78 oster FreeReconDesc(reconDesc);
969 1.87 perry
970 1.4 oster return (0);
971 1.82 oster
972 1.1 oster }
973 1.13 oster /*****************************************************************************
974 1.1 oster * do the right thing upon each reconstruction event.
975 1.13 oster *****************************************************************************/
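/*
 * Returns one of the RF_RECON_* status codes (or 0); the event descriptor
 * is always freed before returning.
 */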
976 1.87 perry static int
977 1.60 oster ProcessReconEvent(RF_Raid_t *raidPtr, RF_ReconEvent_t *event)
978 1.4 oster {
979 1.4 oster int retcode = 0, submitblocked;
980 1.4 oster RF_ReconBuffer_t *rbuf;
981 1.4 oster RF_SectorCount_t sectorsPerRU;
982 1.4 oster
983 1.82 oster retcode = RF_RECON_READ_STOPPED;
984 1.82 oster
985 1.4 oster Dprintf1("RECON: ProcessReconEvent type %d\n", event->type);
986 1.104 oster
987 1.4 oster switch (event->type) {
988 1.4 oster
989 1.4 oster /* a read I/O has completed */
990 1.4 oster case RF_REVENT_READDONE:
991 1.57 oster rbuf = raidPtr->reconControl->perDiskInfo[event->col].rbuf;
992 1.57 oster Dprintf2("RECON: READDONE EVENT: col %d psid %ld\n",
993 1.57 oster event->col, rbuf->parityStripeID);
994 1.4 oster Dprintf7("RECON: done read psid %ld buf %lx %02x %02x %02x %02x %02x\n",
995 1.4 oster rbuf->parityStripeID, rbuf->buffer, rbuf->buffer[0] & 0xff, rbuf->buffer[1] & 0xff,
996 1.4 oster rbuf->buffer[2] & 0xff, rbuf->buffer[3] & 0xff, rbuf->buffer[4] & 0xff);
997 1.4 oster rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
998 1.82 oster if (!raidPtr->reconControl->error) {
999 1.82 oster submitblocked = rf_SubmitReconBuffer(rbuf, 0, 0);
1000 1.82 oster Dprintf1("RECON: submitblocked=%d\n", submitblocked);
1001 1.82 oster if (!submitblocked)
1002 1.82 oster retcode = IssueNextReadRequest(raidPtr, event->col);
1003 1.89 oster else
1004 1.89 oster retcode = 0;
1005 1.82 oster }
1006 1.4 oster break;
1007 1.4 oster
1008 1.4 oster /* a write I/O has completed */
1009 1.4 oster case RF_REVENT_WRITEDONE:
1010 1.40 oster #if RF_DEBUG_RECON
1011 1.4 oster if (rf_floatingRbufDebug) {
1012 1.4 oster rf_CheckFloatingRbufCount(raidPtr, 1);
1013 1.4 oster }
1014 1.38 oster #endif
1015 1.4 oster sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU;
1016 1.4 oster rbuf = (RF_ReconBuffer_t *) event->arg;
1017 1.4 oster rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
1018 1.4 oster Dprintf3("RECON: WRITEDONE EVENT: psid %d ru %d (%d %% complete)\n",
1019 1.57 oster rbuf->parityStripeID, rbuf->which_ru, raidPtr->reconControl->percentComplete);
1020 1.57 oster rf_ReconMapUpdate(raidPtr, raidPtr->reconControl->reconMap,
1021 1.4 oster rbuf->failedDiskSectorOffset, rbuf->failedDiskSectorOffset + sectorsPerRU - 1);
1022 1.57 oster rf_RemoveFromActiveReconTable(raidPtr, rbuf->parityStripeID, rbuf->which_ru);
1023 1.4 oster
1024 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1025 1.82 oster raidPtr->reconControl->pending_writes--;
1026 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1027 1.82 oster
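/*
 * Returning a floating reconbuffer to the free list is serialized through
 * the rb_lock flag, which the code below uses as a sleepable lock built on
 * rb_mutex and rb_cv.
 */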
1028 1.4 oster if (rbuf->type == RF_RBUF_TYPE_FLOATING) {
1029 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1030 1.76 oster while(raidPtr->reconControl->rb_lock) {
1031 1.112 mrg rf_wait_cond2(raidPtr->reconControl->rb_cv,
1032 1.112 mrg raidPtr->reconControl->rb_mutex);
1033 1.76 oster }
1034 1.76 oster raidPtr->reconControl->rb_lock = 1;
1035 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1036 1.76 oster
1037 1.4 oster raidPtr->numFullReconBuffers--;
1038 1.57 oster rf_ReleaseFloatingReconBuffer(raidPtr, rbuf);
1039 1.76 oster
1040 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1041 1.76 oster raidPtr->reconControl->rb_lock = 0;
1042 1.112 mrg rf_broadcast_cond2(raidPtr->reconControl->rb_cv);
1043 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1044 1.4 oster } else
1045 1.4 oster if (rbuf->type == RF_RBUF_TYPE_FORCED)
1046 1.4 oster rf_FreeReconBuffer(rbuf);
1047 1.4 oster else
1048 1.4 oster RF_ASSERT(0);
1049 1.104 oster retcode = RF_RECON_WRITE_DONE;
1050 1.4 oster break;
1051 1.4 oster
1052 1.4 oster case RF_REVENT_BUFCLEAR: /* A buffer-stall condition has been
1053 1.4 oster * cleared */
1054 1.57 oster Dprintf1("RECON: BUFCLEAR EVENT: col %d\n", event->col);
1055 1.82 oster if (!raidPtr->reconControl->error) {
1056 1.87 perry submitblocked = rf_SubmitReconBuffer(raidPtr->reconControl->perDiskInfo[event->col].rbuf,
1057 1.82 oster 0, (int) (long) event->arg);
1058 1.82 oster RF_ASSERT(!submitblocked); /* we wouldn't have gotten the
1059 1.82 oster * BUFCLEAR event if we
1060 1.82 oster * couldn't submit */
1061 1.82 oster retcode = IssueNextReadRequest(raidPtr, event->col);
1062 1.82 oster }
1063 1.4 oster break;
1064 1.4 oster
1065 1.4 oster case RF_REVENT_BLOCKCLEAR: /* A user-write reconstruction
1066 1.4 oster * blockage has been cleared */
1067 1.57 oster DDprintf1("RECON: BLOCKCLEAR EVENT: col %d\n", event->col);
1068 1.82 oster if (!raidPtr->reconControl->error) {
1069 1.82 oster retcode = TryToRead(raidPtr, event->col);
1070 1.82 oster }
1071 1.4 oster break;
1072 1.4 oster
1073 1.4 oster case RF_REVENT_HEADSEPCLEAR: /* A max-head-separation
1074 1.4 oster * reconstruction blockage has been
1075 1.4 oster * cleared */
1076 1.57 oster Dprintf1("RECON: HEADSEPCLEAR EVENT: col %d\n", event->col);
1077 1.82 oster if (!raidPtr->reconControl->error) {
1078 1.82 oster retcode = TryToRead(raidPtr, event->col);
1079 1.82 oster }
1080 1.4 oster break;
1081 1.4 oster
1082 1.4 oster /* a buffer has become ready to write */
1083 1.4 oster case RF_REVENT_BUFREADY:
1084 1.57 oster Dprintf1("RECON: BUFREADY EVENT: col %d\n", event->col);
1085 1.82 oster if (!raidPtr->reconControl->error) {
1086 1.82 oster retcode = IssueNextWriteRequest(raidPtr);
1087 1.40 oster #if RF_DEBUG_RECON
1088 1.82 oster if (rf_floatingRbufDebug) {
1089 1.82 oster rf_CheckFloatingRbufCount(raidPtr, 1);
1090 1.82 oster }
1091 1.82 oster #endif
1092 1.4 oster }
1093 1.4 oster break;
1094 1.4 oster
1095 1.4 oster /* we need to skip the current RU entirely because it got
1096 1.4 oster * recon'd while we were waiting for something else to happen */
1097 1.4 oster case RF_REVENT_SKIP:
1098 1.57 oster DDprintf1("RECON: SKIP EVENT: col %d\n", event->col);
1099 1.87 perry if (!raidPtr->reconControl->error) {
1100 1.82 oster retcode = IssueNextReadRequest(raidPtr, event->col);
1101 1.82 oster }
1102 1.4 oster break;
1103 1.4 oster
1104 1.4 oster /* a forced-reconstruction read access has completed. Just
1105 1.4 oster * submit the buffer */
1106 1.4 oster case RF_REVENT_FORCEDREADDONE:
1107 1.4 oster rbuf = (RF_ReconBuffer_t *) event->arg;
1108 1.4 oster rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
1109 1.57 oster DDprintf1("RECON: FORCEDREADDONE EVENT: col %d\n", event->col);
1110 1.82 oster if (!raidPtr->reconControl->error) {
1111 1.82 oster submitblocked = rf_SubmitReconBuffer(rbuf, 1, 0);
1112 1.82 oster RF_ASSERT(!submitblocked);
1113 1.103 oster retcode = 0;
1114 1.82 oster }
1115 1.4 oster break;
1116 1.4 oster
1117 1.70 oster /* A read I/O failed to complete */
1118 1.70 oster case RF_REVENT_READ_FAILED:
1119 1.82 oster retcode = RF_RECON_READ_ERROR;
1120 1.82 oster break;
1121 1.70 oster
1122 1.70 oster /* A write I/O failed to complete */
1123 1.70 oster case RF_REVENT_WRITE_FAILED:
1124 1.82 oster retcode = RF_RECON_WRITE_ERROR;
1125 1.82 oster
1126 1.107 oster /* This is an error, but it was a pending write.
1127 1.107 oster Account for it. */
1128 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1129 1.107 oster raidPtr->reconControl->pending_writes--;
1130 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1131 1.107 oster
1132 1.82 oster rbuf = (RF_ReconBuffer_t *) event->arg;
1133 1.82 oster
1134 1.82 oster /* cleanup the disk queue data */
1135 1.82 oster rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
1136 1.82 oster
1137 1.82 oster /* At this point we're erroring out, badly, and floatingRbufs
1138 1.82 oster may not even be valid. Rather than putting this back onto
1139 1.82 oster the floatingRbufs list, just arrange for its immediate
1140 1.82 oster destruction.
1141 1.82 oster */
1142 1.82 oster rf_FreeReconBuffer(rbuf);
1143 1.82 oster break;
1144 1.70 oster
1145 1.70 oster /* a forced read I/O failed to complete */
1146 1.70 oster case RF_REVENT_FORCEDREAD_FAILED:
1147 1.82 oster retcode = RF_RECON_READ_ERROR;
1148 1.82 oster break;
1149 1.70 oster
1150 1.4 oster default:
1151 1.4 oster RF_PANIC();
1152 1.4 oster }
1153 1.126 oster rf_FreeReconEventDesc(raidPtr, event);
1154 1.4 oster return (retcode);
1155 1.1 oster }
1156 1.13 oster /*****************************************************************************
1157 1.1 oster *
1158 1.13 oster * find the next thing that's needed on the indicated disk, and issue
1159 1.13 oster * a read request for it. We assume that the reconstruction buffer
1160 1.13 oster * associated with this process is free to receive the data. If
1161 1.13 oster * reconstruction is blocked on the indicated RU, we issue a
1162 1.13 oster * blockage-release request instead of a physical disk read request.
1163 1.13 oster * If the current disk gets too far ahead of the others, we issue a
1164 1.13 oster * head-separation wait request and return.
1165 1.13 oster *
1166 1.13 oster * ctrl->{ru_count, curPSID, diskOffset} and
1167 1.22 soren * rbuf->failedDiskSectorOffset are maintained to point to the unit
1168 1.13 oster * we're currently accessing. Note that this deviates from the
1169 1.13 oster * standard C idiom of having counters point to the next thing to be
1170 1.13 oster * accessed. This allows us to easily retry when we're blocked by
1171 1.13 oster * head separation or reconstruction-blockage events.
1172 1.1 oster *
1173 1.13 oster *****************************************************************************/
1174 1.87 perry static int
1175 1.60 oster IssueNextReadRequest(RF_Raid_t *raidPtr, RF_RowCol_t col)
1176 1.4 oster {
1177 1.57 oster RF_PerDiskReconCtrl_t *ctrl = &raidPtr->reconControl->perDiskInfo[col];
1178 1.4 oster RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
1179 1.4 oster RF_ReconBuffer_t *rbuf = ctrl->rbuf;
1180 1.4 oster RF_ReconUnitCount_t RUsPerPU = layoutPtr->SUsPerPU / layoutPtr->SUsPerRU;
1181 1.4 oster RF_SectorCount_t sectorsPerRU = layoutPtr->sectorsPerStripeUnit * layoutPtr->SUsPerRU;
1182 1.4 oster int do_new_check = 0, retcode = 0, status;
1183 1.4 oster
1184 1.4 oster /* if we are currently the slowest disk, mark that we have to do a new
1185 1.4 oster * check */
1186 1.57 oster if (ctrl->headSepCounter <= raidPtr->reconControl->minHeadSepCounter)
1187 1.4 oster do_new_check = 1;
1188 1.4 oster
1189 1.4 oster while (1) {
1190 1.4 oster
1191 1.4 oster ctrl->ru_count++;
1192 1.4 oster if (ctrl->ru_count < RUsPerPU) {
1193 1.4 oster ctrl->diskOffset += sectorsPerRU;
1194 1.4 oster rbuf->failedDiskSectorOffset += sectorsPerRU;
1195 1.4 oster } else {
1196 1.4 oster ctrl->curPSID++;
1197 1.4 oster ctrl->ru_count = 0;
1198 1.4 oster /* code left over from when head-sep was based on
1199 1.4 oster * parity stripe id */
1200 1.118 oster if (ctrl->curPSID > raidPtr->reconControl->lastPSID) {
1201 1.57 oster CheckForNewMinHeadSep(raidPtr, ++(ctrl->headSepCounter));
1202 1.82 oster return (RF_RECON_DONE_READS); /* finito! */
1203 1.4 oster }
1204 1.4 oster /* find the disk offsets of the start of the parity
1205 1.4 oster * stripe on both the current disk and the failed
1206 1.4 oster * disk. skip this entire parity stripe if either disk
1207 1.4 oster * does not appear in the indicated PS */
1208 1.57 oster status = ComputePSDiskOffsets(raidPtr, ctrl->curPSID, col, &ctrl->diskOffset, &rbuf->failedDiskSectorOffset,
1209 1.57 oster &rbuf->spCol, &rbuf->spOffset);
1210 1.4 oster if (status) {
1211 1.4 oster ctrl->ru_count = RUsPerPU - 1;
1212 1.4 oster continue;
1213 1.4 oster }
1214 1.4 oster }
1215 1.4 oster rbuf->which_ru = ctrl->ru_count;
1216 1.4 oster
1217 1.4 oster /* skip this RU if it's already been reconstructed */
1218 1.57 oster if (rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, rbuf->failedDiskSectorOffset)) {
1219 1.4 oster Dprintf2("Skipping psid %ld ru %d: already reconstructed\n", ctrl->curPSID, ctrl->ru_count);
1220 1.4 oster continue;
1221 1.4 oster }
1222 1.4 oster break;
1223 1.4 oster }
1224 1.4 oster ctrl->headSepCounter++;
1225 1.4 oster if (do_new_check)
1226 1.57 oster CheckForNewMinHeadSep(raidPtr, ctrl->headSepCounter); /* update min if needed */
1227 1.4 oster
1228 1.4 oster
1229 1.4 oster /* at this point, we have definitely decided what to do, and we have
1230 1.4 oster * only to see if we can actually do it now */
1231 1.4 oster rbuf->parityStripeID = ctrl->curPSID;
1232 1.4 oster rbuf->which_ru = ctrl->ru_count;
1233 1.67 oster #if RF_ACC_TRACE > 0
1234 1.122 christos memset(&raidPtr->recon_tracerecs[col], 0,
1235 1.29 thorpej sizeof(raidPtr->recon_tracerecs[col]));
1236 1.4 oster raidPtr->recon_tracerecs[col].reconacc = 1;
1237 1.4 oster RF_ETIMER_START(raidPtr->recon_tracerecs[col].recon_timer);
1238 1.67 oster #endif
1239 1.57 oster retcode = TryToRead(raidPtr, col);
1240 1.4 oster return (retcode);
1241 1.1 oster }
1242 1.13 oster
1243 1.13 oster /*
1244 1.13 oster * tries to issue the next read on the indicated disk. We may be
1245 1.13 oster * blocked by (a) the heads being too far apart, or (b) recon on the
1246 1.13 oster * indicated RU being blocked due to a write by a user thread. In
1247 1.13 oster * this case, we issue a head-sep or blockage wait request, which will
1248 1.13 oster * cause this same routine to be invoked again later when the blockage
1249 1.87 perry * has cleared.
1250 1.1 oster */
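/*
 * Event-flow sketch (inferred from this file): a blocked attempt leaves a
 * callback on either the head-sep list (CheckHeadSeparation) or the parity
 * stripe's blockWaitList (CheckForcedOrBlockedReconstruction); when
 * CheckForNewMinHeadSep() or rf_UnblockRecon() later fires that callback it
 * posts RF_REVENT_HEADSEPCLEAR or RF_REVENT_BLOCKCLEAR, which is what
 * eventually gets this routine invoked again, per the comment above.
 */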
1251 1.13 oster
1252 1.87 perry static int
1253 1.60 oster TryToRead(RF_Raid_t *raidPtr, RF_RowCol_t col)
1254 1.4 oster {
1255 1.57 oster RF_PerDiskReconCtrl_t *ctrl = &raidPtr->reconControl->perDiskInfo[col];
1256 1.4 oster RF_SectorCount_t sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU;
1257 1.4 oster RF_StripeNum_t psid = ctrl->curPSID;
1258 1.4 oster RF_ReconUnitNum_t which_ru = ctrl->ru_count;
1259 1.4 oster RF_DiskQueueData_t *req;
1260 1.68 oster int status;
1261 1.68 oster RF_ReconParityStripeStatus_t *pssPtr, *newpssPtr;
1262 1.4 oster
1263 1.4 oster /* if the current disk is too far ahead of the others, issue a
1264 1.4 oster * head-separation wait and return */
1265 1.57 oster if (CheckHeadSeparation(raidPtr, ctrl, col, ctrl->headSepCounter, which_ru))
1266 1.4 oster return (0);
1267 1.68 oster
1268 1.68 oster /* allocate a new PSS in case we need it */
1269 1.68 oster newpssPtr = rf_AllocPSStatus(raidPtr);
1270 1.68 oster
1271 1.57 oster RF_LOCK_PSS_MUTEX(raidPtr, psid);
1272 1.68 oster pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_CREATE, newpssPtr);
1273 1.68 oster
1274 1.68 oster if (pssPtr != newpssPtr) {
1275 1.68 oster rf_FreePSStatus(raidPtr, newpssPtr);
1276 1.68 oster }
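	/* note: the spare PSS was allocated before taking the PSS mutex and
	 * handed to rf_LookupRUStatus() with RF_PSS_CREATE; if an entry for
	 * this (psid, which_ru) already existed, the spare is returned here,
	 * otherwise the spare became the new entry.  (Presumably this keeps
	 * the allocation itself outside the per-stripe lock.) */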
1277 1.4 oster
1278 1.4 oster /* if recon is blocked on the indicated parity stripe, issue a
1279 1.4 oster 	 * block-wait request and return. this call also marks the indicated RU
1280 1.4 oster 	 * in the stripe as under reconstruction if it is not blocked. */
1281 1.57 oster status = CheckForcedOrBlockedReconstruction(raidPtr, pssPtr, ctrl, col, psid, which_ru);
1282 1.4 oster if (status == RF_PSS_RECON_BLOCKED) {
1283 1.4 oster Dprintf2("RECON: Stalling psid %ld ru %d: recon blocked\n", psid, which_ru);
1284 1.4 oster goto out;
1285 1.4 oster } else
1286 1.4 oster if (status == RF_PSS_FORCED_ON_WRITE) {
1287 1.57 oster rf_CauseReconEvent(raidPtr, col, NULL, RF_REVENT_SKIP);
1288 1.4 oster goto out;
1289 1.4 oster }
1290 1.4 oster /* make one last check to be sure that the indicated RU didn't get
1291 1.4 oster * reconstructed while we were waiting for something else to happen.
1292 1.4 oster * This is unfortunate in that it causes us to make this check twice
1293 1.4 oster * in the normal case. Might want to make some attempt to re-work
1294 1.4 oster * this so that we only do this check if we've definitely blocked on
1295 1.4 oster * one of the above checks. When this condition is detected, we may
1296 1.4 oster * have just created a bogus status entry, which we need to delete. */
1297 1.57 oster if (rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, ctrl->rbuf->failedDiskSectorOffset)) {
1298 1.4 oster Dprintf2("RECON: Skipping psid %ld ru %d: prior recon after stall\n", psid, which_ru);
1299 1.68 oster if (pssPtr == newpssPtr)
1300 1.57 oster rf_PSStatusDelete(raidPtr, raidPtr->reconControl->pssTable, pssPtr);
1301 1.57 oster rf_CauseReconEvent(raidPtr, col, NULL, RF_REVENT_SKIP);
1302 1.4 oster goto out;
1303 1.4 oster }
1304 1.4 oster /* found something to read. issue the I/O */
1305 1.57 oster Dprintf4("RECON: Read for psid %ld on col %d offset %ld buf %lx\n",
1306 1.57 oster psid, col, ctrl->diskOffset, ctrl->rbuf->buffer);
1307 1.67 oster #if RF_ACC_TRACE > 0
1308 1.4 oster RF_ETIMER_STOP(raidPtr->recon_tracerecs[col].recon_timer);
1309 1.4 oster RF_ETIMER_EVAL(raidPtr->recon_tracerecs[col].recon_timer);
1310 1.4 oster raidPtr->recon_tracerecs[col].specific.recon.recon_start_to_fetch_us =
1311 1.4 oster RF_ETIMER_VAL_US(raidPtr->recon_tracerecs[col].recon_timer);
1312 1.4 oster RF_ETIMER_START(raidPtr->recon_tracerecs[col].recon_timer);
1313 1.67 oster #endif
1314 1.4 oster /* should be ok to use a NULL proc pointer here, all the bufs we use
1315 1.4 oster * should be in kernel space */
1316 1.4 oster req = rf_CreateDiskQueueData(RF_IO_TYPE_READ, ctrl->diskOffset, sectorsPerRU, ctrl->rbuf->buffer, psid, which_ru,
1317 1.86 oster ReconReadDoneProc, (void *) ctrl,
1318 1.67 oster #if RF_ACC_TRACE > 0
1319 1.67 oster &raidPtr->recon_tracerecs[col],
1320 1.67 oster #else
1321 1.67 oster NULL,
1322 1.67 oster #endif
1323 1.127 oster (void *) raidPtr, 0, NULL);
1324 1.4 oster
1325 1.4 oster ctrl->rbuf->arg = (void *) req;
1326 1.57 oster rf_DiskIOEnqueue(&raidPtr->Queues[col], req, RF_IO_RECON_PRIORITY);
1327 1.4 oster pssPtr->issued[col] = 1;
1328 1.1 oster
1329 1.1 oster out:
1330 1.57 oster RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
1331 1.4 oster return (0);
1332 1.1 oster }
1333 1.1 oster
1334 1.1 oster
1335 1.13 oster /*
1336 1.13 oster * given a parity stripe ID, we want to find out whether both the
1337 1.13 oster * current disk and the failed disk exist in that parity stripe. If
1338 1.13 oster * not, we want to skip this whole PS. If so, we want to find the
1339 1.13 oster * disk offset of the start of the PS on both the current disk and the
1340 1.13 oster * failed disk.
1341 1.13 oster *
1342 1.13 oster * this works by getting a list of disks comprising the indicated
1343 1.13 oster * parity stripe, and searching the list for the current and failed
1344 1.13 oster * disks. Once we've decided they both exist in the parity stripe, we
1345 1.13 oster * need to decide whether each is data or parity, so that we'll know
1346 1.13 oster * which mapping function to call to get the corresponding disk
1347 1.1 oster * offsets.
1348 1.1 oster *
1349 1.13 oster * this is kind of unpleasant, but doing it this way allows the
1350 1.13 oster * reconstruction code to use parity stripe IDs rather than physical
1351 1.13 oster  * disk addresses to march through the failed disk, which greatly
1352 1.13 oster * simplifies a lot of code, as well as eliminating the need for a
1353 1.13 oster * reverse-mapping function. I also think it will execute faster,
1354 1.13 oster * since the calls to the mapping module are kept to a minimum.
1355 1.1 oster *
1356 1.13 oster * ASSUMES THAT THE STRIPE IDENTIFIER IDENTIFIES THE DISKS COMPRISING
1357 1.87 perry * THE STRIPE IN THE CORRECT ORDER
1358 1.87 perry *
1359 1.60 oster * raidPtr - raid descriptor
1360 1.60 oster * psid - parity stripe identifier
1361 1.60 oster * col - column of disk to find the offsets for
1362 1.60 oster * spCol - out: col of spare unit for failed unit
1363 1.60 oster * spOffset - out: offset into disk containing spare unit
1364 1.60 oster *
1365 1.60 oster */
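/*
 * Illustrative example (hypothetical layout, not taken from any real map):
 * suppose IdentifyStripe returns diskids = { 4, 1, 6, 3 } with parity on
 * column 1, and col == 6 while fcol == 3.  Then i == 2 and j == 3; the
 * parity column sits at k == 1, ahead of both, so i_offset becomes 1 and
 * j_offset becomes 2, and MapSector() is asked for the offsets of the 2nd
 * and 3rd data units of the stripe on the current and failed disks.
 */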
1366 1.13 oster
1367 1.13 oster
1368 1.87 perry static int
1369 1.60 oster ComputePSDiskOffsets(RF_Raid_t *raidPtr, RF_StripeNum_t psid,
1370 1.60 oster RF_RowCol_t col, RF_SectorNum_t *outDiskOffset,
1371 1.60 oster RF_SectorNum_t *outFailedDiskSectorOffset,
1372 1.60 oster RF_RowCol_t *spCol, RF_SectorNum_t *spOffset)
1373 1.60 oster {
1374 1.4 oster RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
1375 1.57 oster RF_RowCol_t fcol = raidPtr->reconControl->fcol;
1376 1.4 oster RF_RaidAddr_t sosRaidAddress; /* start-of-stripe */
1377 1.4 oster RF_RowCol_t *diskids;
1378 1.4 oster u_int i, j, k, i_offset, j_offset;
1379 1.57 oster RF_RowCol_t pcol;
1380 1.57 oster int testcol;
1381 1.4 oster RF_SectorNum_t poffset;
1382 1.4 oster char i_is_parity = 0, j_is_parity = 0;
1383 1.4 oster RF_RowCol_t stripeWidth = layoutPtr->numDataCol + layoutPtr->numParityCol;
1384 1.4 oster
1385 1.4 oster /* get a listing of the disks comprising that stripe */
1386 1.4 oster sosRaidAddress = rf_ParityStripeIDToRaidAddress(layoutPtr, psid);
1387 1.57 oster (layoutPtr->map->IdentifyStripe) (raidPtr, sosRaidAddress, &diskids);
1388 1.4 oster RF_ASSERT(diskids);
1389 1.4 oster
1390 1.4 oster /* reject this entire parity stripe if it does not contain the
1391 1.4 oster * indicated disk or it does not contain the failed disk */
1392 1.57 oster
1393 1.4 oster for (i = 0; i < stripeWidth; i++) {
1394 1.4 oster if (col == diskids[i])
1395 1.4 oster break;
1396 1.4 oster }
1397 1.4 oster if (i == stripeWidth)
1398 1.4 oster goto skipit;
1399 1.4 oster for (j = 0; j < stripeWidth; j++) {
1400 1.4 oster if (fcol == diskids[j])
1401 1.4 oster break;
1402 1.4 oster }
1403 1.4 oster if (j == stripeWidth) {
1404 1.4 oster goto skipit;
1405 1.4 oster }
1406 1.4 oster /* find out which disk the parity is on */
1407 1.57 oster (layoutPtr->map->MapParity) (raidPtr, sosRaidAddress, &pcol, &poffset, RF_DONT_REMAP);
1408 1.4 oster
1409 1.4 oster /* find out if either the current RU or the failed RU is parity */
1410 1.4 oster /* also, if the parity occurs in this stripe prior to the data and/or
1411 1.4 oster * failed col, we need to decrement i and/or j */
1412 1.4 oster for (k = 0; k < stripeWidth; k++)
1413 1.4 oster if (diskids[k] == pcol)
1414 1.4 oster break;
1415 1.4 oster RF_ASSERT(k < stripeWidth);
1416 1.4 oster i_offset = i;
1417 1.4 oster j_offset = j;
1418 1.4 oster if (k < i)
1419 1.4 oster i_offset--;
1420 1.4 oster else
1421 1.4 oster if (k == i) {
1422 1.4 oster i_is_parity = 1;
1423 1.4 oster i_offset = 0;
1424 1.4 oster } /* set offsets to zero to disable multiply
1425 1.4 oster * below */
1426 1.4 oster if (k < j)
1427 1.4 oster j_offset--;
1428 1.4 oster else
1429 1.4 oster if (k == j) {
1430 1.4 oster j_is_parity = 1;
1431 1.4 oster j_offset = 0;
1432 1.4 oster }
1433 1.4 oster /* at this point, [ij]_is_parity tells us whether the [current,failed]
1434 1.4 oster * disk is parity at the start of this RU, and, if data, "[ij]_offset"
1435 1.4 oster * tells us how far into the stripe the [current,failed] disk is. */
1436 1.4 oster
1437 1.4 oster /* call the mapping routine to get the offset into the current disk,
1438 1.4 oster * repeat for failed disk. */
1439 1.4 oster if (i_is_parity)
1440 1.57 oster layoutPtr->map->MapParity(raidPtr, sosRaidAddress + i_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outDiskOffset, RF_DONT_REMAP);
1441 1.4 oster else
1442 1.57 oster layoutPtr->map->MapSector(raidPtr, sosRaidAddress + i_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outDiskOffset, RF_DONT_REMAP);
1443 1.4 oster
1444 1.57 oster RF_ASSERT(col == testcol);
1445 1.4 oster
1446 1.4 oster if (j_is_parity)
1447 1.57 oster layoutPtr->map->MapParity(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outFailedDiskSectorOffset, RF_DONT_REMAP);
1448 1.4 oster else
1449 1.57 oster layoutPtr->map->MapSector(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outFailedDiskSectorOffset, RF_DONT_REMAP);
1450 1.57 oster RF_ASSERT(fcol == testcol);
1451 1.4 oster
1452 1.4 oster /* now locate the spare unit for the failed unit */
1453 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
1454 1.4 oster if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
1455 1.4 oster if (j_is_parity)
1456 1.57 oster layoutPtr->map->MapParity(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, spCol, spOffset, RF_REMAP);
1457 1.4 oster else
1458 1.57 oster layoutPtr->map->MapSector(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, spCol, spOffset, RF_REMAP);
1459 1.4 oster } else {
1460 1.72 oster #endif
1461 1.57 oster *spCol = raidPtr->reconControl->spareCol;
1462 1.4 oster *spOffset = *outFailedDiskSectorOffset;
1463 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
1464 1.4 oster }
1465 1.72 oster #endif
1466 1.4 oster return (0);
1467 1.1 oster
1468 1.1 oster skipit:
1469 1.99 oster Dprintf2("RECON: Skipping psid %ld: nothing needed from c%d\n",
1470 1.57 oster psid, col);
1471 1.4 oster return (1);
1472 1.1 oster }
1473 1.4 oster /* this is called when a buffer has become ready to write to the replacement disk */
1474 1.87 perry static int
1475 1.60 oster IssueNextWriteRequest(RF_Raid_t *raidPtr)
1476 1.4 oster {
1477 1.4 oster RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
1478 1.4 oster RF_SectorCount_t sectorsPerRU = layoutPtr->sectorsPerStripeUnit * layoutPtr->SUsPerRU;
1479 1.67 oster #if RF_ACC_TRACE > 0
1480 1.57 oster RF_RowCol_t fcol = raidPtr->reconControl->fcol;
1481 1.67 oster #endif
1482 1.4 oster RF_ReconBuffer_t *rbuf;
1483 1.4 oster RF_DiskQueueData_t *req;
1484 1.4 oster
1485 1.57 oster rbuf = rf_GetFullReconBuffer(raidPtr->reconControl);
1486 1.4 oster RF_ASSERT(rbuf); /* there must be one available, or we wouldn't
1487 1.4 oster * have gotten the event that sent us here */
1488 1.4 oster RF_ASSERT(rbuf->pssPtr);
1489 1.4 oster
1490 1.4 oster rbuf->pssPtr->writeRbuf = rbuf;
1491 1.4 oster rbuf->pssPtr = NULL;
1492 1.4 oster
1493 1.57 oster Dprintf6("RECON: New write (c %d offs %d) for psid %ld ru %d (failed disk offset %ld) buf %lx\n",
1494 1.57 oster rbuf->spCol, rbuf->spOffset, rbuf->parityStripeID,
1495 1.4 oster rbuf->which_ru, rbuf->failedDiskSectorOffset, rbuf->buffer);
1496 1.4 oster Dprintf6("RECON: new write psid %ld %02x %02x %02x %02x %02x\n",
1497 1.4 oster rbuf->parityStripeID, rbuf->buffer[0] & 0xff, rbuf->buffer[1] & 0xff,
1498 1.4 oster rbuf->buffer[2] & 0xff, rbuf->buffer[3] & 0xff, rbuf->buffer[4] & 0xff);
1499 1.4 oster
1500 1.4 oster /* should be ok to use a NULL b_proc here b/c all addrs should be in
1501 1.4 oster * kernel space */
1502 1.4 oster req = rf_CreateDiskQueueData(RF_IO_TYPE_WRITE, rbuf->spOffset,
1503 1.4 oster sectorsPerRU, rbuf->buffer,
1504 1.4 oster rbuf->parityStripeID, rbuf->which_ru,
1505 1.86 oster ReconWriteDoneProc, (void *) rbuf,
1506 1.67 oster #if RF_ACC_TRACE > 0
1507 1.4 oster &raidPtr->recon_tracerecs[fcol],
1508 1.67 oster #else
1509 1.87 perry NULL,
1510 1.67 oster #endif
1511 1.127 oster (void *) raidPtr, 0, NULL);
1512 1.1 oster
1513 1.4 oster rbuf->arg = (void *) req;
1514 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1515 1.82 oster raidPtr->reconControl->pending_writes++;
1516 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1517 1.57 oster rf_DiskIOEnqueue(&raidPtr->Queues[rbuf->spCol], req, RF_IO_RECON_PRIORITY);
1518 1.1 oster
1519 1.4 oster return (0);
1520 1.1 oster }
1521 1.13 oster
1522 1.13 oster /*
1523 1.13 oster * this gets called upon the completion of a reconstruction read
1524 1.13 oster  * operation.  the arg is a pointer to the per-disk reconstruction
1525 1.13 oster * control structure for the process that just finished a read.
1526 1.1 oster *
1527 1.13 oster * called at interrupt context in the kernel, so don't do anything
1528 1.87 perry * illegal here.
1529 1.1 oster */
1530 1.123 christos static void
1531 1.60 oster ReconReadDoneProc(void *arg, int status)
1532 1.4 oster {
1533 1.4 oster RF_PerDiskReconCtrl_t *ctrl = (RF_PerDiskReconCtrl_t *) arg;
1534 1.82 oster RF_Raid_t *raidPtr;
1535 1.82 oster
1536 1.82 oster /* Detect that reconCtrl is no longer valid, and if that
1537 1.82 oster is the case, bail without calling rf_CauseReconEvent().
1538 1.82 oster There won't be anyone listening for this event anyway */
1539 1.82 oster
1540 1.82 oster if (ctrl->reconCtrl == NULL)
1541 1.123 christos return;
1542 1.82 oster
1543 1.82 oster raidPtr = ctrl->reconCtrl->reconDesc->raidPtr;
1544 1.4 oster
1545 1.4 oster if (status) {
1546 1.102 oster printf("raid%d: Recon read failed: %d\n", raidPtr->raidid, status);
1547 1.70 oster rf_CauseReconEvent(raidPtr, ctrl->col, NULL, RF_REVENT_READ_FAILED);
1548 1.123 christos return;
1549 1.4 oster }
1550 1.67 oster #if RF_ACC_TRACE > 0
1551 1.4 oster RF_ETIMER_STOP(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1552 1.4 oster RF_ETIMER_EVAL(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1553 1.4 oster raidPtr->recon_tracerecs[ctrl->col].specific.recon.recon_fetch_to_return_us =
1554 1.4 oster RF_ETIMER_VAL_US(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1555 1.4 oster RF_ETIMER_START(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1556 1.67 oster #endif
1557 1.57 oster rf_CauseReconEvent(raidPtr, ctrl->col, NULL, RF_REVENT_READDONE);
1558 1.123 christos return;
1559 1.1 oster }
1560 1.1 oster /* this gets called upon the completion of a reconstruction write operation.
1561 1.1 oster * the arg is a pointer to the rbuf that was just written
1562 1.1 oster *
1563 1.1 oster * called at interrupt context in the kernel, so don't do anything illegal here.
1564 1.1 oster */
1565 1.123 christos static void
1566 1.60 oster ReconWriteDoneProc(void *arg, int status)
1567 1.4 oster {
1568 1.4 oster RF_ReconBuffer_t *rbuf = (RF_ReconBuffer_t *) arg;
1569 1.4 oster
1570 1.82 oster /* Detect that reconControl is no longer valid, and if that
1571 1.82 oster is the case, bail without calling rf_CauseReconEvent().
1572 1.82 oster There won't be anyone listening for this event anyway */
1573 1.82 oster
1574 1.82 oster if (rbuf->raidPtr->reconControl == NULL)
1575 1.123 christos return;
1576 1.82 oster
1577 1.4 oster Dprintf2("Reconstruction completed on psid %ld ru %d\n", rbuf->parityStripeID, rbuf->which_ru);
1578 1.4 oster if (status) {
1579 1.119 yamt printf("raid%d: Recon write failed (status %d(0x%x))!\n", rbuf->raidPtr->raidid,status,status);
1580 1.71 oster rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, arg, RF_REVENT_WRITE_FAILED);
1581 1.123 christos return;
1582 1.4 oster }
1583 1.71 oster rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, arg, RF_REVENT_WRITEDONE);
1584 1.1 oster }
1585 1.1 oster
1586 1.1 oster
1587 1.87 perry /*
1588 1.13 oster * computes a new minimum head sep, and wakes up anyone who needs to
1589 1.87 perry * be woken as a result
1590 1.13 oster */
1591 1.87 perry static void
1592 1.95 christos CheckForNewMinHeadSep(RF_Raid_t *raidPtr, RF_HeadSepLimit_t hsCtr)
1593 1.4 oster {
1594 1.57 oster RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl;
1595 1.4 oster RF_HeadSepLimit_t new_min;
1596 1.4 oster RF_RowCol_t i;
1597 1.123 christos RF_CallbackValueDesc_t *p;
1598 1.4 oster RF_ASSERT(hsCtr >= reconCtrlPtr->minHeadSepCounter); /* from the definition
1599 1.4 oster * of a minimum */
1600 1.4 oster
1601 1.4 oster
1602 1.112 mrg rf_lock_mutex2(reconCtrlPtr->rb_mutex);
1603 1.76 oster while(reconCtrlPtr->rb_lock) {
1604 1.112 mrg rf_wait_cond2(reconCtrlPtr->rb_cv, reconCtrlPtr->rb_mutex);
1605 1.76 oster }
1606 1.76 oster reconCtrlPtr->rb_lock = 1;
1607 1.112 mrg rf_unlock_mutex2(reconCtrlPtr->rb_mutex);
1608 1.4 oster
1609 1.4 oster new_min = ~(1L << (8 * sizeof(long) - 1)); /* 0x7FFF....FFF */
1610 1.4 oster for (i = 0; i < raidPtr->numCol; i++)
1611 1.4 oster if (i != reconCtrlPtr->fcol) {
1612 1.4 oster if (reconCtrlPtr->perDiskInfo[i].headSepCounter < new_min)
1613 1.4 oster new_min = reconCtrlPtr->perDiskInfo[i].headSepCounter;
1614 1.4 oster }
1615 1.4 oster /* set the new minimum and wake up anyone who can now run again */
1616 1.4 oster if (new_min != reconCtrlPtr->minHeadSepCounter) {
1617 1.4 oster reconCtrlPtr->minHeadSepCounter = new_min;
1618 1.4 oster Dprintf1("RECON: new min head pos counter val is %ld\n", new_min);
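		/* the head-sep callback list is kept sorted by ascending
		 * wake-up value (see the sorted insert in CheckHeadSeparation()
		 * below), so we can stop at the first waiter whose threshold
		 * is still above the new minimum. */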
1619 1.4 oster while (reconCtrlPtr->headSepCBList) {
1620 1.123 christos if (reconCtrlPtr->headSepCBList->v > new_min)
1621 1.4 oster break;
1622 1.4 oster p = reconCtrlPtr->headSepCBList;
1623 1.4 oster reconCtrlPtr->headSepCBList = p->next;
1624 1.4 oster p->next = NULL;
1625 1.57 oster rf_CauseReconEvent(raidPtr, p->col, NULL, RF_REVENT_HEADSEPCLEAR);
1626 1.126 oster rf_FreeCallbackValueDesc(raidPtr, p);
1627 1.4 oster }
1628 1.1 oster
1629 1.4 oster }
1630 1.112 mrg rf_lock_mutex2(reconCtrlPtr->rb_mutex);
1631 1.76 oster reconCtrlPtr->rb_lock = 0;
1632 1.112 mrg rf_broadcast_cond2(reconCtrlPtr->rb_cv);
1633 1.112 mrg rf_unlock_mutex2(reconCtrlPtr->rb_mutex);
1634 1.1 oster }
1635 1.13 oster
1636 1.13 oster /*
1637 1.13 oster * checks to see that the maximum head separation will not be violated
1638 1.13 oster * if we initiate a reconstruction I/O on the indicated disk.
1639 1.13 oster * Limiting the maximum head separation between two disks eliminates
1640 1.13 oster * the nasty buffer-stall conditions that occur when one disk races
1641 1.13 oster * ahead of the others and consumes all of the floating recon buffers.
1642 1.13 oster * This code is complex and unpleasant but it's necessary to avoid
1643 1.13 oster * some very nasty, albeit fairly rare, reconstruction behavior.
1644 1.1 oster *
1645 1.13 oster * returns non-zero if and only if we have to stop working on the
1646 1.87 perry * indicated disk due to a head-separation delay.
1647 1.1 oster */
1648 1.87 perry static int
1649 1.60 oster CheckHeadSeparation(RF_Raid_t *raidPtr, RF_PerDiskReconCtrl_t *ctrl,
1650 1.95 christos RF_RowCol_t col, RF_HeadSepLimit_t hsCtr,
1651 1.95 christos RF_ReconUnitNum_t which_ru)
1652 1.4 oster {
1653 1.57 oster RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl;
1654 1.123 christos RF_CallbackValueDesc_t *cb, *p, *pt;
1655 1.10 oster int retval = 0;
1656 1.4 oster
1657 1.4 oster /* if we're too far ahead of the slowest disk, stop working on this
1658 1.4 oster * disk until the slower ones catch up. We do this by scheduling a
1659 1.4 oster * wakeup callback for the time when the slowest disk has caught up.
1660 1.4 oster * We define "caught up" with 20% hysteresis, i.e. the head separation
1661 1.4 oster * must have fallen to at most 80% of the max allowable head
1662 1.4 oster * separation before we'll wake up.
1663 1.87 perry *
1664 1.4 oster */
1665 1.112 mrg rf_lock_mutex2(reconCtrlPtr->rb_mutex);
1666 1.76 oster while(reconCtrlPtr->rb_lock) {
1667 1.112 mrg rf_wait_cond2(reconCtrlPtr->rb_cv, reconCtrlPtr->rb_mutex);
1668 1.76 oster }
1669 1.76 oster reconCtrlPtr->rb_lock = 1;
1670 1.112 mrg rf_unlock_mutex2(reconCtrlPtr->rb_mutex);
1671 1.4 oster if ((raidPtr->headSepLimit >= 0) &&
1672 1.4 oster ((ctrl->headSepCounter - reconCtrlPtr->minHeadSepCounter) > raidPtr->headSepLimit)) {
1673 1.57 oster Dprintf5("raid%d: RECON: head sep stall: col %d hsCtr %ld minHSCtr %ld limit %ld\n",
1674 1.87 perry raidPtr->raidid, col, ctrl->headSepCounter,
1675 1.87 perry reconCtrlPtr->minHeadSepCounter,
1676 1.10 oster raidPtr->headSepLimit);
1677 1.126 oster cb = rf_AllocCallbackValueDesc(raidPtr);
1678 1.4 oster /* the minHeadSepCounter value we have to get to before we'll
1679 1.4 oster * wake up. build in 20% hysteresis. */
1680 1.123 christos cb->v = (ctrl->headSepCounter - raidPtr->headSepLimit + raidPtr->headSepLimit / 5);
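		/* illustrative numbers only: with headSepLimit == 100 and
		 * headSepCounter == 250, we wake once minHeadSepCounter
		 * reaches 250 - 100 + 20 == 170, i.e. once our lead has
		 * shrunk to 80 -- 80% of the allowed separation. */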
1681 1.4 oster cb->col = col;
1682 1.4 oster cb->next = NULL;
1683 1.4 oster
1684 1.4 oster /* insert this callback descriptor into the sorted list of
1685 1.4 oster * pending head-sep callbacks */
1686 1.4 oster p = reconCtrlPtr->headSepCBList;
1687 1.4 oster if (!p)
1688 1.4 oster reconCtrlPtr->headSepCBList = cb;
1689 1.4 oster else
1690 1.123 christos if (cb->v < p->v) {
1691 1.4 oster cb->next = reconCtrlPtr->headSepCBList;
1692 1.4 oster reconCtrlPtr->headSepCBList = cb;
1693 1.4 oster } else {
1694 1.123 christos for (pt = p, p = p->next; p && (p->v < cb->v); pt = p, p = p->next);
1695 1.4 oster cb->next = p;
1696 1.4 oster pt->next = cb;
1697 1.4 oster }
1698 1.4 oster retval = 1;
1699 1.1 oster #if RF_RECON_STATS > 0
1700 1.4 oster ctrl->reconCtrl->reconDesc->hsStallCount++;
1701 1.4 oster #endif /* RF_RECON_STATS > 0 */
1702 1.4 oster }
1703 1.112 mrg rf_lock_mutex2(reconCtrlPtr->rb_mutex);
1704 1.76 oster reconCtrlPtr->rb_lock = 0;
1705 1.112 mrg rf_broadcast_cond2(reconCtrlPtr->rb_cv);
1706 1.112 mrg rf_unlock_mutex2(reconCtrlPtr->rb_mutex);
1707 1.1 oster
1708 1.4 oster return (retval);
1709 1.1 oster }
1710 1.87 perry /*
1711 1.13 oster * checks to see if reconstruction has been either forced or blocked
1712 1.13 oster * by a user operation. if forced, we skip this RU entirely. else if
1713 1.13 oster * blocked, put ourselves on the wait list. else return 0.
1714 1.1 oster *
1715 1.87 perry * ASSUMES THE PSS MUTEX IS LOCKED UPON ENTRY
1716 1.1 oster */
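/*
 * Return-value handling, as seen in TryToRead() above: RF_PSS_RECON_BLOCKED
 * makes the caller stall until rf_UnblockRecon() drains the blockWaitList,
 * RF_PSS_FORCED_ON_WRITE makes it post RF_REVENT_SKIP for this RU, and a
 * zero return lets the read proceed with the RU marked RF_PSS_UNDER_RECON.
 */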
1717 1.87 perry static int
1718 1.95 christos CheckForcedOrBlockedReconstruction(RF_Raid_t *raidPtr,
1719 1.60 oster RF_ReconParityStripeStatus_t *pssPtr,
1720 1.95 christos RF_PerDiskReconCtrl_t *ctrl,
1721 1.94 christos RF_RowCol_t col,
1722 1.95 christos RF_StripeNum_t psid,
1723 1.95 christos RF_ReconUnitNum_t which_ru)
1724 1.4 oster {
1725 1.123 christos RF_CallbackValueDesc_t *cb;
1726 1.4 oster int retcode = 0;
1727 1.4 oster
1728 1.4 oster if ((pssPtr->flags & RF_PSS_FORCED_ON_READ) || (pssPtr->flags & RF_PSS_FORCED_ON_WRITE))
1729 1.4 oster retcode = RF_PSS_FORCED_ON_WRITE;
1730 1.4 oster else
1731 1.4 oster if (pssPtr->flags & RF_PSS_RECON_BLOCKED) {
1732 1.57 oster Dprintf3("RECON: col %d blocked at psid %ld ru %d\n", col, psid, which_ru);
1733 1.126 oster cb = rf_AllocCallbackValueDesc(raidPtr); /* append ourselves to
1734 1.126 oster * the blockage-wait
1735 1.126 oster * list */
1736 1.4 oster cb->col = col;
1737 1.4 oster cb->next = pssPtr->blockWaitList;
1738 1.4 oster pssPtr->blockWaitList = cb;
1739 1.4 oster retcode = RF_PSS_RECON_BLOCKED;
1740 1.4 oster }
1741 1.4 oster if (!retcode)
1742 1.4 oster pssPtr->flags |= RF_PSS_UNDER_RECON; /* mark this RU as under
1743 1.4 oster * reconstruction */
1744 1.4 oster
1745 1.4 oster return (retcode);
1746 1.1 oster }
1747 1.13 oster /*
1748 1.13 oster * if reconstruction is currently ongoing for the indicated stripeID,
1749 1.13 oster * reconstruction is forced to completion and we return non-zero to
1750 1.13 oster * indicate that the caller must wait. If not, then reconstruction is
1751 1.13 oster * blocked on the indicated stripe and the routine returns zero. If
1752 1.13 oster * and only if we return non-zero, we'll cause the cbFunc to get
1753 1.87 perry * invoked with the cbArg when the reconstruction has completed.
1754 1.1 oster */
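/*
 * Usage sketch (inferred from this file, not a definitive contract): a
 * caller elsewhere in RAIDframe that needs to touch this stripe passes a
 * callback here.  A zero return means no recon was in progress and the
 * stripe has merely been blocked via the RF_PSS_RECON_BLOCKED flag passed
 * to rf_LookupRUStatus() below, so the caller may proceed; a non-zero
 * return means recon was forced to normal priority and cbFunc will fire
 * once it completes.  Judging by the comments in rf_UnblockRecon() below,
 * callers are expected to call rf_UnblockRecon() afterwards in either case.
 */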
1755 1.87 perry int
1756 1.60 oster rf_ForceOrBlockRecon(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap,
1757 1.123 christos void (*cbFunc)(void *), void *cbArg)
1758 1.4 oster {
1759 1.4 oster RF_StripeNum_t stripeID = asmap->stripeID; /* the stripe ID we're
1760 1.4 oster * forcing recon on */
1761 1.4 oster RF_SectorCount_t sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU; /* num sects in one RU */
1762 1.68 oster RF_ReconParityStripeStatus_t *pssPtr, *newpssPtr; /* a pointer to the parity
1763 1.4 oster * stripe status structure */
1764 1.4 oster RF_StripeNum_t psid; /* parity stripe id */
1765 1.4 oster RF_SectorNum_t offset, fd_offset; /* disk offset, failed-disk
1766 1.4 oster * offset */
1767 1.4 oster RF_RowCol_t *diskids;
1768 1.4 oster RF_ReconUnitNum_t which_ru; /* RU within parity stripe */
1769 1.4 oster RF_RowCol_t fcol, diskno, i;
1770 1.4 oster RF_ReconBuffer_t *new_rbuf; /* ptr to newly allocated rbufs */
1771 1.4 oster RF_DiskQueueData_t *req;/* disk I/O req to be enqueued */
1772 1.123 christos RF_CallbackFuncDesc_t *cb;
1773 1.68 oster int nPromoted;
1774 1.4 oster
1775 1.4 oster psid = rf_MapStripeIDToParityStripeID(&raidPtr->Layout, stripeID, &which_ru);
1776 1.4 oster
1777 1.68 oster /* allocate a new PSS in case we need it */
1778 1.68 oster newpssPtr = rf_AllocPSStatus(raidPtr);
1779 1.68 oster
1780 1.57 oster RF_LOCK_PSS_MUTEX(raidPtr, psid);
1781 1.4 oster
1782 1.68 oster pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_CREATE | RF_PSS_RECON_BLOCKED, newpssPtr);
1783 1.68 oster
1784 1.68 oster if (pssPtr != newpssPtr) {
1785 1.68 oster rf_FreePSStatus(raidPtr, newpssPtr);
1786 1.68 oster }
1787 1.4 oster
1788 1.4 oster /* if recon is not ongoing on this PS, just return */
1789 1.4 oster if (!(pssPtr->flags & RF_PSS_UNDER_RECON)) {
1790 1.57 oster RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
1791 1.4 oster return (0);
1792 1.4 oster }
1793 1.4 oster /* otherwise, we have to wait for reconstruction to complete on this
1794 1.4 oster * RU. */
1795 1.4 oster /* In order to avoid waiting for a potentially large number of
1796 1.4 oster * low-priority accesses to complete, we force a normal-priority (i.e.
1797 1.4 oster * not low-priority) reconstruction on this RU. */
1798 1.4 oster if (!(pssPtr->flags & RF_PSS_FORCED_ON_WRITE) && !(pssPtr->flags & RF_PSS_FORCED_ON_READ)) {
1799 1.4 oster DDprintf1("Forcing recon on psid %ld\n", psid);
1800 1.4 oster pssPtr->flags |= RF_PSS_FORCED_ON_WRITE; /* mark this RU as under
1801 1.4 oster * forced recon */
1802 1.4 oster pssPtr->flags &= ~RF_PSS_RECON_BLOCKED; /* clear the blockage
1803 1.4 oster * that we just set */
1804 1.57 oster fcol = raidPtr->reconControl->fcol;
1805 1.4 oster
1806 1.4 oster /* get a listing of the disks comprising the indicated stripe */
1807 1.57 oster (raidPtr->Layout.map->IdentifyStripe) (raidPtr, asmap->raidAddress, &diskids);
1808 1.4 oster
1809 1.4 oster /* For previously issued reads, elevate them to normal
1810 1.4 oster * priority. If the I/O has already completed, it won't be
1811 1.4 oster * found in the queue, and hence this will be a no-op. For
1812 1.4 oster * unissued reads, allocate buffers and issue new reads. The
1813 1.4 oster * fact that we've set the FORCED bit means that the regular
1814 1.4 oster * recon procs will not re-issue these reqs */
1815 1.4 oster for (i = 0; i < raidPtr->Layout.numDataCol + raidPtr->Layout.numParityCol; i++)
1816 1.4 oster if ((diskno = diskids[i]) != fcol) {
1817 1.4 oster if (pssPtr->issued[diskno]) {
1818 1.57 oster nPromoted = rf_DiskIOPromote(&raidPtr->Queues[diskno], psid, which_ru);
1819 1.4 oster if (rf_reconDebug && nPromoted)
1820 1.57 oster printf("raid%d: promoted read from col %d\n", raidPtr->raidid, diskno);
1821 1.4 oster } else {
1822 1.57 oster new_rbuf = rf_MakeReconBuffer(raidPtr, diskno, RF_RBUF_TYPE_FORCED); /* create new buf */
1823 1.57 oster ComputePSDiskOffsets(raidPtr, psid, diskno, &offset, &fd_offset,
1824 1.57 oster &new_rbuf->spCol, &new_rbuf->spOffset); /* find offsets & spare
1825 1.4 oster * location */
1826 1.4 oster new_rbuf->parityStripeID = psid; /* fill in the buffer */
1827 1.4 oster new_rbuf->which_ru = which_ru;
1828 1.4 oster new_rbuf->failedDiskSectorOffset = fd_offset;
1829 1.4 oster new_rbuf->priority = RF_IO_NORMAL_PRIORITY;
1830 1.4 oster
1831 1.4 oster /* use NULL b_proc b/c all addrs
1832 1.4 oster * should be in kernel space */
1833 1.4 oster req = rf_CreateDiskQueueData(RF_IO_TYPE_READ, offset + which_ru * sectorsPerRU, sectorsPerRU, new_rbuf->buffer,
1834 1.123 christos psid, which_ru,
1835 1.123 christos ForceReconReadDoneProc,
1836 1.123 christos (void *) new_rbuf,
1837 1.127 oster NULL, (void *) raidPtr, 0, NULL);
1838 1.4 oster
1839 1.4 oster new_rbuf->arg = req;
1840 1.57 oster rf_DiskIOEnqueue(&raidPtr->Queues[diskno], req, RF_IO_NORMAL_PRIORITY); /* enqueue the I/O */
1841 1.57 oster Dprintf2("raid%d: Issued new read req on col %d\n", raidPtr->raidid, diskno);
1842 1.4 oster }
1843 1.4 oster }
1844 1.4 oster /* if the write is sitting in the disk queue, elevate its
1845 1.4 oster * priority */
1846 1.57 oster if (rf_DiskIOPromote(&raidPtr->Queues[fcol], psid, which_ru))
1847 1.102 oster if (rf_reconDebug)
1848 1.102 oster printf("raid%d: promoted write to col %d\n",
1849 1.102 oster raidPtr->raidid, fcol);
1850 1.4 oster }
1851 1.4 oster /* install a callback descriptor to be invoked when recon completes on
1852 1.4 oster * this parity stripe. */
1853 1.126 oster cb = rf_AllocCallbackFuncDesc(raidPtr);
1854 1.123 christos cb->callbackFunc = cbFunc;
1855 1.123 christos cb->callbackArg = cbArg;
1856 1.4 oster cb->next = pssPtr->procWaitList;
1857 1.4 oster pssPtr->procWaitList = cb;
1858 1.87 perry DDprintf2("raid%d: Waiting for forced recon on psid %ld\n",
1859 1.10 oster raidPtr->raidid, psid);
1860 1.4 oster
1861 1.57 oster RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
1862 1.4 oster return (1);
1863 1.1 oster }
1864 1.1 oster /* called upon the completion of a forced reconstruction read.
1865 1.1 oster  * all we do is schedule the FORCEDREADDONE event.
1866 1.1 oster * called at interrupt context in the kernel, so don't do anything illegal here.
1867 1.1 oster */
1868 1.87 perry static void
1869 1.60 oster ForceReconReadDoneProc(void *arg, int status)
1870 1.4 oster {
1871 1.4 oster RF_ReconBuffer_t *rbuf = arg;
1872 1.4 oster
1873 1.82 oster /* Detect that reconControl is no longer valid, and if that
1874 1.82 oster is the case, bail without calling rf_CauseReconEvent().
1875 1.82 oster There won't be anyone listening for this event anyway */
1876 1.82 oster
1877 1.82 oster if (rbuf->raidPtr->reconControl == NULL)
1878 1.82 oster return;
1879 1.82 oster
1880 1.4 oster if (status) {
1881 1.70 oster printf("raid%d: Forced recon read failed!\n", rbuf->raidPtr->raidid);
1882 1.71 oster rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, (void *) rbuf, RF_REVENT_FORCEDREAD_FAILED);
1883 1.79 oster return;
1884 1.4 oster }
1885 1.71 oster rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, (void *) rbuf, RF_REVENT_FORCEDREADDONE);
1886 1.1 oster }
1887 1.1 oster /* releases a block on the reconstruction of the indicated stripe */
1888 1.87 perry int
1889 1.60 oster rf_UnblockRecon(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap)
1890 1.4 oster {
1891 1.4 oster RF_StripeNum_t stripeID = asmap->stripeID;
1892 1.4 oster RF_ReconParityStripeStatus_t *pssPtr;
1893 1.4 oster RF_ReconUnitNum_t which_ru;
1894 1.4 oster RF_StripeNum_t psid;
1895 1.123 christos RF_CallbackValueDesc_t *cb;
1896 1.4 oster
1897 1.4 oster psid = rf_MapStripeIDToParityStripeID(&raidPtr->Layout, stripeID, &which_ru);
1898 1.57 oster RF_LOCK_PSS_MUTEX(raidPtr, psid);
1899 1.68 oster pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_NONE, NULL);
1900 1.4 oster
1901 1.4 oster /* When recon is forced, the pss desc can get deleted before we get
1902 1.4 oster * back to unblock recon. But, this can _only_ happen when recon is
1903 1.4 oster * forced. It would be good to put some kind of sanity check here, but
1904 1.4 oster * how to decide if recon was just forced or not? */
1905 1.4 oster if (!pssPtr) {
1906 1.4 oster /* printf("Warning: no pss descriptor upon unblock on psid %ld
1907 1.4 oster * RU %d\n",psid,which_ru); */
1908 1.43 oster #if (RF_DEBUG_RECON > 0) || (RF_DEBUG_PSS > 0)
1909 1.4 oster if (rf_reconDebug || rf_pssDebug)
1910 1.4 oster printf("Warning: no pss descriptor upon unblock on psid %ld RU %d\n", (long) psid, which_ru);
1911 1.43 oster #endif
1912 1.4 oster goto out;
1913 1.4 oster }
1914 1.4 oster pssPtr->blockCount--;
1915 1.10 oster Dprintf3("raid%d: unblocking recon on psid %ld: blockcount is %d\n",
1916 1.10 oster raidPtr->raidid, psid, pssPtr->blockCount);
1917 1.4 oster if (pssPtr->blockCount == 0) { /* if recon blockage has been released */
1918 1.4 oster
1919 1.4 oster /* unblock recon before calling CauseReconEvent in case
1920 1.4 oster * CauseReconEvent causes us to try to issue a new read before
1921 1.4 oster * returning here. */
1922 1.4 oster pssPtr->flags &= ~RF_PSS_RECON_BLOCKED;
1923 1.4 oster
1924 1.4 oster
1925 1.87 perry while (pssPtr->blockWaitList) {
1926 1.13 oster /* spin through the block-wait list and
1927 1.13 oster release all the waiters */
1928 1.4 oster cb = pssPtr->blockWaitList;
1929 1.4 oster pssPtr->blockWaitList = cb->next;
1930 1.4 oster cb->next = NULL;
1931 1.57 oster rf_CauseReconEvent(raidPtr, cb->col, NULL, RF_REVENT_BLOCKCLEAR);
1932 1.126 oster rf_FreeCallbackValueDesc(raidPtr, cb);
1933 1.4 oster }
1934 1.13 oster if (!(pssPtr->flags & RF_PSS_UNDER_RECON)) {
1935 1.13 oster /* if no recon was requested while recon was blocked */
1936 1.57 oster rf_PSStatusDelete(raidPtr, raidPtr->reconControl->pssTable, pssPtr);
1937 1.4 oster }
1938 1.4 oster }
1939 1.1 oster out:
1940 1.57 oster RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
1941 1.4 oster return (0);
1942 1.1 oster }
1943 1.104 oster
1944 1.104 oster void
1945 1.104 oster rf_WakeupHeadSepCBWaiters(RF_Raid_t *raidPtr)
1946 1.104 oster {
1947 1.123 christos RF_CallbackValueDesc_t *p;
1948 1.104 oster
1949 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1950 1.104 oster while(raidPtr->reconControl->rb_lock) {
1951 1.112 mrg rf_wait_cond2(raidPtr->reconControl->rb_cv,
1952 1.112 mrg raidPtr->reconControl->rb_mutex);
1953 1.104 oster }
1954 1.104 oster
1955 1.104 oster raidPtr->reconControl->rb_lock = 1;
1956 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1957 1.104 oster
1958 1.104 oster while (raidPtr->reconControl->headSepCBList) {
1959 1.104 oster p = raidPtr->reconControl->headSepCBList;
1960 1.104 oster raidPtr->reconControl->headSepCBList = p->next;
1961 1.104 oster p->next = NULL;
1962 1.104 oster rf_CauseReconEvent(raidPtr, p->col, NULL, RF_REVENT_HEADSEPCLEAR);
1963 1.126 oster rf_FreeCallbackValueDesc(raidPtr, p);
1964 1.104 oster }
1965 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1966 1.104 oster raidPtr->reconControl->rb_lock = 0;
1967 1.112 mrg rf_broadcast_cond2(raidPtr->reconControl->rb_cv);
1968 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1969 1.104 oster
1970 1.104 oster }
1971 1.104 oster
1972