/*	$NetBSD: rf_reconstruct.c,v 1.125.4.1 2021/08/01 22:42:31 thorpej Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
28 1.1 oster
/************************************************************
 *
 * rf_reconstruct.c -- code to perform on-line reconstruction
 *
 ************************************************************/
34 1.31 lukem
35 1.31 lukem #include <sys/cdefs.h>
36 1.125.4.1 thorpej __KERNEL_RCSID(0, "$NetBSD: rf_reconstruct.c,v 1.125.4.1 2021/08/01 22:42:31 thorpej Exp $");
37 1.1 oster
38 1.97 ad #include <sys/param.h>
39 1.1 oster #include <sys/time.h>
40 1.1 oster #include <sys/buf.h>
41 1.1 oster #include <sys/errno.h>
42 1.5 oster #include <sys/systm.h>
43 1.5 oster #include <sys/proc.h>
44 1.5 oster #include <sys/ioctl.h>
45 1.5 oster #include <sys/fcntl.h>
46 1.5 oster #include <sys/vnode.h>
47 1.110 dholland #include <sys/namei.h> /* for pathbuf */
48 1.30 oster #include <dev/raidframe/raidframevar.h>
49 1.5 oster
50 1.120 hannken #include <miscfs/specfs/specdev.h> /* for v_rdev */
51 1.120 hannken
52 1.1 oster #include "rf_raid.h"
53 1.1 oster #include "rf_reconutil.h"
54 1.1 oster #include "rf_revent.h"
55 1.1 oster #include "rf_reconbuffer.h"
56 1.1 oster #include "rf_acctrace.h"
57 1.1 oster #include "rf_etimer.h"
58 1.1 oster #include "rf_dag.h"
59 1.1 oster #include "rf_desc.h"
60 1.36 oster #include "rf_debugprint.h"
61 1.1 oster #include "rf_general.h"
62 1.1 oster #include "rf_driver.h"
63 1.1 oster #include "rf_utils.h"
64 1.1 oster #include "rf_shutdown.h"
65 1.1 oster
66 1.1 oster #include "rf_kintf.h"
67 1.1 oster
68 1.1 oster /* setting these to -1 causes them to be set to their default values if not set by debug options */
69 1.1 oster
70 1.41 oster #if RF_DEBUG_RECON
71 1.1 oster #define Dprintf(s) if (rf_reconDebug) rf_debug_printf(s,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
72 1.1 oster #define Dprintf1(s,a) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL)
73 1.1 oster #define Dprintf2(s,a,b) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL)
74 1.1 oster #define Dprintf3(s,a,b,c) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),NULL,NULL,NULL,NULL,NULL)
75 1.1 oster #define Dprintf4(s,a,b,c,d) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),NULL,NULL,NULL,NULL)
76 1.1 oster #define Dprintf5(s,a,b,c,d,e) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),NULL,NULL,NULL)
77 1.1 oster #define Dprintf6(s,a,b,c,d,e,f) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),(void *)((unsigned long)f),NULL,NULL)
78 1.1 oster #define Dprintf7(s,a,b,c,d,e,f,g) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),(void *)((unsigned long)f),(void *)((unsigned long)g),NULL)
79 1.1 oster
80 1.1 oster #define DDprintf1(s,a) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL)
81 1.1 oster #define DDprintf2(s,a,b) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL)
82 1.33 oster
83 1.41 oster #else /* RF_DEBUG_RECON */
84 1.33 oster
85 1.33 oster #define Dprintf(s) {}
86 1.33 oster #define Dprintf1(s,a) {}
87 1.33 oster #define Dprintf2(s,a,b) {}
88 1.33 oster #define Dprintf3(s,a,b,c) {}
89 1.33 oster #define Dprintf4(s,a,b,c,d) {}
90 1.33 oster #define Dprintf5(s,a,b,c,d,e) {}
91 1.33 oster #define Dprintf6(s,a,b,c,d,e,f) {}
92 1.33 oster #define Dprintf7(s,a,b,c,d,e,f,g) {}
93 1.33 oster
94 1.33 oster #define DDprintf1(s,a) {}
95 1.33 oster #define DDprintf2(s,a,b) {}
96 1.33 oster
97 1.41 oster #endif /* RF_DEBUG_RECON */
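/*
 * The Dprintf* macros above take a format string plus up to seven
 * scalar arguments and compile away to empty blocks when RF_DEBUG_RECON
 * is 0.  For example, the read-completion path later in this file logs
 * with:
 *
 *	Dprintf2("RECON: READDONE EVENT: col %d psid %ld\n",
 *	    event->col, rbuf->parityStripeID);
 */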
98 1.33 oster
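/*
 * Status codes handed back to the main reconstruction loop by
 * ProcessReconEvent(); a return of 0 (not listed here) means the
 * indicated disk still has work outstanding.
 */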
99 1.82 oster #define RF_RECON_DONE_READS 1
100 1.82 oster #define RF_RECON_READ_ERROR 2
101 1.82 oster #define RF_RECON_WRITE_ERROR 3
102 1.82 oster #define RF_RECON_READ_STOPPED 4
103 1.104 oster #define RF_RECON_WRITE_DONE 5
104 1.82 oster
105 1.73 oster #define RF_MAX_FREE_RECONBUFFER 32
106 1.73 oster #define RF_MIN_FREE_RECONBUFFER 16
107 1.1 oster
108 1.69 oster static RF_RaidReconDesc_t *AllocRaidReconDesc(RF_Raid_t *, RF_RowCol_t,
109 1.69 oster RF_RaidDisk_t *, int, RF_RowCol_t);
110 1.69 oster static void FreeReconDesc(RF_RaidReconDesc_t *);
111 1.69 oster static int ProcessReconEvent(RF_Raid_t *, RF_ReconEvent_t *);
112 1.69 oster static int IssueNextReadRequest(RF_Raid_t *, RF_RowCol_t);
113 1.69 oster static int TryToRead(RF_Raid_t *, RF_RowCol_t);
114 1.87 perry static int ComputePSDiskOffsets(RF_Raid_t *, RF_StripeNum_t, RF_RowCol_t,
115 1.69 oster RF_SectorNum_t *, RF_SectorNum_t *, RF_RowCol_t *,
116 1.69 oster RF_SectorNum_t *);
117 1.69 oster static int IssueNextWriteRequest(RF_Raid_t *);
118 1.123 christos static void ReconReadDoneProc(void *, int);
119 1.123 christos static void ReconWriteDoneProc(void *, int);
120 1.69 oster static void CheckForNewMinHeadSep(RF_Raid_t *, RF_HeadSepLimit_t);
121 1.69 oster static int CheckHeadSeparation(RF_Raid_t *, RF_PerDiskReconCtrl_t *,
122 1.69 oster RF_RowCol_t, RF_HeadSepLimit_t,
123 1.69 oster RF_ReconUnitNum_t);
124 1.69 oster static int CheckForcedOrBlockedReconstruction(RF_Raid_t *,
125 1.69 oster RF_ReconParityStripeStatus_t *,
126 1.69 oster RF_PerDiskReconCtrl_t *,
127 1.69 oster RF_RowCol_t, RF_StripeNum_t,
128 1.69 oster RF_ReconUnitNum_t);
129 1.69 oster static void ForceReconReadDoneProc(void *, int);
130 1.1 oster static void rf_ShutdownReconstruction(void *);
131 1.1 oster
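/*
 * One entry in the linked list of callbacks registered to run when
 * reconstruction of this array completes: proc() is invoked with the
 * array pointer and the registered arg.
 */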
132 1.1 oster struct RF_ReconDoneProc_s {
133 1.4 oster void (*proc) (RF_Raid_t *, void *);
134 1.4 oster void *arg;
135 1.4 oster RF_ReconDoneProc_t *next;
136 1.1 oster };
137 1.1 oster
/**************************************************************************
 *
 * Sets up the parameters that will be used by the reconstruction process.
 * Currently there are none, except for those that the layout-specific
 * configuration routine (e.g. rf_ConfigureDeclustered) sets up.
 *
 * In the kernel, we fire off the recon thread.
 *
 **************************************************************************/
147 1.87 perry static void
148 1.125.4.1 thorpej rf_ShutdownReconstruction(void *arg)
149 1.4 oster {
150 1.125.4.1 thorpej RF_Raid_t *raidPtr;
151 1.125.4.1 thorpej
152 1.125.4.1 thorpej raidPtr = (RF_Raid_t *) arg;
153 1.125.4.1 thorpej
154 1.125.4.1 thorpej pool_destroy(&raidPtr->pools.reconbuffer);
155 1.4 oster }
156 1.4 oster
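/*
 * Create the pool of reconstruction buffers for this array (bounded by
 * RF_MIN_FREE_RECONBUFFER and RF_MAX_FREE_RECONBUFFER) and register
 * rf_ShutdownReconstruction() to destroy it when the array is
 * unconfigured.
 */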
157 1.87 perry int
158 1.125.4.1 thorpej rf_ConfigureReconstruction(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
159 1.125.4.1 thorpej RF_Config_t *cfgPtr)
160 1.4 oster {
161 1.4 oster
162 1.125.4.1 thorpej rf_pool_init(raidPtr, raidPtr->poolNames.reconbuffer, &raidPtr->pools.reconbuffer, sizeof(RF_ReconBuffer_t),
163 1.125.4.1 thorpej "reconbuf", RF_MIN_FREE_RECONBUFFER, RF_MAX_FREE_RECONBUFFER);
164 1.125.4.1 thorpej rf_ShutdownCreate(listp, rf_ShutdownReconstruction, raidPtr);
165 1.66 oster
166 1.4 oster return (0);
167 1.4 oster }
168 1.4 oster
169 1.4 oster static RF_RaidReconDesc_t *
170 1.87 perry AllocRaidReconDesc(RF_Raid_t *raidPtr, RF_RowCol_t col,
171 1.60 oster RF_RaidDisk_t *spareDiskPtr, int numDisksDone,
172 1.60 oster RF_RowCol_t scol)
173 1.1 oster {
174 1.1 oster
175 1.4 oster RF_RaidReconDesc_t *reconDesc;
176 1.4 oster
177 1.122 christos reconDesc = RF_Malloc(sizeof(*reconDesc));
178 1.4 oster reconDesc->raidPtr = raidPtr;
179 1.4 oster reconDesc->col = col;
180 1.4 oster reconDesc->spareDiskPtr = spareDiskPtr;
181 1.4 oster reconDesc->numDisksDone = numDisksDone;
182 1.4 oster reconDesc->scol = scol;
183 1.4 oster reconDesc->next = NULL;
184 1.1 oster
185 1.4 oster return (reconDesc);
186 1.1 oster }
187 1.1 oster
188 1.87 perry static void
189 1.60 oster FreeReconDesc(RF_RaidReconDesc_t *reconDesc)
190 1.1 oster {
191 1.1 oster #if RF_RECON_STATS > 0
192 1.50 oster printf("raid%d: %lu recon event waits, %lu recon delays\n",
193 1.50 oster reconDesc->raidPtr->raidid,
194 1.87 perry (long) reconDesc->numReconEventWaits,
195 1.50 oster (long) reconDesc->numReconExecDelays);
196 1.4 oster #endif /* RF_RECON_STATS > 0 */
197 1.50 oster printf("raid%d: %lu max exec ticks\n",
198 1.50 oster reconDesc->raidPtr->raidid,
199 1.50 oster (long) reconDesc->maxReconExecTicks);
200 1.80 oster RF_Free(reconDesc, sizeof(RF_RaidReconDesc_t));
201 1.1 oster }
202 1.1 oster
203 1.1 oster
/*****************************************************************************
 *
 * primary routine to reconstruct a failed disk.  This should be called from
 * within its own thread.  It won't return until reconstruction completes,
 * fails, or is aborted.
 *****************************************************************************/
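/*
 * Only one reconstruction may be in progress per array at a time; the
 * routine below serializes on raidPtr->reconInProgress, sleeping on
 * raidPtr->waitForReconCond until any reconstruction already under way
 * has finished.
 */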
210 1.87 perry int
211 1.60 oster rf_ReconstructFailedDisk(RF_Raid_t *raidPtr, RF_RowCol_t col)
212 1.4 oster {
213 1.52 jdolecek const RF_LayoutSW_t *lp;
214 1.4 oster int rc;
215 1.4 oster
216 1.4 oster lp = raidPtr->Layout.map;
217 1.4 oster if (lp->SubmitReconBuffer) {
218 1.4 oster /*
219 1.4 oster * The current infrastructure only supports reconstructing one
220 1.4 oster * disk at a time for each array.
221 1.4 oster */
222 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
223 1.4 oster while (raidPtr->reconInProgress) {
224 1.113 mrg rf_wait_cond2(raidPtr->waitForReconCond, raidPtr->mutex);
225 1.4 oster }
226 1.4 oster raidPtr->reconInProgress++;
227 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
228 1.57 oster rc = rf_ReconstructFailedDiskBasic(raidPtr, col);
229 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
230 1.6 oster raidPtr->reconInProgress--;
231 1.4 oster } else {
232 1.4 oster RF_ERRORMSG1("RECON: no way to reconstruct failed disk for arch %c\n",
233 1.4 oster lp->parityConfig);
234 1.4 oster rc = EIO;
235 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
236 1.4 oster }
237 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
238 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
239 1.4 oster return (rc);
240 1.4 oster }
241 1.4 oster
242 1.87 perry int
243 1.60 oster rf_ReconstructFailedDiskBasic(RF_Raid_t *raidPtr, RF_RowCol_t col)
244 1.4 oster {
245 1.108 jld RF_ComponentLabel_t *c_label;
246 1.4 oster RF_RaidDisk_t *spareDiskPtr = NULL;
247 1.4 oster RF_RaidReconDesc_t *reconDesc;
248 1.57 oster RF_RowCol_t scol;
249 1.4 oster int numDisksDone = 0, rc;
250 1.4 oster
251 1.4 oster /* first look for a spare drive onto which to reconstruct the data */
252 1.4 oster /* spare disk descriptors are stored in row 0. This may have to
253 1.4 oster * change eventually */
254 1.4 oster
255 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
256 1.57 oster RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed);
257 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
258 1.4 oster if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
259 1.57 oster if (raidPtr->status != rf_rs_degraded) {
260 1.57 oster RF_ERRORMSG1("Unable to reconstruct disk at col %d because status not degraded\n", col);
261 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
262 1.4 oster return (EINVAL);
263 1.4 oster }
264 1.4 oster scol = (-1);
265 1.4 oster } else {
266 1.72 oster #endif
267 1.4 oster for (scol = raidPtr->numCol; scol < raidPtr->numCol + raidPtr->numSpare; scol++) {
268 1.57 oster if (raidPtr->Disks[scol].status == rf_ds_spare) {
269 1.57 oster spareDiskPtr = &raidPtr->Disks[scol];
270 1.121 oster spareDiskPtr->status = rf_ds_rebuilding_spare;
271 1.4 oster break;
272 1.4 oster }
273 1.4 oster }
274 1.4 oster if (!spareDiskPtr) {
275 1.57 oster RF_ERRORMSG1("Unable to reconstruct disk at col %d because no spares are available\n", col);
276 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
277 1.4 oster return (ENOSPC);
278 1.4 oster }
279 1.57 oster printf("RECON: initiating reconstruction on col %d -> spare at col %d\n", col, scol);
280 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
281 1.4 oster }
282 1.72 oster #endif
283 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
284 1.1 oster
285 1.57 oster reconDesc = AllocRaidReconDesc((void *) raidPtr, col, spareDiskPtr, numDisksDone, scol);
286 1.4 oster raidPtr->reconDesc = (void *) reconDesc;
287 1.1 oster #if RF_RECON_STATS > 0
288 1.4 oster reconDesc->hsStallCount = 0;
289 1.4 oster reconDesc->numReconExecDelays = 0;
290 1.4 oster reconDesc->numReconEventWaits = 0;
291 1.4 oster #endif /* RF_RECON_STATS > 0 */
292 1.4 oster reconDesc->reconExecTimerRunning = 0;
293 1.4 oster reconDesc->reconExecTicks = 0;
294 1.4 oster reconDesc->maxReconExecTicks = 0;
295 1.4 oster rc = rf_ContinueReconstructFailedDisk(reconDesc);
296 1.5 oster
297 1.5 oster if (!rc) {
298 1.5 oster /* fix up the component label */
299 1.5 oster /* Don't actually need the read here.. */
300 1.108 jld c_label = raidget_component_label(raidPtr, scol);
301 1.108 jld
302 1.108 jld raid_init_component_label(raidPtr, c_label);
303 1.108 jld c_label->row = 0;
304 1.108 jld c_label->column = col;
305 1.108 jld c_label->clean = RF_RAID_DIRTY;
306 1.108 jld c_label->status = rf_ds_optimal;
307 1.111 enami rf_component_label_set_partitionsize(c_label,
308 1.111 enami raidPtr->Disks[scol].partitionSize);
309 1.15 oster
310 1.28 oster /* We've just done a rebuild based on all the other
311 1.28 oster disks, so at this point the parity is known to be
312 1.28 oster clean, even if it wasn't before. */
313 1.28 oster
314 1.28 oster /* XXX doesn't hold for RAID 6!!*/
315 1.28 oster
316 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
317 1.121 oster /* The failed disk has already been marked as rf_ds_spared
318 1.121 oster (or rf_ds_dist_spared) in
319 1.121 oster rf_ContinueReconstructFailedDisk()
320 1.121 oster so we just update the spare disk as being a used spare
321 1.121 oster */
322 1.121 oster
323 1.121 oster spareDiskPtr->status = rf_ds_used_spare;
324 1.28 oster raidPtr->parity_good = RF_RAID_CLEAN;
325 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
326 1.28 oster
327 1.15 oster /* XXXX MORE NEEDED HERE */
328 1.87 perry
329 1.108 jld raidflush_component_label(raidPtr, scol);
330 1.82 oster } else {
331 1.82 oster /* Reconstruct failed. */
332 1.82 oster
333 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
334 1.82 oster /* Failed disk goes back to "failed" status */
335 1.82 oster raidPtr->Disks[col].status = rf_ds_failed;
336 1.82 oster
337 1.82 oster /* Spare disk goes back to "spare" status. */
338 1.82 oster spareDiskPtr->status = rf_ds_spare;
339 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
340 1.84 oster
341 1.5 oster }
342 1.84 oster rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
343 1.5 oster return (rc);
344 1.5 oster }
345 1.5 oster
/*

   Allow reconstructing a disk in-place -- i.e. component /dev/sd2e goes AWOL,
   and you don't get a spare until the next Monday.  With this function
   (and hot-swappable drives) you can now put your new disk containing
   /dev/sd2e on the bus, scsictl it alive, and then use raidctl(8) to
   rebuild the data "on the spot".

*/
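/*
 * For example (device and unit names here are purely illustrative):
 *
 *	raidctl -R /dev/sd2e raid0
 *
 * fails the named component if it is not failed already and then calls
 * into this routine to rebuild onto the replacement disk in place; see
 * raidctl(8) for details.
 */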
355 1.5 oster
356 1.5 oster int
357 1.60 oster rf_ReconstructInPlace(RF_Raid_t *raidPtr, RF_RowCol_t col)
358 1.5 oster {
359 1.5 oster RF_RaidDisk_t *spareDiskPtr = NULL;
360 1.5 oster RF_RaidReconDesc_t *reconDesc;
361 1.52 jdolecek const RF_LayoutSW_t *lp;
362 1.108 jld RF_ComponentLabel_t *c_label;
363 1.5 oster int numDisksDone = 0, rc;
364 1.116 oster uint64_t numsec;
365 1.116 oster unsigned int secsize;
366 1.110 dholland struct pathbuf *pb;
367 1.5 oster struct vnode *vp;
368 1.5 oster int retcode;
369 1.21 oster int ac;
370 1.5 oster
371 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
372 1.5 oster lp = raidPtr->Layout.map;
373 1.61 oster if (!lp->SubmitReconBuffer) {
374 1.61 oster RF_ERRORMSG1("RECON: no way to reconstruct failed disk for arch %c\n",
375 1.61 oster lp->parityConfig);
376 1.61 oster /* wakeup anyone who might be waiting to do a reconstruct */
377 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
378 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
379 1.61 oster return(EIO);
380 1.62 oster }
381 1.5 oster
382 1.62 oster /*
383 1.62 oster * The current infrastructure only supports reconstructing one
384 1.62 oster * disk at a time for each array.
385 1.62 oster */
386 1.5 oster
387 1.62 oster if (raidPtr->Disks[col].status != rf_ds_failed) {
388 1.62 oster /* "It's gone..." */
389 1.62 oster raidPtr->numFailures++;
390 1.62 oster raidPtr->Disks[col].status = rf_ds_failed;
391 1.62 oster raidPtr->status = rf_rs_degraded;
392 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
393 1.62 oster rf_update_component_labels(raidPtr,
394 1.62 oster RF_NORMAL_COMPONENT_UPDATE);
395 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
396 1.62 oster }
397 1.87 perry
398 1.62 oster while (raidPtr->reconInProgress) {
399 1.113 mrg rf_wait_cond2(raidPtr->waitForReconCond, raidPtr->mutex);
400 1.62 oster }
401 1.87 perry
402 1.62 oster raidPtr->reconInProgress++;
403 1.87 perry
404 1.62 oster /* first look for a spare drive onto which to reconstruct the
405 1.62 oster data. spare disk descriptors are stored in row 0. This
406 1.62 oster may have to change eventually */
407 1.87 perry
408 1.62 oster /* Actually, we don't care if it's failed or not... On a RAID
409 1.62 oster set with correct parity, this function should be callable
410 1.99 oster on any component without ill effects. */
411 1.62 oster /* RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed); */
412 1.87 perry
413 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
414 1.62 oster if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
415 1.62 oster RF_ERRORMSG1("Unable to reconstruct to disk at col %d: operation not supported for RF_DISTRIBUTE_SPARE\n", col);
416 1.87 perry
417 1.62 oster raidPtr->reconInProgress--;
418 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
419 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
420 1.62 oster return (EINVAL);
421 1.87 perry }
422 1.72 oster #endif
423 1.87 perry
424 1.87 perry /* This device may have been opened successfully the
425 1.62 oster first time. Close it before trying to open it again.. */
426 1.87 perry
427 1.62 oster if (raidPtr->raid_cinfo[col].ci_vp != NULL) {
428 1.37 oster #if 0
429 1.62 oster printf("Closed the open device: %s\n",
430 1.62 oster raidPtr->Disks[col].devname);
431 1.37 oster #endif
432 1.62 oster vp = raidPtr->raid_cinfo[col].ci_vp;
433 1.62 oster ac = raidPtr->Disks[col].auto_configured;
434 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
435 1.62 oster rf_close_component(raidPtr, vp, ac);
436 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
437 1.62 oster raidPtr->raid_cinfo[col].ci_vp = NULL;
438 1.62 oster }
439 1.62 oster /* note that this disk was *not* auto_configured (any longer)*/
440 1.62 oster raidPtr->Disks[col].auto_configured = 0;
441 1.87 perry
442 1.37 oster #if 0
443 1.62 oster printf("About to (re-)open the device for rebuilding: %s\n",
444 1.62 oster raidPtr->Disks[col].devname);
445 1.37 oster #endif
446 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
447 1.110 dholland pb = pathbuf_create(raidPtr->Disks[col].devname);
448 1.110 dholland if (pb == NULL) {
449 1.110 dholland retcode = ENOMEM;
450 1.110 dholland } else {
451 1.124 mlelstv retcode = vn_bdev_openpath(pb, &vp, curlwp);
452 1.110 dholland pathbuf_destroy(pb);
453 1.110 dholland }
454 1.87 perry
455 1.62 oster if (retcode) {
456 1.124 mlelstv printf("raid%d: rebuilding: open device: %s failed: %d!\n",raidPtr->raidid,
457 1.62 oster raidPtr->Disks[col].devname, retcode);
458 1.87 perry
459 1.87 perry /* the component isn't responding properly...
460 1.62 oster must be still dead :-( */
461 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
462 1.62 oster raidPtr->reconInProgress--;
463 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
464 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
465 1.62 oster return(retcode);
466 1.63 oster }
467 1.63 oster
468 1.87 perry /* Ok, so we can at least do a lookup...
469 1.63 oster How about actually getting a vp for it? */
470 1.87 perry
471 1.116 oster retcode = getdisksize(vp, &numsec, &secsize);
472 1.63 oster if (retcode) {
473 1.115 yamt vn_close(vp, FREAD | FWRITE, kauth_cred_get());
474 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
475 1.63 oster raidPtr->reconInProgress--;
476 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
477 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
478 1.63 oster return(retcode);
479 1.62 oster }
480 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
481 1.116 oster raidPtr->Disks[col].blockSize = secsize;
482 1.116 oster raidPtr->Disks[col].numBlocks = numsec - rf_protectedSectors;
483 1.87 perry
484 1.63 oster raidPtr->raid_cinfo[col].ci_vp = vp;
485 1.120 hannken raidPtr->raid_cinfo[col].ci_dev = vp->v_rdev;
486 1.87 perry
487 1.120 hannken raidPtr->Disks[col].dev = vp->v_rdev;
488 1.87 perry
	/* We allow the user to specify that only a fraction of the
	   disks should be used.  This is just for debug: it speeds up
	   the parity scan. */
492 1.63 oster raidPtr->Disks[col].numBlocks = raidPtr->Disks[col].numBlocks *
493 1.63 oster rf_sizePercentage / 100;
494 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
495 1.87 perry
496 1.62 oster spareDiskPtr = &raidPtr->Disks[col];
497 1.121 oster spareDiskPtr->status = rf_ds_rebuilding_spare;
498 1.87 perry
499 1.87 perry printf("raid%d: initiating in-place reconstruction on column %d\n",
500 1.62 oster raidPtr->raidid, col);
501 1.5 oster
502 1.87 perry reconDesc = AllocRaidReconDesc((void *) raidPtr, col, spareDiskPtr,
503 1.62 oster numDisksDone, col);
504 1.62 oster raidPtr->reconDesc = (void *) reconDesc;
505 1.5 oster #if RF_RECON_STATS > 0
506 1.62 oster reconDesc->hsStallCount = 0;
507 1.62 oster reconDesc->numReconExecDelays = 0;
508 1.62 oster reconDesc->numReconEventWaits = 0;
509 1.5 oster #endif /* RF_RECON_STATS > 0 */
510 1.62 oster reconDesc->reconExecTimerRunning = 0;
511 1.62 oster reconDesc->reconExecTicks = 0;
512 1.62 oster reconDesc->maxReconExecTicks = 0;
513 1.62 oster rc = rf_ContinueReconstructFailedDisk(reconDesc);
514 1.87 perry
515 1.5 oster if (!rc) {
516 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
517 1.5 oster /* Need to set these here, as at this point it'll be claiming
518 1.5 oster that the disk is in rf_ds_spared! But we know better :-) */
519 1.87 perry
520 1.57 oster raidPtr->Disks[col].status = rf_ds_optimal;
521 1.57 oster raidPtr->status = rf_rs_optimal;
522 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
523 1.87 perry
524 1.5 oster /* fix up the component label */
525 1.5 oster /* Don't actually need the read here.. */
526 1.108 jld c_label = raidget_component_label(raidPtr, col);
527 1.16 oster
528 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
529 1.108 jld raid_init_component_label(raidPtr, c_label);
530 1.16 oster
531 1.108 jld c_label->row = 0;
532 1.108 jld c_label->column = col;
533 1.28 oster
534 1.28 oster /* We've just done a rebuild based on all the other
535 1.28 oster disks, so at this point the parity is known to be
536 1.28 oster clean, even if it wasn't before. */
537 1.28 oster
538 1.28 oster /* XXX doesn't hold for RAID 6!!*/
539 1.28 oster
540 1.28 oster raidPtr->parity_good = RF_RAID_CLEAN;
541 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
542 1.87 perry
543 1.108 jld raidflush_component_label(raidPtr, col);
544 1.82 oster } else {
545 1.82 oster /* Reconstruct-in-place failed. Disk goes back to
546 1.82 oster "failed" status, regardless of what it was before. */
547 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
548 1.82 oster raidPtr->Disks[col].status = rf_ds_failed;
549 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
550 1.82 oster }
551 1.5 oster
552 1.84 oster rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
553 1.84 oster
554 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
555 1.82 oster raidPtr->reconInProgress--;
556 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
557 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
558 1.87 perry
559 1.4 oster return (rc);
560 1.4 oster }
561 1.4 oster
562 1.4 oster
563 1.87 perry int
564 1.60 oster rf_ContinueReconstructFailedDisk(RF_RaidReconDesc_t *reconDesc)
565 1.4 oster {
566 1.4 oster RF_Raid_t *raidPtr = reconDesc->raidPtr;
567 1.4 oster RF_RowCol_t col = reconDesc->col;
568 1.4 oster RF_RowCol_t scol = reconDesc->scol;
569 1.4 oster RF_ReconMap_t *mapPtr;
570 1.46 oster RF_ReconCtrl_t *tmp_reconctrl;
571 1.4 oster RF_ReconEvent_t *event;
572 1.104 oster RF_StripeCount_t incPSID,lastPSID,num_writes,pending_writes,prev;
573 1.118 oster #if RF_INCLUDE_RAID5_RS > 0
574 1.118 oster RF_StripeCount_t startPSID,endPSID,aPSID,bPSID,offPSID;
575 1.118 oster #endif
576 1.104 oster RF_ReconUnitCount_t RUsPerPU;
577 1.4 oster struct timeval etime, elpsd;
578 1.4 oster unsigned long xor_s, xor_resid_us;
579 1.54 simonb int i, ds;
580 1.104 oster int status, done;
581 1.82 oster int recon_error, write_error;
582 1.4 oster
583 1.78 oster raidPtr->accumXorTimeUs = 0;
584 1.67 oster #if RF_ACC_TRACE > 0
585 1.78 oster /* create one trace record per physical disk */
586 1.122 christos raidPtr->recon_tracerecs =
587 1.122 christos RF_Malloc(raidPtr->numCol * sizeof(*raidPtr->recon_tracerecs));
588 1.67 oster #endif
589 1.87 perry
590 1.78 oster /* quiesce the array prior to starting recon. this is needed
591 1.78 oster * to assure no nasty interactions with pending user writes.
592 1.78 oster * We need to do this before we change the disk or row status. */
593 1.87 perry
594 1.78 oster Dprintf("RECON: begin request suspend\n");
595 1.78 oster rf_SuspendNewRequestsAndWait(raidPtr);
596 1.78 oster Dprintf("RECON: end request suspend\n");
597 1.87 perry
598 1.78 oster /* allocate our RF_ReconCTRL_t before we protect raidPtr->reconControl[row] */
599 1.78 oster tmp_reconctrl = rf_MakeReconControl(reconDesc, col, scol);
600 1.87 perry
601 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
602 1.87 perry
603 1.78 oster /* create the reconstruction control pointer and install it in
604 1.78 oster * the right slot */
605 1.78 oster raidPtr->reconControl = tmp_reconctrl;
606 1.78 oster mapPtr = raidPtr->reconControl->reconMap;
607 1.88 oster raidPtr->reconControl->numRUsTotal = mapPtr->totalRUs;
608 1.88 oster raidPtr->reconControl->numRUsComplete = 0;
609 1.78 oster raidPtr->status = rf_rs_reconstructing;
610 1.78 oster raidPtr->Disks[col].status = rf_ds_reconstructing;
611 1.78 oster raidPtr->Disks[col].spareCol = scol;
612 1.87 perry
613 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
614 1.87 perry
615 1.78 oster RF_GETTIME(raidPtr->reconControl->starttime);
616 1.87 perry
617 1.78 oster Dprintf("RECON: resume requests\n");
618 1.78 oster rf_ResumeNewRequests(raidPtr);
619 1.87 perry
620 1.4 oster
621 1.78 oster mapPtr = raidPtr->reconControl->reconMap;
622 1.104 oster
623 1.104 oster incPSID = RF_RECONMAP_SIZE;
624 1.125 oster lastPSID = raidPtr->Layout.numStripe / raidPtr->Layout.SUsPerPU - 1;
625 1.104 oster RUsPerPU = raidPtr->Layout.SUsPerPU / raidPtr->Layout.SUsPerRU;
626 1.82 oster recon_error = 0;
627 1.82 oster write_error = 0;
628 1.104 oster pending_writes = incPSID;
629 1.118 oster raidPtr->reconControl->lastPSID = incPSID - 1;
630 1.118 oster
631 1.118 oster /* bounds check raidPtr->reconControl->lastPSID and
632 1.118 oster pending_writes so that we don't attempt to wait for more IO
633 1.118 oster than can possibly happen */
634 1.118 oster
635 1.118 oster if (raidPtr->reconControl->lastPSID > lastPSID)
636 1.118 oster raidPtr->reconControl->lastPSID = lastPSID;
637 1.118 oster
638 1.118 oster if (pending_writes > lastPSID)
639 1.125 oster pending_writes = lastPSID + 1;
640 1.104 oster
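	/* An illustrative example of the batching below (the numbers are
	   made up): with RF_RECONMAP_SIZE == 128 and a layout whose parity
	   stripe IDs run 0..999 (so lastPSID == 999), the loop reconstructs
	   PSIDs 0-127, 128-255, ... in turn, waiting for each batch's
	   pending_writes to complete before advancing
	   reconControl->lastPSID by incPSID; the final, short batch
	   (896-999) is produced by the adjustment near the bottom of the
	   loop. */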
641 1.104 oster /* start the actual reconstruction */
642 1.82 oster
643 1.104 oster done = 0;
644 1.104 oster while (!done) {
645 1.104 oster
646 1.106 oster if (raidPtr->waitShutdown) {
647 1.106 oster /* someone is unconfiguring this array... bail on the reconstruct.. */
648 1.106 oster recon_error = 1;
649 1.106 oster break;
650 1.106 oster }
651 1.106 oster
652 1.104 oster num_writes = 0;
653 1.118 oster
654 1.118 oster #if RF_INCLUDE_RAID5_RS > 0
655 1.118 oster /* For RAID5 with Rotated Spares we will be 'short'
656 1.118 oster some number of writes since no writes will get
657 1.118 oster issued for stripes where the spare is on the
658 1.118 oster component being rebuilt. Account for the shortage
659 1.118 oster here so that we don't hang indefinitely below
660 1.118 oster waiting for writes to complete that were never
661 1.118 oster scheduled.
662 1.118 oster
663 1.118 oster XXX: Should be fixed for PARITY_DECLUSTERING and
664 1.118 oster others too!
665 1.118 oster
666 1.118 oster */
667 1.118 oster
668 1.118 oster if (raidPtr->Layout.numDataCol <
669 1.118 oster raidPtr->numCol - raidPtr->Layout.numParityCol) {
670 1.118 oster /* numDataCol is at least 2 less than numCol, so
671 1.118 oster should be RAID 5 with Rotated Spares */
672 1.118 oster
673 1.118 oster /* XXX need to update for RAID 6 */
674 1.118 oster
675 1.118 oster startPSID = raidPtr->reconControl->lastPSID - pending_writes + 1;
676 1.118 oster endPSID = raidPtr->reconControl->lastPSID;
677 1.118 oster
678 1.118 oster offPSID = raidPtr->numCol - col - 1;
679 1.118 oster
680 1.118 oster aPSID = startPSID - startPSID % raidPtr->numCol + offPSID;
681 1.118 oster if (aPSID < startPSID) {
682 1.118 oster aPSID += raidPtr->numCol;
683 1.118 oster }
684 1.118 oster
685 1.118 oster bPSID = endPSID - ((endPSID - offPSID) % raidPtr->numCol);
686 1.118 oster
687 1.118 oster if (aPSID < endPSID) {
688 1.118 oster num_writes = ((bPSID - aPSID) / raidPtr->numCol) + 1;
689 1.118 oster }
690 1.118 oster
691 1.118 oster if ((aPSID == endPSID) && (bPSID == endPSID)) {
692 1.118 oster num_writes++;
693 1.118 oster }
694 1.118 oster }
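		/* Worked example with illustrative numbers: numCol == 5 and
		   col == 1 give offPSID == 3; with pending_writes == 128 and
		   reconControl->lastPSID == 127 the window is PSIDs 0..127,
		   so aPSID == 3, bPSID == 123, and num_writes starts at 25 --
		   one for each of PSIDs 3, 8, ..., 123, whose spare unit
		   falls on the column being rebuilt and for which no write
		   will be issued. */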
695 1.118 oster #endif
696 1.104 oster
697 1.104 oster /* issue a read for each surviving disk */
698 1.104 oster
699 1.104 oster reconDesc->numDisksDone = 0;
700 1.104 oster for (i = 0; i < raidPtr->numCol; i++) {
701 1.104 oster if (i != col) {
702 1.104 oster /* find and issue the next I/O on the
703 1.104 oster * indicated disk */
704 1.104 oster if (IssueNextReadRequest(raidPtr, i)) {
705 1.104 oster Dprintf1("RECON: done issuing for c%d\n", i);
706 1.104 oster reconDesc->numDisksDone++;
707 1.104 oster }
708 1.104 oster }
709 1.104 oster }
710 1.87 perry
711 1.104 oster /* process reconstruction events until all disks report that
712 1.104 oster * they've completed all work */
713 1.82 oster
714 1.104 oster while (reconDesc->numDisksDone < raidPtr->numCol - 1) {
715 1.82 oster
716 1.104 oster event = rf_GetNextReconEvent(reconDesc);
717 1.104 oster status = ProcessReconEvent(raidPtr, event);
718 1.104 oster
719 1.104 oster /* the normal case is that a read completes, and all is well. */
720 1.104 oster if (status == RF_RECON_DONE_READS) {
721 1.104 oster reconDesc->numDisksDone++;
722 1.104 oster } else if ((status == RF_RECON_READ_ERROR) ||
723 1.104 oster (status == RF_RECON_WRITE_ERROR)) {
724 1.104 oster /* an error was encountered while reconstructing...
725 1.104 oster Pretend we've finished this disk.
726 1.104 oster */
727 1.104 oster recon_error = 1;
728 1.104 oster raidPtr->reconControl->error = 1;
729 1.104 oster
730 1.104 oster /* bump the numDisksDone count for reads,
731 1.104 oster but not for writes */
732 1.104 oster if (status == RF_RECON_READ_ERROR)
733 1.104 oster reconDesc->numDisksDone++;
734 1.104 oster
735 1.104 oster /* write errors are special -- when we are
736 1.104 oster done dealing with the reads that are
737 1.104 oster finished, we don't want to wait for any
738 1.104 oster writes */
739 1.107 oster if (status == RF_RECON_WRITE_ERROR) {
740 1.104 oster write_error = 1;
741 1.107 oster num_writes++;
742 1.107 oster }
743 1.104 oster
744 1.104 oster } else if (status == RF_RECON_READ_STOPPED) {
745 1.104 oster /* count this component as being "done" */
746 1.82 oster reconDesc->numDisksDone++;
747 1.104 oster } else if (status == RF_RECON_WRITE_DONE) {
748 1.104 oster num_writes++;
749 1.104 oster }
750 1.104 oster
751 1.104 oster if (recon_error) {
				/* make sure any stragglers are woken up so that
				   their threads will complete, and we can get out
				   of here with all IO processed */
755 1.104 oster
756 1.104 oster rf_WakeupHeadSepCBWaiters(raidPtr);
757 1.104 oster }
758 1.104 oster
759 1.104 oster raidPtr->reconControl->numRUsTotal =
760 1.104 oster mapPtr->totalRUs;
761 1.104 oster raidPtr->reconControl->numRUsComplete =
762 1.104 oster mapPtr->totalRUs -
763 1.104 oster rf_UnitsLeftToReconstruct(mapPtr);
764 1.82 oster
765 1.104 oster #if RF_DEBUG_RECON
766 1.104 oster raidPtr->reconControl->percentComplete =
767 1.104 oster (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
768 1.104 oster if (rf_prReconSched) {
769 1.104 oster rf_PrintReconSchedule(raidPtr->reconControl->reconMap, &(raidPtr->reconControl->starttime));
770 1.82 oster }
771 1.104 oster #endif
772 1.82 oster }
773 1.82 oster
774 1.118 oster /* reads done, wakeup any waiters, and then wait for writes */
775 1.82 oster
776 1.104 oster rf_WakeupHeadSepCBWaiters(raidPtr);
777 1.104 oster
778 1.104 oster while (!recon_error && (num_writes < pending_writes)) {
779 1.104 oster event = rf_GetNextReconEvent(reconDesc);
780 1.104 oster status = ProcessReconEvent(raidPtr, event);
781 1.104 oster
782 1.104 oster if (status == RF_RECON_WRITE_ERROR) {
783 1.107 oster num_writes++;
784 1.104 oster recon_error = 1;
785 1.104 oster raidPtr->reconControl->error = 1;
786 1.104 oster /* an error was encountered at the very end... bail */
787 1.104 oster } else if (status == RF_RECON_WRITE_DONE) {
788 1.104 oster num_writes++;
789 1.107 oster } /* else it's something else, and we don't care */
790 1.104 oster }
791 1.104 oster if (recon_error ||
792 1.104 oster (raidPtr->reconControl->lastPSID == lastPSID)) {
793 1.104 oster done = 1;
794 1.104 oster break;
795 1.104 oster }
796 1.104 oster
797 1.104 oster prev = raidPtr->reconControl->lastPSID;
798 1.104 oster raidPtr->reconControl->lastPSID += incPSID;
799 1.104 oster
800 1.104 oster if (raidPtr->reconControl->lastPSID > lastPSID) {
801 1.104 oster pending_writes = lastPSID - prev;
802 1.104 oster raidPtr->reconControl->lastPSID = lastPSID;
803 1.104 oster }
804 1.104 oster /* back down curPSID to get ready for the next round... */
805 1.104 oster for (i = 0; i < raidPtr->numCol; i++) {
806 1.104 oster if (i != col) {
807 1.104 oster raidPtr->reconControl->perDiskInfo[i].curPSID--;
808 1.104 oster raidPtr->reconControl->perDiskInfo[i].ru_count = RUsPerPU - 1;
809 1.104 oster }
810 1.78 oster }
811 1.78 oster }
812 1.87 perry
813 1.78 oster mapPtr = raidPtr->reconControl->reconMap;
814 1.78 oster if (rf_reconDebug) {
815 1.78 oster printf("RECON: all reads completed\n");
816 1.78 oster }
817 1.78 oster /* at this point all the reads have completed. We now wait
818 1.78 oster * for any pending writes to complete, and then we're done */
819 1.82 oster
820 1.82 oster while (!recon_error && rf_UnitsLeftToReconstruct(raidPtr->reconControl->reconMap) > 0) {
821 1.87 perry
822 1.78 oster event = rf_GetNextReconEvent(reconDesc);
823 1.83 oster status = ProcessReconEvent(raidPtr, event);
824 1.82 oster
825 1.82 oster if (status == RF_RECON_WRITE_ERROR) {
826 1.82 oster recon_error = 1;
827 1.87 perry raidPtr->reconControl->error = 1;
828 1.82 oster /* an error was encountered at the very end... bail */
829 1.82 oster } else {
830 1.82 oster #if RF_DEBUG_RECON
831 1.82 oster raidPtr->reconControl->percentComplete = 100 - (rf_UnitsLeftToReconstruct(mapPtr) * 100 / mapPtr->totalRUs);
832 1.82 oster if (rf_prReconSched) {
833 1.82 oster rf_PrintReconSchedule(raidPtr->reconControl->reconMap, &(raidPtr->reconControl->starttime));
834 1.82 oster }
835 1.82 oster #endif
836 1.82 oster }
837 1.82 oster }
838 1.82 oster
839 1.82 oster if (recon_error) {
840 1.82 oster /* we've encountered an error in reconstructing. */
841 1.82 oster printf("raid%d: reconstruction failed.\n", raidPtr->raidid);
842 1.87 perry
843 1.82 oster /* we start by blocking IO to the RAID set. */
844 1.82 oster rf_SuspendNewRequestsAndWait(raidPtr);
845 1.87 perry
846 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
847 1.82 oster /* mark set as being degraded, rather than
848 1.82 oster rf_rs_reconstructing as we were before the problem.
849 1.82 oster After this is done we can update status of the
850 1.82 oster component disks without worrying about someone
851 1.82 oster trying to read from a failed component.
852 1.82 oster */
853 1.82 oster raidPtr->status = rf_rs_degraded;
854 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
855 1.87 perry
856 1.82 oster /* resume IO */
857 1.87 perry rf_ResumeNewRequests(raidPtr);
858 1.87 perry
859 1.82 oster /* At this point there are two cases:
860 1.82 oster 1) If we've experienced a read error, then we've
861 1.82 oster already waited for all the reads we're going to get,
862 1.82 oster and we just need to wait for the writes.
863 1.82 oster
864 1.82 oster 2) If we've experienced a write error, we've also
865 1.82 oster already waited for all the reads to complete,
866 1.82 oster but there is little point in waiting for the writes --
867 1.82 oster when they do complete, they will just be ignored.
868 1.82 oster
869 1.87 perry So we just wait for writes to complete if we didn't have a
870 1.82 oster write error.
871 1.82 oster */
872 1.82 oster
873 1.82 oster if (!write_error) {
874 1.82 oster /* wait for writes to complete */
875 1.82 oster while (raidPtr->reconControl->pending_writes > 0) {
876 1.83 oster
877 1.82 oster event = rf_GetNextReconEvent(reconDesc);
878 1.82 oster status = ProcessReconEvent(raidPtr, event);
879 1.82 oster
880 1.82 oster if (status == RF_RECON_WRITE_ERROR) {
881 1.87 perry raidPtr->reconControl->error = 1;
882 1.82 oster /* an error was encountered at the very end... bail.
883 1.82 oster This will be very bad news for the user, since
884 1.82 oster at this point there will have been a read error
885 1.82 oster on one component, and a write error on another!
886 1.82 oster */
887 1.82 oster break;
888 1.82 oster }
889 1.82 oster }
890 1.4 oster }
891 1.82 oster
892 1.87 perry
893 1.82 oster /* cleanup */
894 1.82 oster
895 1.82 oster /* drain the event queue - after waiting for the writes above,
896 1.82 oster there shouldn't be much (if anything!) left in the queue. */
897 1.82 oster
898 1.82 oster rf_DrainReconEventQueue(reconDesc);
899 1.87 perry
900 1.82 oster /* XXX As much as we'd like to free the recon control structure
901 1.82 oster and the reconDesc, we have no way of knowing if/when those will
902 1.82 oster be touched by IO that has yet to occur. It is rather poor to be
903 1.82 oster basically causing a 'memory leak' here, but there doesn't seem to be
904 1.82 oster a cleaner alternative at this time. Perhaps when the reconstruct code
905 1.82 oster gets a makeover this problem will go away.
906 1.82 oster */
907 1.82 oster #if 0
908 1.82 oster rf_FreeReconControl(raidPtr);
909 1.82 oster #endif
910 1.82 oster
911 1.82 oster #if RF_ACC_TRACE > 0
912 1.82 oster RF_Free(raidPtr->recon_tracerecs, raidPtr->numCol * sizeof(RF_AccTraceEntry_t));
913 1.41 oster #endif
914 1.82 oster /* XXX see comment above */
915 1.82 oster #if 0
916 1.82 oster FreeReconDesc(reconDesc);
917 1.82 oster #endif
918 1.82 oster
919 1.82 oster return (1);
920 1.78 oster }
921 1.14 oster
922 1.78 oster /* Success: mark the dead disk as reconstructed. We quiesce
923 1.78 oster * the array here to assure no nasty interactions with pending
924 1.78 oster * user accesses when we free up the psstatus structure as
925 1.78 oster * part of FreeReconControl() */
926 1.87 perry
927 1.78 oster rf_SuspendNewRequestsAndWait(raidPtr);
928 1.87 perry
929 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
930 1.78 oster raidPtr->numFailures--;
931 1.78 oster ds = (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE);
932 1.78 oster raidPtr->Disks[col].status = (ds) ? rf_ds_dist_spared : rf_ds_spared;
933 1.78 oster raidPtr->status = (ds) ? rf_rs_reconfigured : rf_rs_optimal;
934 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
935 1.78 oster RF_GETTIME(etime);
936 1.78 oster RF_TIMEVAL_DIFF(&(raidPtr->reconControl->starttime), &etime, &elpsd);
937 1.87 perry
938 1.78 oster rf_ResumeNewRequests(raidPtr);
939 1.87 perry
940 1.87 perry printf("raid%d: Reconstruction of disk at col %d completed\n",
941 1.78 oster raidPtr->raidid, col);
942 1.78 oster xor_s = raidPtr->accumXorTimeUs / 1000000;
943 1.78 oster xor_resid_us = raidPtr->accumXorTimeUs % 1000000;
944 1.78 oster printf("raid%d: Recon time was %d.%06d seconds, accumulated XOR time was %ld us (%ld.%06ld)\n",
945 1.87 perry raidPtr->raidid,
946 1.87 perry (int) elpsd.tv_sec, (int) elpsd.tv_usec,
947 1.78 oster raidPtr->accumXorTimeUs, xor_s, xor_resid_us);
948 1.78 oster printf("raid%d: (start time %d sec %d usec, end time %d sec %d usec)\n",
949 1.78 oster raidPtr->raidid,
950 1.78 oster (int) raidPtr->reconControl->starttime.tv_sec,
951 1.78 oster (int) raidPtr->reconControl->starttime.tv_usec,
952 1.78 oster (int) etime.tv_sec, (int) etime.tv_usec);
953 1.1 oster #if RF_RECON_STATS > 0
954 1.78 oster printf("raid%d: Total head-sep stall count was %d\n",
955 1.78 oster raidPtr->raidid, (int) reconDesc->hsStallCount);
956 1.4 oster #endif /* RF_RECON_STATS > 0 */
957 1.78 oster rf_FreeReconControl(raidPtr);
958 1.67 oster #if RF_ACC_TRACE > 0
959 1.78 oster RF_Free(raidPtr->recon_tracerecs, raidPtr->numCol * sizeof(RF_AccTraceEntry_t));
960 1.67 oster #endif
961 1.78 oster FreeReconDesc(reconDesc);
962 1.87 perry
963 1.4 oster return (0);
964 1.82 oster
965 1.1 oster }
/*****************************************************************************
 * do the right thing upon each reconstruction event.
 *****************************************************************************/
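/*
 * Returns 0 when the indicated disk still has reconstruction work
 * outstanding, otherwise one of the RF_RECON_* status codes defined near
 * the top of this file.
 */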
969 1.87 perry static int
970 1.60 oster ProcessReconEvent(RF_Raid_t *raidPtr, RF_ReconEvent_t *event)
971 1.4 oster {
972 1.4 oster int retcode = 0, submitblocked;
973 1.4 oster RF_ReconBuffer_t *rbuf;
974 1.4 oster RF_SectorCount_t sectorsPerRU;
975 1.4 oster
976 1.82 oster retcode = RF_RECON_READ_STOPPED;
977 1.82 oster
978 1.4 oster Dprintf1("RECON: ProcessReconEvent type %d\n", event->type);
979 1.104 oster
980 1.4 oster switch (event->type) {
981 1.4 oster
982 1.4 oster /* a read I/O has completed */
983 1.4 oster case RF_REVENT_READDONE:
984 1.57 oster rbuf = raidPtr->reconControl->perDiskInfo[event->col].rbuf;
985 1.57 oster Dprintf2("RECON: READDONE EVENT: col %d psid %ld\n",
986 1.57 oster event->col, rbuf->parityStripeID);
987 1.4 oster Dprintf7("RECON: done read psid %ld buf %lx %02x %02x %02x %02x %02x\n",
988 1.4 oster rbuf->parityStripeID, rbuf->buffer, rbuf->buffer[0] & 0xff, rbuf->buffer[1] & 0xff,
989 1.4 oster rbuf->buffer[2] & 0xff, rbuf->buffer[3] & 0xff, rbuf->buffer[4] & 0xff);
990 1.4 oster rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
991 1.82 oster if (!raidPtr->reconControl->error) {
992 1.82 oster submitblocked = rf_SubmitReconBuffer(rbuf, 0, 0);
993 1.82 oster Dprintf1("RECON: submitblocked=%d\n", submitblocked);
994 1.82 oster if (!submitblocked)
995 1.82 oster retcode = IssueNextReadRequest(raidPtr, event->col);
996 1.89 oster else
997 1.89 oster retcode = 0;
998 1.82 oster }
999 1.4 oster break;
1000 1.4 oster
1001 1.4 oster /* a write I/O has completed */
1002 1.4 oster case RF_REVENT_WRITEDONE:
1003 1.40 oster #if RF_DEBUG_RECON
1004 1.4 oster if (rf_floatingRbufDebug) {
1005 1.4 oster rf_CheckFloatingRbufCount(raidPtr, 1);
1006 1.4 oster }
1007 1.38 oster #endif
1008 1.4 oster sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU;
1009 1.4 oster rbuf = (RF_ReconBuffer_t *) event->arg;
1010 1.4 oster rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
1011 1.4 oster Dprintf3("RECON: WRITEDONE EVENT: psid %d ru %d (%d %% complete)\n",
1012 1.57 oster rbuf->parityStripeID, rbuf->which_ru, raidPtr->reconControl->percentComplete);
1013 1.57 oster rf_ReconMapUpdate(raidPtr, raidPtr->reconControl->reconMap,
1014 1.4 oster rbuf->failedDiskSectorOffset, rbuf->failedDiskSectorOffset + sectorsPerRU - 1);
1015 1.57 oster rf_RemoveFromActiveReconTable(raidPtr, rbuf->parityStripeID, rbuf->which_ru);
1016 1.4 oster
1017 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1018 1.82 oster raidPtr->reconControl->pending_writes--;
1019 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1020 1.82 oster
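		/* Returning a floating reconstruction buffer: serialize on
		   rb_lock (under rb_mutex) so that only one thread at a time
		   manipulates the floating-buffer list while the buffer is
		   released. */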
1021 1.4 oster if (rbuf->type == RF_RBUF_TYPE_FLOATING) {
1022 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1023 1.76 oster while(raidPtr->reconControl->rb_lock) {
1024 1.112 mrg rf_wait_cond2(raidPtr->reconControl->rb_cv,
1025 1.112 mrg raidPtr->reconControl->rb_mutex);
1026 1.76 oster }
1027 1.76 oster raidPtr->reconControl->rb_lock = 1;
1028 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1029 1.76 oster
1030 1.4 oster raidPtr->numFullReconBuffers--;
1031 1.57 oster rf_ReleaseFloatingReconBuffer(raidPtr, rbuf);
1032 1.76 oster
1033 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1034 1.76 oster raidPtr->reconControl->rb_lock = 0;
1035 1.112 mrg rf_broadcast_cond2(raidPtr->reconControl->rb_cv);
1036 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1037 1.4 oster } else
1038 1.4 oster if (rbuf->type == RF_RBUF_TYPE_FORCED)
1039 1.4 oster rf_FreeReconBuffer(rbuf);
1040 1.4 oster else
1041 1.4 oster RF_ASSERT(0);
1042 1.104 oster retcode = RF_RECON_WRITE_DONE;
1043 1.4 oster break;
1044 1.4 oster
1045 1.4 oster case RF_REVENT_BUFCLEAR: /* A buffer-stall condition has been
1046 1.4 oster * cleared */
1047 1.57 oster Dprintf1("RECON: BUFCLEAR EVENT: col %d\n", event->col);
1048 1.82 oster if (!raidPtr->reconControl->error) {
1049 1.87 perry submitblocked = rf_SubmitReconBuffer(raidPtr->reconControl->perDiskInfo[event->col].rbuf,
1050 1.82 oster 0, (int) (long) event->arg);
1051 1.82 oster RF_ASSERT(!submitblocked); /* we wouldn't have gotten the
1052 1.82 oster * BUFCLEAR event if we
1053 1.82 oster * couldn't submit */
1054 1.82 oster retcode = IssueNextReadRequest(raidPtr, event->col);
1055 1.82 oster }
1056 1.4 oster break;
1057 1.4 oster
1058 1.4 oster case RF_REVENT_BLOCKCLEAR: /* A user-write reconstruction
1059 1.4 oster * blockage has been cleared */
1060 1.57 oster DDprintf1("RECON: BLOCKCLEAR EVENT: col %d\n", event->col);
1061 1.82 oster if (!raidPtr->reconControl->error) {
1062 1.82 oster retcode = TryToRead(raidPtr, event->col);
1063 1.82 oster }
1064 1.4 oster break;
1065 1.4 oster
1066 1.4 oster case RF_REVENT_HEADSEPCLEAR: /* A max-head-separation
1067 1.4 oster * reconstruction blockage has been
1068 1.4 oster * cleared */
1069 1.57 oster Dprintf1("RECON: HEADSEPCLEAR EVENT: col %d\n", event->col);
1070 1.82 oster if (!raidPtr->reconControl->error) {
1071 1.82 oster retcode = TryToRead(raidPtr, event->col);
1072 1.82 oster }
1073 1.4 oster break;
1074 1.4 oster
1075 1.4 oster /* a buffer has become ready to write */
1076 1.4 oster case RF_REVENT_BUFREADY:
1077 1.57 oster Dprintf1("RECON: BUFREADY EVENT: col %d\n", event->col);
1078 1.82 oster if (!raidPtr->reconControl->error) {
1079 1.82 oster retcode = IssueNextWriteRequest(raidPtr);
1080 1.40 oster #if RF_DEBUG_RECON
1081 1.82 oster if (rf_floatingRbufDebug) {
1082 1.82 oster rf_CheckFloatingRbufCount(raidPtr, 1);
1083 1.82 oster }
1084 1.82 oster #endif
1085 1.4 oster }
1086 1.4 oster break;
1087 1.4 oster
1088 1.4 oster /* we need to skip the current RU entirely because it got
1089 1.4 oster * recon'd while we were waiting for something else to happen */
1090 1.4 oster case RF_REVENT_SKIP:
1091 1.57 oster DDprintf1("RECON: SKIP EVENT: col %d\n", event->col);
1092 1.87 perry if (!raidPtr->reconControl->error) {
1093 1.82 oster retcode = IssueNextReadRequest(raidPtr, event->col);
1094 1.82 oster }
1095 1.4 oster break;
1096 1.4 oster
1097 1.4 oster /* a forced-reconstruction read access has completed. Just
1098 1.4 oster * submit the buffer */
1099 1.4 oster case RF_REVENT_FORCEDREADDONE:
1100 1.4 oster rbuf = (RF_ReconBuffer_t *) event->arg;
1101 1.4 oster rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
1102 1.57 oster DDprintf1("RECON: FORCEDREADDONE EVENT: col %d\n", event->col);
1103 1.82 oster if (!raidPtr->reconControl->error) {
1104 1.82 oster submitblocked = rf_SubmitReconBuffer(rbuf, 1, 0);
1105 1.82 oster RF_ASSERT(!submitblocked);
1106 1.103 oster retcode = 0;
1107 1.82 oster }
1108 1.4 oster break;
1109 1.4 oster
1110 1.70 oster /* A read I/O failed to complete */
1111 1.70 oster case RF_REVENT_READ_FAILED:
1112 1.82 oster retcode = RF_RECON_READ_ERROR;
1113 1.82 oster break;
1114 1.70 oster
1115 1.70 oster /* A write I/O failed to complete */
1116 1.70 oster case RF_REVENT_WRITE_FAILED:
1117 1.82 oster retcode = RF_RECON_WRITE_ERROR;
1118 1.82 oster
1119 1.107 oster /* This is an error, but it was a pending write.
1120 1.107 oster Account for it. */
1121 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1122 1.107 oster raidPtr->reconControl->pending_writes--;
1123 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1124 1.107 oster
1125 1.82 oster rbuf = (RF_ReconBuffer_t *) event->arg;
1126 1.82 oster
1127 1.82 oster /* cleanup the disk queue data */
1128 1.82 oster rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
1129 1.82 oster
1130 1.82 oster /* At this point we're erroring out, badly, and floatingRbufs
1131 1.82 oster may not even be valid. Rather than putting this back onto
1132 1.82 oster the floatingRbufs list, just arrange for its immediate
1133 1.82 oster destruction.
1134 1.82 oster */
1135 1.82 oster rf_FreeReconBuffer(rbuf);
1136 1.82 oster break;
1137 1.70 oster
1138 1.70 oster /* a forced read I/O failed to complete */
1139 1.70 oster case RF_REVENT_FORCEDREAD_FAILED:
1140 1.82 oster retcode = RF_RECON_READ_ERROR;
1141 1.82 oster break;
1142 1.70 oster
1143 1.4 oster default:
1144 1.4 oster RF_PANIC();
1145 1.4 oster }
1146 1.125.4.1 thorpej rf_FreeReconEventDesc(raidPtr, event);
1147 1.4 oster return (retcode);
1148 1.1 oster }
/*****************************************************************************
 *
 * find the next thing that's needed on the indicated disk, and issue
 * a read request for it.  We assume that the reconstruction buffer
 * associated with this process is free to receive the data.  If
 * reconstruction is blocked on the indicated RU, we issue a
 * blockage-release request instead of a physical disk read request.
 * If the current disk gets too far ahead of the others, we issue a
 * head-separation wait request and return.
 *
 * ctrl->{ru_count, curPSID, diskOffset} and
 * rbuf->failedDiskSectorOffset are maintained to point to the unit
 * we're currently accessing.  Note that this deviates from the
 * standard C idiom of having counters point to the next thing to be
 * accessed.  This allows us to easily retry when we're blocked by
 * head separation or reconstruction-blockage events.
 *
 *****************************************************************************/
1167 1.87 perry static int
1168 1.60 oster IssueNextReadRequest(RF_Raid_t *raidPtr, RF_RowCol_t col)
1169 1.4 oster {
1170 1.57 oster RF_PerDiskReconCtrl_t *ctrl = &raidPtr->reconControl->perDiskInfo[col];
1171 1.4 oster RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
1172 1.4 oster RF_ReconBuffer_t *rbuf = ctrl->rbuf;
1173 1.4 oster RF_ReconUnitCount_t RUsPerPU = layoutPtr->SUsPerPU / layoutPtr->SUsPerRU;
1174 1.4 oster RF_SectorCount_t sectorsPerRU = layoutPtr->sectorsPerStripeUnit * layoutPtr->SUsPerRU;
1175 1.4 oster int do_new_check = 0, retcode = 0, status;
1176 1.4 oster
1177 1.4 oster /* if we are currently the slowest disk, mark that we have to do a new
1178 1.4 oster * check */
1179 1.57 oster if (ctrl->headSepCounter <= raidPtr->reconControl->minHeadSepCounter)
1180 1.4 oster do_new_check = 1;
1181 1.4 oster
1182 1.4 oster while (1) {
1183 1.4 oster
1184 1.4 oster ctrl->ru_count++;
1185 1.4 oster if (ctrl->ru_count < RUsPerPU) {
1186 1.4 oster ctrl->diskOffset += sectorsPerRU;
1187 1.4 oster rbuf->failedDiskSectorOffset += sectorsPerRU;
1188 1.4 oster } else {
1189 1.4 oster ctrl->curPSID++;
1190 1.4 oster ctrl->ru_count = 0;
1191 1.4 oster /* code left over from when head-sep was based on
1192 1.4 oster * parity stripe id */
1193 1.118 oster if (ctrl->curPSID > raidPtr->reconControl->lastPSID) {
1194 1.57 oster CheckForNewMinHeadSep(raidPtr, ++(ctrl->headSepCounter));
1195 1.82 oster return (RF_RECON_DONE_READS); /* finito! */
1196 1.4 oster }
1197 1.4 oster /* find the disk offsets of the start of the parity
1198 1.4 oster * stripe on both the current disk and the failed
1199 1.4 oster * disk. skip this entire parity stripe if either disk
1200 1.4 oster * does not appear in the indicated PS */
1201 1.57 oster status = ComputePSDiskOffsets(raidPtr, ctrl->curPSID, col, &ctrl->diskOffset, &rbuf->failedDiskSectorOffset,
1202 1.57 oster &rbuf->spCol, &rbuf->spOffset);
1203 1.4 oster if (status) {
1204 1.4 oster ctrl->ru_count = RUsPerPU - 1;
1205 1.4 oster continue;
1206 1.4 oster }
1207 1.4 oster }
1208 1.4 oster rbuf->which_ru = ctrl->ru_count;
1209 1.4 oster
1210 1.4 oster /* skip this RU if it's already been reconstructed */
1211 1.57 oster if (rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, rbuf->failedDiskSectorOffset)) {
1212 1.4 oster Dprintf2("Skipping psid %ld ru %d: already reconstructed\n", ctrl->curPSID, ctrl->ru_count);
1213 1.4 oster continue;
1214 1.4 oster }
1215 1.4 oster break;
1216 1.4 oster }
1217 1.4 oster ctrl->headSepCounter++;
1218 1.4 oster if (do_new_check)
1219 1.57 oster CheckForNewMinHeadSep(raidPtr, ctrl->headSepCounter); /* update min if needed */
1220 1.4 oster
1221 1.4 oster
1222 1.4 oster /* at this point, we have definitely decided what to do, and we have
1223 1.4 oster * only to see if we can actually do it now */
1224 1.4 oster rbuf->parityStripeID = ctrl->curPSID;
1225 1.4 oster rbuf->which_ru = ctrl->ru_count;
1226 1.67 oster #if RF_ACC_TRACE > 0
1227 1.122 christos memset(&raidPtr->recon_tracerecs[col], 0,
1228 1.29 thorpej sizeof(raidPtr->recon_tracerecs[col]));
1229 1.4 oster raidPtr->recon_tracerecs[col].reconacc = 1;
1230 1.4 oster RF_ETIMER_START(raidPtr->recon_tracerecs[col].recon_timer);
1231 1.67 oster #endif
1232 1.57 oster retcode = TryToRead(raidPtr, col);
1233 1.4 oster return (retcode);
1234 1.1 oster }
1235 1.13 oster
/*
 * tries to issue the next read on the indicated disk.  We may be
 * blocked by (a) the heads being too far apart, or (b) recon on the
 * indicated RU being blocked due to a write by a user thread.  In
 * this case, we issue a head-sep or blockage wait request, which will
 * cause this same routine to be invoked again later when the blockage
 * has cleared.
 */

1245 1.87 perry static int
1246 1.60 oster TryToRead(RF_Raid_t *raidPtr, RF_RowCol_t col)
1247 1.4 oster {
1248 1.57 oster RF_PerDiskReconCtrl_t *ctrl = &raidPtr->reconControl->perDiskInfo[col];
1249 1.4 oster RF_SectorCount_t sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU;
1250 1.4 oster RF_StripeNum_t psid = ctrl->curPSID;
1251 1.4 oster RF_ReconUnitNum_t which_ru = ctrl->ru_count;
1252 1.4 oster RF_DiskQueueData_t *req;
1253 1.68 oster int status;
1254 1.68 oster RF_ReconParityStripeStatus_t *pssPtr, *newpssPtr;
1255 1.4 oster
1256 1.4 oster /* if the current disk is too far ahead of the others, issue a
1257 1.4 oster * head-separation wait and return */
1258 1.57 oster if (CheckHeadSeparation(raidPtr, ctrl, col, ctrl->headSepCounter, which_ru))
1259 1.4 oster return (0);
1260 1.68 oster
1261 1.68 oster /* allocate a new PSS in case we need it */
1262 1.68 oster newpssPtr = rf_AllocPSStatus(raidPtr);
1263 1.68 oster
1264 1.57 oster RF_LOCK_PSS_MUTEX(raidPtr, psid);
1265 1.68 oster pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_CREATE, newpssPtr);
1266 1.68 oster
1267 1.68 oster if (pssPtr != newpssPtr) {
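		/* the lookup returned an already-existing status entry for
		 * this parity stripe, so the preallocated one is not needed */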
1268 1.68 oster rf_FreePSStatus(raidPtr, newpssPtr);
1269 1.68 oster }
1270 1.4 oster
1271 1.4 oster /* if recon is blocked on the indicated parity stripe, issue a
1272 1.4 oster * block-wait request and return. this also must mark the indicated RU
1273 1.4 oster * in the stripe as under reconstruction if not blocked. */
1274 1.57 oster status = CheckForcedOrBlockedReconstruction(raidPtr, pssPtr, ctrl, col, psid, which_ru);
1275 1.4 oster if (status == RF_PSS_RECON_BLOCKED) {
1276 1.4 oster Dprintf2("RECON: Stalling psid %ld ru %d: recon blocked\n", psid, which_ru);
1277 1.4 oster goto out;
1278 1.4 oster } else
1279 1.4 oster if (status == RF_PSS_FORCED_ON_WRITE) {
1280 1.57 oster rf_CauseReconEvent(raidPtr, col, NULL, RF_REVENT_SKIP);
1281 1.4 oster goto out;
1282 1.4 oster }
1283 1.4 oster /* make one last check to be sure that the indicated RU didn't get
1284 1.4 oster * reconstructed while we were waiting for something else to happen.
1285 1.4 oster * This is unfortunate in that it causes us to make this check twice
1286 1.4 oster * in the normal case. Might want to make some attempt to re-work
1287 1.4 oster * this so that we only do this check if we've definitely blocked on
1288 1.4 oster * one of the above checks. When this condition is detected, we may
1289 1.4 oster * have just created a bogus status entry, which we need to delete. */
1290 1.57 oster if (rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, ctrl->rbuf->failedDiskSectorOffset)) {
1291 1.4 oster Dprintf2("RECON: Skipping psid %ld ru %d: prior recon after stall\n", psid, which_ru);
1292 1.68 oster if (pssPtr == newpssPtr)
1293 1.57 oster rf_PSStatusDelete(raidPtr, raidPtr->reconControl->pssTable, pssPtr);
1294 1.57 oster rf_CauseReconEvent(raidPtr, col, NULL, RF_REVENT_SKIP);
1295 1.4 oster goto out;
1296 1.4 oster }
1297 1.4 oster /* found something to read. issue the I/O */
1298 1.57 oster Dprintf4("RECON: Read for psid %ld on col %d offset %ld buf %lx\n",
1299 1.57 oster psid, col, ctrl->diskOffset, ctrl->rbuf->buffer);
1300 1.67 oster #if RF_ACC_TRACE > 0
1301 1.4 oster RF_ETIMER_STOP(raidPtr->recon_tracerecs[col].recon_timer);
1302 1.4 oster RF_ETIMER_EVAL(raidPtr->recon_tracerecs[col].recon_timer);
1303 1.4 oster raidPtr->recon_tracerecs[col].specific.recon.recon_start_to_fetch_us =
1304 1.4 oster RF_ETIMER_VAL_US(raidPtr->recon_tracerecs[col].recon_timer);
1305 1.4 oster RF_ETIMER_START(raidPtr->recon_tracerecs[col].recon_timer);
1306 1.67 oster #endif
1307 1.4 oster /* should be ok to use a NULL proc pointer here, all the bufs we use
1308 1.4 oster * should be in kernel space */
1309 1.4 oster req = rf_CreateDiskQueueData(RF_IO_TYPE_READ, ctrl->diskOffset, sectorsPerRU, ctrl->rbuf->buffer, psid, which_ru,
1310 1.86 oster ReconReadDoneProc, (void *) ctrl,
1311 1.67 oster #if RF_ACC_TRACE > 0
1312 1.67 oster &raidPtr->recon_tracerecs[col],
1313 1.67 oster #else
1314 1.67 oster NULL,
1315 1.67 oster #endif
1316 1.125.4.1 thorpej (void *) raidPtr, 0, NULL);
1317 1.4 oster
1318 1.4 oster ctrl->rbuf->arg = (void *) req;
1319 1.57 oster rf_DiskIOEnqueue(&raidPtr->Queues[col], req, RF_IO_RECON_PRIORITY);
1320 1.4 oster pssPtr->issued[col] = 1;
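	/* note that a low-priority recon read is now outstanding for this RU
	 * on this column, so rf_ForceOrBlockRecon() can simply promote it
	 * instead of issuing a second read */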
1321 1.1 oster
1322 1.1 oster out:
1323 1.57 oster RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
1324 1.4 oster return (0);
1325 1.1 oster }
1326 1.1 oster
1327 1.1 oster
1328 1.13 oster /*
1329 1.13 oster * given a parity stripe ID, we want to find out whether both the
1330 1.13 oster * current disk and the failed disk exist in that parity stripe. If
1331 1.13 oster * not, we want to skip this whole PS. If so, we want to find the
1332 1.13 oster * disk offset of the start of the PS on both the current disk and the
1333 1.13 oster * failed disk.
1334 1.13 oster *
1335 1.13 oster * this works by getting a list of disks comprising the indicated
1336 1.13 oster * parity stripe, and searching the list for the current and failed
1337 1.13 oster * disks. Once we've decided they both exist in the parity stripe, we
1338 1.13 oster * need to decide whether each is data or parity, so that we'll know
1339 1.13 oster * which mapping function to call to get the corresponding disk
1340 1.1 oster * offsets.
1341 1.1 oster *
1342 1.13 oster * this is kind of unpleasant, but doing it this way allows the
1343 1.13 oster * reconstruction code to use parity stripe IDs rather than physical
1344 1.13 oster  * disk addresses to march through the failed disk, which greatly
1345 1.13 oster * simplifies a lot of code, as well as eliminating the need for a
1346 1.13 oster * reverse-mapping function. I also think it will execute faster,
1347 1.13 oster * since the calls to the mapping module are kept to a minimum.
1348 1.1 oster *
1349 1.13 oster * ASSUMES THAT THE STRIPE IDENTIFIER IDENTIFIES THE DISKS COMPRISING
1350 1.87 perry * THE STRIPE IN THE CORRECT ORDER
1351 1.87 perry *
1352 1.60 oster * raidPtr - raid descriptor
1353 1.60 oster * psid - parity stripe identifier
1354 1.60 oster * col - column of disk to find the offsets for
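 * outDiskOffset - out: sector offset of the start of the parity stripe
 *                 on the indicated disk
 * outFailedDiskSectorOffset - out: sector offset of the start of the
 *                 parity stripe on the failed disk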
1355 1.60 oster * spCol - out: col of spare unit for failed unit
1356 1.60 oster * spOffset - out: offset into disk containing spare unit
1357 1.60 oster *
1358 1.60 oster */
1359 1.13 oster
1360 1.13 oster
1361 1.87 perry static int
1362 1.60 oster ComputePSDiskOffsets(RF_Raid_t *raidPtr, RF_StripeNum_t psid,
1363 1.60 oster RF_RowCol_t col, RF_SectorNum_t *outDiskOffset,
1364 1.60 oster RF_SectorNum_t *outFailedDiskSectorOffset,
1365 1.60 oster RF_RowCol_t *spCol, RF_SectorNum_t *spOffset)
1366 1.60 oster {
1367 1.4 oster RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
1368 1.57 oster RF_RowCol_t fcol = raidPtr->reconControl->fcol;
1369 1.4 oster RF_RaidAddr_t sosRaidAddress; /* start-of-stripe */
1370 1.4 oster RF_RowCol_t *diskids;
1371 1.4 oster u_int i, j, k, i_offset, j_offset;
1372 1.57 oster RF_RowCol_t pcol;
1373 1.57 oster int testcol;
1374 1.4 oster RF_SectorNum_t poffset;
1375 1.4 oster char i_is_parity = 0, j_is_parity = 0;
1376 1.4 oster RF_RowCol_t stripeWidth = layoutPtr->numDataCol + layoutPtr->numParityCol;
1377 1.4 oster
1378 1.4 oster /* get a listing of the disks comprising that stripe */
1379 1.4 oster sosRaidAddress = rf_ParityStripeIDToRaidAddress(layoutPtr, psid);
1380 1.57 oster (layoutPtr->map->IdentifyStripe) (raidPtr, sosRaidAddress, &diskids);
1381 1.4 oster RF_ASSERT(diskids);
1382 1.4 oster
1383 1.4 oster /* reject this entire parity stripe if it does not contain the
1384 1.4 oster * indicated disk or it does not contain the failed disk */
1385 1.57 oster
1386 1.4 oster for (i = 0; i < stripeWidth; i++) {
1387 1.4 oster if (col == diskids[i])
1388 1.4 oster break;
1389 1.4 oster }
1390 1.4 oster if (i == stripeWidth)
1391 1.4 oster goto skipit;
1392 1.4 oster for (j = 0; j < stripeWidth; j++) {
1393 1.4 oster if (fcol == diskids[j])
1394 1.4 oster break;
1395 1.4 oster }
1396 1.4 oster if (j == stripeWidth) {
1397 1.4 oster goto skipit;
1398 1.4 oster }
1399 1.4 oster /* find out which disk the parity is on */
1400 1.57 oster (layoutPtr->map->MapParity) (raidPtr, sosRaidAddress, &pcol, &poffset, RF_DONT_REMAP);
1401 1.4 oster
1402 1.4 oster /* find out if either the current RU or the failed RU is parity */
1403 1.4 oster /* also, if the parity occurs in this stripe prior to the data and/or
1404 1.4 oster * failed col, we need to decrement i and/or j */
1405 1.4 oster for (k = 0; k < stripeWidth; k++)
1406 1.4 oster if (diskids[k] == pcol)
1407 1.4 oster break;
1408 1.4 oster RF_ASSERT(k < stripeWidth);
1409 1.4 oster i_offset = i;
1410 1.4 oster j_offset = j;
1411 1.4 oster if (k < i)
1412 1.4 oster i_offset--;
1413 1.4 oster else
1414 1.4 oster if (k == i) {
1415 1.4 oster i_is_parity = 1;
1416 1.4 oster i_offset = 0;
1417 1.4 oster } /* set offsets to zero to disable multiply
1418 1.4 oster * below */
1419 1.4 oster if (k < j)
1420 1.4 oster j_offset--;
1421 1.4 oster else
1422 1.4 oster if (k == j) {
1423 1.4 oster j_is_parity = 1;
1424 1.4 oster j_offset = 0;
1425 1.4 oster }
1426 1.4 oster /* at this point, [ij]_is_parity tells us whether the [current,failed]
1427 1.4 oster * disk is parity at the start of this RU, and, if data, "[ij]_offset"
1428 1.4 oster * tells us how far into the stripe the [current,failed] disk is. */
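	/* e.g. if the parity column sits at index k == 1 and the current
	 * disk at index i == 3, then i_offset == 2: the current disk holds
	 * data stripe unit 2 of this stripe, and its disk offset is found by
	 * mapping sosRaidAddress + 2 * sectorsPerStripeUnit below */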
1429 1.4 oster
1430 1.4 oster /* call the mapping routine to get the offset into the current disk,
1431 1.4 oster * repeat for failed disk. */
1432 1.4 oster if (i_is_parity)
1433 1.57 oster layoutPtr->map->MapParity(raidPtr, sosRaidAddress + i_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outDiskOffset, RF_DONT_REMAP);
1434 1.4 oster else
1435 1.57 oster layoutPtr->map->MapSector(raidPtr, sosRaidAddress + i_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outDiskOffset, RF_DONT_REMAP);
1436 1.4 oster
1437 1.57 oster RF_ASSERT(col == testcol);
1438 1.4 oster
1439 1.4 oster if (j_is_parity)
1440 1.57 oster layoutPtr->map->MapParity(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outFailedDiskSectorOffset, RF_DONT_REMAP);
1441 1.4 oster else
1442 1.57 oster layoutPtr->map->MapSector(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outFailedDiskSectorOffset, RF_DONT_REMAP);
1443 1.57 oster RF_ASSERT(fcol == testcol);
1444 1.4 oster
1445 1.4 oster /* now locate the spare unit for the failed unit */
1446 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
1447 1.4 oster if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
1448 1.4 oster if (j_is_parity)
1449 1.57 oster layoutPtr->map->MapParity(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, spCol, spOffset, RF_REMAP);
1450 1.4 oster else
1451 1.57 oster layoutPtr->map->MapSector(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, spCol, spOffset, RF_REMAP);
1452 1.4 oster } else {
1453 1.72 oster #endif
1454 1.57 oster *spCol = raidPtr->reconControl->spareCol;
1455 1.4 oster *spOffset = *outFailedDiskSectorOffset;
1456 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
1457 1.4 oster }
1458 1.72 oster #endif
1459 1.4 oster return (0);
1460 1.1 oster
1461 1.1 oster skipit:
1462 1.99 oster Dprintf2("RECON: Skipping psid %ld: nothing needed from c%d\n",
1463 1.57 oster psid, col);
1464 1.4 oster return (1);
1465 1.1 oster }
1466 1.4 oster /* this is called when a buffer has become ready to write to the replacement disk */
1467 1.87 perry static int
1468 1.60 oster IssueNextWriteRequest(RF_Raid_t *raidPtr)
1469 1.4 oster {
1470 1.4 oster RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
1471 1.4 oster RF_SectorCount_t sectorsPerRU = layoutPtr->sectorsPerStripeUnit * layoutPtr->SUsPerRU;
1472 1.67 oster #if RF_ACC_TRACE > 0
1473 1.57 oster RF_RowCol_t fcol = raidPtr->reconControl->fcol;
1474 1.67 oster #endif
1475 1.4 oster RF_ReconBuffer_t *rbuf;
1476 1.4 oster RF_DiskQueueData_t *req;
1477 1.4 oster
1478 1.57 oster rbuf = rf_GetFullReconBuffer(raidPtr->reconControl);
1479 1.4 oster RF_ASSERT(rbuf); /* there must be one available, or we wouldn't
1480 1.4 oster * have gotten the event that sent us here */
1481 1.4 oster RF_ASSERT(rbuf->pssPtr);
1482 1.4 oster
1483 1.4 oster rbuf->pssPtr->writeRbuf = rbuf;
1484 1.4 oster rbuf->pssPtr = NULL;
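	/* the parity stripe status entry now records which buffer is being
	 * written back; the buffer is detached from the status entry before
	 * the write is handed off to the disk queue */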
1485 1.4 oster
1486 1.57 oster Dprintf6("RECON: New write (c %d offs %d) for psid %ld ru %d (failed disk offset %ld) buf %lx\n",
1487 1.57 oster rbuf->spCol, rbuf->spOffset, rbuf->parityStripeID,
1488 1.4 oster rbuf->which_ru, rbuf->failedDiskSectorOffset, rbuf->buffer);
1489 1.4 oster Dprintf6("RECON: new write psid %ld %02x %02x %02x %02x %02x\n",
1490 1.4 oster rbuf->parityStripeID, rbuf->buffer[0] & 0xff, rbuf->buffer[1] & 0xff,
1491 1.4 oster rbuf->buffer[2] & 0xff, rbuf->buffer[3] & 0xff, rbuf->buffer[4] & 0xff);
1492 1.4 oster
1493 1.4 oster /* should be ok to use a NULL b_proc here b/c all addrs should be in
1494 1.4 oster * kernel space */
1495 1.4 oster req = rf_CreateDiskQueueData(RF_IO_TYPE_WRITE, rbuf->spOffset,
1496 1.4 oster sectorsPerRU, rbuf->buffer,
1497 1.4 oster rbuf->parityStripeID, rbuf->which_ru,
1498 1.86 oster ReconWriteDoneProc, (void *) rbuf,
1499 1.67 oster #if RF_ACC_TRACE > 0
1500 1.4 oster &raidPtr->recon_tracerecs[fcol],
1501 1.67 oster #else
1502 1.87 perry NULL,
1503 1.67 oster #endif
1504 1.125.4.1 thorpej (void *) raidPtr, 0, NULL);
1505 1.1 oster
1506 1.4 oster rbuf->arg = (void *) req;
1507 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1508 1.82 oster raidPtr->reconControl->pending_writes++;
1509 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1510 1.57 oster rf_DiskIOEnqueue(&raidPtr->Queues[rbuf->spCol], req, RF_IO_RECON_PRIORITY);
1511 1.1 oster
1512 1.4 oster return (0);
1513 1.1 oster }
1514 1.13 oster
1515 1.13 oster /*
1516 1.13 oster * this gets called upon the completion of a reconstruction read
1517 1.13 oster  * operation.  The arg is a pointer to the per-disk reconstruction
1518 1.13 oster * control structure for the process that just finished a read.
1519 1.1 oster *
1520 1.13 oster * called at interrupt context in the kernel, so don't do anything
1521 1.87 perry * illegal here.
1522 1.1 oster */
1523 1.123 christos static void
1524 1.60 oster ReconReadDoneProc(void *arg, int status)
1525 1.4 oster {
1526 1.4 oster RF_PerDiskReconCtrl_t *ctrl = (RF_PerDiskReconCtrl_t *) arg;
1527 1.82 oster RF_Raid_t *raidPtr;
1528 1.82 oster
1529 1.82 oster /* Detect that reconCtrl is no longer valid, and if that
1530 1.82 oster is the case, bail without calling rf_CauseReconEvent().
1531 1.82 oster There won't be anyone listening for this event anyway */
1532 1.82 oster
1533 1.82 oster if (ctrl->reconCtrl == NULL)
1534 1.123 christos return;
1535 1.82 oster
1536 1.82 oster raidPtr = ctrl->reconCtrl->reconDesc->raidPtr;
1537 1.4 oster
1538 1.4 oster if (status) {
1539 1.102 oster printf("raid%d: Recon read failed: %d\n", raidPtr->raidid, status);
1540 1.70 oster rf_CauseReconEvent(raidPtr, ctrl->col, NULL, RF_REVENT_READ_FAILED);
1541 1.123 christos return;
1542 1.4 oster }
1543 1.67 oster #if RF_ACC_TRACE > 0
1544 1.4 oster RF_ETIMER_STOP(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1545 1.4 oster RF_ETIMER_EVAL(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1546 1.4 oster raidPtr->recon_tracerecs[ctrl->col].specific.recon.recon_fetch_to_return_us =
1547 1.4 oster RF_ETIMER_VAL_US(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1548 1.4 oster RF_ETIMER_START(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1549 1.67 oster #endif
1550 1.57 oster rf_CauseReconEvent(raidPtr, ctrl->col, NULL, RF_REVENT_READDONE);
1551 1.123 christos return;
1552 1.1 oster }
1553 1.1 oster /* this gets called upon the completion of a reconstruction write operation.
1554 1.1 oster * the arg is a pointer to the rbuf that was just written
1555 1.1 oster *
1556 1.1 oster * called at interrupt context in the kernel, so don't do anything illegal here.
1557 1.1 oster */
1558 1.123 christos static void
1559 1.60 oster ReconWriteDoneProc(void *arg, int status)
1560 1.4 oster {
1561 1.4 oster RF_ReconBuffer_t *rbuf = (RF_ReconBuffer_t *) arg;
1562 1.4 oster
1563 1.82 oster /* Detect that reconControl is no longer valid, and if that
1564 1.82 oster is the case, bail without calling rf_CauseReconEvent().
1565 1.82 oster There won't be anyone listening for this event anyway */
1566 1.82 oster
1567 1.82 oster if (rbuf->raidPtr->reconControl == NULL)
1568 1.123 christos return;
1569 1.82 oster
1570 1.4 oster Dprintf2("Reconstruction completed on psid %ld ru %d\n", rbuf->parityStripeID, rbuf->which_ru);
1571 1.4 oster if (status) {
1572 1.119 yamt 		printf("raid%d: Recon write failed (status %d(0x%x))!\n", rbuf->raidPtr->raidid, status, status);
1573 1.71 oster rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, arg, RF_REVENT_WRITE_FAILED);
1574 1.123 christos return;
1575 1.4 oster }
1576 1.71 oster rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, arg, RF_REVENT_WRITEDONE);
1577 1.1 oster }
1578 1.1 oster
1579 1.1 oster
1580 1.87 perry /*
1581 1.13 oster * computes a new minimum head sep, and wakes up anyone who needs to
1582 1.87 perry * be woken as a result
1583 1.13 oster */
1584 1.87 perry static void
1585 1.95 christos CheckForNewMinHeadSep(RF_Raid_t *raidPtr, RF_HeadSepLimit_t hsCtr)
1586 1.4 oster {
1587 1.57 oster RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl;
1588 1.4 oster RF_HeadSepLimit_t new_min;
1589 1.4 oster RF_RowCol_t i;
1590 1.123 christos RF_CallbackValueDesc_t *p;
1591 1.4 oster RF_ASSERT(hsCtr >= reconCtrlPtr->minHeadSepCounter); /* from the definition
1592 1.4 oster * of a minimum */
1593 1.4 oster
1594 1.4 oster
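	/* rb_lock is a sleepable lock built from rb_mutex and rb_cv; holding
	 * it here serializes updates to minHeadSepCounter and the head-sep
	 * callback list */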
1595 1.112 mrg rf_lock_mutex2(reconCtrlPtr->rb_mutex);
1596 1.76 oster while(reconCtrlPtr->rb_lock) {
1597 1.112 mrg rf_wait_cond2(reconCtrlPtr->rb_cv, reconCtrlPtr->rb_mutex);
1598 1.76 oster }
1599 1.76 oster reconCtrlPtr->rb_lock = 1;
1600 1.112 mrg rf_unlock_mutex2(reconCtrlPtr->rb_mutex);
1601 1.4 oster
1602 1.4 oster new_min = ~(1L << (8 * sizeof(long) - 1)); /* 0x7FFF....FFF */
1603 1.4 oster for (i = 0; i < raidPtr->numCol; i++)
1604 1.4 oster if (i != reconCtrlPtr->fcol) {
1605 1.4 oster if (reconCtrlPtr->perDiskInfo[i].headSepCounter < new_min)
1606 1.4 oster new_min = reconCtrlPtr->perDiskInfo[i].headSepCounter;
1607 1.4 oster }
1608 1.4 oster /* set the new minimum and wake up anyone who can now run again */
1609 1.4 oster if (new_min != reconCtrlPtr->minHeadSepCounter) {
1610 1.4 oster reconCtrlPtr->minHeadSepCounter = new_min;
1611 1.4 oster Dprintf1("RECON: new min head pos counter val is %ld\n", new_min);
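		/* the head-sep callback list is kept sorted by wakeup value
		 * (see CheckHeadSeparation), so we can stop at the first entry
		 * whose value is still above the new minimum */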
1612 1.4 oster while (reconCtrlPtr->headSepCBList) {
1613 1.123 christos if (reconCtrlPtr->headSepCBList->v > new_min)
1614 1.4 oster break;
1615 1.4 oster p = reconCtrlPtr->headSepCBList;
1616 1.4 oster reconCtrlPtr->headSepCBList = p->next;
1617 1.4 oster p->next = NULL;
1618 1.57 oster rf_CauseReconEvent(raidPtr, p->col, NULL, RF_REVENT_HEADSEPCLEAR);
1619 1.125.4.1 thorpej rf_FreeCallbackValueDesc(raidPtr, p);
1620 1.4 oster }
1621 1.1 oster
1622 1.4 oster }
1623 1.112 mrg rf_lock_mutex2(reconCtrlPtr->rb_mutex);
1624 1.76 oster reconCtrlPtr->rb_lock = 0;
1625 1.112 mrg rf_broadcast_cond2(reconCtrlPtr->rb_cv);
1626 1.112 mrg rf_unlock_mutex2(reconCtrlPtr->rb_mutex);
1627 1.1 oster }
1628 1.13 oster
1629 1.13 oster /*
1630 1.13 oster * checks to see that the maximum head separation will not be violated
1631 1.13 oster * if we initiate a reconstruction I/O on the indicated disk.
1632 1.13 oster * Limiting the maximum head separation between two disks eliminates
1633 1.13 oster * the nasty buffer-stall conditions that occur when one disk races
1634 1.13 oster * ahead of the others and consumes all of the floating recon buffers.
1635 1.13 oster * This code is complex and unpleasant but it's necessary to avoid
1636 1.13 oster * some very nasty, albeit fairly rare, reconstruction behavior.
1637 1.1 oster *
1638 1.13 oster * returns non-zero if and only if we have to stop working on the
1639 1.87 perry * indicated disk due to a head-separation delay.
1640 1.1 oster */
1641 1.87 perry static int
1642 1.60 oster CheckHeadSeparation(RF_Raid_t *raidPtr, RF_PerDiskReconCtrl_t *ctrl,
1643 1.95 christos RF_RowCol_t col, RF_HeadSepLimit_t hsCtr,
1644 1.95 christos RF_ReconUnitNum_t which_ru)
1645 1.4 oster {
1646 1.57 oster RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl;
1647 1.123 christos RF_CallbackValueDesc_t *cb, *p, *pt;
1648 1.10 oster int retval = 0;
1649 1.4 oster
1650 1.4 oster /* if we're too far ahead of the slowest disk, stop working on this
1651 1.4 oster * disk until the slower ones catch up. We do this by scheduling a
1652 1.4 oster * wakeup callback for the time when the slowest disk has caught up.
1653 1.4 oster * We define "caught up" with 20% hysteresis, i.e. the head separation
1654 1.4 oster * must have fallen to at most 80% of the max allowable head
1655 1.4 oster * separation before we'll wake up.
1656 1.87 perry *
1657 1.4 oster */
1658 1.112 mrg rf_lock_mutex2(reconCtrlPtr->rb_mutex);
1659 1.76 oster while(reconCtrlPtr->rb_lock) {
1660 1.112 mrg rf_wait_cond2(reconCtrlPtr->rb_cv, reconCtrlPtr->rb_mutex);
1661 1.76 oster }
1662 1.76 oster reconCtrlPtr->rb_lock = 1;
1663 1.112 mrg rf_unlock_mutex2(reconCtrlPtr->rb_mutex);
1664 1.4 oster if ((raidPtr->headSepLimit >= 0) &&
1665 1.4 oster ((ctrl->headSepCounter - reconCtrlPtr->minHeadSepCounter) > raidPtr->headSepLimit)) {
1666 1.57 oster Dprintf5("raid%d: RECON: head sep stall: col %d hsCtr %ld minHSCtr %ld limit %ld\n",
1667 1.87 perry raidPtr->raidid, col, ctrl->headSepCounter,
1668 1.87 perry reconCtrlPtr->minHeadSepCounter,
1669 1.10 oster raidPtr->headSepLimit);
1670 1.125.4.1 thorpej cb = rf_AllocCallbackValueDesc(raidPtr);
1671 1.4 oster /* the minHeadSepCounter value we have to get to before we'll
1672 1.4 oster * wake up. build in 20% hysteresis. */
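		/* e.g. with headSepLimit == 100 and headSepCounter == 500,
		 * cb->v == 500 - 100 + 20 == 420: we will not be woken until
		 * the slowest disk reaches 420, i.e. until our lead has fallen
		 * to 80, which is 80% of the limit */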
1673 1.123 christos cb->v = (ctrl->headSepCounter - raidPtr->headSepLimit + raidPtr->headSepLimit / 5);
1674 1.4 oster cb->col = col;
1675 1.4 oster cb->next = NULL;
1676 1.4 oster
1677 1.4 oster /* insert this callback descriptor into the sorted list of
1678 1.4 oster * pending head-sep callbacks */
1679 1.4 oster p = reconCtrlPtr->headSepCBList;
1680 1.4 oster if (!p)
1681 1.4 oster reconCtrlPtr->headSepCBList = cb;
1682 1.4 oster else
1683 1.123 christos if (cb->v < p->v) {
1684 1.4 oster cb->next = reconCtrlPtr->headSepCBList;
1685 1.4 oster reconCtrlPtr->headSepCBList = cb;
1686 1.4 oster } else {
1687 1.123 christos for (pt = p, p = p->next; p && (p->v < cb->v); pt = p, p = p->next);
1688 1.4 oster cb->next = p;
1689 1.4 oster pt->next = cb;
1690 1.4 oster }
1691 1.4 oster retval = 1;
1692 1.1 oster #if RF_RECON_STATS > 0
1693 1.4 oster ctrl->reconCtrl->reconDesc->hsStallCount++;
1694 1.4 oster #endif /* RF_RECON_STATS > 0 */
1695 1.4 oster }
1696 1.112 mrg rf_lock_mutex2(reconCtrlPtr->rb_mutex);
1697 1.76 oster reconCtrlPtr->rb_lock = 0;
1698 1.112 mrg rf_broadcast_cond2(reconCtrlPtr->rb_cv);
1699 1.112 mrg rf_unlock_mutex2(reconCtrlPtr->rb_mutex);
1700 1.1 oster
1701 1.4 oster return (retval);
1702 1.1 oster }
1703 1.87 perry /*
1704 1.13 oster * checks to see if reconstruction has been either forced or blocked
1705 1.13 oster * by a user operation. if forced, we skip this RU entirely. else if
1706 1.13 oster * blocked, put ourselves on the wait list. else return 0.
1707 1.1 oster *
1708 1.87 perry * ASSUMES THE PSS MUTEX IS LOCKED UPON ENTRY
1709 1.1 oster */
1710 1.87 perry static int
1711 1.95 christos CheckForcedOrBlockedReconstruction(RF_Raid_t *raidPtr,
1712 1.60 oster RF_ReconParityStripeStatus_t *pssPtr,
1713 1.95 christos RF_PerDiskReconCtrl_t *ctrl,
1714 1.94 christos RF_RowCol_t col,
1715 1.95 christos RF_StripeNum_t psid,
1716 1.95 christos RF_ReconUnitNum_t which_ru)
1717 1.4 oster {
1718 1.123 christos RF_CallbackValueDesc_t *cb;
1719 1.4 oster int retcode = 0;
1720 1.4 oster
1721 1.4 oster if ((pssPtr->flags & RF_PSS_FORCED_ON_READ) || (pssPtr->flags & RF_PSS_FORCED_ON_WRITE))
1722 1.4 oster retcode = RF_PSS_FORCED_ON_WRITE;
1723 1.4 oster else
1724 1.4 oster if (pssPtr->flags & RF_PSS_RECON_BLOCKED) {
1725 1.57 oster Dprintf3("RECON: col %d blocked at psid %ld ru %d\n", col, psid, which_ru);
1726 1.125.4.1 thorpej cb = rf_AllocCallbackValueDesc(raidPtr); /* append ourselves to
1727 1.125.4.1 thorpej * the blockage-wait
1728 1.125.4.1 thorpej * list */
1729 1.4 oster cb->col = col;
1730 1.4 oster cb->next = pssPtr->blockWaitList;
1731 1.4 oster pssPtr->blockWaitList = cb;
1732 1.4 oster retcode = RF_PSS_RECON_BLOCKED;
1733 1.4 oster }
1734 1.4 oster if (!retcode)
1735 1.4 oster pssPtr->flags |= RF_PSS_UNDER_RECON; /* mark this RU as under
1736 1.4 oster * reconstruction */
1737 1.4 oster
1738 1.4 oster return (retcode);
1739 1.1 oster }
1740 1.13 oster /*
1741 1.13 oster * if reconstruction is currently ongoing for the indicated stripeID,
1742 1.13 oster * reconstruction is forced to completion and we return non-zero to
1743 1.13 oster * indicate that the caller must wait. If not, then reconstruction is
1744 1.13 oster * blocked on the indicated stripe and the routine returns zero. If
1745 1.13 oster * and only if we return non-zero, we'll cause the cbFunc to get
1746 1.87 perry * invoked with the cbArg when the reconstruction has completed.
1747 1.1 oster */
1748 1.87 perry int
1749 1.60 oster rf_ForceOrBlockRecon(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap,
1750 1.123 christos void (*cbFunc)(void *), void *cbArg)
1751 1.4 oster {
1752 1.4 oster RF_StripeNum_t stripeID = asmap->stripeID; /* the stripe ID we're
1753 1.4 oster * forcing recon on */
1754 1.4 oster RF_SectorCount_t sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU; /* num sects in one RU */
1755 1.68 oster RF_ReconParityStripeStatus_t *pssPtr, *newpssPtr; /* a pointer to the parity
1756 1.4 oster * stripe status structure */
1757 1.4 oster RF_StripeNum_t psid; /* parity stripe id */
1758 1.4 oster RF_SectorNum_t offset, fd_offset; /* disk offset, failed-disk
1759 1.4 oster * offset */
1760 1.4 oster RF_RowCol_t *diskids;
1761 1.4 oster RF_ReconUnitNum_t which_ru; /* RU within parity stripe */
1762 1.4 oster RF_RowCol_t fcol, diskno, i;
1763 1.4 oster RF_ReconBuffer_t *new_rbuf; /* ptr to newly allocated rbufs */
1764 1.4 oster RF_DiskQueueData_t *req;/* disk I/O req to be enqueued */
1765 1.123 christos RF_CallbackFuncDesc_t *cb;
1766 1.68 oster int nPromoted;
1767 1.4 oster
1768 1.4 oster psid = rf_MapStripeIDToParityStripeID(&raidPtr->Layout, stripeID, &which_ru);
1769 1.4 oster
1770 1.68 oster /* allocate a new PSS in case we need it */
1771 1.68 oster newpssPtr = rf_AllocPSStatus(raidPtr);
1772 1.68 oster
1773 1.57 oster RF_LOCK_PSS_MUTEX(raidPtr, psid);
1774 1.4 oster
1775 1.68 oster pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_CREATE | RF_PSS_RECON_BLOCKED, newpssPtr);
1776 1.68 oster
1777 1.68 oster if (pssPtr != newpssPtr) {
1778 1.68 oster rf_FreePSStatus(raidPtr, newpssPtr);
1779 1.68 oster }
1780 1.4 oster
1781 1.4 oster /* if recon is not ongoing on this PS, just return */
1782 1.4 oster if (!(pssPtr->flags & RF_PSS_UNDER_RECON)) {
1783 1.57 oster RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
1784 1.4 oster return (0);
1785 1.4 oster }
1786 1.4 oster /* otherwise, we have to wait for reconstruction to complete on this
1787 1.4 oster * RU. */
1788 1.4 oster /* In order to avoid waiting for a potentially large number of
1789 1.4 oster * low-priority accesses to complete, we force a normal-priority (i.e.
1790 1.4 oster * not low-priority) reconstruction on this RU. */
1791 1.4 oster if (!(pssPtr->flags & RF_PSS_FORCED_ON_WRITE) && !(pssPtr->flags & RF_PSS_FORCED_ON_READ)) {
1792 1.4 oster DDprintf1("Forcing recon on psid %ld\n", psid);
1793 1.4 oster pssPtr->flags |= RF_PSS_FORCED_ON_WRITE; /* mark this RU as under
1794 1.4 oster * forced recon */
1795 1.4 oster pssPtr->flags &= ~RF_PSS_RECON_BLOCKED; /* clear the blockage
1796 1.4 oster * that we just set */
1797 1.57 oster fcol = raidPtr->reconControl->fcol;
1798 1.4 oster
1799 1.4 oster /* get a listing of the disks comprising the indicated stripe */
1800 1.57 oster (raidPtr->Layout.map->IdentifyStripe) (raidPtr, asmap->raidAddress, &diskids);
1801 1.4 oster
1802 1.4 oster /* For previously issued reads, elevate them to normal
1803 1.4 oster * priority. If the I/O has already completed, it won't be
1804 1.4 oster * found in the queue, and hence this will be a no-op. For
1805 1.4 oster * unissued reads, allocate buffers and issue new reads. The
1806 1.4 oster * fact that we've set the FORCED bit means that the regular
1807 1.4 oster * recon procs will not re-issue these reqs */
1808 1.4 oster for (i = 0; i < raidPtr->Layout.numDataCol + raidPtr->Layout.numParityCol; i++)
1809 1.4 oster if ((diskno = diskids[i]) != fcol) {
1810 1.4 oster if (pssPtr->issued[diskno]) {
1811 1.57 oster nPromoted = rf_DiskIOPromote(&raidPtr->Queues[diskno], psid, which_ru);
1812 1.4 oster if (rf_reconDebug && nPromoted)
1813 1.57 oster printf("raid%d: promoted read from col %d\n", raidPtr->raidid, diskno);
1814 1.4 oster } else {
1815 1.57 oster new_rbuf = rf_MakeReconBuffer(raidPtr, diskno, RF_RBUF_TYPE_FORCED); /* create new buf */
1816 1.57 oster ComputePSDiskOffsets(raidPtr, psid, diskno, &offset, &fd_offset,
1817 1.57 oster &new_rbuf->spCol, &new_rbuf->spOffset); /* find offsets & spare
1818 1.4 oster * location */
1819 1.4 oster new_rbuf->parityStripeID = psid; /* fill in the buffer */
1820 1.4 oster new_rbuf->which_ru = which_ru;
1821 1.4 oster new_rbuf->failedDiskSectorOffset = fd_offset;
1822 1.4 oster new_rbuf->priority = RF_IO_NORMAL_PRIORITY;
1823 1.4 oster
1824 1.4 oster /* use NULL b_proc b/c all addrs
1825 1.4 oster * should be in kernel space */
1826 1.4 oster req = rf_CreateDiskQueueData(RF_IO_TYPE_READ, offset + which_ru * sectorsPerRU, sectorsPerRU, new_rbuf->buffer,
1827 1.123 christos psid, which_ru,
1828 1.123 christos ForceReconReadDoneProc,
1829 1.123 christos (void *) new_rbuf,
1830 1.125.4.1 thorpej NULL, (void *) raidPtr, 0, NULL);
1831 1.4 oster
1832 1.4 oster new_rbuf->arg = req;
1833 1.57 oster rf_DiskIOEnqueue(&raidPtr->Queues[diskno], req, RF_IO_NORMAL_PRIORITY); /* enqueue the I/O */
1834 1.57 oster Dprintf2("raid%d: Issued new read req on col %d\n", raidPtr->raidid, diskno);
1835 1.4 oster }
1836 1.4 oster }
1837 1.4 oster /* if the write is sitting in the disk queue, elevate its
1838 1.4 oster * priority */
1839 1.57 oster if (rf_DiskIOPromote(&raidPtr->Queues[fcol], psid, which_ru))
1840 1.102 oster if (rf_reconDebug)
1841 1.102 oster printf("raid%d: promoted write to col %d\n",
1842 1.102 oster raidPtr->raidid, fcol);
1843 1.4 oster }
1844 1.4 oster /* install a callback descriptor to be invoked when recon completes on
1845 1.4 oster * this parity stripe. */
1846 1.125.4.1 thorpej cb = rf_AllocCallbackFuncDesc(raidPtr);
1847 1.123 christos cb->callbackFunc = cbFunc;
1848 1.123 christos cb->callbackArg = cbArg;
1849 1.4 oster cb->next = pssPtr->procWaitList;
1850 1.4 oster pssPtr->procWaitList = cb;
1851 1.87 perry DDprintf2("raid%d: Waiting for forced recon on psid %ld\n",
1852 1.10 oster raidPtr->raidid, psid);
1853 1.4 oster
1854 1.57 oster RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
1855 1.4 oster return (1);
1856 1.1 oster }
1857 1.1 oster /* called upon the completion of a forced reconstruction read.
1858 1.1 oster  * all we do is schedule the FORCEDREADDONE event (or FORCEDREAD_FAILED on error).
1859 1.1 oster * called at interrupt context in the kernel, so don't do anything illegal here.
1860 1.1 oster */
1861 1.87 perry static void
1862 1.60 oster ForceReconReadDoneProc(void *arg, int status)
1863 1.4 oster {
1864 1.4 oster RF_ReconBuffer_t *rbuf = arg;
1865 1.4 oster
1866 1.82 oster /* Detect that reconControl is no longer valid, and if that
1867 1.82 oster is the case, bail without calling rf_CauseReconEvent().
1868 1.82 oster There won't be anyone listening for this event anyway */
1869 1.82 oster
1870 1.82 oster if (rbuf->raidPtr->reconControl == NULL)
1871 1.82 oster return;
1872 1.82 oster
1873 1.4 oster if (status) {
1874 1.70 oster printf("raid%d: Forced recon read failed!\n", rbuf->raidPtr->raidid);
1875 1.71 oster rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, (void *) rbuf, RF_REVENT_FORCEDREAD_FAILED);
1876 1.79 oster return;
1877 1.4 oster }
1878 1.71 oster rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, (void *) rbuf, RF_REVENT_FORCEDREADDONE);
1879 1.1 oster }
1880 1.1 oster /* releases a block on the reconstruction of the indicated stripe */
1881 1.87 perry int
1882 1.60 oster rf_UnblockRecon(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap)
1883 1.4 oster {
1884 1.4 oster RF_StripeNum_t stripeID = asmap->stripeID;
1885 1.4 oster RF_ReconParityStripeStatus_t *pssPtr;
1886 1.4 oster RF_ReconUnitNum_t which_ru;
1887 1.4 oster RF_StripeNum_t psid;
1888 1.123 christos RF_CallbackValueDesc_t *cb;
1889 1.4 oster
1890 1.4 oster psid = rf_MapStripeIDToParityStripeID(&raidPtr->Layout, stripeID, &which_ru);
1891 1.57 oster RF_LOCK_PSS_MUTEX(raidPtr, psid);
1892 1.68 oster pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_NONE, NULL);
1893 1.4 oster
1894 1.4 oster /* When recon is forced, the pss desc can get deleted before we get
1895 1.4 oster * back to unblock recon. But, this can _only_ happen when recon is
1896 1.4 oster * forced. It would be good to put some kind of sanity check here, but
1897 1.4 oster * how to decide if recon was just forced or not? */
1898 1.4 oster if (!pssPtr) {
1899 1.4 oster /* printf("Warning: no pss descriptor upon unblock on psid %ld
1900 1.4 oster * RU %d\n",psid,which_ru); */
1901 1.43 oster #if (RF_DEBUG_RECON > 0) || (RF_DEBUG_PSS > 0)
1902 1.4 oster if (rf_reconDebug || rf_pssDebug)
1903 1.4 oster printf("Warning: no pss descriptor upon unblock on psid %ld RU %d\n", (long) psid, which_ru);
1904 1.43 oster #endif
1905 1.4 oster goto out;
1906 1.4 oster }
1907 1.4 oster pssPtr->blockCount--;
1908 1.10 oster Dprintf3("raid%d: unblocking recon on psid %ld: blockcount is %d\n",
1909 1.10 oster raidPtr->raidid, psid, pssPtr->blockCount);
1910 1.4 oster if (pssPtr->blockCount == 0) { /* if recon blockage has been released */
1911 1.4 oster
1912 1.4 oster /* unblock recon before calling CauseReconEvent in case
1913 1.4 oster * CauseReconEvent causes us to try to issue a new read before
1914 1.4 oster * returning here. */
1915 1.4 oster pssPtr->flags &= ~RF_PSS_RECON_BLOCKED;
1916 1.4 oster
1917 1.4 oster
1918 1.87 perry while (pssPtr->blockWaitList) {
1919 1.13 oster /* spin through the block-wait list and
1920 1.13 oster release all the waiters */
1921 1.4 oster cb = pssPtr->blockWaitList;
1922 1.4 oster pssPtr->blockWaitList = cb->next;
1923 1.4 oster cb->next = NULL;
1924 1.57 oster rf_CauseReconEvent(raidPtr, cb->col, NULL, RF_REVENT_BLOCKCLEAR);
1925 1.125.4.1 thorpej rf_FreeCallbackValueDesc(raidPtr, cb);
1926 1.4 oster }
1927 1.13 oster if (!(pssPtr->flags & RF_PSS_UNDER_RECON)) {
1928 1.13 oster /* if no recon was requested while recon was blocked */
1929 1.57 oster rf_PSStatusDelete(raidPtr, raidPtr->reconControl->pssTable, pssPtr);
1930 1.4 oster }
1931 1.4 oster }
1932 1.1 oster out:
1933 1.57 oster RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
1934 1.4 oster return (0);
1935 1.1 oster }
1936 1.104 oster
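/*
 * drain the head-separation callback list, waking every disk thread that is
 * currently stalled waiting for slower disks to catch up.  (Presumably used
 * when reconstruction cannot make further progress and the waiters must not
 * be left sleeping.)
 */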
1937 1.104 oster void
1938 1.104 oster rf_WakeupHeadSepCBWaiters(RF_Raid_t *raidPtr)
1939 1.104 oster {
1940 1.123 christos RF_CallbackValueDesc_t *p;
1941 1.104 oster
1942 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1943 1.104 oster while(raidPtr->reconControl->rb_lock) {
1944 1.112 mrg rf_wait_cond2(raidPtr->reconControl->rb_cv,
1945 1.112 mrg raidPtr->reconControl->rb_mutex);
1946 1.104 oster }
1947 1.104 oster
1948 1.104 oster raidPtr->reconControl->rb_lock = 1;
1949 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1950 1.104 oster
1951 1.104 oster while (raidPtr->reconControl->headSepCBList) {
1952 1.104 oster p = raidPtr->reconControl->headSepCBList;
1953 1.104 oster raidPtr->reconControl->headSepCBList = p->next;
1954 1.104 oster p->next = NULL;
1955 1.104 oster rf_CauseReconEvent(raidPtr, p->col, NULL, RF_REVENT_HEADSEPCLEAR);
1956 1.125.4.1 thorpej rf_FreeCallbackValueDesc(raidPtr, p);
1957 1.104 oster }
1958 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1959 1.104 oster raidPtr->reconControl->rb_lock = 0;
1960 1.112 mrg rf_broadcast_cond2(raidPtr->reconControl->rb_cv);
1961 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1962 1.104 oster
1963 1.104 oster }
1964 1.104 oster
1965