rf_reconstruct.c revision 1.120
1 1.120 hannken /* $NetBSD: rf_reconstruct.c,v 1.120 2014/06/14 07:39:00 hannken Exp $ */
2 1.1 oster /*
3 1.1 oster * Copyright (c) 1995 Carnegie-Mellon University.
4 1.1 oster * All rights reserved.
5 1.1 oster *
6 1.1 oster * Author: Mark Holland
7 1.1 oster *
8 1.1 oster * Permission to use, copy, modify and distribute this software and
9 1.1 oster * its documentation is hereby granted, provided that both the copyright
10 1.1 oster * notice and this permission notice appear in all copies of the
11 1.1 oster * software, derivative works or modified versions, and any portions
12 1.1 oster * thereof, and that both notices appear in supporting documentation.
13 1.1 oster *
14 1.1 oster * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15 1.1 oster * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16 1.1 oster * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17 1.1 oster *
18 1.1 oster * Carnegie Mellon requests users of this software to return to
19 1.1 oster *
20 1.1 oster * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
21 1.1 oster * School of Computer Science
22 1.1 oster * Carnegie Mellon University
23 1.1 oster * Pittsburgh PA 15213-3890
24 1.1 oster *
25 1.1 oster * any improvements or extensions that they make and grant Carnegie the
26 1.1 oster * rights to redistribute these changes.
27 1.1 oster */
28 1.1 oster
29 1.1 oster /************************************************************
30 1.1 oster *
31 1.1 oster * rf_reconstruct.c -- code to perform on-line reconstruction
32 1.1 oster *
33 1.1 oster ************************************************************/
34 1.31 lukem
35 1.31 lukem #include <sys/cdefs.h>
36 1.120 hannken __KERNEL_RCSID(0, "$NetBSD: rf_reconstruct.c,v 1.120 2014/06/14 07:39:00 hannken Exp $");
37 1.1 oster
38 1.97 ad #include <sys/param.h>
39 1.1 oster #include <sys/time.h>
40 1.1 oster #include <sys/buf.h>
41 1.1 oster #include <sys/errno.h>
42 1.5 oster #include <sys/systm.h>
43 1.5 oster #include <sys/proc.h>
44 1.5 oster #include <sys/ioctl.h>
45 1.5 oster #include <sys/fcntl.h>
46 1.5 oster #include <sys/vnode.h>
47 1.110 dholland #include <sys/namei.h> /* for pathbuf */
48 1.30 oster #include <dev/raidframe/raidframevar.h>
49 1.5 oster
50 1.120 hannken #include <miscfs/specfs/specdev.h> /* for v_rdev */
51 1.120 hannken
52 1.1 oster #include "rf_raid.h"
53 1.1 oster #include "rf_reconutil.h"
54 1.1 oster #include "rf_revent.h"
55 1.1 oster #include "rf_reconbuffer.h"
56 1.1 oster #include "rf_acctrace.h"
57 1.1 oster #include "rf_etimer.h"
58 1.1 oster #include "rf_dag.h"
59 1.1 oster #include "rf_desc.h"
60 1.36 oster #include "rf_debugprint.h"
61 1.1 oster #include "rf_general.h"
62 1.1 oster #include "rf_driver.h"
63 1.1 oster #include "rf_utils.h"
64 1.1 oster #include "rf_shutdown.h"
65 1.1 oster
66 1.1 oster #include "rf_kintf.h"
67 1.1 oster
68 1.1 oster /* setting these to -1 causes them to be set to their default values if not set by debug options */
69 1.1 oster
70 1.41 oster #if RF_DEBUG_RECON
71 1.1 oster #define Dprintf(s) if (rf_reconDebug) rf_debug_printf(s,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
72 1.1 oster #define Dprintf1(s,a) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL)
73 1.1 oster #define Dprintf2(s,a,b) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL)
74 1.1 oster #define Dprintf3(s,a,b,c) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),NULL,NULL,NULL,NULL,NULL)
75 1.1 oster #define Dprintf4(s,a,b,c,d) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),NULL,NULL,NULL,NULL)
76 1.1 oster #define Dprintf5(s,a,b,c,d,e) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),NULL,NULL,NULL)
77 1.1 oster #define Dprintf6(s,a,b,c,d,e,f) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),(void *)((unsigned long)f),NULL,NULL)
78 1.1 oster #define Dprintf7(s,a,b,c,d,e,f,g) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),(void *)((unsigned long)f),(void *)((unsigned long)g),NULL)
79 1.1 oster
80 1.1 oster #define DDprintf1(s,a) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL)
81 1.1 oster #define DDprintf2(s,a,b) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL)
82 1.33 oster
83 1.41 oster #else /* RF_DEBUG_RECON */
84 1.33 oster
85 1.33 oster #define Dprintf(s) {}
86 1.33 oster #define Dprintf1(s,a) {}
87 1.33 oster #define Dprintf2(s,a,b) {}
88 1.33 oster #define Dprintf3(s,a,b,c) {}
89 1.33 oster #define Dprintf4(s,a,b,c,d) {}
90 1.33 oster #define Dprintf5(s,a,b,c,d,e) {}
91 1.33 oster #define Dprintf6(s,a,b,c,d,e,f) {}
92 1.33 oster #define Dprintf7(s,a,b,c,d,e,f,g) {}
93 1.33 oster
94 1.33 oster #define DDprintf1(s,a) {}
95 1.33 oster #define DDprintf2(s,a,b) {}
96 1.33 oster
97 1.41 oster #endif /* RF_DEBUG_RECON */
98 1.33 oster
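/* status codes returned by ProcessReconEvent() and used by
   rf_ContinueReconstructFailedDisk() to drive the reconstruction loop */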
99 1.82 oster #define RF_RECON_DONE_READS 1
100 1.82 oster #define RF_RECON_READ_ERROR 2
101 1.82 oster #define RF_RECON_WRITE_ERROR 3
102 1.82 oster #define RF_RECON_READ_STOPPED 4
103 1.104 oster #define RF_RECON_WRITE_DONE 5
104 1.82 oster
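/* bounds on the number of free buffers kept in the rf_pools.reconbuffer
   pool (passed to rf_pool_init() in rf_ConfigureReconstruction() below) */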
105 1.73 oster #define RF_MAX_FREE_RECONBUFFER 32
106 1.73 oster #define RF_MIN_FREE_RECONBUFFER 16
107 1.1 oster
108 1.69 oster static RF_RaidReconDesc_t *AllocRaidReconDesc(RF_Raid_t *, RF_RowCol_t,
109 1.69 oster RF_RaidDisk_t *, int, RF_RowCol_t);
110 1.69 oster static void FreeReconDesc(RF_RaidReconDesc_t *);
111 1.69 oster static int ProcessReconEvent(RF_Raid_t *, RF_ReconEvent_t *);
112 1.69 oster static int IssueNextReadRequest(RF_Raid_t *, RF_RowCol_t);
113 1.69 oster static int TryToRead(RF_Raid_t *, RF_RowCol_t);
114 1.87 perry static int ComputePSDiskOffsets(RF_Raid_t *, RF_StripeNum_t, RF_RowCol_t,
115 1.69 oster RF_SectorNum_t *, RF_SectorNum_t *, RF_RowCol_t *,
116 1.69 oster RF_SectorNum_t *);
117 1.69 oster static int IssueNextWriteRequest(RF_Raid_t *);
118 1.69 oster static int ReconReadDoneProc(void *, int);
119 1.69 oster static int ReconWriteDoneProc(void *, int);
120 1.69 oster static void CheckForNewMinHeadSep(RF_Raid_t *, RF_HeadSepLimit_t);
121 1.69 oster static int CheckHeadSeparation(RF_Raid_t *, RF_PerDiskReconCtrl_t *,
122 1.69 oster RF_RowCol_t, RF_HeadSepLimit_t,
123 1.69 oster RF_ReconUnitNum_t);
124 1.69 oster static int CheckForcedOrBlockedReconstruction(RF_Raid_t *,
125 1.69 oster RF_ReconParityStripeStatus_t *,
126 1.69 oster RF_PerDiskReconCtrl_t *,
127 1.69 oster RF_RowCol_t, RF_StripeNum_t,
128 1.69 oster RF_ReconUnitNum_t);
129 1.69 oster static void ForceReconReadDoneProc(void *, int);
130 1.1 oster static void rf_ShutdownReconstruction(void *);
131 1.1 oster
132 1.1 oster struct RF_ReconDoneProc_s {
133 1.4 oster void (*proc) (RF_Raid_t *, void *);
134 1.4 oster void *arg;
135 1.4 oster RF_ReconDoneProc_t *next;
136 1.1 oster };
137 1.1 oster
138 1.13 oster /**************************************************************************
139 1.1 oster *
140 1.1 oster * sets up the parameters that will be used by the reconstruction process.
141 1.1 oster * Currently there are none, except for those that the layout-specific
142 1.1 oster * configuration (e.g. rf_ConfigureDeclustered) routine sets up.
143 1.1 oster *
144 1.1 oster * in the kernel, we fire off the recon thread.
145 1.1 oster *
146 1.13 oster **************************************************************************/
147 1.87 perry static void
148 1.95 christos rf_ShutdownReconstruction(void *ignored)
149 1.4 oster {
150 1.74 oster pool_destroy(&rf_pools.reconbuffer);
151 1.4 oster }
152 1.4 oster
153 1.87 perry int
154 1.60 oster rf_ConfigureReconstruction(RF_ShutdownList_t **listp)
155 1.4 oster {
156 1.4 oster
157 1.74 oster rf_pool_init(&rf_pools.reconbuffer, sizeof(RF_ReconBuffer_t),
158 1.74 oster "rf_reconbuffer_pl", RF_MIN_FREE_RECONBUFFER, RF_MAX_FREE_RECONBUFFER);
159 1.66 oster rf_ShutdownCreate(listp, rf_ShutdownReconstruction, NULL);
160 1.66 oster
161 1.4 oster return (0);
162 1.4 oster }
163 1.4 oster
164 1.4 oster static RF_RaidReconDesc_t *
165 1.87 perry AllocRaidReconDesc(RF_Raid_t *raidPtr, RF_RowCol_t col,
166 1.60 oster RF_RaidDisk_t *spareDiskPtr, int numDisksDone,
167 1.60 oster RF_RowCol_t scol)
168 1.1 oster {
169 1.1 oster
170 1.4 oster RF_RaidReconDesc_t *reconDesc;
171 1.4 oster
172 1.80 oster RF_Malloc(reconDesc, sizeof(RF_RaidReconDesc_t),
173 1.80 oster (RF_RaidReconDesc_t *));
174 1.4 oster reconDesc->raidPtr = raidPtr;
175 1.4 oster reconDesc->col = col;
176 1.4 oster reconDesc->spareDiskPtr = spareDiskPtr;
177 1.4 oster reconDesc->numDisksDone = numDisksDone;
178 1.4 oster reconDesc->scol = scol;
179 1.4 oster reconDesc->next = NULL;
180 1.1 oster
181 1.4 oster return (reconDesc);
182 1.1 oster }
183 1.1 oster
184 1.87 perry static void
185 1.60 oster FreeReconDesc(RF_RaidReconDesc_t *reconDesc)
186 1.1 oster {
187 1.1 oster #if RF_RECON_STATS > 0
188 1.50 oster printf("raid%d: %lu recon event waits, %lu recon delays\n",
189 1.50 oster reconDesc->raidPtr->raidid,
190 1.87 perry (long) reconDesc->numReconEventWaits,
191 1.50 oster (long) reconDesc->numReconExecDelays);
192 1.4 oster #endif /* RF_RECON_STATS > 0 */
193 1.50 oster printf("raid%d: %lu max exec ticks\n",
194 1.50 oster reconDesc->raidPtr->raidid,
195 1.50 oster (long) reconDesc->maxReconExecTicks);
196 1.80 oster RF_Free(reconDesc, sizeof(RF_RaidReconDesc_t));
197 1.1 oster }
198 1.1 oster
199 1.1 oster
200 1.13 oster /*****************************************************************************
201 1.1 oster *
202 1.1 oster * primary routine to reconstruct a failed disk. This should be called from
203 1.1 oster * within its own thread. It won't return until reconstruction completes,
204 1.1 oster * fails, or is aborted.
205 1.13 oster *****************************************************************************/
206 1.87 perry int
207 1.60 oster rf_ReconstructFailedDisk(RF_Raid_t *raidPtr, RF_RowCol_t col)
208 1.4 oster {
209 1.52 jdolecek const RF_LayoutSW_t *lp;
210 1.4 oster int rc;
211 1.4 oster
212 1.4 oster lp = raidPtr->Layout.map;
213 1.4 oster if (lp->SubmitReconBuffer) {
214 1.4 oster /*
215 1.4 oster * The current infrastructure only supports reconstructing one
216 1.4 oster * disk at a time for each array.
217 1.4 oster */
218 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
219 1.4 oster while (raidPtr->reconInProgress) {
220 1.113 mrg rf_wait_cond2(raidPtr->waitForReconCond, raidPtr->mutex);
221 1.4 oster }
222 1.4 oster raidPtr->reconInProgress++;
223 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
224 1.57 oster rc = rf_ReconstructFailedDiskBasic(raidPtr, col);
225 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
226 1.6 oster raidPtr->reconInProgress--;
227 1.4 oster } else {
228 1.4 oster RF_ERRORMSG1("RECON: no way to reconstruct failed disk for arch %c\n",
229 1.4 oster lp->parityConfig);
230 1.4 oster rc = EIO;
231 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
232 1.4 oster }
233 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
234 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
235 1.4 oster return (rc);
236 1.4 oster }
237 1.4 oster
238 1.87 perry int
239 1.60 oster rf_ReconstructFailedDiskBasic(RF_Raid_t *raidPtr, RF_RowCol_t col)
240 1.4 oster {
241 1.108 jld RF_ComponentLabel_t *c_label;
242 1.4 oster RF_RaidDisk_t *spareDiskPtr = NULL;
243 1.4 oster RF_RaidReconDesc_t *reconDesc;
244 1.57 oster RF_RowCol_t scol;
245 1.4 oster int numDisksDone = 0, rc;
246 1.4 oster
247 1.4 oster /* first look for a spare drive onto which to reconstruct the data */
248 1.4 oster /* spare disk descriptors are stored in row 0. This may have to
249 1.4 oster * change eventually */
250 1.4 oster
251 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
252 1.57 oster RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed);
253 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
254 1.4 oster if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
255 1.57 oster if (raidPtr->status != rf_rs_degraded) {
256 1.57 oster RF_ERRORMSG1("Unable to reconstruct disk at col %d because status not degraded\n", col);
257 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
258 1.4 oster return (EINVAL);
259 1.4 oster }
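		/* distributed sparing: there is no single dedicated spare column */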
260 1.4 oster scol = (-1);
261 1.4 oster } else {
262 1.72 oster #endif
263 1.4 oster for (scol = raidPtr->numCol; scol < raidPtr->numCol + raidPtr->numSpare; scol++) {
264 1.57 oster if (raidPtr->Disks[scol].status == rf_ds_spare) {
265 1.57 oster spareDiskPtr = &raidPtr->Disks[scol];
266 1.4 oster spareDiskPtr->status = rf_ds_used_spare;
267 1.4 oster break;
268 1.4 oster }
269 1.4 oster }
270 1.4 oster if (!spareDiskPtr) {
271 1.57 oster RF_ERRORMSG1("Unable to reconstruct disk at col %d because no spares are available\n", col);
272 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
273 1.4 oster return (ENOSPC);
274 1.4 oster }
275 1.57 oster printf("RECON: initiating reconstruction on col %d -> spare at col %d\n", col, scol);
276 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
277 1.4 oster }
278 1.72 oster #endif
279 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
280 1.1 oster
281 1.57 oster reconDesc = AllocRaidReconDesc((void *) raidPtr, col, spareDiskPtr, numDisksDone, scol);
282 1.4 oster raidPtr->reconDesc = (void *) reconDesc;
283 1.1 oster #if RF_RECON_STATS > 0
284 1.4 oster reconDesc->hsStallCount = 0;
285 1.4 oster reconDesc->numReconExecDelays = 0;
286 1.4 oster reconDesc->numReconEventWaits = 0;
287 1.4 oster #endif /* RF_RECON_STATS > 0 */
288 1.4 oster reconDesc->reconExecTimerRunning = 0;
289 1.4 oster reconDesc->reconExecTicks = 0;
290 1.4 oster reconDesc->maxReconExecTicks = 0;
291 1.4 oster rc = rf_ContinueReconstructFailedDisk(reconDesc);
292 1.5 oster
293 1.5 oster if (!rc) {
294 1.5 oster /* fix up the component label */
295 1.5 oster /* Don't actually need the read here.. */
296 1.108 jld c_label = raidget_component_label(raidPtr, scol);
297 1.108 jld
298 1.108 jld raid_init_component_label(raidPtr, c_label);
299 1.108 jld c_label->row = 0;
300 1.108 jld c_label->column = col;
301 1.108 jld c_label->clean = RF_RAID_DIRTY;
302 1.108 jld c_label->status = rf_ds_optimal;
303 1.111 enami rf_component_label_set_partitionsize(c_label,
304 1.111 enami raidPtr->Disks[scol].partitionSize);
305 1.15 oster
306 1.28 oster /* We've just done a rebuild based on all the other
307 1.28 oster disks, so at this point the parity is known to be
308 1.28 oster clean, even if it wasn't before. */
309 1.28 oster
310 1.28 oster /* XXX doesn't hold for RAID 6!!*/
311 1.28 oster
312 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
313 1.28 oster raidPtr->parity_good = RF_RAID_CLEAN;
314 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
315 1.28 oster
316 1.15 oster /* XXXX MORE NEEDED HERE */
317 1.87 perry
318 1.108 jld raidflush_component_label(raidPtr, scol);
319 1.82 oster } else {
320 1.82 oster /* Reconstruct failed. */
321 1.82 oster
322 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
323 1.82 oster /* Failed disk goes back to "failed" status */
324 1.82 oster raidPtr->Disks[col].status = rf_ds_failed;
325 1.82 oster
326 1.82 oster /* Spare disk goes back to "spare" status. */
327 1.82 oster spareDiskPtr->status = rf_ds_spare;
328 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
329 1.84 oster
330 1.5 oster }
331 1.84 oster rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
332 1.5 oster return (rc);
333 1.5 oster }
334 1.5 oster
335 1.87 perry /*
336 1.5 oster
337 1.5 oster Allow reconstructing a disk in-place -- i.e. component /dev/sd2e goes AWOL,
338 1.87 perry and you don't get a spare until the next Monday. With this function
339 1.87 perry (and hot-swappable drives) you can now put your new disk containing
340 1.5 oster /dev/sd2e on the bus, scsictl it alive, and then use raidctl(8) to
341 1.5 oster rebuild the data "on the spot".
342 1.5 oster
343 1.5 oster */
344 1.5 oster
345 1.5 oster int
346 1.60 oster rf_ReconstructInPlace(RF_Raid_t *raidPtr, RF_RowCol_t col)
347 1.5 oster {
348 1.5 oster RF_RaidDisk_t *spareDiskPtr = NULL;
349 1.5 oster RF_RaidReconDesc_t *reconDesc;
350 1.52 jdolecek const RF_LayoutSW_t *lp;
351 1.108 jld RF_ComponentLabel_t *c_label;
352 1.5 oster int numDisksDone = 0, rc;
353 1.116 oster uint64_t numsec;
354 1.116 oster unsigned int secsize;
355 1.110 dholland struct pathbuf *pb;
356 1.5 oster struct vnode *vp;
357 1.5 oster int retcode;
358 1.21 oster int ac;
359 1.5 oster
360 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
361 1.5 oster lp = raidPtr->Layout.map;
362 1.61 oster if (!lp->SubmitReconBuffer) {
363 1.61 oster RF_ERRORMSG1("RECON: no way to reconstruct failed disk for arch %c\n",
364 1.61 oster lp->parityConfig);
365 1.61 oster /* wakeup anyone who might be waiting to do a reconstruct */
366 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
367 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
368 1.61 oster return(EIO);
369 1.62 oster }
370 1.5 oster
371 1.62 oster /*
372 1.62 oster * The current infrastructure only supports reconstructing one
373 1.62 oster * disk at a time for each array.
374 1.62 oster */
375 1.5 oster
376 1.62 oster if (raidPtr->Disks[col].status != rf_ds_failed) {
377 1.62 oster /* "It's gone..." */
378 1.62 oster raidPtr->numFailures++;
379 1.62 oster raidPtr->Disks[col].status = rf_ds_failed;
380 1.62 oster raidPtr->status = rf_rs_degraded;
381 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
382 1.62 oster rf_update_component_labels(raidPtr,
383 1.62 oster RF_NORMAL_COMPONENT_UPDATE);
384 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
385 1.62 oster }
386 1.87 perry
387 1.62 oster while (raidPtr->reconInProgress) {
388 1.113 mrg rf_wait_cond2(raidPtr->waitForReconCond, raidPtr->mutex);
389 1.62 oster }
390 1.87 perry
391 1.62 oster raidPtr->reconInProgress++;
392 1.87 perry
393 1.62 oster /* first look for a spare drive onto which to reconstruct the
394 1.62 oster data. spare disk descriptors are stored in row 0. This
395 1.62 oster may have to change eventually */
396 1.87 perry
397 1.62 oster /* Actually, we don't care if it's failed or not... On a RAID
398 1.62 oster set with correct parity, this function should be callable
399 1.99 oster on any component without ill effects. */
400 1.62 oster /* RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed); */
401 1.87 perry
402 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
403 1.62 oster if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
404 1.62 oster RF_ERRORMSG1("Unable to reconstruct to disk at col %d: operation not supported for RF_DISTRIBUTE_SPARE\n", col);
405 1.87 perry
406 1.62 oster raidPtr->reconInProgress--;
407 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
408 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
409 1.62 oster return (EINVAL);
410 1.87 perry }
411 1.72 oster #endif
412 1.87 perry
413 1.87 perry /* This device may have been opened successfully the
414 1.62 oster first time. Close it before trying to open it again.. */
415 1.87 perry
416 1.62 oster if (raidPtr->raid_cinfo[col].ci_vp != NULL) {
417 1.37 oster #if 0
418 1.62 oster printf("Closed the open device: %s\n",
419 1.62 oster raidPtr->Disks[col].devname);
420 1.37 oster #endif
421 1.62 oster vp = raidPtr->raid_cinfo[col].ci_vp;
422 1.62 oster ac = raidPtr->Disks[col].auto_configured;
423 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
424 1.62 oster rf_close_component(raidPtr, vp, ac);
425 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
426 1.62 oster raidPtr->raid_cinfo[col].ci_vp = NULL;
427 1.62 oster }
428 1.62 oster /* note that this disk was *not* auto_configured (any longer)*/
429 1.62 oster raidPtr->Disks[col].auto_configured = 0;
430 1.87 perry
431 1.37 oster #if 0
432 1.62 oster printf("About to (re-)open the device for rebuilding: %s\n",
433 1.62 oster raidPtr->Disks[col].devname);
434 1.37 oster #endif
435 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
436 1.110 dholland pb = pathbuf_create(raidPtr->Disks[col].devname);
437 1.110 dholland if (pb == NULL) {
438 1.110 dholland retcode = ENOMEM;
439 1.110 dholland } else {
440 1.110 dholland retcode = dk_lookup(pb, curlwp, &vp);
441 1.110 dholland pathbuf_destroy(pb);
442 1.110 dholland }
443 1.87 perry
444 1.62 oster if (retcode) {
445 1.93 christos printf("raid%d: rebuilding: dk_lookup on device: %s failed: %d!\n",raidPtr->raidid,
446 1.62 oster raidPtr->Disks[col].devname, retcode);
447 1.87 perry
448 1.87 perry /* the component isn't responding properly...
449 1.62 oster must be still dead :-( */
450 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
451 1.62 oster raidPtr->reconInProgress--;
452 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
453 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
454 1.62 oster return(retcode);
455 1.63 oster }
456 1.63 oster
457 1.87 perry /* Ok, so we can at least do a lookup...
458 1.63 oster How about actually getting a vp for it? */
459 1.87 perry
460 1.116 oster retcode = getdisksize(vp, &numsec, &secsize);
461 1.63 oster if (retcode) {
462 1.115 yamt vn_close(vp, FREAD | FWRITE, kauth_cred_get());
463 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
464 1.63 oster raidPtr->reconInProgress--;
465 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
466 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
467 1.63 oster return(retcode);
468 1.62 oster }
469 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
470 1.116 oster raidPtr->Disks[col].blockSize = secsize;
471 1.116 oster raidPtr->Disks[col].numBlocks = numsec - rf_protectedSectors;
472 1.87 perry
473 1.63 oster raidPtr->raid_cinfo[col].ci_vp = vp;
474 1.120 hannken raidPtr->raid_cinfo[col].ci_dev = vp->v_rdev;
475 1.87 perry
476 1.120 hannken raidPtr->Disks[col].dev = vp->v_rdev;
477 1.87 perry
478 1.63 oster /* we allow the user to specify that only a fraction
479 1.63 oster of the disks should be used. This is just for debug:
480 1.63 oster it speeds up the parity scan */
481 1.63 oster raidPtr->Disks[col].numBlocks = raidPtr->Disks[col].numBlocks *
482 1.63 oster rf_sizePercentage / 100;
483 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
484 1.87 perry
485 1.62 oster spareDiskPtr = &raidPtr->Disks[col];
486 1.62 oster spareDiskPtr->status = rf_ds_used_spare;
487 1.87 perry
488 1.87 perry printf("raid%d: initiating in-place reconstruction on column %d\n",
489 1.62 oster raidPtr->raidid, col);
490 1.5 oster
491 1.87 perry reconDesc = AllocRaidReconDesc((void *) raidPtr, col, spareDiskPtr,
492 1.62 oster numDisksDone, col);
493 1.62 oster raidPtr->reconDesc = (void *) reconDesc;
494 1.5 oster #if RF_RECON_STATS > 0
495 1.62 oster reconDesc->hsStallCount = 0;
496 1.62 oster reconDesc->numReconExecDelays = 0;
497 1.62 oster reconDesc->numReconEventWaits = 0;
498 1.5 oster #endif /* RF_RECON_STATS > 0 */
499 1.62 oster reconDesc->reconExecTimerRunning = 0;
500 1.62 oster reconDesc->reconExecTicks = 0;
501 1.62 oster reconDesc->maxReconExecTicks = 0;
502 1.62 oster rc = rf_ContinueReconstructFailedDisk(reconDesc);
503 1.87 perry
504 1.5 oster if (!rc) {
505 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
506 1.5 oster /* Need to set these here, as at this point it'll be claiming
507 1.5 oster that the disk is in rf_ds_spared! But we know better :-) */
508 1.87 perry
509 1.57 oster raidPtr->Disks[col].status = rf_ds_optimal;
510 1.57 oster raidPtr->status = rf_rs_optimal;
511 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
512 1.87 perry
513 1.5 oster /* fix up the component label */
514 1.5 oster /* Don't actually need the read here.. */
515 1.108 jld c_label = raidget_component_label(raidPtr, col);
516 1.16 oster
517 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
518 1.108 jld raid_init_component_label(raidPtr, c_label);
519 1.16 oster
520 1.108 jld c_label->row = 0;
521 1.108 jld c_label->column = col;
522 1.28 oster
523 1.28 oster /* We've just done a rebuild based on all the other
524 1.28 oster disks, so at this point the parity is known to be
525 1.28 oster clean, even if it wasn't before. */
526 1.28 oster
527 1.28 oster /* XXX doesn't hold for RAID 6!!*/
528 1.28 oster
529 1.28 oster raidPtr->parity_good = RF_RAID_CLEAN;
530 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
531 1.87 perry
532 1.108 jld raidflush_component_label(raidPtr, col);
533 1.82 oster } else {
534 1.82 oster /* Reconstruct-in-place failed. Disk goes back to
535 1.82 oster "failed" status, regardless of what it was before. */
536 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
537 1.82 oster raidPtr->Disks[col].status = rf_ds_failed;
538 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
539 1.82 oster }
540 1.5 oster
541 1.84 oster rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
542 1.84 oster
543 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
544 1.82 oster raidPtr->reconInProgress--;
545 1.113 mrg rf_signal_cond2(raidPtr->waitForReconCond);
546 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
547 1.87 perry
548 1.4 oster return (rc);
549 1.4 oster }
550 1.4 oster
551 1.4 oster
552 1.87 perry int
553 1.60 oster rf_ContinueReconstructFailedDisk(RF_RaidReconDesc_t *reconDesc)
554 1.4 oster {
555 1.4 oster RF_Raid_t *raidPtr = reconDesc->raidPtr;
556 1.4 oster RF_RowCol_t col = reconDesc->col;
557 1.4 oster RF_RowCol_t scol = reconDesc->scol;
558 1.4 oster RF_ReconMap_t *mapPtr;
559 1.46 oster RF_ReconCtrl_t *tmp_reconctrl;
560 1.4 oster RF_ReconEvent_t *event;
561 1.104 oster RF_StripeCount_t incPSID,lastPSID,num_writes,pending_writes,prev;
562 1.118 oster #if RF_INCLUDE_RAID5_RS > 0
563 1.118 oster RF_StripeCount_t startPSID,endPSID,aPSID,bPSID,offPSID;
564 1.118 oster #endif
565 1.104 oster RF_ReconUnitCount_t RUsPerPU;
566 1.4 oster struct timeval etime, elpsd;
567 1.4 oster unsigned long xor_s, xor_resid_us;
568 1.54 simonb int i, ds;
569 1.104 oster int status, done;
570 1.82 oster int recon_error, write_error;
571 1.4 oster
572 1.78 oster raidPtr->accumXorTimeUs = 0;
573 1.67 oster #if RF_ACC_TRACE > 0
574 1.78 oster /* create one trace record per physical disk */
575 1.78 oster RF_Malloc(raidPtr->recon_tracerecs, raidPtr->numCol * sizeof(RF_AccTraceEntry_t), (RF_AccTraceEntry_t *));
576 1.67 oster #endif
577 1.87 perry
578 1.78 oster /* quiesce the array prior to starting recon. this is needed
579 1.78 oster * to assure no nasty interactions with pending user writes.
580 1.78 oster * We need to do this before we change the disk or row status. */
581 1.87 perry
582 1.78 oster Dprintf("RECON: begin request suspend\n");
583 1.78 oster rf_SuspendNewRequestsAndWait(raidPtr);
584 1.78 oster Dprintf("RECON: end request suspend\n");
585 1.87 perry
586 1.78 oster /* allocate our RF_ReconCtrl_t before we protect raidPtr->reconControl */
587 1.78 oster tmp_reconctrl = rf_MakeReconControl(reconDesc, col, scol);
588 1.87 perry
589 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
590 1.87 perry
591 1.78 oster /* create the reconstruction control pointer and install it in
592 1.78 oster * the right slot */
593 1.78 oster raidPtr->reconControl = tmp_reconctrl;
594 1.78 oster mapPtr = raidPtr->reconControl->reconMap;
595 1.88 oster raidPtr->reconControl->numRUsTotal = mapPtr->totalRUs;
596 1.88 oster raidPtr->reconControl->numRUsComplete = 0;
597 1.78 oster raidPtr->status = rf_rs_reconstructing;
598 1.78 oster raidPtr->Disks[col].status = rf_ds_reconstructing;
599 1.78 oster raidPtr->Disks[col].spareCol = scol;
600 1.87 perry
601 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
602 1.87 perry
603 1.78 oster RF_GETTIME(raidPtr->reconControl->starttime);
604 1.87 perry
605 1.78 oster Dprintf("RECON: resume requests\n");
606 1.78 oster rf_ResumeNewRequests(raidPtr);
607 1.87 perry
608 1.4 oster
609 1.78 oster mapPtr = raidPtr->reconControl->reconMap;
610 1.104 oster
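	/* Reconstruction proceeds in chunks of up to RF_RECONMAP_SIZE parity
	   stripes: for each chunk we issue reads on all surviving columns and
	   then wait for the corresponding writes to complete before advancing
	   lastPSID to the next chunk. */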
611 1.104 oster incPSID = RF_RECONMAP_SIZE;
612 1.104 oster lastPSID = raidPtr->Layout.numStripe / raidPtr->Layout.SUsPerPU;
613 1.104 oster RUsPerPU = raidPtr->Layout.SUsPerPU / raidPtr->Layout.SUsPerRU;
614 1.82 oster recon_error = 0;
615 1.82 oster write_error = 0;
616 1.104 oster pending_writes = incPSID;
617 1.118 oster raidPtr->reconControl->lastPSID = incPSID - 1;
618 1.118 oster
619 1.118 oster /* bounds check raidPtr->reconControl->lastPSID and
620 1.118 oster pending_writes so that we don't attempt to wait for more IO
621 1.118 oster than can possibly happen */
622 1.118 oster
623 1.118 oster if (raidPtr->reconControl->lastPSID > lastPSID)
624 1.118 oster raidPtr->reconControl->lastPSID = lastPSID;
625 1.118 oster
626 1.118 oster if (pending_writes > lastPSID)
627 1.118 oster pending_writes = lastPSID;
628 1.104 oster
629 1.104 oster /* start the actual reconstruction */
630 1.82 oster
631 1.104 oster done = 0;
632 1.104 oster while (!done) {
633 1.104 oster
634 1.106 oster if (raidPtr->waitShutdown) {
635 1.106 oster /* someone is unconfiguring this array... bail on the reconstruct.. */
636 1.106 oster recon_error = 1;
637 1.106 oster break;
638 1.106 oster }
639 1.106 oster
640 1.104 oster num_writes = 0;
641 1.118 oster
642 1.118 oster #if RF_INCLUDE_RAID5_RS > 0
643 1.118 oster /* For RAID5 with Rotated Spares we will be 'short'
644 1.118 oster some number of writes since no writes will get
645 1.118 oster issued for stripes where the spare is on the
646 1.118 oster component being rebuilt. Account for the shortage
647 1.118 oster here so that we don't hang indefinitely below
648 1.118 oster waiting for writes to complete that were never
649 1.118 oster scheduled.
650 1.118 oster
651 1.118 oster XXX: Should be fixed for PARITY_DECLUSTERING and
652 1.118 oster others too!
653 1.118 oster
654 1.118 oster */
655 1.118 oster
656 1.118 oster if (raidPtr->Layout.numDataCol <
657 1.118 oster raidPtr->numCol - raidPtr->Layout.numParityCol) {
658 1.118 oster /* numDataCol is at least 2 less than numCol, so
659 1.118 oster should be RAID 5 with Rotated Spares */
660 1.118 oster
661 1.118 oster /* XXX need to update for RAID 6 */
662 1.118 oster
663 1.118 oster startPSID = raidPtr->reconControl->lastPSID - pending_writes + 1;
664 1.118 oster endPSID = raidPtr->reconControl->lastPSID;
665 1.118 oster
666 1.118 oster offPSID = raidPtr->numCol - col - 1;
667 1.118 oster
668 1.118 oster aPSID = startPSID - startPSID % raidPtr->numCol + offPSID;
669 1.118 oster if (aPSID < startPSID) {
670 1.118 oster aPSID += raidPtr->numCol;
671 1.118 oster }
672 1.118 oster
673 1.118 oster bPSID = endPSID - ((endPSID - offPSID) % raidPtr->numCol);
674 1.118 oster
675 1.118 oster if (aPSID < endPSID) {
676 1.118 oster num_writes = ((bPSID - aPSID) / raidPtr->numCol) + 1;
677 1.118 oster }
678 1.118 oster
679 1.118 oster if ((aPSID == endPSID) && (bPSID == endPSID)) {
680 1.118 oster num_writes++;
681 1.118 oster }
682 1.118 oster }
683 1.118 oster #endif
684 1.104 oster
685 1.104 oster /* issue a read for each surviving disk */
686 1.104 oster
687 1.104 oster reconDesc->numDisksDone = 0;
688 1.104 oster for (i = 0; i < raidPtr->numCol; i++) {
689 1.104 oster if (i != col) {
690 1.104 oster /* find and issue the next I/O on the
691 1.104 oster * indicated disk */
692 1.104 oster if (IssueNextReadRequest(raidPtr, i)) {
693 1.104 oster Dprintf1("RECON: done issuing for c%d\n", i);
694 1.104 oster reconDesc->numDisksDone++;
695 1.104 oster }
696 1.104 oster }
697 1.104 oster }
698 1.87 perry
699 1.104 oster /* process reconstruction events until all disks report that
700 1.104 oster * they've completed all work */
701 1.82 oster
702 1.104 oster while (reconDesc->numDisksDone < raidPtr->numCol - 1) {
703 1.82 oster
704 1.104 oster event = rf_GetNextReconEvent(reconDesc);
705 1.104 oster status = ProcessReconEvent(raidPtr, event);
706 1.104 oster
707 1.104 oster /* the normal case is that a read completes, and all is well. */
708 1.104 oster if (status == RF_RECON_DONE_READS) {
709 1.104 oster reconDesc->numDisksDone++;
710 1.104 oster } else if ((status == RF_RECON_READ_ERROR) ||
711 1.104 oster (status == RF_RECON_WRITE_ERROR)) {
712 1.104 oster /* an error was encountered while reconstructing...
713 1.104 oster Pretend we've finished this disk.
714 1.104 oster */
715 1.104 oster recon_error = 1;
716 1.104 oster raidPtr->reconControl->error = 1;
717 1.104 oster
718 1.104 oster /* bump the numDisksDone count for reads,
719 1.104 oster but not for writes */
720 1.104 oster if (status == RF_RECON_READ_ERROR)
721 1.104 oster reconDesc->numDisksDone++;
722 1.104 oster
723 1.104 oster /* write errors are special -- when we are
724 1.104 oster done dealing with the reads that are
725 1.104 oster finished, we don't want to wait for any
726 1.104 oster writes */
727 1.107 oster if (status == RF_RECON_WRITE_ERROR) {
728 1.104 oster write_error = 1;
729 1.107 oster num_writes++;
730 1.107 oster }
731 1.104 oster
732 1.104 oster } else if (status == RF_RECON_READ_STOPPED) {
733 1.104 oster /* count this component as being "done" */
734 1.82 oster reconDesc->numDisksDone++;
735 1.104 oster } else if (status == RF_RECON_WRITE_DONE) {
736 1.104 oster num_writes++;
737 1.104 oster }
738 1.104 oster
739 1.104 oster if (recon_error) {
740 1.104 oster /* make sure any stragglers are woken up so that
741 1.104 oster their threads will complete, and we can get out
742 1.104 oster of here with all IO processed */
743 1.104 oster
744 1.104 oster rf_WakeupHeadSepCBWaiters(raidPtr);
745 1.104 oster }
746 1.104 oster
747 1.104 oster raidPtr->reconControl->numRUsTotal =
748 1.104 oster mapPtr->totalRUs;
749 1.104 oster raidPtr->reconControl->numRUsComplete =
750 1.104 oster mapPtr->totalRUs -
751 1.104 oster rf_UnitsLeftToReconstruct(mapPtr);
752 1.82 oster
753 1.104 oster #if RF_DEBUG_RECON
754 1.104 oster raidPtr->reconControl->percentComplete =
755 1.104 oster (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
756 1.104 oster if (rf_prReconSched) {
757 1.104 oster rf_PrintReconSchedule(raidPtr->reconControl->reconMap, &(raidPtr->reconControl->starttime));
758 1.82 oster }
759 1.104 oster #endif
760 1.82 oster }
761 1.82 oster
762 1.118 oster /* reads done; wake up any waiters, and then wait for the writes */
763 1.82 oster
764 1.104 oster rf_WakeupHeadSepCBWaiters(raidPtr);
765 1.104 oster
766 1.104 oster while (!recon_error && (num_writes < pending_writes)) {
767 1.104 oster event = rf_GetNextReconEvent(reconDesc);
768 1.104 oster status = ProcessReconEvent(raidPtr, event);
769 1.104 oster
770 1.104 oster if (status == RF_RECON_WRITE_ERROR) {
771 1.107 oster num_writes++;
772 1.104 oster recon_error = 1;
773 1.104 oster raidPtr->reconControl->error = 1;
774 1.104 oster /* an error was encountered at the very end... bail */
775 1.104 oster } else if (status == RF_RECON_WRITE_DONE) {
776 1.104 oster num_writes++;
777 1.107 oster } /* else it's something else, and we don't care */
778 1.104 oster }
779 1.104 oster if (recon_error ||
780 1.104 oster (raidPtr->reconControl->lastPSID == lastPSID)) {
781 1.104 oster done = 1;
782 1.104 oster break;
783 1.104 oster }
784 1.104 oster
785 1.104 oster prev = raidPtr->reconControl->lastPSID;
786 1.104 oster raidPtr->reconControl->lastPSID += incPSID;
787 1.104 oster
788 1.104 oster if (raidPtr->reconControl->lastPSID > lastPSID) {
789 1.104 oster pending_writes = lastPSID - prev;
790 1.104 oster raidPtr->reconControl->lastPSID = lastPSID;
791 1.104 oster }
792 1.104 oster
793 1.104 oster /* back down curPSID to get ready for the next round... */
794 1.104 oster for (i = 0; i < raidPtr->numCol; i++) {
795 1.104 oster if (i != col) {
796 1.104 oster raidPtr->reconControl->perDiskInfo[i].curPSID--;
797 1.104 oster raidPtr->reconControl->perDiskInfo[i].ru_count = RUsPerPU - 1;
798 1.104 oster }
799 1.78 oster }
800 1.78 oster }
801 1.87 perry
802 1.78 oster mapPtr = raidPtr->reconControl->reconMap;
803 1.78 oster if (rf_reconDebug) {
804 1.78 oster printf("RECON: all reads completed\n");
805 1.78 oster }
806 1.78 oster /* at this point all the reads have completed. We now wait
807 1.78 oster * for any pending writes to complete, and then we're done */
808 1.82 oster
809 1.82 oster while (!recon_error && rf_UnitsLeftToReconstruct(raidPtr->reconControl->reconMap) > 0) {
810 1.87 perry
811 1.78 oster event = rf_GetNextReconEvent(reconDesc);
812 1.83 oster status = ProcessReconEvent(raidPtr, event);
813 1.82 oster
814 1.82 oster if (status == RF_RECON_WRITE_ERROR) {
815 1.82 oster recon_error = 1;
816 1.87 perry raidPtr->reconControl->error = 1;
817 1.82 oster /* an error was encountered at the very end... bail */
818 1.82 oster } else {
819 1.82 oster #if RF_DEBUG_RECON
820 1.82 oster raidPtr->reconControl->percentComplete = 100 - (rf_UnitsLeftToReconstruct(mapPtr) * 100 / mapPtr->totalRUs);
821 1.82 oster if (rf_prReconSched) {
822 1.82 oster rf_PrintReconSchedule(raidPtr->reconControl->reconMap, &(raidPtr->reconControl->starttime));
823 1.82 oster }
824 1.82 oster #endif
825 1.82 oster }
826 1.82 oster }
827 1.82 oster
828 1.82 oster if (recon_error) {
829 1.82 oster /* we've encountered an error in reconstructing. */
830 1.82 oster printf("raid%d: reconstruction failed.\n", raidPtr->raidid);
831 1.87 perry
832 1.82 oster /* we start by blocking IO to the RAID set. */
833 1.82 oster rf_SuspendNewRequestsAndWait(raidPtr);
834 1.87 perry
835 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
836 1.82 oster /* mark set as being degraded, rather than
837 1.82 oster rf_rs_reconstructing as we were before the problem.
838 1.82 oster After this is done we can update status of the
839 1.82 oster component disks without worrying about someone
840 1.82 oster trying to read from a failed component.
841 1.82 oster */
842 1.82 oster raidPtr->status = rf_rs_degraded;
843 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
844 1.87 perry
845 1.82 oster /* resume IO */
846 1.87 perry rf_ResumeNewRequests(raidPtr);
847 1.87 perry
848 1.82 oster /* At this point there are two cases:
849 1.82 oster 1) If we've experienced a read error, then we've
850 1.82 oster already waited for all the reads we're going to get,
851 1.82 oster and we just need to wait for the writes.
852 1.82 oster
853 1.82 oster 2) If we've experienced a write error, we've also
854 1.82 oster already waited for all the reads to complete,
855 1.82 oster but there is little point in waiting for the writes --
856 1.82 oster when they do complete, they will just be ignored.
857 1.82 oster
858 1.87 perry So we just wait for writes to complete if we didn't have a
859 1.82 oster write error.
860 1.82 oster */
861 1.82 oster
862 1.82 oster if (!write_error) {
863 1.82 oster /* wait for writes to complete */
864 1.82 oster while (raidPtr->reconControl->pending_writes > 0) {
865 1.83 oster
866 1.82 oster event = rf_GetNextReconEvent(reconDesc);
867 1.82 oster status = ProcessReconEvent(raidPtr, event);
868 1.82 oster
869 1.82 oster if (status == RF_RECON_WRITE_ERROR) {
870 1.87 perry raidPtr->reconControl->error = 1;
871 1.82 oster /* an error was encountered at the very end... bail.
872 1.82 oster This will be very bad news for the user, since
873 1.82 oster at this point there will have been a read error
874 1.82 oster on one component, and a write error on another!
875 1.82 oster */
876 1.82 oster break;
877 1.82 oster }
878 1.82 oster }
879 1.4 oster }
880 1.82 oster
881 1.87 perry
882 1.82 oster /* cleanup */
883 1.82 oster
884 1.82 oster /* drain the event queue - after waiting for the writes above,
885 1.82 oster there shouldn't be much (if anything!) left in the queue. */
886 1.82 oster
887 1.82 oster rf_DrainReconEventQueue(reconDesc);
888 1.87 perry
889 1.82 oster /* XXX As much as we'd like to free the recon control structure
890 1.82 oster and the reconDesc, we have no way of knowing if/when those will
891 1.82 oster be touched by IO that has yet to occur. It is rather poor to be
892 1.82 oster basically causing a 'memory leak' here, but there doesn't seem to be
893 1.82 oster a cleaner alternative at this time. Perhaps when the reconstruct code
894 1.82 oster gets a makeover this problem will go away.
895 1.82 oster */
896 1.82 oster #if 0
897 1.82 oster rf_FreeReconControl(raidPtr);
898 1.82 oster #endif
899 1.82 oster
900 1.82 oster #if RF_ACC_TRACE > 0
901 1.82 oster RF_Free(raidPtr->recon_tracerecs, raidPtr->numCol * sizeof(RF_AccTraceEntry_t));
902 1.41 oster #endif
903 1.82 oster /* XXX see comment above */
904 1.82 oster #if 0
905 1.82 oster FreeReconDesc(reconDesc);
906 1.82 oster #endif
907 1.82 oster
908 1.82 oster return (1);
909 1.78 oster }
910 1.14 oster
911 1.78 oster /* Success: mark the dead disk as reconstructed. We quiesce
912 1.78 oster * the array here to assure no nasty interactions with pending
913 1.78 oster * user accesses when we free up the psstatus structure as
914 1.78 oster * part of FreeReconControl() */
915 1.87 perry
916 1.78 oster rf_SuspendNewRequestsAndWait(raidPtr);
917 1.87 perry
918 1.113 mrg rf_lock_mutex2(raidPtr->mutex);
919 1.78 oster raidPtr->numFailures--;
920 1.78 oster ds = (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE);
921 1.78 oster raidPtr->Disks[col].status = (ds) ? rf_ds_dist_spared : rf_ds_spared;
922 1.78 oster raidPtr->status = (ds) ? rf_rs_reconfigured : rf_rs_optimal;
923 1.113 mrg rf_unlock_mutex2(raidPtr->mutex);
924 1.78 oster RF_GETTIME(etime);
925 1.78 oster RF_TIMEVAL_DIFF(&(raidPtr->reconControl->starttime), &etime, &elpsd);
926 1.87 perry
927 1.78 oster rf_ResumeNewRequests(raidPtr);
928 1.87 perry
929 1.87 perry printf("raid%d: Reconstruction of disk at col %d completed\n",
930 1.78 oster raidPtr->raidid, col);
931 1.78 oster xor_s = raidPtr->accumXorTimeUs / 1000000;
932 1.78 oster xor_resid_us = raidPtr->accumXorTimeUs % 1000000;
933 1.78 oster printf("raid%d: Recon time was %d.%06d seconds, accumulated XOR time was %ld us (%ld.%06ld)\n",
934 1.87 perry raidPtr->raidid,
935 1.87 perry (int) elpsd.tv_sec, (int) elpsd.tv_usec,
936 1.78 oster raidPtr->accumXorTimeUs, xor_s, xor_resid_us);
937 1.78 oster printf("raid%d: (start time %d sec %d usec, end time %d sec %d usec)\n",
938 1.78 oster raidPtr->raidid,
939 1.78 oster (int) raidPtr->reconControl->starttime.tv_sec,
940 1.78 oster (int) raidPtr->reconControl->starttime.tv_usec,
941 1.78 oster (int) etime.tv_sec, (int) etime.tv_usec);
942 1.1 oster #if RF_RECON_STATS > 0
943 1.78 oster printf("raid%d: Total head-sep stall count was %d\n",
944 1.78 oster raidPtr->raidid, (int) reconDesc->hsStallCount);
945 1.4 oster #endif /* RF_RECON_STATS > 0 */
946 1.78 oster rf_FreeReconControl(raidPtr);
947 1.67 oster #if RF_ACC_TRACE > 0
948 1.78 oster RF_Free(raidPtr->recon_tracerecs, raidPtr->numCol * sizeof(RF_AccTraceEntry_t));
949 1.67 oster #endif
950 1.78 oster FreeReconDesc(reconDesc);
951 1.87 perry
952 1.4 oster return (0);
953 1.82 oster
954 1.1 oster }
955 1.13 oster /*****************************************************************************
956 1.1 oster * handle a single reconstruction event and return one of the RF_RECON_*
 * status codes (or zero if there is nothing further to report).
957 1.13 oster *****************************************************************************/
958 1.87 perry static int
959 1.60 oster ProcessReconEvent(RF_Raid_t *raidPtr, RF_ReconEvent_t *event)
960 1.4 oster {
961 1.4 oster int retcode = 0, submitblocked;
962 1.4 oster RF_ReconBuffer_t *rbuf;
963 1.4 oster RF_SectorCount_t sectorsPerRU;
964 1.4 oster
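	/* default: if an error has already been flagged and the event is
	   ignored below, the component is treated as having stopped reading */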
965 1.82 oster retcode = RF_RECON_READ_STOPPED;
966 1.82 oster
967 1.4 oster Dprintf1("RECON: ProcessReconEvent type %d\n", event->type);
968 1.104 oster
969 1.4 oster switch (event->type) {
970 1.4 oster
971 1.4 oster /* a read I/O has completed */
972 1.4 oster case RF_REVENT_READDONE:
973 1.57 oster rbuf = raidPtr->reconControl->perDiskInfo[event->col].rbuf;
974 1.57 oster Dprintf2("RECON: READDONE EVENT: col %d psid %ld\n",
975 1.57 oster event->col, rbuf->parityStripeID);
976 1.4 oster Dprintf7("RECON: done read psid %ld buf %lx %02x %02x %02x %02x %02x\n",
977 1.4 oster rbuf->parityStripeID, rbuf->buffer, rbuf->buffer[0] & 0xff, rbuf->buffer[1] & 0xff,
978 1.4 oster rbuf->buffer[2] & 0xff, rbuf->buffer[3] & 0xff, rbuf->buffer[4] & 0xff);
979 1.4 oster rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
980 1.82 oster if (!raidPtr->reconControl->error) {
981 1.82 oster submitblocked = rf_SubmitReconBuffer(rbuf, 0, 0);
982 1.82 oster Dprintf1("RECON: submitblocked=%d\n", submitblocked);
983 1.82 oster if (!submitblocked)
984 1.82 oster retcode = IssueNextReadRequest(raidPtr, event->col);
985 1.89 oster else
986 1.89 oster retcode = 0;
987 1.82 oster }
988 1.4 oster break;
989 1.4 oster
990 1.4 oster /* a write I/O has completed */
991 1.4 oster case RF_REVENT_WRITEDONE:
992 1.40 oster #if RF_DEBUG_RECON
993 1.4 oster if (rf_floatingRbufDebug) {
994 1.4 oster rf_CheckFloatingRbufCount(raidPtr, 1);
995 1.4 oster }
996 1.38 oster #endif
997 1.4 oster sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU;
998 1.4 oster rbuf = (RF_ReconBuffer_t *) event->arg;
999 1.4 oster rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
1000 1.4 oster Dprintf3("RECON: WRITEDONE EVENT: psid %d ru %d (%d %% complete)\n",
1001 1.57 oster rbuf->parityStripeID, rbuf->which_ru, raidPtr->reconControl->percentComplete);
1002 1.57 oster rf_ReconMapUpdate(raidPtr, raidPtr->reconControl->reconMap,
1003 1.4 oster rbuf->failedDiskSectorOffset, rbuf->failedDiskSectorOffset + sectorsPerRU - 1);
1004 1.57 oster rf_RemoveFromActiveReconTable(raidPtr, rbuf->parityStripeID, rbuf->which_ru);
1005 1.4 oster
1006 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1007 1.82 oster raidPtr->reconControl->pending_writes--;
1008 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1009 1.82 oster
1010 1.4 oster if (rbuf->type == RF_RBUF_TYPE_FLOATING) {
1011 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1012 1.76 oster while(raidPtr->reconControl->rb_lock) {
1013 1.112 mrg rf_wait_cond2(raidPtr->reconControl->rb_cv,
1014 1.112 mrg raidPtr->reconControl->rb_mutex);
1015 1.76 oster }
1016 1.76 oster raidPtr->reconControl->rb_lock = 1;
1017 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1018 1.76 oster
1019 1.4 oster raidPtr->numFullReconBuffers--;
1020 1.57 oster rf_ReleaseFloatingReconBuffer(raidPtr, rbuf);
1021 1.76 oster
1022 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1023 1.76 oster raidPtr->reconControl->rb_lock = 0;
1024 1.112 mrg rf_broadcast_cond2(raidPtr->reconControl->rb_cv);
1025 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1026 1.4 oster } else
1027 1.4 oster if (rbuf->type == RF_RBUF_TYPE_FORCED)
1028 1.4 oster rf_FreeReconBuffer(rbuf);
1029 1.4 oster else
1030 1.4 oster RF_ASSERT(0);
1031 1.104 oster retcode = RF_RECON_WRITE_DONE;
1032 1.4 oster break;
1033 1.4 oster
1034 1.4 oster case RF_REVENT_BUFCLEAR: /* A buffer-stall condition has been
1035 1.4 oster * cleared */
1036 1.57 oster Dprintf1("RECON: BUFCLEAR EVENT: col %d\n", event->col);
1037 1.82 oster if (!raidPtr->reconControl->error) {
1038 1.87 perry submitblocked = rf_SubmitReconBuffer(raidPtr->reconControl->perDiskInfo[event->col].rbuf,
1039 1.82 oster 0, (int) (long) event->arg);
1040 1.82 oster RF_ASSERT(!submitblocked); /* we wouldn't have gotten the
1041 1.82 oster * BUFCLEAR event if we
1042 1.82 oster * couldn't submit */
1043 1.82 oster retcode = IssueNextReadRequest(raidPtr, event->col);
1044 1.82 oster }
1045 1.4 oster break;
1046 1.4 oster
1047 1.4 oster case RF_REVENT_BLOCKCLEAR: /* A user-write reconstruction
1048 1.4 oster * blockage has been cleared */
1049 1.57 oster DDprintf1("RECON: BLOCKCLEAR EVENT: col %d\n", event->col);
1050 1.82 oster if (!raidPtr->reconControl->error) {
1051 1.82 oster retcode = TryToRead(raidPtr, event->col);
1052 1.82 oster }
1053 1.4 oster break;
1054 1.4 oster
1055 1.4 oster case RF_REVENT_HEADSEPCLEAR: /* A max-head-separation
1056 1.4 oster * reconstruction blockage has been
1057 1.4 oster * cleared */
1058 1.57 oster Dprintf1("RECON: HEADSEPCLEAR EVENT: col %d\n", event->col);
1059 1.82 oster if (!raidPtr->reconControl->error) {
1060 1.82 oster retcode = TryToRead(raidPtr, event->col);
1061 1.82 oster }
1062 1.4 oster break;
1063 1.4 oster
1064 1.4 oster /* a buffer has become ready to write */
1065 1.4 oster case RF_REVENT_BUFREADY:
1066 1.57 oster Dprintf1("RECON: BUFREADY EVENT: col %d\n", event->col);
1067 1.82 oster if (!raidPtr->reconControl->error) {
1068 1.82 oster retcode = IssueNextWriteRequest(raidPtr);
1069 1.40 oster #if RF_DEBUG_RECON
1070 1.82 oster if (rf_floatingRbufDebug) {
1071 1.82 oster rf_CheckFloatingRbufCount(raidPtr, 1);
1072 1.82 oster }
1073 1.82 oster #endif
1074 1.4 oster }
1075 1.4 oster break;
1076 1.4 oster
1077 1.4 oster /* we need to skip the current RU entirely because it got
1078 1.4 oster * recon'd while we were waiting for something else to happen */
1079 1.4 oster case RF_REVENT_SKIP:
1080 1.57 oster DDprintf1("RECON: SKIP EVENT: col %d\n", event->col);
1081 1.87 perry if (!raidPtr->reconControl->error) {
1082 1.82 oster retcode = IssueNextReadRequest(raidPtr, event->col);
1083 1.82 oster }
1084 1.4 oster break;
1085 1.4 oster
1086 1.4 oster /* a forced-reconstruction read access has completed. Just
1087 1.4 oster * submit the buffer */
1088 1.4 oster case RF_REVENT_FORCEDREADDONE:
1089 1.4 oster rbuf = (RF_ReconBuffer_t *) event->arg;
1090 1.4 oster rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
1091 1.57 oster DDprintf1("RECON: FORCEDREADDONE EVENT: col %d\n", event->col);
1092 1.82 oster if (!raidPtr->reconControl->error) {
1093 1.82 oster submitblocked = rf_SubmitReconBuffer(rbuf, 1, 0);
1094 1.82 oster RF_ASSERT(!submitblocked);
1095 1.103 oster retcode = 0;
1096 1.82 oster }
1097 1.4 oster break;
1098 1.4 oster
1099 1.70 oster /* A read I/O failed to complete */
1100 1.70 oster case RF_REVENT_READ_FAILED:
1101 1.82 oster retcode = RF_RECON_READ_ERROR;
1102 1.82 oster break;
1103 1.70 oster
1104 1.70 oster /* A write I/O failed to complete */
1105 1.70 oster case RF_REVENT_WRITE_FAILED:
1106 1.82 oster retcode = RF_RECON_WRITE_ERROR;
1107 1.82 oster
1108 1.107 oster /* This is an error, but it was a pending write.
1109 1.107 oster Account for it. */
1110 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1111 1.107 oster raidPtr->reconControl->pending_writes--;
1112 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1113 1.107 oster
1114 1.82 oster rbuf = (RF_ReconBuffer_t *) event->arg;
1115 1.82 oster
1116 1.82 oster /* cleanup the disk queue data */
1117 1.82 oster rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
1118 1.82 oster
1119 1.82 oster /* At this point we're erroring out, badly, and floatingRbufs
1120 1.82 oster may not even be valid. Rather than putting this back onto
1121 1.82 oster the floatingRbufs list, just arrange for its immediate
1122 1.82 oster destruction.
1123 1.82 oster */
1124 1.82 oster rf_FreeReconBuffer(rbuf);
1125 1.82 oster break;
1126 1.70 oster
1127 1.70 oster /* a forced read I/O failed to complete */
1128 1.70 oster case RF_REVENT_FORCEDREAD_FAILED:
1129 1.82 oster retcode = RF_RECON_READ_ERROR;
1130 1.82 oster break;
1131 1.70 oster
1132 1.4 oster default:
1133 1.4 oster RF_PANIC();
1134 1.4 oster }
1135 1.4 oster rf_FreeReconEventDesc(event);
1136 1.4 oster return (retcode);
1137 1.1 oster }
1138 1.13 oster /*****************************************************************************
1139 1.1 oster *
1140 1.13 oster * find the next thing that's needed on the indicated disk, and issue
1141 1.13 oster * a read request for it. We assume that the reconstruction buffer
1142 1.13 oster * associated with this process is free to receive the data. If
1143 1.13 oster * reconstruction is blocked on the indicated RU, we issue a
1144 1.13 oster * blockage-release request instead of a physical disk read request.
1145 1.13 oster * If the current disk gets too far ahead of the others, we issue a
1146 1.13 oster * head-separation wait request and return.
1147 1.13 oster *
1148 1.13 oster * ctrl->{ru_count, curPSID, diskOffset} and
1149 1.22 soren * rbuf->failedDiskSectorOffset are maintained to point to the unit
1150 1.13 oster * we're currently accessing. Note that this deviates from the
1151 1.13 oster * standard C idiom of having counters point to the next thing to be
1152 1.13 oster * accessed. This allows us to easily retry when we're blocked by
1153 1.13 oster * head separation or reconstruction-blockage events.
1154 1.1 oster *
1155 1.13 oster *****************************************************************************/
1156 1.87 perry static int
1157 1.60 oster IssueNextReadRequest(RF_Raid_t *raidPtr, RF_RowCol_t col)
1158 1.4 oster {
1159 1.57 oster RF_PerDiskReconCtrl_t *ctrl = &raidPtr->reconControl->perDiskInfo[col];
1160 1.4 oster RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
1161 1.4 oster RF_ReconBuffer_t *rbuf = ctrl->rbuf;
1162 1.4 oster RF_ReconUnitCount_t RUsPerPU = layoutPtr->SUsPerPU / layoutPtr->SUsPerRU;
1163 1.4 oster RF_SectorCount_t sectorsPerRU = layoutPtr->sectorsPerStripeUnit * layoutPtr->SUsPerRU;
1164 1.4 oster int do_new_check = 0, retcode = 0, status;
1165 1.4 oster
1166 1.4 oster /* if we are currently the slowest disk, mark that we have to do a new
1167 1.4 oster * check */
1168 1.57 oster if (ctrl->headSepCounter <= raidPtr->reconControl->minHeadSepCounter)
1169 1.4 oster do_new_check = 1;
1170 1.4 oster
1171 1.4 oster while (1) {
1172 1.4 oster
1173 1.4 oster ctrl->ru_count++;
1174 1.4 oster if (ctrl->ru_count < RUsPerPU) {
1175 1.4 oster ctrl->diskOffset += sectorsPerRU;
1176 1.4 oster rbuf->failedDiskSectorOffset += sectorsPerRU;
1177 1.4 oster } else {
1178 1.4 oster ctrl->curPSID++;
1179 1.4 oster ctrl->ru_count = 0;
1180 1.4 oster /* code left over from when head-sep was based on
1181 1.4 oster * parity stripe id */
1182 1.118 oster if (ctrl->curPSID > raidPtr->reconControl->lastPSID) {
1183 1.57 oster CheckForNewMinHeadSep(raidPtr, ++(ctrl->headSepCounter));
1184 1.82 oster return (RF_RECON_DONE_READS); /* finito! */
1185 1.4 oster }
1186 1.4 oster /* find the disk offsets of the start of the parity
1187 1.4 oster * stripe on both the current disk and the failed
1188 1.4 oster * disk. skip this entire parity stripe if either disk
1189 1.4 oster * does not appear in the indicated PS */
1190 1.57 oster status = ComputePSDiskOffsets(raidPtr, ctrl->curPSID, col, &ctrl->diskOffset, &rbuf->failedDiskSectorOffset,
1191 1.57 oster &rbuf->spCol, &rbuf->spOffset);
1192 1.4 oster if (status) {
1193 1.4 oster ctrl->ru_count = RUsPerPU - 1;
1194 1.4 oster continue;
1195 1.4 oster }
1196 1.4 oster }
1197 1.4 oster rbuf->which_ru = ctrl->ru_count;
1198 1.4 oster
1199 1.4 oster /* skip this RU if it's already been reconstructed */
1200 1.57 oster if (rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, rbuf->failedDiskSectorOffset)) {
1201 1.4 oster Dprintf2("Skipping psid %ld ru %d: already reconstructed\n", ctrl->curPSID, ctrl->ru_count);
1202 1.4 oster continue;
1203 1.4 oster }
1204 1.4 oster break;
1205 1.4 oster }
1206 1.4 oster ctrl->headSepCounter++;
1207 1.4 oster if (do_new_check)
1208 1.57 oster CheckForNewMinHeadSep(raidPtr, ctrl->headSepCounter); /* update min if needed */
1209 1.4 oster
1210 1.4 oster
1211 1.4 oster /* at this point, we have definitely decided what to do, and we have
1212 1.4 oster * only to see if we can actually do it now */
1213 1.4 oster rbuf->parityStripeID = ctrl->curPSID;
1214 1.4 oster rbuf->which_ru = ctrl->ru_count;
1215 1.67 oster #if RF_ACC_TRACE > 0
1216 1.29 thorpej memset((char *) &raidPtr->recon_tracerecs[col], 0,
1217 1.29 thorpej sizeof(raidPtr->recon_tracerecs[col]));
1218 1.4 oster raidPtr->recon_tracerecs[col].reconacc = 1;
1219 1.4 oster RF_ETIMER_START(raidPtr->recon_tracerecs[col].recon_timer);
1220 1.67 oster #endif
1221 1.57 oster retcode = TryToRead(raidPtr, col);
1222 1.4 oster return (retcode);
1223 1.1 oster }
1224 1.13 oster
1225 1.13 oster /*
1226 1.13 oster * tries to issue the next read on the indicated disk. We may be
1227 1.13 oster * blocked by (a) the heads being too far apart, or (b) recon on the
1228 1.13 oster * indicated RU being blocked due to a write by a user thread. In
1229 1.13 oster * this case, we issue a head-sep or blockage wait request, which will
1230 1.13 oster * cause this same routine to be invoked again later when the blockage
1231 1.87 perry * has cleared.
1232 1.1 oster */
1233 1.13 oster
1234 1.87 perry static int
1235 1.60 oster TryToRead(RF_Raid_t *raidPtr, RF_RowCol_t col)
1236 1.4 oster {
1237 1.57 oster RF_PerDiskReconCtrl_t *ctrl = &raidPtr->reconControl->perDiskInfo[col];
1238 1.4 oster RF_SectorCount_t sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU;
1239 1.4 oster RF_StripeNum_t psid = ctrl->curPSID;
1240 1.4 oster RF_ReconUnitNum_t which_ru = ctrl->ru_count;
1241 1.4 oster RF_DiskQueueData_t *req;
1242 1.68 oster int status;
1243 1.68 oster RF_ReconParityStripeStatus_t *pssPtr, *newpssPtr;
1244 1.4 oster
1245 1.4 oster /* if the current disk is too far ahead of the others, issue a
1246 1.4 oster * head-separation wait and return */
1247 1.57 oster if (CheckHeadSeparation(raidPtr, ctrl, col, ctrl->headSepCounter, which_ru))
1248 1.4 oster return (0);
1249 1.68 oster
1250 1.68 oster /* allocate a new PSS in case we need it */
1251 1.68 oster newpssPtr = rf_AllocPSStatus(raidPtr);
1252 1.68 oster
1253 1.57 oster RF_LOCK_PSS_MUTEX(raidPtr, psid);
1254 1.68 oster pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_CREATE, newpssPtr);
1255 1.68 oster
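	/*
	 * The lookup either returns an existing status entry for this
	 * parity stripe or installs the freshly allocated one; if the
	 * pre-allocated entry was not consumed, release it again.
	 */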
1256 1.68 oster if (pssPtr != newpssPtr) {
1257 1.68 oster rf_FreePSStatus(raidPtr, newpssPtr);
1258 1.68 oster }
1259 1.4 oster
1260 1.4 oster /* if recon is blocked on the indicated parity stripe, issue a
1261 1.4 oster * block-wait request and return. this also must mark the indicated RU
1262 1.4 oster * in the stripe as under reconstruction if not blocked. */
1263 1.57 oster status = CheckForcedOrBlockedReconstruction(raidPtr, pssPtr, ctrl, col, psid, which_ru);
1264 1.4 oster if (status == RF_PSS_RECON_BLOCKED) {
1265 1.4 oster Dprintf2("RECON: Stalling psid %ld ru %d: recon blocked\n", psid, which_ru);
1266 1.4 oster goto out;
1267 1.4 oster } else
1268 1.4 oster if (status == RF_PSS_FORCED_ON_WRITE) {
1269 1.57 oster rf_CauseReconEvent(raidPtr, col, NULL, RF_REVENT_SKIP);
1270 1.4 oster goto out;
1271 1.4 oster }
1272 1.4 oster /* make one last check to be sure that the indicated RU didn't get
1273 1.4 oster * reconstructed while we were waiting for something else to happen.
1274 1.4 oster * This is unfortunate in that it causes us to make this check twice
1275 1.4 oster * in the normal case. Might want to make some attempt to re-work
1276 1.4 oster * this so that we only do this check if we've definitely blocked on
1277 1.4 oster * one of the above checks. When this condition is detected, we may
1278 1.4 oster * have just created a bogus status entry, which we need to delete. */
1279 1.57 oster if (rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, ctrl->rbuf->failedDiskSectorOffset)) {
1280 1.4 oster Dprintf2("RECON: Skipping psid %ld ru %d: prior recon after stall\n", psid, which_ru);
1281 1.68 oster if (pssPtr == newpssPtr)
1282 1.57 oster rf_PSStatusDelete(raidPtr, raidPtr->reconControl->pssTable, pssPtr);
1283 1.57 oster rf_CauseReconEvent(raidPtr, col, NULL, RF_REVENT_SKIP);
1284 1.4 oster goto out;
1285 1.4 oster }
1286 1.4 oster /* found something to read. issue the I/O */
1287 1.57 oster Dprintf4("RECON: Read for psid %ld on col %d offset %ld buf %lx\n",
1288 1.57 oster psid, col, ctrl->diskOffset, ctrl->rbuf->buffer);
1289 1.67 oster #if RF_ACC_TRACE > 0
1290 1.4 oster RF_ETIMER_STOP(raidPtr->recon_tracerecs[col].recon_timer);
1291 1.4 oster RF_ETIMER_EVAL(raidPtr->recon_tracerecs[col].recon_timer);
1292 1.4 oster raidPtr->recon_tracerecs[col].specific.recon.recon_start_to_fetch_us =
1293 1.4 oster RF_ETIMER_VAL_US(raidPtr->recon_tracerecs[col].recon_timer);
1294 1.4 oster RF_ETIMER_START(raidPtr->recon_tracerecs[col].recon_timer);
1295 1.67 oster #endif
1296 1.4 oster /* should be ok to use a NULL proc pointer here, all the bufs we use
1297 1.4 oster * should be in kernel space */
1298 1.4 oster req = rf_CreateDiskQueueData(RF_IO_TYPE_READ, ctrl->diskOffset, sectorsPerRU, ctrl->rbuf->buffer, psid, which_ru,
1299 1.86 oster ReconReadDoneProc, (void *) ctrl,
1300 1.67 oster #if RF_ACC_TRACE > 0
1301 1.67 oster &raidPtr->recon_tracerecs[col],
1302 1.67 oster #else
1303 1.67 oster NULL,
1304 1.67 oster #endif
1305 1.85 oster (void *) raidPtr, 0, NULL, PR_WAITOK);
1306 1.4 oster
1307 1.4 oster ctrl->rbuf->arg = (void *) req;
1308 1.57 oster rf_DiskIOEnqueue(&raidPtr->Queues[col], req, RF_IO_RECON_PRIORITY);
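	/* remember that a recon read has been issued on this column, so
	 * that a forced reconstruction can later find it in the queue
	 * and promote it (see rf_ForceOrBlockRecon) */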
1309 1.4 oster pssPtr->issued[col] = 1;
1310 1.1 oster
1311 1.1 oster out:
1312 1.57 oster RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
1313 1.4 oster return (0);
1314 1.1 oster }
1315 1.1 oster
1316 1.1 oster
1317 1.13 oster /*
1318 1.13 oster * given a parity stripe ID, we want to find out whether both the
1319 1.13 oster * current disk and the failed disk exist in that parity stripe. If
1320 1.13 oster * not, we want to skip this whole PS. If so, we want to find the
1321 1.13 oster * disk offset of the start of the PS on both the current disk and the
1322 1.13 oster * failed disk.
1323 1.13 oster *
1324 1.13 oster * this works by getting a list of disks comprising the indicated
1325 1.13 oster * parity stripe, and searching the list for the current and failed
1326 1.13 oster * disks. Once we've decided they both exist in the parity stripe, we
1327 1.13 oster * need to decide whether each is data or parity, so that we'll know
1328 1.13 oster * which mapping function to call to get the corresponding disk
1329 1.1 oster * offsets.
1330 1.1 oster *
1331 1.13 oster * this is kind of unpleasant, but doing it this way allows the
1332 1.13 oster * reconstruction code to use parity stripe IDs rather than physical
1333 1.13 oster  * disk addresses to march through the failed disk, which greatly
1334 1.13 oster * simplifies a lot of code, as well as eliminating the need for a
1335 1.13 oster * reverse-mapping function. I also think it will execute faster,
1336 1.13 oster * since the calls to the mapping module are kept to a minimum.
1337 1.1 oster *
1338 1.13 oster * ASSUMES THAT THE STRIPE IDENTIFIER IDENTIFIES THE DISKS COMPRISING
1339 1.87 perry * THE STRIPE IN THE CORRECT ORDER
1340 1.87 perry *
1341 1.60 oster * raidPtr - raid descriptor
1342 1.60 oster * psid - parity stripe identifier
1343 1.60 oster * col - column of disk to find the offsets for
1344 1.60 oster * spCol - out: col of spare unit for failed unit
1345 1.60 oster * spOffset - out: offset into disk containing spare unit
1346 1.60 oster *
1347 1.60 oster */
1348 1.13 oster
1349 1.13 oster
1350 1.87 perry static int
1351 1.60 oster ComputePSDiskOffsets(RF_Raid_t *raidPtr, RF_StripeNum_t psid,
1352 1.60 oster RF_RowCol_t col, RF_SectorNum_t *outDiskOffset,
1353 1.60 oster RF_SectorNum_t *outFailedDiskSectorOffset,
1354 1.60 oster RF_RowCol_t *spCol, RF_SectorNum_t *spOffset)
1355 1.60 oster {
1356 1.4 oster RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
1357 1.57 oster RF_RowCol_t fcol = raidPtr->reconControl->fcol;
1358 1.4 oster RF_RaidAddr_t sosRaidAddress; /* start-of-stripe */
1359 1.4 oster RF_RowCol_t *diskids;
1360 1.4 oster u_int i, j, k, i_offset, j_offset;
1361 1.57 oster RF_RowCol_t pcol;
1362 1.57 oster int testcol;
1363 1.4 oster RF_SectorNum_t poffset;
1364 1.4 oster char i_is_parity = 0, j_is_parity = 0;
1365 1.4 oster RF_RowCol_t stripeWidth = layoutPtr->numDataCol + layoutPtr->numParityCol;
1366 1.4 oster
1367 1.4 oster /* get a listing of the disks comprising that stripe */
1368 1.4 oster sosRaidAddress = rf_ParityStripeIDToRaidAddress(layoutPtr, psid);
1369 1.57 oster (layoutPtr->map->IdentifyStripe) (raidPtr, sosRaidAddress, &diskids);
1370 1.4 oster RF_ASSERT(diskids);
1371 1.4 oster
1372 1.4 oster /* reject this entire parity stripe if it does not contain the
1373 1.4 oster * indicated disk or it does not contain the failed disk */
1374 1.57 oster
1375 1.4 oster for (i = 0; i < stripeWidth; i++) {
1376 1.4 oster if (col == diskids[i])
1377 1.4 oster break;
1378 1.4 oster }
1379 1.4 oster if (i == stripeWidth)
1380 1.4 oster goto skipit;
1381 1.4 oster for (j = 0; j < stripeWidth; j++) {
1382 1.4 oster if (fcol == diskids[j])
1383 1.4 oster break;
1384 1.4 oster }
1385 1.4 oster if (j == stripeWidth) {
1386 1.4 oster goto skipit;
1387 1.4 oster }
1388 1.4 oster /* find out which disk the parity is on */
1389 1.57 oster (layoutPtr->map->MapParity) (raidPtr, sosRaidAddress, &pcol, &poffset, RF_DONT_REMAP);
1390 1.4 oster
1391 1.4 oster /* find out if either the current RU or the failed RU is parity */
1392 1.4 oster /* also, if the parity occurs in this stripe prior to the data and/or
1393 1.4 oster * failed col, we need to decrement i and/or j */
1394 1.4 oster for (k = 0; k < stripeWidth; k++)
1395 1.4 oster if (diskids[k] == pcol)
1396 1.4 oster break;
1397 1.4 oster RF_ASSERT(k < stripeWidth);
1398 1.4 oster i_offset = i;
1399 1.4 oster j_offset = j;
1400 1.4 oster if (k < i)
1401 1.4 oster i_offset--;
1402 1.4 oster else
1403 1.4 oster if (k == i) {
1404 1.4 oster i_is_parity = 1;
1405 1.4 oster i_offset = 0;
1406 1.4 oster } /* set offsets to zero to disable multiply
1407 1.4 oster * below */
1408 1.4 oster if (k < j)
1409 1.4 oster j_offset--;
1410 1.4 oster else
1411 1.4 oster if (k == j) {
1412 1.4 oster j_is_parity = 1;
1413 1.4 oster j_offset = 0;
1414 1.4 oster }
1415 1.4 oster /* at this point, [ij]_is_parity tells us whether the [current,failed]
1416 1.4 oster * disk is parity at the start of this RU, and, if data, "[ij]_offset"
1417 1.4 oster * tells us how far into the stripe the [current,failed] disk is. */
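	/*
	 * Illustrative example (made-up numbers): if the stripe maps to
	 * disks [2, 5, 7] in that order with parity on disk 5, then for
	 * col == 7 we get i == 2 and k == 1, so i_offset == 1: disk 7
	 * holds the second data unit of the stripe, and its offset is
	 * found by mapping sosRaidAddress + 1 * sectorsPerStripeUnit.
	 */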
1418 1.4 oster
1419 1.4 oster /* call the mapping routine to get the offset into the current disk,
1420 1.4 oster * repeat for failed disk. */
1421 1.4 oster if (i_is_parity)
1422 1.57 oster layoutPtr->map->MapParity(raidPtr, sosRaidAddress + i_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outDiskOffset, RF_DONT_REMAP);
1423 1.4 oster else
1424 1.57 oster layoutPtr->map->MapSector(raidPtr, sosRaidAddress + i_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outDiskOffset, RF_DONT_REMAP);
1425 1.4 oster
1426 1.57 oster RF_ASSERT(col == testcol);
1427 1.4 oster
1428 1.4 oster if (j_is_parity)
1429 1.57 oster layoutPtr->map->MapParity(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outFailedDiskSectorOffset, RF_DONT_REMAP);
1430 1.4 oster else
1431 1.57 oster layoutPtr->map->MapSector(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outFailedDiskSectorOffset, RF_DONT_REMAP);
1432 1.57 oster RF_ASSERT(fcol == testcol);
1433 1.4 oster
1434 1.4 oster /* now locate the spare unit for the failed unit */
1435 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
1436 1.4 oster if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
1437 1.4 oster if (j_is_parity)
1438 1.57 oster layoutPtr->map->MapParity(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, spCol, spOffset, RF_REMAP);
1439 1.4 oster else
1440 1.57 oster layoutPtr->map->MapSector(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, spCol, spOffset, RF_REMAP);
1441 1.4 oster } else {
1442 1.72 oster #endif
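	/* no distributed sparing: the spare is the dedicated spare
	 * column, at the same offset as the failed unit */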
1443 1.57 oster *spCol = raidPtr->reconControl->spareCol;
1444 1.4 oster *spOffset = *outFailedDiskSectorOffset;
1445 1.72 oster #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
1446 1.4 oster }
1447 1.72 oster #endif
1448 1.4 oster return (0);
1449 1.1 oster
1450 1.1 oster skipit:
1451 1.99 oster Dprintf2("RECON: Skipping psid %ld: nothing needed from c%d\n",
1452 1.57 oster psid, col);
1453 1.4 oster return (1);
1454 1.1 oster }
1455 1.4 oster /* this is called when a buffer has become ready to write to the replacement disk */
1456 1.87 perry static int
1457 1.60 oster IssueNextWriteRequest(RF_Raid_t *raidPtr)
1458 1.4 oster {
1459 1.4 oster RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
1460 1.4 oster RF_SectorCount_t sectorsPerRU = layoutPtr->sectorsPerStripeUnit * layoutPtr->SUsPerRU;
1461 1.67 oster #if RF_ACC_TRACE > 0
1462 1.57 oster RF_RowCol_t fcol = raidPtr->reconControl->fcol;
1463 1.67 oster #endif
1464 1.4 oster RF_ReconBuffer_t *rbuf;
1465 1.4 oster RF_DiskQueueData_t *req;
1466 1.4 oster
1467 1.57 oster rbuf = rf_GetFullReconBuffer(raidPtr->reconControl);
1468 1.4 oster RF_ASSERT(rbuf); /* there must be one available, or we wouldn't
1469 1.4 oster * have gotten the event that sent us here */
1470 1.4 oster RF_ASSERT(rbuf->pssPtr);
1471 1.4 oster
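	/* hand the full buffer over to its parity stripe as the buffer
	 * being written, and break the back-pointer now that the PSS
	 * tracks it via writeRbuf */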
1472 1.4 oster rbuf->pssPtr->writeRbuf = rbuf;
1473 1.4 oster rbuf->pssPtr = NULL;
1474 1.4 oster
1475 1.57 oster Dprintf6("RECON: New write (c %d offs %d) for psid %ld ru %d (failed disk offset %ld) buf %lx\n",
1476 1.57 oster rbuf->spCol, rbuf->spOffset, rbuf->parityStripeID,
1477 1.4 oster rbuf->which_ru, rbuf->failedDiskSectorOffset, rbuf->buffer);
1478 1.4 oster Dprintf6("RECON: new write psid %ld %02x %02x %02x %02x %02x\n",
1479 1.4 oster rbuf->parityStripeID, rbuf->buffer[0] & 0xff, rbuf->buffer[1] & 0xff,
1480 1.4 oster rbuf->buffer[2] & 0xff, rbuf->buffer[3] & 0xff, rbuf->buffer[4] & 0xff);
1481 1.4 oster
1482 1.4 oster /* should be ok to use a NULL b_proc here b/c all addrs should be in
1483 1.4 oster * kernel space */
1484 1.4 oster req = rf_CreateDiskQueueData(RF_IO_TYPE_WRITE, rbuf->spOffset,
1485 1.4 oster sectorsPerRU, rbuf->buffer,
1486 1.4 oster rbuf->parityStripeID, rbuf->which_ru,
1487 1.86 oster ReconWriteDoneProc, (void *) rbuf,
1488 1.67 oster #if RF_ACC_TRACE > 0
1489 1.4 oster &raidPtr->recon_tracerecs[fcol],
1490 1.67 oster #else
1491 1.87 perry NULL,
1492 1.67 oster #endif
1493 1.85 oster (void *) raidPtr, 0, NULL, PR_WAITOK);
1494 1.1 oster
1495 1.4 oster rbuf->arg = (void *) req;
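	/* account for the outstanding write (under rb_mutex) before
	 * handing the request to the disk queue */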
1496 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1497 1.82 oster raidPtr->reconControl->pending_writes++;
1498 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1499 1.57 oster rf_DiskIOEnqueue(&raidPtr->Queues[rbuf->spCol], req, RF_IO_RECON_PRIORITY);
1500 1.1 oster
1501 1.4 oster return (0);
1502 1.1 oster }
1503 1.13 oster
1504 1.13 oster /*
1505 1.13 oster * this gets called upon the completion of a reconstruction read
1506 1.13 oster  * operation.  The arg is a pointer to the per-disk reconstruction
1507 1.13 oster * control structure for the process that just finished a read.
1508 1.1 oster *
1509 1.13 oster * called at interrupt context in the kernel, so don't do anything
1510 1.87 perry * illegal here.
1511 1.1 oster */
1512 1.87 perry static int
1513 1.60 oster ReconReadDoneProc(void *arg, int status)
1514 1.4 oster {
1515 1.4 oster RF_PerDiskReconCtrl_t *ctrl = (RF_PerDiskReconCtrl_t *) arg;
1516 1.82 oster RF_Raid_t *raidPtr;
1517 1.82 oster
1518 1.82 oster /* Detect that reconCtrl is no longer valid, and if that
1519 1.82 oster is the case, bail without calling rf_CauseReconEvent().
1520 1.82 oster There won't be anyone listening for this event anyway */
1521 1.82 oster
1522 1.82 oster if (ctrl->reconCtrl == NULL)
1523 1.82 oster return(0);
1524 1.82 oster
1525 1.82 oster raidPtr = ctrl->reconCtrl->reconDesc->raidPtr;
1526 1.4 oster
1527 1.4 oster if (status) {
1528 1.102 oster printf("raid%d: Recon read failed: %d\n", raidPtr->raidid, status);
1529 1.70 oster rf_CauseReconEvent(raidPtr, ctrl->col, NULL, RF_REVENT_READ_FAILED);
1530 1.70 oster return(0);
1531 1.4 oster }
1532 1.67 oster #if RF_ACC_TRACE > 0
1533 1.4 oster RF_ETIMER_STOP(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1534 1.4 oster RF_ETIMER_EVAL(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1535 1.4 oster raidPtr->recon_tracerecs[ctrl->col].specific.recon.recon_fetch_to_return_us =
1536 1.4 oster RF_ETIMER_VAL_US(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1537 1.4 oster RF_ETIMER_START(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1538 1.67 oster #endif
1539 1.57 oster rf_CauseReconEvent(raidPtr, ctrl->col, NULL, RF_REVENT_READDONE);
1540 1.4 oster return (0);
1541 1.1 oster }
1542 1.1 oster /* this gets called upon the completion of a reconstruction write operation.
1543 1.1 oster * the arg is a pointer to the rbuf that was just written
1544 1.1 oster *
1545 1.1 oster * called at interrupt context in the kernel, so don't do anything illegal here.
1546 1.1 oster */
1547 1.87 perry static int
1548 1.60 oster ReconWriteDoneProc(void *arg, int status)
1549 1.4 oster {
1550 1.4 oster RF_ReconBuffer_t *rbuf = (RF_ReconBuffer_t *) arg;
1551 1.4 oster
1552 1.82 oster /* Detect that reconControl is no longer valid, and if that
1553 1.82 oster is the case, bail without calling rf_CauseReconEvent().
1554 1.82 oster There won't be anyone listening for this event anyway */
1555 1.82 oster
1556 1.82 oster if (rbuf->raidPtr->reconControl == NULL)
1557 1.82 oster return(0);
1558 1.82 oster
1559 1.4 oster Dprintf2("Reconstruction completed on psid %ld ru %d\n", rbuf->parityStripeID, rbuf->which_ru);
1560 1.4 oster if (status) {
1561 1.119 yamt printf("raid%d: Recon write failed (status %d(0x%x))!\n", rbuf->raidPtr->raidid,status,status);
1562 1.71 oster rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, arg, RF_REVENT_WRITE_FAILED);
1563 1.70 oster return(0);
1564 1.4 oster }
1565 1.71 oster rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, arg, RF_REVENT_WRITEDONE);
1566 1.4 oster return (0);
1567 1.1 oster }
1568 1.1 oster
1569 1.1 oster
1570 1.87 perry /*
1571 1.13 oster * computes a new minimum head sep, and wakes up anyone who needs to
1572 1.87 perry * be woken as a result
1573 1.13 oster */
1574 1.87 perry static void
1575 1.95 christos CheckForNewMinHeadSep(RF_Raid_t *raidPtr, RF_HeadSepLimit_t hsCtr)
1576 1.4 oster {
1577 1.57 oster RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl;
1578 1.4 oster RF_HeadSepLimit_t new_min;
1579 1.4 oster RF_RowCol_t i;
1580 1.4 oster RF_CallbackDesc_t *p;
1581 1.4 oster RF_ASSERT(hsCtr >= reconCtrlPtr->minHeadSepCounter); /* from the definition
1582 1.4 oster * of a minimum */
1583 1.4 oster
1584 1.4 oster
1585 1.112 mrg rf_lock_mutex2(reconCtrlPtr->rb_mutex);
1586 1.76 oster while(reconCtrlPtr->rb_lock) {
1587 1.112 mrg rf_wait_cond2(reconCtrlPtr->rb_cv, reconCtrlPtr->rb_mutex);
1588 1.76 oster }
1589 1.76 oster reconCtrlPtr->rb_lock = 1;
1590 1.112 mrg rf_unlock_mutex2(reconCtrlPtr->rb_mutex);
1591 1.4 oster
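	/* start from the largest positive long value and take the
	 * minimum head-separation counter over all surviving columns */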
1592 1.4 oster new_min = ~(1L << (8 * sizeof(long) - 1)); /* 0x7FFF....FFF */
1593 1.4 oster for (i = 0; i < raidPtr->numCol; i++)
1594 1.4 oster if (i != reconCtrlPtr->fcol) {
1595 1.4 oster if (reconCtrlPtr->perDiskInfo[i].headSepCounter < new_min)
1596 1.4 oster new_min = reconCtrlPtr->perDiskInfo[i].headSepCounter;
1597 1.4 oster }
1598 1.4 oster /* set the new minimum and wake up anyone who can now run again */
1599 1.4 oster if (new_min != reconCtrlPtr->minHeadSepCounter) {
1600 1.4 oster reconCtrlPtr->minHeadSepCounter = new_min;
1601 1.4 oster Dprintf1("RECON: new min head pos counter val is %ld\n", new_min);
1602 1.4 oster while (reconCtrlPtr->headSepCBList) {
1603 1.4 oster if (reconCtrlPtr->headSepCBList->callbackArg.v > new_min)
1604 1.4 oster break;
1605 1.4 oster p = reconCtrlPtr->headSepCBList;
1606 1.4 oster reconCtrlPtr->headSepCBList = p->next;
1607 1.4 oster p->next = NULL;
1608 1.57 oster rf_CauseReconEvent(raidPtr, p->col, NULL, RF_REVENT_HEADSEPCLEAR);
1609 1.4 oster rf_FreeCallbackDesc(p);
1610 1.4 oster }
1611 1.1 oster
1612 1.4 oster }
1613 1.112 mrg rf_lock_mutex2(reconCtrlPtr->rb_mutex);
1614 1.76 oster reconCtrlPtr->rb_lock = 0;
1615 1.112 mrg rf_broadcast_cond2(reconCtrlPtr->rb_cv);
1616 1.112 mrg rf_unlock_mutex2(reconCtrlPtr->rb_mutex);
1617 1.1 oster }
1618 1.13 oster
1619 1.13 oster /*
1620 1.13 oster * checks to see that the maximum head separation will not be violated
1621 1.13 oster * if we initiate a reconstruction I/O on the indicated disk.
1622 1.13 oster * Limiting the maximum head separation between two disks eliminates
1623 1.13 oster * the nasty buffer-stall conditions that occur when one disk races
1624 1.13 oster * ahead of the others and consumes all of the floating recon buffers.
1625 1.13 oster * This code is complex and unpleasant but it's necessary to avoid
1626 1.13 oster * some very nasty, albeit fairly rare, reconstruction behavior.
1627 1.1 oster *
1628 1.13 oster * returns non-zero if and only if we have to stop working on the
1629 1.87 perry * indicated disk due to a head-separation delay.
1630 1.1 oster */
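/*
 * Worked example (numbers are illustrative only): with headSepLimit set
 * to 1000, a disk whose headSepCounter has reached 5000 while
 * minHeadSepCounter is still below 4000 gets stalled here.  Its wakeup
 * threshold is 5000 - 1000 + 1000/5 == 4200, so CheckForNewMinHeadSep()
 * releases it once the slowest surviving disk reaches 4200, i.e. once
 * the separation has dropped to 800, which is 80% of the limit.
 */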
1631 1.87 perry static int
1632 1.60 oster CheckHeadSeparation(RF_Raid_t *raidPtr, RF_PerDiskReconCtrl_t *ctrl,
1633 1.95 christos RF_RowCol_t col, RF_HeadSepLimit_t hsCtr,
1634 1.95 christos RF_ReconUnitNum_t which_ru)
1635 1.4 oster {
1636 1.57 oster RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl;
1637 1.4 oster RF_CallbackDesc_t *cb, *p, *pt;
1638 1.10 oster int retval = 0;
1639 1.4 oster
1640 1.4 oster /* if we're too far ahead of the slowest disk, stop working on this
1641 1.4 oster * disk until the slower ones catch up. We do this by scheduling a
1642 1.4 oster * wakeup callback for the time when the slowest disk has caught up.
1643 1.4 oster * We define "caught up" with 20% hysteresis, i.e. the head separation
1644 1.4 oster * must have fallen to at most 80% of the max allowable head
1645 1.4 oster * separation before we'll wake up.
1646 1.87 perry *
1647 1.4 oster */
1648 1.112 mrg rf_lock_mutex2(reconCtrlPtr->rb_mutex);
1649 1.76 oster while(reconCtrlPtr->rb_lock) {
1650 1.112 mrg rf_wait_cond2(reconCtrlPtr->rb_cv, reconCtrlPtr->rb_mutex);
1651 1.76 oster }
1652 1.76 oster reconCtrlPtr->rb_lock = 1;
1653 1.112 mrg rf_unlock_mutex2(reconCtrlPtr->rb_mutex);
1654 1.4 oster if ((raidPtr->headSepLimit >= 0) &&
1655 1.4 oster ((ctrl->headSepCounter - reconCtrlPtr->minHeadSepCounter) > raidPtr->headSepLimit)) {
1656 1.57 oster Dprintf5("raid%d: RECON: head sep stall: col %d hsCtr %ld minHSCtr %ld limit %ld\n",
1657 1.87 perry raidPtr->raidid, col, ctrl->headSepCounter,
1658 1.87 perry reconCtrlPtr->minHeadSepCounter,
1659 1.10 oster raidPtr->headSepLimit);
1660 1.4 oster cb = rf_AllocCallbackDesc();
1661 1.4 oster /* the minHeadSepCounter value we have to get to before we'll
1662 1.4 oster * wake up. build in 20% hysteresis. */
1663 1.4 oster cb->callbackArg.v = (ctrl->headSepCounter - raidPtr->headSepLimit + raidPtr->headSepLimit / 5);
1664 1.4 oster cb->col = col;
1665 1.4 oster cb->next = NULL;
1666 1.4 oster
1667 1.4 oster /* insert this callback descriptor into the sorted list of
1668 1.4 oster * pending head-sep callbacks */
1669 1.4 oster p = reconCtrlPtr->headSepCBList;
1670 1.4 oster if (!p)
1671 1.4 oster reconCtrlPtr->headSepCBList = cb;
1672 1.4 oster else
1673 1.4 oster if (cb->callbackArg.v < p->callbackArg.v) {
1674 1.4 oster cb->next = reconCtrlPtr->headSepCBList;
1675 1.4 oster reconCtrlPtr->headSepCBList = cb;
1676 1.4 oster } else {
1677 1.4 oster for (pt = p, p = p->next; p && (p->callbackArg.v < cb->callbackArg.v); pt = p, p = p->next);
1678 1.4 oster cb->next = p;
1679 1.4 oster pt->next = cb;
1680 1.4 oster }
1681 1.4 oster retval = 1;
1682 1.1 oster #if RF_RECON_STATS > 0
1683 1.4 oster ctrl->reconCtrl->reconDesc->hsStallCount++;
1684 1.4 oster #endif /* RF_RECON_STATS > 0 */
1685 1.4 oster }
1686 1.112 mrg rf_lock_mutex2(reconCtrlPtr->rb_mutex);
1687 1.76 oster reconCtrlPtr->rb_lock = 0;
1688 1.112 mrg rf_broadcast_cond2(reconCtrlPtr->rb_cv);
1689 1.112 mrg rf_unlock_mutex2(reconCtrlPtr->rb_mutex);
1690 1.1 oster
1691 1.4 oster return (retval);
1692 1.1 oster }
1693 1.87 perry /*
1694 1.13 oster * checks to see if reconstruction has been either forced or blocked
1695 1.13 oster * by a user operation. if forced, we skip this RU entirely. else if
1696 1.13 oster * blocked, put ourselves on the wait list. else return 0.
1697 1.1 oster *
1698 1.87 perry * ASSUMES THE PSS MUTEX IS LOCKED UPON ENTRY
1699 1.1 oster */
1700 1.87 perry static int
1701 1.95 christos CheckForcedOrBlockedReconstruction(RF_Raid_t *raidPtr,
1702 1.60 oster RF_ReconParityStripeStatus_t *pssPtr,
1703 1.95 christos RF_PerDiskReconCtrl_t *ctrl,
1704 1.94 christos RF_RowCol_t col,
1705 1.95 christos RF_StripeNum_t psid,
1706 1.95 christos RF_ReconUnitNum_t which_ru)
1707 1.4 oster {
1708 1.4 oster RF_CallbackDesc_t *cb;
1709 1.4 oster int retcode = 0;
1710 1.4 oster
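	/* a forced read or write on this stripe means the RU is already
	 * being rebuilt at normal priority, so the reconstruction thread
	 * simply skips it (the caller turns this into an RF_REVENT_SKIP) */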
1711 1.4 oster if ((pssPtr->flags & RF_PSS_FORCED_ON_READ) || (pssPtr->flags & RF_PSS_FORCED_ON_WRITE))
1712 1.4 oster retcode = RF_PSS_FORCED_ON_WRITE;
1713 1.4 oster else
1714 1.4 oster if (pssPtr->flags & RF_PSS_RECON_BLOCKED) {
1715 1.57 oster Dprintf3("RECON: col %d blocked at psid %ld ru %d\n", col, psid, which_ru);
1716 1.4 oster cb = rf_AllocCallbackDesc(); /* append ourselves to
1717 1.4 oster * the blockage-wait
1718 1.4 oster * list */
1719 1.4 oster cb->col = col;
1720 1.4 oster cb->next = pssPtr->blockWaitList;
1721 1.4 oster pssPtr->blockWaitList = cb;
1722 1.4 oster retcode = RF_PSS_RECON_BLOCKED;
1723 1.4 oster }
1724 1.4 oster if (!retcode)
1725 1.4 oster pssPtr->flags |= RF_PSS_UNDER_RECON; /* mark this RU as under
1726 1.4 oster * reconstruction */
1727 1.4 oster
1728 1.4 oster return (retcode);
1729 1.1 oster }
1730 1.13 oster /*
1731 1.13 oster * if reconstruction is currently ongoing for the indicated stripeID,
1732 1.13 oster * reconstruction is forced to completion and we return non-zero to
1733 1.13 oster * indicate that the caller must wait. If not, then reconstruction is
1734 1.13 oster * blocked on the indicated stripe and the routine returns zero. If
1735 1.13 oster * and only if we return non-zero, we'll cause the cbFunc to get
1736 1.87 perry * invoked with the cbArg when the reconstruction has completed.
1737 1.1 oster */
1738 1.87 perry int
1739 1.60 oster rf_ForceOrBlockRecon(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap,
1740 1.60 oster void (*cbFunc)(RF_Raid_t *, void *), void *cbArg)
1741 1.4 oster {
1742 1.4 oster RF_StripeNum_t stripeID = asmap->stripeID; /* the stripe ID we're
1743 1.4 oster * forcing recon on */
1744 1.4 oster RF_SectorCount_t sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU; /* num sects in one RU */
1745 1.68 oster RF_ReconParityStripeStatus_t *pssPtr, *newpssPtr; /* a pointer to the parity
1746 1.4 oster * stripe status structure */
1747 1.4 oster RF_StripeNum_t psid; /* parity stripe id */
1748 1.4 oster RF_SectorNum_t offset, fd_offset; /* disk offset, failed-disk
1749 1.4 oster * offset */
1750 1.4 oster RF_RowCol_t *diskids;
1751 1.4 oster RF_ReconUnitNum_t which_ru; /* RU within parity stripe */
1752 1.4 oster RF_RowCol_t fcol, diskno, i;
1753 1.4 oster RF_ReconBuffer_t *new_rbuf; /* ptr to newly allocated rbufs */
1754 1.4 oster RF_DiskQueueData_t *req;/* disk I/O req to be enqueued */
1755 1.4 oster RF_CallbackDesc_t *cb;
1756 1.68 oster int nPromoted;
1757 1.4 oster
1758 1.4 oster psid = rf_MapStripeIDToParityStripeID(&raidPtr->Layout, stripeID, &which_ru);
1759 1.4 oster
1760 1.68 oster /* allocate a new PSS in case we need it */
1761 1.68 oster newpssPtr = rf_AllocPSStatus(raidPtr);
1762 1.68 oster
1763 1.57 oster RF_LOCK_PSS_MUTEX(raidPtr, psid);
1764 1.4 oster
1765 1.68 oster pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_CREATE | RF_PSS_RECON_BLOCKED, newpssPtr);
1766 1.68 oster
1767 1.68 oster if (pssPtr != newpssPtr) {
1768 1.68 oster rf_FreePSStatus(raidPtr, newpssPtr);
1769 1.68 oster }
1770 1.4 oster
1771 1.4 oster /* if recon is not ongoing on this PS, just return */
1772 1.4 oster if (!(pssPtr->flags & RF_PSS_UNDER_RECON)) {
1773 1.57 oster RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
1774 1.4 oster return (0);
1775 1.4 oster }
1776 1.4 oster /* otherwise, we have to wait for reconstruction to complete on this
1777 1.4 oster * RU. */
1778 1.4 oster /* In order to avoid waiting for a potentially large number of
1779 1.4 oster * low-priority accesses to complete, we force a normal-priority (i.e.
1780 1.4 oster * not low-priority) reconstruction on this RU. */
1781 1.4 oster if (!(pssPtr->flags & RF_PSS_FORCED_ON_WRITE) && !(pssPtr->flags & RF_PSS_FORCED_ON_READ)) {
1782 1.4 oster DDprintf1("Forcing recon on psid %ld\n", psid);
1783 1.4 oster pssPtr->flags |= RF_PSS_FORCED_ON_WRITE; /* mark this RU as under
1784 1.4 oster * forced recon */
1785 1.4 oster pssPtr->flags &= ~RF_PSS_RECON_BLOCKED; /* clear the blockage
1786 1.4 oster * that we just set */
1787 1.57 oster fcol = raidPtr->reconControl->fcol;
1788 1.4 oster
1789 1.4 oster /* get a listing of the disks comprising the indicated stripe */
1790 1.57 oster (raidPtr->Layout.map->IdentifyStripe) (raidPtr, asmap->raidAddress, &diskids);
1791 1.4 oster
1792 1.4 oster /* For previously issued reads, elevate them to normal
1793 1.4 oster * priority. If the I/O has already completed, it won't be
1794 1.4 oster * found in the queue, and hence this will be a no-op. For
1795 1.4 oster * unissued reads, allocate buffers and issue new reads. The
1796 1.4 oster * fact that we've set the FORCED bit means that the regular
1797 1.4 oster * recon procs will not re-issue these reqs */
1798 1.4 oster for (i = 0; i < raidPtr->Layout.numDataCol + raidPtr->Layout.numParityCol; i++)
1799 1.4 oster if ((diskno = diskids[i]) != fcol) {
1800 1.4 oster if (pssPtr->issued[diskno]) {
1801 1.57 oster nPromoted = rf_DiskIOPromote(&raidPtr->Queues[diskno], psid, which_ru);
1802 1.4 oster if (rf_reconDebug && nPromoted)
1803 1.57 oster printf("raid%d: promoted read from col %d\n", raidPtr->raidid, diskno);
1804 1.4 oster } else {
1805 1.57 oster new_rbuf = rf_MakeReconBuffer(raidPtr, diskno, RF_RBUF_TYPE_FORCED); /* create new buf */
1806 1.57 oster ComputePSDiskOffsets(raidPtr, psid, diskno, &offset, &fd_offset,
1807 1.57 oster &new_rbuf->spCol, &new_rbuf->spOffset); /* find offsets & spare
1808 1.4 oster * location */
1809 1.4 oster new_rbuf->parityStripeID = psid; /* fill in the buffer */
1810 1.4 oster new_rbuf->which_ru = which_ru;
1811 1.4 oster new_rbuf->failedDiskSectorOffset = fd_offset;
1812 1.4 oster new_rbuf->priority = RF_IO_NORMAL_PRIORITY;
1813 1.4 oster
1814 1.4 oster /* use NULL b_proc b/c all addrs
1815 1.4 oster * should be in kernel space */
1816 1.4 oster req = rf_CreateDiskQueueData(RF_IO_TYPE_READ, offset + which_ru * sectorsPerRU, sectorsPerRU, new_rbuf->buffer,
1817 1.86 oster psid, which_ru, (int (*) (void *, int)) ForceReconReadDoneProc, (void *) new_rbuf,
1818 1.85 oster NULL, (void *) raidPtr, 0, NULL, PR_WAITOK);
1819 1.4 oster
1820 1.4 oster new_rbuf->arg = req;
1821 1.57 oster rf_DiskIOEnqueue(&raidPtr->Queues[diskno], req, RF_IO_NORMAL_PRIORITY); /* enqueue the I/O */
1822 1.57 oster Dprintf2("raid%d: Issued new read req on col %d\n", raidPtr->raidid, diskno);
1823 1.4 oster }
1824 1.4 oster }
1825 1.4 oster /* if the write is sitting in the disk queue, elevate its
1826 1.4 oster * priority */
1827 1.57 oster if (rf_DiskIOPromote(&raidPtr->Queues[fcol], psid, which_ru))
1828 1.102 oster if (rf_reconDebug)
1829 1.102 oster printf("raid%d: promoted write to col %d\n",
1830 1.102 oster raidPtr->raidid, fcol);
1831 1.4 oster }
1832 1.4 oster /* install a callback descriptor to be invoked when recon completes on
1833 1.4 oster * this parity stripe. */
1834 1.4 oster cb = rf_AllocCallbackDesc();
1835 1.4 oster /* XXX the following is bogus.. These functions don't really match!!
1836 1.4 oster * GO */
1837 1.4 oster cb->callbackFunc = (void (*) (RF_CBParam_t)) cbFunc;
1838 1.4 oster cb->callbackArg.p = (void *) cbArg;
1839 1.4 oster cb->next = pssPtr->procWaitList;
1840 1.4 oster pssPtr->procWaitList = cb;
1841 1.87 perry DDprintf2("raid%d: Waiting for forced recon on psid %ld\n",
1842 1.10 oster raidPtr->raidid, psid);
1843 1.4 oster
1844 1.57 oster RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
1845 1.4 oster return (1);
1846 1.1 oster }
1847 1.1 oster /* called upon the completion of a forced reconstruction read.
1848 1.1 oster  * all we do is schedule the FORCEDREADDONE (or FORCEDREAD_FAILED) event.
1849 1.1 oster * called at interrupt context in the kernel, so don't do anything illegal here.
1850 1.1 oster */
1851 1.87 perry static void
1852 1.60 oster ForceReconReadDoneProc(void *arg, int status)
1853 1.4 oster {
1854 1.4 oster RF_ReconBuffer_t *rbuf = arg;
1855 1.4 oster
1856 1.82 oster /* Detect that reconControl is no longer valid, and if that
1857 1.82 oster is the case, bail without calling rf_CauseReconEvent().
1858 1.82 oster There won't be anyone listening for this event anyway */
1859 1.82 oster
1860 1.82 oster if (rbuf->raidPtr->reconControl == NULL)
1861 1.82 oster return;
1862 1.82 oster
1863 1.4 oster if (status) {
1864 1.70 oster printf("raid%d: Forced recon read failed!\n", rbuf->raidPtr->raidid);
1865 1.71 oster rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, (void *) rbuf, RF_REVENT_FORCEDREAD_FAILED);
1866 1.79 oster return;
1867 1.4 oster }
1868 1.71 oster rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, (void *) rbuf, RF_REVENT_FORCEDREADDONE);
1869 1.1 oster }
1870 1.1 oster /* releases a block on the reconstruction of the indicated stripe */
1871 1.87 perry int
1872 1.60 oster rf_UnblockRecon(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap)
1873 1.4 oster {
1874 1.4 oster RF_StripeNum_t stripeID = asmap->stripeID;
1875 1.4 oster RF_ReconParityStripeStatus_t *pssPtr;
1876 1.4 oster RF_ReconUnitNum_t which_ru;
1877 1.4 oster RF_StripeNum_t psid;
1878 1.4 oster RF_CallbackDesc_t *cb;
1879 1.4 oster
1880 1.4 oster psid = rf_MapStripeIDToParityStripeID(&raidPtr->Layout, stripeID, &which_ru);
1881 1.57 oster RF_LOCK_PSS_MUTEX(raidPtr, psid);
1882 1.68 oster pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_NONE, NULL);
1883 1.4 oster
1884 1.4 oster /* When recon is forced, the pss desc can get deleted before we get
1885 1.4 oster * back to unblock recon. But, this can _only_ happen when recon is
1886 1.4 oster * forced. It would be good to put some kind of sanity check here, but
1887 1.4 oster * how to decide if recon was just forced or not? */
1888 1.4 oster if (!pssPtr) {
1889 1.4 oster /* printf("Warning: no pss descriptor upon unblock on psid %ld
1890 1.4 oster * RU %d\n",psid,which_ru); */
1891 1.43 oster #if (RF_DEBUG_RECON > 0) || (RF_DEBUG_PSS > 0)
1892 1.4 oster if (rf_reconDebug || rf_pssDebug)
1893 1.4 oster printf("Warning: no pss descriptor upon unblock on psid %ld RU %d\n", (long) psid, which_ru);
1894 1.43 oster #endif
1895 1.4 oster goto out;
1896 1.4 oster }
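	/* drop one blockage reference; once the last reference is gone we
	 * clear the blocked flag, wake every waiter on the block-wait
	 * list and, if no reconstruction was requested while the stripe
	 * was blocked, discard the PSS entry */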
1897 1.4 oster pssPtr->blockCount--;
1898 1.10 oster Dprintf3("raid%d: unblocking recon on psid %ld: blockcount is %d\n",
1899 1.10 oster raidPtr->raidid, psid, pssPtr->blockCount);
1900 1.4 oster if (pssPtr->blockCount == 0) { /* if recon blockage has been released */
1901 1.4 oster
1902 1.4 oster /* unblock recon before calling CauseReconEvent in case
1903 1.4 oster * CauseReconEvent causes us to try to issue a new read before
1904 1.4 oster * returning here. */
1905 1.4 oster pssPtr->flags &= ~RF_PSS_RECON_BLOCKED;
1906 1.4 oster
1907 1.4 oster
1908 1.87 perry while (pssPtr->blockWaitList) {
1909 1.13 oster /* spin through the block-wait list and
1910 1.13 oster release all the waiters */
1911 1.4 oster cb = pssPtr->blockWaitList;
1912 1.4 oster pssPtr->blockWaitList = cb->next;
1913 1.4 oster cb->next = NULL;
1914 1.57 oster rf_CauseReconEvent(raidPtr, cb->col, NULL, RF_REVENT_BLOCKCLEAR);
1915 1.4 oster rf_FreeCallbackDesc(cb);
1916 1.4 oster }
1917 1.13 oster if (!(pssPtr->flags & RF_PSS_UNDER_RECON)) {
1918 1.13 oster /* if no recon was requested while recon was blocked */
1919 1.57 oster rf_PSStatusDelete(raidPtr, raidPtr->reconControl->pssTable, pssPtr);
1920 1.4 oster }
1921 1.4 oster }
1922 1.1 oster out:
1923 1.57 oster RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
1924 1.4 oster return (0);
1925 1.1 oster }
1926 1.104 oster
1927 1.104 oster void
1928 1.104 oster rf_WakeupHeadSepCBWaiters(RF_Raid_t *raidPtr)
1929 1.104 oster {
1930 1.104 oster RF_CallbackDesc_t *p;
1931 1.104 oster
1932 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1933 1.104 oster while(raidPtr->reconControl->rb_lock) {
1934 1.112 mrg rf_wait_cond2(raidPtr->reconControl->rb_cv,
1935 1.112 mrg raidPtr->reconControl->rb_mutex);
1936 1.104 oster }
1937 1.104 oster
1938 1.104 oster raidPtr->reconControl->rb_lock = 1;
1939 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1940 1.104 oster
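	/* drain the entire head-separation callback list, waking every
	 * stalled column regardless of its wakeup threshold */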
1941 1.104 oster while (raidPtr->reconControl->headSepCBList) {
1942 1.104 oster p = raidPtr->reconControl->headSepCBList;
1943 1.104 oster raidPtr->reconControl->headSepCBList = p->next;
1944 1.104 oster p->next = NULL;
1945 1.104 oster rf_CauseReconEvent(raidPtr, p->col, NULL, RF_REVENT_HEADSEPCLEAR);
1946 1.104 oster rf_FreeCallbackDesc(p);
1947 1.104 oster }
1948 1.112 mrg rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
1949 1.104 oster raidPtr->reconControl->rb_lock = 0;
1950 1.112 mrg rf_broadcast_cond2(raidPtr->reconControl->rb_cv);
1951 1.112 mrg rf_unlock_mutex2(raidPtr->reconControl->rb_mutex);
1952 1.104 oster
1953 1.104 oster }
1954 1.104 oster