rf_reconstruct.c revision 1.98.8.2 1 1.98.8.2 ad /* $NetBSD: rf_reconstruct.c,v 1.98.8.2 2007/07/18 19:04:59 ad Exp $ */
2 1.98.8.2 ad /*
3 1.98.8.2 ad * Copyright (c) 1995 Carnegie-Mellon University.
4 1.98.8.2 ad * All rights reserved.
5 1.98.8.2 ad *
6 1.98.8.2 ad * Author: Mark Holland
7 1.98.8.2 ad *
8 1.98.8.2 ad * Permission to use, copy, modify and distribute this software and
9 1.98.8.2 ad * its documentation is hereby granted, provided that both the copyright
10 1.98.8.2 ad * notice and this permission notice appear in all copies of the
11 1.98.8.2 ad * software, derivative works or modified versions, and any portions
12 1.98.8.2 ad * thereof, and that both notices appear in supporting documentation.
13 1.98.8.2 ad *
14 1.98.8.2 ad * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15 1.98.8.2 ad * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16 1.98.8.2 ad * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17 1.98.8.2 ad *
18 1.98.8.2 ad * Carnegie Mellon requests users of this software to return to
19 1.98.8.2 ad *
20 1.98.8.2 ad * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
21 1.98.8.2 ad * School of Computer Science
22 1.98.8.2 ad * Carnegie Mellon University
23 1.98.8.2 ad * Pittsburgh PA 15213-3890
24 1.98.8.2 ad *
25 1.98.8.2 ad * any improvements or extensions that they make and grant Carnegie the
26 1.98.8.2 ad * rights to redistribute these changes.
27 1.98.8.2 ad */
28 1.98.8.2 ad
29 1.98.8.2 ad /************************************************************
30 1.98.8.2 ad *
31 1.98.8.2 ad * rf_reconstruct.c -- code to perform on-line reconstruction
32 1.98.8.2 ad *
33 1.98.8.2 ad ************************************************************/
34 1.98.8.2 ad
35 1.98.8.2 ad #include <sys/cdefs.h>
36 1.98.8.2 ad __KERNEL_RCSID(0, "$NetBSD: rf_reconstruct.c,v 1.98.8.2 2007/07/18 19:04:59 ad Exp $");
37 1.98.8.2 ad
38 1.98.8.2 ad #include <sys/param.h>
39 1.98.8.2 ad #include <sys/time.h>
40 1.98.8.2 ad #include <sys/buf.h>
41 1.98.8.2 ad #include <sys/errno.h>
42 1.98.8.2 ad #include <sys/systm.h>
43 1.98.8.2 ad #include <sys/proc.h>
44 1.98.8.2 ad #include <sys/ioctl.h>
45 1.98.8.2 ad #include <sys/fcntl.h>
46 1.98.8.2 ad #include <sys/vnode.h>
47 1.98.8.2 ad #include <dev/raidframe/raidframevar.h>
48 1.98.8.2 ad
49 1.98.8.2 ad #include "rf_raid.h"
50 1.98.8.2 ad #include "rf_reconutil.h"
51 1.98.8.2 ad #include "rf_revent.h"
52 1.98.8.2 ad #include "rf_reconbuffer.h"
53 1.98.8.2 ad #include "rf_acctrace.h"
54 1.98.8.2 ad #include "rf_etimer.h"
55 1.98.8.2 ad #include "rf_dag.h"
56 1.98.8.2 ad #include "rf_desc.h"
57 1.98.8.2 ad #include "rf_debugprint.h"
58 1.98.8.2 ad #include "rf_general.h"
59 1.98.8.2 ad #include "rf_driver.h"
60 1.98.8.2 ad #include "rf_utils.h"
61 1.98.8.2 ad #include "rf_shutdown.h"
62 1.98.8.2 ad
63 1.98.8.2 ad #include "rf_kintf.h"
64 1.98.8.2 ad
65 1.98.8.2 ad /* setting these to -1 causes them to be set to their default values if not set by debug options */
66 1.98.8.2 ad
67 1.98.8.2 ad #if RF_DEBUG_RECON
68 1.98.8.2 ad #define Dprintf(s) if (rf_reconDebug) rf_debug_printf(s,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
69 1.98.8.2 ad #define Dprintf1(s,a) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL)
70 1.98.8.2 ad #define Dprintf2(s,a,b) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL)
71 1.98.8.2 ad #define Dprintf3(s,a,b,c) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),NULL,NULL,NULL,NULL,NULL)
72 1.98.8.2 ad #define Dprintf4(s,a,b,c,d) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),NULL,NULL,NULL,NULL)
73 1.98.8.2 ad #define Dprintf5(s,a,b,c,d,e) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),NULL,NULL,NULL)
74 1.98.8.2 ad #define Dprintf6(s,a,b,c,d,e,f) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),(void *)((unsigned long)f),NULL,NULL)
75 1.98.8.2 ad #define Dprintf7(s,a,b,c,d,e,f,g) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),(void *)((unsigned long)f),(void *)((unsigned long)g),NULL)
76 1.98.8.2 ad
77 1.98.8.2 ad #define DDprintf1(s,a) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL)
78 1.98.8.2 ad #define DDprintf2(s,a,b) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL)
79 1.98.8.2 ad
80 1.98.8.2 ad #else /* RF_DEBUG_RECON */
81 1.98.8.2 ad
82 1.98.8.2 ad #define Dprintf(s) {}
83 1.98.8.2 ad #define Dprintf1(s,a) {}
84 1.98.8.2 ad #define Dprintf2(s,a,b) {}
85 1.98.8.2 ad #define Dprintf3(s,a,b,c) {}
86 1.98.8.2 ad #define Dprintf4(s,a,b,c,d) {}
87 1.98.8.2 ad #define Dprintf5(s,a,b,c,d,e) {}
88 1.98.8.2 ad #define Dprintf6(s,a,b,c,d,e,f) {}
89 1.98.8.2 ad #define Dprintf7(s,a,b,c,d,e,f,g) {}
90 1.98.8.2 ad
91 1.98.8.2 ad #define DDprintf1(s,a) {}
92 1.98.8.2 ad #define DDprintf2(s,a,b) {}
93 1.98.8.2 ad
94 1.98.8.2 ad #endif /* RF_DEBUG_RECON */
95 1.98.8.2 ad
96 1.98.8.2 ad #define RF_RECON_DONE_READS 1
97 1.98.8.2 ad #define RF_RECON_READ_ERROR 2
98 1.98.8.2 ad #define RF_RECON_WRITE_ERROR 3
99 1.98.8.2 ad #define RF_RECON_READ_STOPPED 4
100 1.98.8.2 ad
101 1.98.8.2 ad #define RF_MAX_FREE_RECONBUFFER 32
102 1.98.8.2 ad #define RF_MIN_FREE_RECONBUFFER 16
103 1.98.8.2 ad
104 1.98.8.2 ad static RF_RaidReconDesc_t *AllocRaidReconDesc(RF_Raid_t *, RF_RowCol_t,
105 1.98.8.2 ad RF_RaidDisk_t *, int, RF_RowCol_t);
106 1.98.8.2 ad static void FreeReconDesc(RF_RaidReconDesc_t *);
107 1.98.8.2 ad static int ProcessReconEvent(RF_Raid_t *, RF_ReconEvent_t *);
108 1.98.8.2 ad static int IssueNextReadRequest(RF_Raid_t *, RF_RowCol_t);
109 1.98.8.2 ad static int TryToRead(RF_Raid_t *, RF_RowCol_t);
110 1.98.8.2 ad static int ComputePSDiskOffsets(RF_Raid_t *, RF_StripeNum_t, RF_RowCol_t,
111 1.98.8.2 ad RF_SectorNum_t *, RF_SectorNum_t *, RF_RowCol_t *,
112 1.98.8.2 ad RF_SectorNum_t *);
113 1.98.8.2 ad static int IssueNextWriteRequest(RF_Raid_t *);
114 1.98.8.2 ad static int ReconReadDoneProc(void *, int);
115 1.98.8.2 ad static int ReconWriteDoneProc(void *, int);
116 1.98.8.2 ad static void CheckForNewMinHeadSep(RF_Raid_t *, RF_HeadSepLimit_t);
117 1.98.8.2 ad static int CheckHeadSeparation(RF_Raid_t *, RF_PerDiskReconCtrl_t *,
118 1.98.8.2 ad RF_RowCol_t, RF_HeadSepLimit_t,
119 1.98.8.2 ad RF_ReconUnitNum_t);
120 1.98.8.2 ad static int CheckForcedOrBlockedReconstruction(RF_Raid_t *,
121 1.98.8.2 ad RF_ReconParityStripeStatus_t *,
122 1.98.8.2 ad RF_PerDiskReconCtrl_t *,
123 1.98.8.2 ad RF_RowCol_t, RF_StripeNum_t,
124 1.98.8.2 ad RF_ReconUnitNum_t);
125 1.98.8.2 ad static void ForceReconReadDoneProc(void *, int);
126 1.98.8.2 ad static void rf_ShutdownReconstruction(void *);
127 1.98.8.2 ad
128 1.98.8.2 ad struct RF_ReconDoneProc_s {
129 1.98.8.2 ad void (*proc) (RF_Raid_t *, void *);
130 1.98.8.2 ad void *arg;
131 1.98.8.2 ad RF_ReconDoneProc_t *next;
132 1.98.8.2 ad };
133 1.98.8.2 ad
134 1.98.8.2 ad /**************************************************************************
135 1.98.8.2 ad *
136 1.98.8.2 ad * sets up the parameters that will be used by the reconstruction process
137 1.98.8.2 ad * currently there are none, except for those that the layout-specific
138 1.98.8.2 ad * configuration (e.g. rf_ConfigureDeclustered) routine sets up.
139 1.98.8.2 ad *
140 1.98.8.2 ad * in the kernel, we fire off the recon thread.
141 1.98.8.2 ad *
142 1.98.8.2 ad **************************************************************************/
143 1.98.8.2 ad static void
144 1.98.8.2 ad rf_ShutdownReconstruction(void *ignored)
145 1.98.8.2 ad {
146 1.98.8.2 ad pool_destroy(&rf_pools.reconbuffer);
147 1.98.8.2 ad }
148 1.98.8.2 ad
149 1.98.8.2 ad int
150 1.98.8.2 ad rf_ConfigureReconstruction(RF_ShutdownList_t **listp)
151 1.98.8.2 ad {
152 1.98.8.2 ad
153 1.98.8.2 ad rf_pool_init(&rf_pools.reconbuffer, sizeof(RF_ReconBuffer_t),
154 1.98.8.2 ad "rf_reconbuffer_pl", RF_MIN_FREE_RECONBUFFER, RF_MAX_FREE_RECONBUFFER);
155 1.98.8.2 ad rf_ShutdownCreate(listp, rf_ShutdownReconstruction, NULL);
156 1.98.8.2 ad
157 1.98.8.2 ad return (0);
158 1.98.8.2 ad }
159 1.98.8.2 ad
160 1.98.8.2 ad static RF_RaidReconDesc_t *
161 1.98.8.2 ad AllocRaidReconDesc(RF_Raid_t *raidPtr, RF_RowCol_t col,
162 1.98.8.2 ad RF_RaidDisk_t *spareDiskPtr, int numDisksDone,
163 1.98.8.2 ad RF_RowCol_t scol)
164 1.98.8.2 ad {
165 1.98.8.2 ad
166 1.98.8.2 ad RF_RaidReconDesc_t *reconDesc;
167 1.98.8.2 ad
168 1.98.8.2 ad RF_Malloc(reconDesc, sizeof(RF_RaidReconDesc_t),
169 1.98.8.2 ad (RF_RaidReconDesc_t *));
170 1.98.8.2 ad reconDesc->raidPtr = raidPtr;
171 1.98.8.2 ad reconDesc->col = col;
172 1.98.8.2 ad reconDesc->spareDiskPtr = spareDiskPtr;
173 1.98.8.2 ad reconDesc->numDisksDone = numDisksDone;
174 1.98.8.2 ad reconDesc->scol = scol;
175 1.98.8.2 ad reconDesc->next = NULL;
176 1.98.8.2 ad
177 1.98.8.2 ad return (reconDesc);
178 1.98.8.2 ad }
179 1.98.8.2 ad
180 1.98.8.2 ad static void
181 1.98.8.2 ad FreeReconDesc(RF_RaidReconDesc_t *reconDesc)
182 1.98.8.2 ad {
183 1.98.8.2 ad #if RF_RECON_STATS > 0
184 1.98.8.2 ad printf("raid%d: %lu recon event waits, %lu recon delays\n",
185 1.98.8.2 ad reconDesc->raidPtr->raidid,
186 1.98.8.2 ad (long) reconDesc->numReconEventWaits,
187 1.98.8.2 ad (long) reconDesc->numReconExecDelays);
188 1.98.8.2 ad #endif /* RF_RECON_STATS > 0 */
189 1.98.8.2 ad printf("raid%d: %lu max exec ticks\n",
190 1.98.8.2 ad reconDesc->raidPtr->raidid,
191 1.98.8.2 ad (long) reconDesc->maxReconExecTicks);
192 1.98.8.2 ad #if (RF_RECON_STATS > 0) || defined(KERNEL)
193 1.98.8.2 ad printf("\n");
194 1.98.8.2 ad #endif /* (RF_RECON_STATS > 0) || KERNEL */
195 1.98.8.2 ad RF_Free(reconDesc, sizeof(RF_RaidReconDesc_t));
196 1.98.8.2 ad }
197 1.98.8.2 ad
198 1.98.8.2 ad
199 1.98.8.2 ad /*****************************************************************************
200 1.98.8.2 ad *
201 1.98.8.2 ad * primary routine to reconstruct a failed disk. This should be called from
202 1.98.8.2 ad * within its own thread. It won't return until reconstruction completes,
203 1.98.8.2 ad * fails, or is aborted.
204 1.98.8.2 ad *****************************************************************************/
205 1.98.8.2 ad int
206 1.98.8.2 ad rf_ReconstructFailedDisk(RF_Raid_t *raidPtr, RF_RowCol_t col)
207 1.98.8.2 ad {
208 1.98.8.2 ad const RF_LayoutSW_t *lp;
209 1.98.8.2 ad int rc;
210 1.98.8.2 ad
211 1.98.8.2 ad lp = raidPtr->Layout.map;
212 1.98.8.2 ad if (lp->SubmitReconBuffer) {
213 1.98.8.2 ad /*
214 1.98.8.2 ad * The current infrastructure only supports reconstructing one
215 1.98.8.2 ad * disk at a time for each array.
216 1.98.8.2 ad */
217 1.98.8.2 ad RF_LOCK_MUTEX(raidPtr->mutex);
218 1.98.8.2 ad while (raidPtr->reconInProgress) {
219 1.98.8.2 ad RF_WAIT_COND(raidPtr->waitForReconCond, raidPtr->mutex);
220 1.98.8.2 ad }
221 1.98.8.2 ad raidPtr->reconInProgress++;
222 1.98.8.2 ad RF_UNLOCK_MUTEX(raidPtr->mutex);
223 1.98.8.2 ad rc = rf_ReconstructFailedDiskBasic(raidPtr, col);
224 1.98.8.2 ad RF_LOCK_MUTEX(raidPtr->mutex);
225 1.98.8.2 ad raidPtr->reconInProgress--;
226 1.98.8.2 ad RF_UNLOCK_MUTEX(raidPtr->mutex);
227 1.98.8.2 ad } else {
228 1.98.8.2 ad RF_ERRORMSG1("RECON: no way to reconstruct failed disk for arch %c\n",
229 1.98.8.2 ad lp->parityConfig);
230 1.98.8.2 ad rc = EIO;
231 1.98.8.2 ad }
232 1.98.8.2 ad RF_SIGNAL_COND(raidPtr->waitForReconCond);
233 1.98.8.2 ad return (rc);
234 1.98.8.2 ad }
235 1.98.8.2 ad
/*
 * Core of failed-disk reconstruction: pick a spare to rebuild onto,
 * run the rebuild, and update component labels and disk/array status
 * according to the outcome.
 *
 * Caller (rf_ReconstructFailedDisk) has already claimed the array's
 * single reconstruction slot.  Returns 0 on success, EINVAL if a
 * distributed-spare array is not in the degraded state, ENOSPC if no
 * spare disk is available, or the error from
 * rf_ContinueReconstructFailedDisk().
 */
int
rf_ReconstructFailedDiskBasic(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	RF_ComponentLabel_t c_label;
	RF_RaidDisk_t *spareDiskPtr = NULL;
	RF_RaidReconDesc_t *reconDesc;
	RF_RowCol_t scol;
	int numDisksDone = 0, rc;

	/* first look for a spare drive onto which to reconstruct the data */
	/* spare disk descriptors are stored in row 0. This may have to
	 * change eventually */

	RF_LOCK_MUTEX(raidPtr->mutex);
	RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed);
#if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
		/* Distributed sparing: spare space lives on the surviving
		 * disks, so there is no dedicated spare column (scol = -1).
		 * Only legal when the array is actually degraded. */
		if (raidPtr->status != rf_rs_degraded) {
			RF_ERRORMSG1("Unable to reconstruct disk at col %d because status not degraded\n", col);
			RF_UNLOCK_MUTEX(raidPtr->mutex);
			return (EINVAL);
		}
		scol = (-1);
	} else {
#endif
		/* Dedicated sparing: scan the spare columns (they follow the
		 * data columns) for the first one still marked rf_ds_spare,
		 * and claim it. */
		for (scol = raidPtr->numCol; scol < raidPtr->numCol + raidPtr->numSpare; scol++) {
			if (raidPtr->Disks[scol].status == rf_ds_spare) {
				spareDiskPtr = &raidPtr->Disks[scol];
				spareDiskPtr->status = rf_ds_used_spare;
				break;
			}
		}
		if (!spareDiskPtr) {
			RF_ERRORMSG1("Unable to reconstruct disk at col %d because no spares are available\n", col);
			RF_UNLOCK_MUTEX(raidPtr->mutex);
			return (ENOSPC);
		}
		printf("RECON: initiating reconstruction on col %d -> spare at col %d\n", col, scol);
#if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
	}
#endif
	RF_UNLOCK_MUTEX(raidPtr->mutex);

	/* Build the descriptor that carries per-rebuild state, zero its
	 * statistics, and run the rebuild itself. */
	reconDesc = AllocRaidReconDesc((void *) raidPtr, col, spareDiskPtr, numDisksDone, scol);
	raidPtr->reconDesc = (void *) reconDesc;
#if RF_RECON_STATS > 0
	reconDesc->hsStallCount = 0;
	reconDesc->numReconExecDelays = 0;
	reconDesc->numReconEventWaits = 0;
#endif				/* RF_RECON_STATS > 0 */
	reconDesc->reconExecTimerRunning = 0;
	reconDesc->reconExecTicks = 0;
	reconDesc->maxReconExecTicks = 0;
	rc = rf_ContinueReconstructFailedDisk(reconDesc);

	if (!rc) {
		/* fix up the component label */
		/* Don't actually need the read here.. */
		raidread_component_label(
			raidPtr->raid_cinfo[scol].ci_dev,
			raidPtr->raid_cinfo[scol].ci_vp,
			&c_label);

		raid_init_component_label( raidPtr, &c_label);
		c_label.row = 0;
		c_label.column = col;
		c_label.clean = RF_RAID_DIRTY;
		c_label.status = rf_ds_optimal;
		c_label.partitionSize = raidPtr->Disks[scol].partitionSize;

		/* We've just done a rebuild based on all the other
		   disks, so at this point the parity is known to be
		   clean, even if it wasn't before. */

		/* XXX doesn't hold for RAID 6!!*/

		RF_LOCK_MUTEX(raidPtr->mutex);
		raidPtr->parity_good = RF_RAID_CLEAN;
		RF_UNLOCK_MUTEX(raidPtr->mutex);

		/* XXXX MORE NEEDED HERE */

		raidwrite_component_label(
			raidPtr->raid_cinfo[scol].ci_dev,
			raidPtr->raid_cinfo[scol].ci_vp,
			&c_label);

	} else {
		/* Reconstruct failed. */

		RF_LOCK_MUTEX(raidPtr->mutex);
		/* Failed disk goes back to "failed" status */
		raidPtr->Disks[col].status = rf_ds_failed;

		/* Spare disk goes back to "spare" status. */
		spareDiskPtr->status = rf_ds_spare;
		RF_UNLOCK_MUTEX(raidPtr->mutex);

	}
	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
	return (rc);
}
338 1.98.8.2 ad
339 1.98.8.2 ad /*
340 1.98.8.2 ad
341 1.98.8.2 ad Allow reconstructing a disk in-place -- i.e. component /dev/sd2e goes AWOL,
342 1.98.8.2 ad and you don't get a spare until the next Monday. With this function
343 1.98.8.2 ad (and hot-swappable drives) you can now put your new disk containing
344 1.98.8.2 ad /dev/sd2e on the bus, scsictl it alive, and then use raidctl(8) to
345 1.98.8.2 ad rebuild the data "on the spot".
346 1.98.8.2 ad
347 1.98.8.2 ad */
348 1.98.8.2 ad
/*
 * Reconstruct a component "in place": rebuild onto the same column
 * whose component went away, using a (possibly replaced) disk at the
 * same device name.  The column is marked failed if it is not already,
 * the device is closed and re-opened, and the rebuild then treats the
 * re-opened component itself as the spare.
 *
 * Returns 0 on success; EIO if the layout cannot reconstruct; EINVAL
 * for distributed-spare layouts (unsupported here); an errno from
 * dk_lookup/VOP_GETATTR/VOP_IOCTL if the device cannot be re-opened;
 * or the error from rf_ContinueReconstructFailedDisk().
 */
int
rf_ReconstructInPlace(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	RF_RaidDisk_t *spareDiskPtr = NULL;
	RF_RaidReconDesc_t *reconDesc;
	const RF_LayoutSW_t *lp;
	RF_ComponentLabel_t c_label;
	int numDisksDone = 0, rc;
	struct partinfo dpart;
	struct vnode *vp;
	struct vattr va;
	struct lwp *lwp;
	int retcode;
	int ac;

	lp = raidPtr->Layout.map;
	if (!lp->SubmitReconBuffer) {
		RF_ERRORMSG1("RECON: no way to reconstruct failed disk for arch %c\n",
			     lp->parityConfig);
		/* wakeup anyone who might be waiting to do a reconstruct */
		RF_SIGNAL_COND(raidPtr->waitForReconCond);
		return(EIO);
	}

	/*
	 * The current infrastructure only supports reconstructing one
	 * disk at a time for each array.
	 */
	RF_LOCK_MUTEX(raidPtr->mutex);

	if (raidPtr->Disks[col].status != rf_ds_failed) {
		/* "It's gone..." -- force the component to failed and
		 * publish the degraded state before rebuilding onto it. */
		raidPtr->numFailures++;
		raidPtr->Disks[col].status = rf_ds_failed;
		raidPtr->status = rf_rs_degraded;
		RF_UNLOCK_MUTEX(raidPtr->mutex);
		rf_update_component_labels(raidPtr,
					   RF_NORMAL_COMPONENT_UPDATE);
		RF_LOCK_MUTEX(raidPtr->mutex);
	}

	while (raidPtr->reconInProgress) {
		RF_WAIT_COND(raidPtr->waitForReconCond, raidPtr->mutex);
	}

	raidPtr->reconInProgress++;

	/* first look for a spare drive onto which to reconstruct the
	   data.  spare disk descriptors are stored in row 0. This
	   may have to change eventually */

	/* Actually, we don't care if it's failed or not...  On a RAID
	   set with correct parity, this function should be callable
	   on any component without ill affects. */
	/* RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed); */

#if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
		RF_ERRORMSG1("Unable to reconstruct to disk at col %d: operation not supported for RF_DISTRIBUTE_SPARE\n", col);

		raidPtr->reconInProgress--;
		RF_UNLOCK_MUTEX(raidPtr->mutex);
		RF_SIGNAL_COND(raidPtr->waitForReconCond);
		return (EINVAL);
	}
#endif
	lwp = raidPtr->engine_thread;

	/* This device may have been opened successfully the
	   first time. Close it before trying to open it again.. */

	if (raidPtr->raid_cinfo[col].ci_vp != NULL) {
#if 0
		printf("Closed the open device: %s\n",
		       raidPtr->Disks[col].devname);
#endif
		vp = raidPtr->raid_cinfo[col].ci_vp;
		ac = raidPtr->Disks[col].auto_configured;
		/* rf_close_component() may sleep; drop the mutex around it. */
		RF_UNLOCK_MUTEX(raidPtr->mutex);
		rf_close_component(raidPtr, vp, ac);
		RF_LOCK_MUTEX(raidPtr->mutex);
		raidPtr->raid_cinfo[col].ci_vp = NULL;
	}
	/* note that this disk was *not* auto_configured (any longer)*/
	raidPtr->Disks[col].auto_configured = 0;

#if 0
	printf("About to (re-)open the device for rebuilding: %s\n",
	       raidPtr->Disks[col].devname);
#endif
	RF_UNLOCK_MUTEX(raidPtr->mutex);
	retcode = dk_lookup(raidPtr->Disks[col].devname, lwp, &vp, UIO_SYSSPACE);

	if (retcode) {
		printf("raid%d: rebuilding: dk_lookup on device: %s failed: %d!\n",raidPtr->raidid,
		       raidPtr->Disks[col].devname, retcode);

		/* the component isn't responding properly...
		   must be still dead :-( */
		RF_LOCK_MUTEX(raidPtr->mutex);
		raidPtr->reconInProgress--;
		RF_UNLOCK_MUTEX(raidPtr->mutex);
		RF_SIGNAL_COND(raidPtr->waitForReconCond);
		return(retcode);
	}

	/* Ok, so we can at least do a lookup...
	   How about actually getting a vp for it? */

	if ((retcode = VOP_GETATTR(vp, &va, lwp->l_cred, lwp)) != 0) {
		RF_LOCK_MUTEX(raidPtr->mutex);
		raidPtr->reconInProgress--;
		RF_UNLOCK_MUTEX(raidPtr->mutex);
		RF_SIGNAL_COND(raidPtr->waitForReconCond);
		return(retcode);
	}

	/* Query the partition geometry so blockSize/numBlocks below
	 * reflect the (possibly new) disk. */
	retcode = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, lwp->l_cred, lwp);
	if (retcode) {
		RF_LOCK_MUTEX(raidPtr->mutex);
		raidPtr->reconInProgress--;
		RF_UNLOCK_MUTEX(raidPtr->mutex);
		RF_SIGNAL_COND(raidPtr->waitForReconCond);
		return(retcode);
	}
	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->Disks[col].blockSize = dpart.disklab->d_secsize;

	/* reserve the leading rf_protectedSectors for the component label */
	raidPtr->Disks[col].numBlocks = dpart.part->p_size -
		rf_protectedSectors;

	raidPtr->raid_cinfo[col].ci_vp = vp;
	raidPtr->raid_cinfo[col].ci_dev = va.va_rdev;

	raidPtr->Disks[col].dev = va.va_rdev;

	/* we allow the user to specify that only a fraction
	   of the disks should be used this is just for debug:
	   it speeds up * the parity scan */
	raidPtr->Disks[col].numBlocks = raidPtr->Disks[col].numBlocks *
		rf_sizePercentage / 100;
	RF_UNLOCK_MUTEX(raidPtr->mutex);

	/* In-place: the "spare" is the very column being rebuilt. */
	spareDiskPtr = &raidPtr->Disks[col];
	spareDiskPtr->status = rf_ds_used_spare;

	printf("raid%d: initiating in-place reconstruction on column %d\n",
	       raidPtr->raidid, col);

	reconDesc = AllocRaidReconDesc((void *) raidPtr, col, spareDiskPtr,
				       numDisksDone, col);
	raidPtr->reconDesc = (void *) reconDesc;
#if RF_RECON_STATS > 0
	reconDesc->hsStallCount = 0;
	reconDesc->numReconExecDelays = 0;
	reconDesc->numReconEventWaits = 0;
#endif				/* RF_RECON_STATS > 0 */
	reconDesc->reconExecTimerRunning = 0;
	reconDesc->reconExecTicks = 0;
	reconDesc->maxReconExecTicks = 0;
	rc = rf_ContinueReconstructFailedDisk(reconDesc);

	if (!rc) {
		RF_LOCK_MUTEX(raidPtr->mutex);
		/* Need to set these here, as at this point it'll be claiming
		   that the disk is in rf_ds_spared!  But we know better :-) */

		raidPtr->Disks[col].status = rf_ds_optimal;
		raidPtr->status = rf_rs_optimal;
		RF_UNLOCK_MUTEX(raidPtr->mutex);

		/* fix up the component label */
		/* Don't actually need the read here.. */
		raidread_component_label(raidPtr->raid_cinfo[col].ci_dev,
					 raidPtr->raid_cinfo[col].ci_vp,
					 &c_label);

		RF_LOCK_MUTEX(raidPtr->mutex);
		raid_init_component_label(raidPtr, &c_label);

		c_label.row = 0;
		c_label.column = col;

		/* We've just done a rebuild based on all the other
		   disks, so at this point the parity is known to be
		   clean, even if it wasn't before. */

		/* XXX doesn't hold for RAID 6!!*/

		raidPtr->parity_good = RF_RAID_CLEAN;
		RF_UNLOCK_MUTEX(raidPtr->mutex);

		raidwrite_component_label(raidPtr->raid_cinfo[col].ci_dev,
					  raidPtr->raid_cinfo[col].ci_vp,
					  &c_label);

	} else {
		/* Reconstruct-in-place failed.  Disk goes back to
		   "failed" status, regardless of what it was before.  */
		RF_LOCK_MUTEX(raidPtr->mutex);
		raidPtr->Disks[col].status = rf_ds_failed;
		RF_UNLOCK_MUTEX(raidPtr->mutex);
	}

	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);

	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->reconInProgress--;
	RF_UNLOCK_MUTEX(raidPtr->mutex);

	RF_SIGNAL_COND(raidPtr->waitForReconCond);
	return (rc);
}
562 1.98.8.2 ad
563 1.98.8.2 ad
564 1.98.8.2 ad int
565 1.98.8.2 ad rf_ContinueReconstructFailedDisk(RF_RaidReconDesc_t *reconDesc)
566 1.98.8.2 ad {
567 1.98.8.2 ad RF_Raid_t *raidPtr = reconDesc->raidPtr;
568 1.98.8.2 ad RF_RowCol_t col = reconDesc->col;
569 1.98.8.2 ad RF_RowCol_t scol = reconDesc->scol;
570 1.98.8.2 ad RF_ReconMap_t *mapPtr;
571 1.98.8.2 ad RF_ReconCtrl_t *tmp_reconctrl;
572 1.98.8.2 ad RF_ReconEvent_t *event;
573 1.98.8.2 ad RF_CallbackDesc_t *p;
574 1.98.8.2 ad struct timeval etime, elpsd;
575 1.98.8.2 ad unsigned long xor_s, xor_resid_us;
576 1.98.8.2 ad int i, ds;
577 1.98.8.2 ad int status;
578 1.98.8.2 ad int recon_error, write_error;
579 1.98.8.2 ad
580 1.98.8.2 ad raidPtr->accumXorTimeUs = 0;
581 1.98.8.2 ad #if RF_ACC_TRACE > 0
582 1.98.8.2 ad /* create one trace record per physical disk */
583 1.98.8.2 ad RF_Malloc(raidPtr->recon_tracerecs, raidPtr->numCol * sizeof(RF_AccTraceEntry_t), (RF_AccTraceEntry_t *));
584 1.98.8.2 ad #endif
585 1.98.8.2 ad
586 1.98.8.2 ad /* quiesce the array prior to starting recon. this is needed
587 1.98.8.2 ad * to assure no nasty interactions with pending user writes.
588 1.98.8.2 ad * We need to do this before we change the disk or row status. */
589 1.98.8.2 ad
590 1.98.8.2 ad Dprintf("RECON: begin request suspend\n");
591 1.98.8.2 ad rf_SuspendNewRequestsAndWait(raidPtr);
592 1.98.8.2 ad Dprintf("RECON: end request suspend\n");
593 1.98.8.2 ad
594 1.98.8.2 ad /* allocate our RF_ReconCTRL_t before we protect raidPtr->reconControl[row] */
595 1.98.8.2 ad tmp_reconctrl = rf_MakeReconControl(reconDesc, col, scol);
596 1.98.8.2 ad
597 1.98.8.2 ad RF_LOCK_MUTEX(raidPtr->mutex);
598 1.98.8.2 ad
599 1.98.8.2 ad /* create the reconstruction control pointer and install it in
600 1.98.8.2 ad * the right slot */
601 1.98.8.2 ad raidPtr->reconControl = tmp_reconctrl;
602 1.98.8.2 ad mapPtr = raidPtr->reconControl->reconMap;
603 1.98.8.2 ad raidPtr->reconControl->numRUsTotal = mapPtr->totalRUs;
604 1.98.8.2 ad raidPtr->reconControl->numRUsComplete = 0;
605 1.98.8.2 ad raidPtr->status = rf_rs_reconstructing;
606 1.98.8.2 ad raidPtr->Disks[col].status = rf_ds_reconstructing;
607 1.98.8.2 ad raidPtr->Disks[col].spareCol = scol;
608 1.98.8.2 ad
609 1.98.8.2 ad RF_UNLOCK_MUTEX(raidPtr->mutex);
610 1.98.8.2 ad
611 1.98.8.2 ad RF_GETTIME(raidPtr->reconControl->starttime);
612 1.98.8.2 ad
613 1.98.8.2 ad /* now start up the actual reconstruction: issue a read for
614 1.98.8.2 ad * each surviving disk */
615 1.98.8.2 ad
616 1.98.8.2 ad reconDesc->numDisksDone = 0;
617 1.98.8.2 ad for (i = 0; i < raidPtr->numCol; i++) {
618 1.98.8.2 ad if (i != col) {
619 1.98.8.2 ad /* find and issue the next I/O on the
620 1.98.8.2 ad * indicated disk */
621 1.98.8.2 ad if (IssueNextReadRequest(raidPtr, i)) {
622 1.98.8.2 ad Dprintf1("RECON: done issuing for c%d\n", i);
623 1.98.8.2 ad reconDesc->numDisksDone++;
624 1.98.8.2 ad }
625 1.98.8.2 ad }
626 1.98.8.2 ad }
627 1.98.8.2 ad
628 1.98.8.2 ad Dprintf("RECON: resume requests\n");
629 1.98.8.2 ad rf_ResumeNewRequests(raidPtr);
630 1.98.8.2 ad
631 1.98.8.2 ad /* process reconstruction events until all disks report that
632 1.98.8.2 ad * they've completed all work */
633 1.98.8.2 ad
634 1.98.8.2 ad mapPtr = raidPtr->reconControl->reconMap;
635 1.98.8.2 ad recon_error = 0;
636 1.98.8.2 ad write_error = 0;
637 1.98.8.2 ad
638 1.98.8.2 ad while (reconDesc->numDisksDone < raidPtr->numCol - 1) {
639 1.98.8.2 ad
640 1.98.8.2 ad event = rf_GetNextReconEvent(reconDesc);
641 1.98.8.2 ad status = ProcessReconEvent(raidPtr, event);
642 1.98.8.2 ad
643 1.98.8.2 ad /* the normal case is that a read completes, and all is well. */
644 1.98.8.2 ad if (status == RF_RECON_DONE_READS) {
645 1.98.8.2 ad reconDesc->numDisksDone++;
646 1.98.8.2 ad } else if ((status == RF_RECON_READ_ERROR) ||
647 1.98.8.2 ad (status == RF_RECON_WRITE_ERROR)) {
648 1.98.8.2 ad /* an error was encountered while reconstructing...
649 1.98.8.2 ad Pretend we've finished this disk.
650 1.98.8.2 ad */
651 1.98.8.2 ad recon_error = 1;
652 1.98.8.2 ad raidPtr->reconControl->error = 1;
653 1.98.8.2 ad
654 1.98.8.2 ad /* bump the numDisksDone count for reads,
655 1.98.8.2 ad but not for writes */
656 1.98.8.2 ad if (status == RF_RECON_READ_ERROR)
657 1.98.8.2 ad reconDesc->numDisksDone++;
658 1.98.8.2 ad
659 1.98.8.2 ad /* write errors are special -- when we are
660 1.98.8.2 ad done dealing with the reads that are
661 1.98.8.2 ad finished, we don't want to wait for any
662 1.98.8.2 ad writes */
663 1.98.8.2 ad if (status == RF_RECON_WRITE_ERROR)
664 1.98.8.2 ad write_error = 1;
665 1.98.8.2 ad
666 1.98.8.2 ad } else if (status == RF_RECON_READ_STOPPED) {
667 1.98.8.2 ad /* count this component as being "done" */
668 1.98.8.2 ad reconDesc->numDisksDone++;
669 1.98.8.2 ad }
670 1.98.8.2 ad
671 1.98.8.2 ad if (recon_error) {
672 1.98.8.2 ad
673 1.98.8.2 ad /* make sure any stragglers are woken up so that
674 1.98.8.2 ad their theads will complete, and we can get out
675 1.98.8.2 ad of here with all IO processed */
676 1.98.8.2 ad
677 1.98.8.2 ad while (raidPtr->reconControl->headSepCBList) {
678 1.98.8.2 ad p = raidPtr->reconControl->headSepCBList;
679 1.98.8.2 ad raidPtr->reconControl->headSepCBList = p->next;
680 1.98.8.2 ad p->next = NULL;
681 1.98.8.2 ad rf_CauseReconEvent(raidPtr, p->col, NULL, RF_REVENT_HEADSEPCLEAR);
682 1.98.8.2 ad rf_FreeCallbackDesc(p);
683 1.98.8.2 ad }
684 1.98.8.2 ad }
685 1.98.8.2 ad
686 1.98.8.2 ad raidPtr->reconControl->numRUsTotal =
687 1.98.8.2 ad mapPtr->totalRUs;
688 1.98.8.2 ad raidPtr->reconControl->numRUsComplete =
689 1.98.8.2 ad mapPtr->totalRUs -
690 1.98.8.2 ad rf_UnitsLeftToReconstruct(mapPtr);
691 1.98.8.2 ad
692 1.98.8.2 ad #if RF_DEBUG_RECON
693 1.98.8.2 ad raidPtr->reconControl->percentComplete =
694 1.98.8.2 ad (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
695 1.98.8.2 ad if (rf_prReconSched) {
696 1.98.8.2 ad rf_PrintReconSchedule(raidPtr->reconControl->reconMap, &(raidPtr->reconControl->starttime));
697 1.98.8.2 ad }
698 1.98.8.2 ad #endif
699 1.98.8.2 ad }
700 1.98.8.2 ad
701 1.98.8.2 ad mapPtr = raidPtr->reconControl->reconMap;
702 1.98.8.2 ad if (rf_reconDebug) {
703 1.98.8.2 ad printf("RECON: all reads completed\n");
704 1.98.8.2 ad }
705 1.98.8.2 ad /* at this point all the reads have completed. We now wait
706 1.98.8.2 ad * for any pending writes to complete, and then we're done */
707 1.98.8.2 ad
708 1.98.8.2 ad while (!recon_error && rf_UnitsLeftToReconstruct(raidPtr->reconControl->reconMap) > 0) {
709 1.98.8.2 ad
710 1.98.8.2 ad event = rf_GetNextReconEvent(reconDesc);
711 1.98.8.2 ad status = ProcessReconEvent(raidPtr, event);
712 1.98.8.2 ad
713 1.98.8.2 ad if (status == RF_RECON_WRITE_ERROR) {
714 1.98.8.2 ad recon_error = 1;
715 1.98.8.2 ad raidPtr->reconControl->error = 1;
716 1.98.8.2 ad /* an error was encountered at the very end... bail */
717 1.98.8.2 ad } else {
718 1.98.8.2 ad #if RF_DEBUG_RECON
719 1.98.8.2 ad raidPtr->reconControl->percentComplete = 100 - (rf_UnitsLeftToReconstruct(mapPtr) * 100 / mapPtr->totalRUs);
720 1.98.8.2 ad if (rf_prReconSched) {
721 1.98.8.2 ad rf_PrintReconSchedule(raidPtr->reconControl->reconMap, &(raidPtr->reconControl->starttime));
722 1.98.8.2 ad }
723 1.98.8.2 ad #endif
724 1.98.8.2 ad }
725 1.98.8.2 ad }
726 1.98.8.2 ad
727 1.98.8.2 ad if (recon_error) {
728 1.98.8.2 ad /* we've encountered an error in reconstructing. */
729 1.98.8.2 ad printf("raid%d: reconstruction failed.\n", raidPtr->raidid);
730 1.98.8.2 ad
731 1.98.8.2 ad /* we start by blocking IO to the RAID set. */
732 1.98.8.2 ad rf_SuspendNewRequestsAndWait(raidPtr);
733 1.98.8.2 ad
734 1.98.8.2 ad RF_LOCK_MUTEX(raidPtr->mutex);
735 1.98.8.2 ad /* mark set as being degraded, rather than
736 1.98.8.2 ad rf_rs_reconstructing as we were before the problem.
737 1.98.8.2 ad After this is done we can update status of the
738 1.98.8.2 ad component disks without worrying about someone
739 1.98.8.2 ad trying to read from a failed component.
740 1.98.8.2 ad */
741 1.98.8.2 ad raidPtr->status = rf_rs_degraded;
742 1.98.8.2 ad RF_UNLOCK_MUTEX(raidPtr->mutex);
743 1.98.8.2 ad
744 1.98.8.2 ad /* resume IO */
745 1.98.8.2 ad rf_ResumeNewRequests(raidPtr);
746 1.98.8.2 ad
747 1.98.8.2 ad /* At this point there are two cases:
748 1.98.8.2 ad 1) If we've experienced a read error, then we've
749 1.98.8.2 ad already waited for all the reads we're going to get,
750 1.98.8.2 ad and we just need to wait for the writes.
751 1.98.8.2 ad
752 1.98.8.2 ad 2) If we've experienced a write error, we've also
753 1.98.8.2 ad already waited for all the reads to complete,
754 1.98.8.2 ad but there is little point in waiting for the writes --
755 1.98.8.2 ad when they do complete, they will just be ignored.
756 1.98.8.2 ad
757 1.98.8.2 ad So we just wait for writes to complete if we didn't have a
758 1.98.8.2 ad write error.
759 1.98.8.2 ad */
760 1.98.8.2 ad
761 1.98.8.2 ad if (!write_error) {
762 1.98.8.2 ad /* wait for writes to complete */
763 1.98.8.2 ad while (raidPtr->reconControl->pending_writes > 0) {
764 1.98.8.2 ad
765 1.98.8.2 ad event = rf_GetNextReconEvent(reconDesc);
766 1.98.8.2 ad status = ProcessReconEvent(raidPtr, event);
767 1.98.8.2 ad
768 1.98.8.2 ad if (status == RF_RECON_WRITE_ERROR) {
769 1.98.8.2 ad raidPtr->reconControl->error = 1;
770 1.98.8.2 ad /* an error was encountered at the very end... bail.
771 1.98.8.2 ad This will be very bad news for the user, since
772 1.98.8.2 ad at this point there will have been a read error
773 1.98.8.2 ad on one component, and a write error on another!
774 1.98.8.2 ad */
775 1.98.8.2 ad break;
776 1.98.8.2 ad }
777 1.98.8.2 ad }
778 1.98.8.2 ad }
779 1.98.8.2 ad
780 1.98.8.2 ad
781 1.98.8.2 ad /* cleanup */
782 1.98.8.2 ad
783 1.98.8.2 ad /* drain the event queue - after waiting for the writes above,
784 1.98.8.2 ad there shouldn't be much (if anything!) left in the queue. */
785 1.98.8.2 ad
786 1.98.8.2 ad rf_DrainReconEventQueue(reconDesc);
787 1.98.8.2 ad
788 1.98.8.2 ad /* XXX As much as we'd like to free the recon control structure
789 1.98.8.2 ad and the reconDesc, we have no way of knowing if/when those will
790 1.98.8.2 ad be touched by IO that has yet to occur. It is rather poor to be
791 1.98.8.2 ad basically causing a 'memory leak' here, but there doesn't seem to be
792 1.98.8.2 ad a cleaner alternative at this time. Perhaps when the reconstruct code
793 1.98.8.2 ad gets a makeover this problem will go away.
794 1.98.8.2 ad */
795 1.98.8.2 ad #if 0
796 1.98.8.2 ad rf_FreeReconControl(raidPtr);
797 1.98.8.2 ad #endif
798 1.98.8.2 ad
799 1.98.8.2 ad #if RF_ACC_TRACE > 0
800 1.98.8.2 ad RF_Free(raidPtr->recon_tracerecs, raidPtr->numCol * sizeof(RF_AccTraceEntry_t));
801 1.98.8.2 ad #endif
802 1.98.8.2 ad /* XXX see comment above */
803 1.98.8.2 ad #if 0
804 1.98.8.2 ad FreeReconDesc(reconDesc);
805 1.98.8.2 ad #endif
806 1.98.8.2 ad
807 1.98.8.2 ad return (1);
808 1.98.8.2 ad }
809 1.98.8.2 ad
810 1.98.8.2 ad /* Success: mark the dead disk as reconstructed. We quiesce
811 1.98.8.2 ad * the array here to assure no nasty interactions with pending
812 1.98.8.2 ad * user accesses when we free up the psstatus structure as
813 1.98.8.2 ad * part of FreeReconControl() */
814 1.98.8.2 ad
815 1.98.8.2 ad rf_SuspendNewRequestsAndWait(raidPtr);
816 1.98.8.2 ad
817 1.98.8.2 ad RF_LOCK_MUTEX(raidPtr->mutex);
818 1.98.8.2 ad raidPtr->numFailures--;
819 1.98.8.2 ad ds = (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE);
820 1.98.8.2 ad raidPtr->Disks[col].status = (ds) ? rf_ds_dist_spared : rf_ds_spared;
821 1.98.8.2 ad raidPtr->status = (ds) ? rf_rs_reconfigured : rf_rs_optimal;
822 1.98.8.2 ad RF_UNLOCK_MUTEX(raidPtr->mutex);
823 1.98.8.2 ad RF_GETTIME(etime);
824 1.98.8.2 ad RF_TIMEVAL_DIFF(&(raidPtr->reconControl->starttime), &etime, &elpsd);
825 1.98.8.2 ad
826 1.98.8.2 ad rf_ResumeNewRequests(raidPtr);
827 1.98.8.2 ad
828 1.98.8.2 ad printf("raid%d: Reconstruction of disk at col %d completed\n",
829 1.98.8.2 ad raidPtr->raidid, col);
830 1.98.8.2 ad xor_s = raidPtr->accumXorTimeUs / 1000000;
831 1.98.8.2 ad xor_resid_us = raidPtr->accumXorTimeUs % 1000000;
832 1.98.8.2 ad printf("raid%d: Recon time was %d.%06d seconds, accumulated XOR time was %ld us (%ld.%06ld)\n",
833 1.98.8.2 ad raidPtr->raidid,
834 1.98.8.2 ad (int) elpsd.tv_sec, (int) elpsd.tv_usec,
835 1.98.8.2 ad raidPtr->accumXorTimeUs, xor_s, xor_resid_us);
836 1.98.8.2 ad printf("raid%d: (start time %d sec %d usec, end time %d sec %d usec)\n",
837 1.98.8.2 ad raidPtr->raidid,
838 1.98.8.2 ad (int) raidPtr->reconControl->starttime.tv_sec,
839 1.98.8.2 ad (int) raidPtr->reconControl->starttime.tv_usec,
840 1.98.8.2 ad (int) etime.tv_sec, (int) etime.tv_usec);
841 1.98.8.2 ad #if RF_RECON_STATS > 0
842 1.98.8.2 ad printf("raid%d: Total head-sep stall count was %d\n",
843 1.98.8.2 ad raidPtr->raidid, (int) reconDesc->hsStallCount);
844 1.98.8.2 ad #endif /* RF_RECON_STATS > 0 */
845 1.98.8.2 ad rf_FreeReconControl(raidPtr);
846 1.98.8.2 ad #if RF_ACC_TRACE > 0
847 1.98.8.2 ad RF_Free(raidPtr->recon_tracerecs, raidPtr->numCol * sizeof(RF_AccTraceEntry_t));
848 1.98.8.2 ad #endif
849 1.98.8.2 ad FreeReconDesc(reconDesc);
850 1.98.8.2 ad
851 1.98.8.2 ad return (0);
852 1.98.8.2 ad
853 1.98.8.2 ad }
/*****************************************************************************
 * do the right thing upon each reconstruction event.
 *****************************************************************************/
/*
 * Dispatch one reconstruction event and return a status code for the
 * main reconstruction loop:
 *
 *   0                      - event handled, reconstruction proceeds
 *   RF_RECON_DONE_READS    - (via IssueNextReadRequest) no more units
 *                            to read on this column
 *   RF_RECON_READ_STOPPED  - nothing further was issued for this event
 *                            (the default, e.g. when ->error is set)
 *   RF_RECON_READ_ERROR    - a (possibly forced) read I/O failed
 *   RF_RECON_WRITE_ERROR   - a write I/O failed
 *
 * The event descriptor is always freed before returning.  Once
 * reconControl->error is set, completion events are still drained here
 * (their disk-queue data is freed) but no new I/O is started.
 */
static int
ProcessReconEvent(RF_Raid_t *raidPtr, RF_ReconEvent_t *event)
{
	int retcode = 0, submitblocked;
	RF_ReconBuffer_t *rbuf;
	RF_SectorCount_t sectorsPerRU;

	/* default result: no further work was issued for this column */
	retcode = RF_RECON_READ_STOPPED;

	Dprintf1("RECON: ProcessReconEvent type %d\n", event->type);
	switch (event->type) {

		/* a read I/O has completed */
	case RF_REVENT_READDONE:
		rbuf = raidPtr->reconControl->perDiskInfo[event->col].rbuf;
		Dprintf2("RECON: READDONE EVENT: col %d psid %ld\n",
		    event->col, rbuf->parityStripeID);
		Dprintf7("RECON: done read psid %ld buf %lx %02x %02x %02x %02x %02x\n",
		    rbuf->parityStripeID, rbuf->buffer, rbuf->buffer[0] & 0xff, rbuf->buffer[1] & 0xff,
		    rbuf->buffer[2] & 0xff, rbuf->buffer[3] & 0xff, rbuf->buffer[4] & 0xff);
		/* the disk-queue request that carried this read is done with */
		rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
		if (!raidPtr->reconControl->error) {
			/* hand the filled buffer to the XOR/submit machinery;
			   if submission blocked, a BUFCLEAR event will re-drive
			   this column later, so issue nothing now */
			submitblocked = rf_SubmitReconBuffer(rbuf, 0, 0);
			Dprintf1("RECON: submitblocked=%d\n", submitblocked);
			if (!submitblocked)
				retcode = IssueNextReadRequest(raidPtr, event->col);
			else
				retcode = 0;
		}
		break;

		/* a write I/O has completed */
	case RF_REVENT_WRITEDONE:
#if RF_DEBUG_RECON
		if (rf_floatingRbufDebug) {
			rf_CheckFloatingRbufCount(raidPtr, 1);
		}
#endif
		sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU;
		rbuf = (RF_ReconBuffer_t *) event->arg;
		rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
		Dprintf3("RECON: WRITEDONE EVENT: psid %d ru %d (%d %% complete)\n",
		    rbuf->parityStripeID, rbuf->which_ru, raidPtr->reconControl->percentComplete);
		/* record this reconstruction unit as rebuilt, then drop its
		   entry from the active parity-stripe-status table */
		rf_ReconMapUpdate(raidPtr, raidPtr->reconControl->reconMap,
		    rbuf->failedDiskSectorOffset, rbuf->failedDiskSectorOffset + sectorsPerRU - 1);
		rf_RemoveFromActiveReconTable(raidPtr, rbuf->parityStripeID, rbuf->which_ru);

		RF_LOCK_MUTEX(raidPtr->reconControl->rb_mutex);
		raidPtr->reconControl->pending_writes--;
		RF_UNLOCK_MUTEX(raidPtr->reconControl->rb_mutex);

		if (rbuf->type == RF_RBUF_TYPE_FLOATING) {
			/* returning a floating rbuf to the free pool is
			   serialized by rb_lock: sleep until we own it */
			RF_LOCK_MUTEX(raidPtr->reconControl->rb_mutex);
			while(raidPtr->reconControl->rb_lock) {
				ltsleep(&raidPtr->reconControl->rb_lock, PRIBIO, "reconctrlpre1", 0,
				    &raidPtr->reconControl->rb_mutex);
			}
			raidPtr->reconControl->rb_lock = 1;
			RF_UNLOCK_MUTEX(raidPtr->reconControl->rb_mutex);

			raidPtr->numFullReconBuffers--;
			rf_ReleaseFloatingReconBuffer(raidPtr, rbuf);

			/* release rb_lock and wake any other waiters */
			RF_LOCK_MUTEX(raidPtr->reconControl->rb_mutex);
			raidPtr->reconControl->rb_lock = 0;
			wakeup(&raidPtr->reconControl->rb_lock);
			RF_UNLOCK_MUTEX(raidPtr->reconControl->rb_mutex);
		} else
			if (rbuf->type == RF_RBUF_TYPE_FORCED)
				/* forced buffers are one-shot: destroy outright */
				rf_FreeReconBuffer(rbuf);
			else
				RF_ASSERT(0);
		retcode = 0;
		break;

	case RF_REVENT_BUFCLEAR:	/* A buffer-stall condition has been
					 * cleared */
		Dprintf1("RECON: BUFCLEAR EVENT: col %d\n", event->col);
		if (!raidPtr->reconControl->error) {
			/* retry the submission that previously blocked */
			submitblocked = rf_SubmitReconBuffer(raidPtr->reconControl->perDiskInfo[event->col].rbuf,
			    0, (int) (long) event->arg);
			RF_ASSERT(!submitblocked);	/* we wouldn't have gotten the
							 * BUFCLEAR event if we
							 * couldn't submit */
			retcode = IssueNextReadRequest(raidPtr, event->col);
		}
		break;

	case RF_REVENT_BLOCKCLEAR:	/* A user-write reconstruction
					 * blockage has been cleared */
		DDprintf1("RECON: BLOCKCLEAR EVENT: col %d\n", event->col);
		if (!raidPtr->reconControl->error) {
			retcode = TryToRead(raidPtr, event->col);
		}
		break;

	case RF_REVENT_HEADSEPCLEAR:	/* A max-head-separation
					 * reconstruction blockage has been
					 * cleared */
		Dprintf1("RECON: HEADSEPCLEAR EVENT: col %d\n", event->col);
		if (!raidPtr->reconControl->error) {
			retcode = TryToRead(raidPtr, event->col);
		}
		break;

		/* a buffer has become ready to write */
	case RF_REVENT_BUFREADY:
		Dprintf1("RECON: BUFREADY EVENT: col %d\n", event->col);
		if (!raidPtr->reconControl->error) {
			retcode = IssueNextWriteRequest(raidPtr);
#if RF_DEBUG_RECON
			if (rf_floatingRbufDebug) {
				rf_CheckFloatingRbufCount(raidPtr, 1);
			}
#endif
		}
		break;

		/* we need to skip the current RU entirely because it got
		 * recon'd while we were waiting for something else to happen */
	case RF_REVENT_SKIP:
		DDprintf1("RECON: SKIP EVENT: col %d\n", event->col);
		if (!raidPtr->reconControl->error) {
			retcode = IssueNextReadRequest(raidPtr, event->col);
		}
		break;

		/* a forced-reconstruction read access has completed. Just
		 * submit the buffer */
	case RF_REVENT_FORCEDREADDONE:
		rbuf = (RF_ReconBuffer_t *) event->arg;
		rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
		DDprintf1("RECON: FORCEDREADDONE EVENT: col %d\n", event->col);
		if (!raidPtr->reconControl->error) {
			submitblocked = rf_SubmitReconBuffer(rbuf, 1, 0);
			RF_ASSERT(!submitblocked);
		}
		break;

		/* A read I/O failed to complete */
	case RF_REVENT_READ_FAILED:
		retcode = RF_RECON_READ_ERROR;
		break;

		/* A write I/O failed to complete */
	case RF_REVENT_WRITE_FAILED:
		retcode = RF_RECON_WRITE_ERROR;

		rbuf = (RF_ReconBuffer_t *) event->arg;

		/* cleanup the disk queue data */
		rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);

		/* At this point we're erroring out, badly, and floatingRbufs
		   may not even be valid. Rather than putting this back onto
		   the floatingRbufs list, just arrange for its immediate
		   destruction.
		*/
		rf_FreeReconBuffer(rbuf);
		break;

		/* a forced read I/O failed to complete */
	case RF_REVENT_FORCEDREAD_FAILED:
		retcode = RF_RECON_READ_ERROR;
		break;

	default:
		RF_PANIC();
	}
	rf_FreeReconEventDesc(event);
	return (retcode);
}
/*****************************************************************************
 *
 * find the next thing that's needed on the indicated disk, and issue
 * a read request for it. We assume that the reconstruction buffer
 * associated with this process is free to receive the data. If
 * reconstruction is blocked on the indicated RU, we issue a
 * blockage-release request instead of a physical disk read request.
 * If the current disk gets too far ahead of the others, we issue a
 * head-separation wait request and return.
 *
 * ctrl->{ru_count, curPSID, diskOffset} and
 * rbuf->failedDiskSectorOffset are maintained to point to the unit
 * we're currently accessing. Note that this deviates from the
 * standard C idiom of having counters point to the next thing to be
 * accessed. This allows us to easily retry when we're blocked by
 * head separation or reconstruction-blockage events.
 *
 * Returns RF_RECON_DONE_READS when the column has been walked to the
 * last parity stripe; otherwise returns whatever TryToRead() returns.
 *
 *****************************************************************************/
static int
IssueNextReadRequest(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	RF_PerDiskReconCtrl_t *ctrl = &raidPtr->reconControl->perDiskInfo[col];
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ReconBuffer_t *rbuf = ctrl->rbuf;
	RF_ReconUnitCount_t RUsPerPU = layoutPtr->SUsPerPU / layoutPtr->SUsPerRU;
	RF_SectorCount_t sectorsPerRU = layoutPtr->sectorsPerStripeUnit * layoutPtr->SUsPerRU;
	int do_new_check = 0, retcode = 0, status;

	/* if we are currently the slowest disk, mark that we have to do a new
	 * check */
	if (ctrl->headSepCounter <= raidPtr->reconControl->minHeadSepCounter)
		do_new_check = 1;

	/* advance to the next reconstruction unit that still needs work;
	 * loop until one is found or the end of the disk is reached */
	while (1) {

		ctrl->ru_count++;
		if (ctrl->ru_count < RUsPerPU) {
			/* still within the current parity stripe: step both
			 * the read offset and the failed-disk offset by one RU */
			ctrl->diskOffset += sectorsPerRU;
			rbuf->failedDiskSectorOffset += sectorsPerRU;
		} else {
			/* exhausted this parity stripe; move to the next one */
			ctrl->curPSID++;
			ctrl->ru_count = 0;
			/* code left over from when head-sep was based on
			 * parity stripe id */
			if (ctrl->curPSID >= raidPtr->reconControl->lastPSID) {
				CheckForNewMinHeadSep(raidPtr, ++(ctrl->headSepCounter));
				return (RF_RECON_DONE_READS);	/* finito! */
			}
			/* find the disk offsets of the start of the parity
			 * stripe on both the current disk and the failed
			 * disk. skip this entire parity stripe if either disk
			 * does not appear in the indicated PS */
			status = ComputePSDiskOffsets(raidPtr, ctrl->curPSID, col, &ctrl->diskOffset, &rbuf->failedDiskSectorOffset,
			    &rbuf->spCol, &rbuf->spOffset);
			if (status) {
				/* force the next iteration to advance to the
				 * following parity stripe */
				ctrl->ru_count = RUsPerPU - 1;
				continue;
			}
		}
		rbuf->which_ru = ctrl->ru_count;

		/* skip this RU if it's already been reconstructed */
		if (rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, rbuf->failedDiskSectorOffset)) {
			Dprintf2("Skipping psid %ld ru %d: already reconstructed\n", ctrl->curPSID, ctrl->ru_count);
			continue;
		}
		break;
	}
	ctrl->headSepCounter++;
	if (do_new_check)
		CheckForNewMinHeadSep(raidPtr, ctrl->headSepCounter);	/* update min if needed */


	/* at this point, we have definitely decided what to do, and we have
	 * only to see if we can actually do it now */
	rbuf->parityStripeID = ctrl->curPSID;
	rbuf->which_ru = ctrl->ru_count;
#if RF_ACC_TRACE > 0
	memset((char *) &raidPtr->recon_tracerecs[col], 0,
	    sizeof(raidPtr->recon_tracerecs[col]));
	raidPtr->recon_tracerecs[col].reconacc = 1;
	RF_ETIMER_START(raidPtr->recon_tracerecs[col].recon_timer);
#endif
	retcode = TryToRead(raidPtr, col);
	return (retcode);
}
1115 1.98.8.2 ad
/*
 * tries to issue the next read on the indicated disk. We may be
 * blocked by (a) the heads being too far apart, or (b) recon on the
 * indicated RU being blocked due to a write by a user thread. In
 * this case, we issue a head-sep or blockage wait request, which will
 * cause this same routine to be invoked again later when the blockage
 * has cleared.
 *
 * Always returns 0; progress (or the decision to wait) is
 * communicated through disk-queue I/O, recon events, and the
 * parity-stripe-status table rather than the return value.
 */

static int
TryToRead(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	RF_PerDiskReconCtrl_t *ctrl = &raidPtr->reconControl->perDiskInfo[col];
	RF_SectorCount_t sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU;
	RF_StripeNum_t psid = ctrl->curPSID;
	RF_ReconUnitNum_t which_ru = ctrl->ru_count;
	RF_DiskQueueData_t *req;
	int status;
	RF_ReconParityStripeStatus_t *pssPtr, *newpssPtr;

	/* if the current disk is too far ahead of the others, issue a
	 * head-separation wait and return */
	if (CheckHeadSeparation(raidPtr, ctrl, col, ctrl->headSepCounter, which_ru))
		return (0);

	/* allocate a new PSS in case we need it -- done before taking the
	 * PSS mutex */
	newpssPtr = rf_AllocPSStatus(raidPtr);

	RF_LOCK_PSS_MUTEX(raidPtr, psid);
	pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_CREATE, newpssPtr);

	/* an existing entry was found, so our preallocated one is unneeded */
	if (pssPtr != newpssPtr) {
		rf_FreePSStatus(raidPtr, newpssPtr);
	}

	/* if recon is blocked on the indicated parity stripe, issue a
	 * block-wait request and return. this also must mark the indicated RU
	 * in the stripe as under reconstruction if not blocked. */
	status = CheckForcedOrBlockedReconstruction(raidPtr, pssPtr, ctrl, col, psid, which_ru);
	if (status == RF_PSS_RECON_BLOCKED) {
		Dprintf2("RECON: Stalling psid %ld ru %d: recon blocked\n", psid, which_ru);
		goto out;
	} else
		if (status == RF_PSS_FORCED_ON_WRITE) {
			/* a user write is handling this RU; skip it */
			rf_CauseReconEvent(raidPtr, col, NULL, RF_REVENT_SKIP);
			goto out;
		}
	/* make one last check to be sure that the indicated RU didn't get
	 * reconstructed while we were waiting for something else to happen.
	 * This is unfortunate in that it causes us to make this check twice
	 * in the normal case. Might want to make some attempt to re-work
	 * this so that we only do this check if we've definitely blocked on
	 * one of the above checks. When this condition is detected, we may
	 * have just created a bogus status entry, which we need to delete. */
	if (rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, ctrl->rbuf->failedDiskSectorOffset)) {
		Dprintf2("RECON: Skipping psid %ld ru %d: prior recon after stall\n", psid, which_ru);
		if (pssPtr == newpssPtr)
			rf_PSStatusDelete(raidPtr, raidPtr->reconControl->pssTable, pssPtr);
		rf_CauseReconEvent(raidPtr, col, NULL, RF_REVENT_SKIP);
		goto out;
	}
	/* found something to read. issue the I/O */
	Dprintf4("RECON: Read for psid %ld on col %d offset %ld buf %lx\n",
	    psid, col, ctrl->diskOffset, ctrl->rbuf->buffer);
#if RF_ACC_TRACE > 0
	RF_ETIMER_STOP(raidPtr->recon_tracerecs[col].recon_timer);
	RF_ETIMER_EVAL(raidPtr->recon_tracerecs[col].recon_timer);
	raidPtr->recon_tracerecs[col].specific.recon.recon_start_to_fetch_us =
	    RF_ETIMER_VAL_US(raidPtr->recon_tracerecs[col].recon_timer);
	RF_ETIMER_START(raidPtr->recon_tracerecs[col].recon_timer);
#endif
	/* should be ok to use a NULL proc pointer here, all the bufs we use
	 * should be in kernel space */
	req = rf_CreateDiskQueueData(RF_IO_TYPE_READ, ctrl->diskOffset, sectorsPerRU, ctrl->rbuf->buffer, psid, which_ru,
	    ReconReadDoneProc, (void *) ctrl,
#if RF_ACC_TRACE > 0
	    &raidPtr->recon_tracerecs[col],
#else
	    NULL,
#endif
	    (void *) raidPtr, 0, NULL, PR_WAITOK);

	/* remember the request on the rbuf so completion can free it */
	ctrl->rbuf->arg = (void *) req;
	rf_DiskIOEnqueue(&raidPtr->Queues[col], req, RF_IO_RECON_PRIORITY);
	pssPtr->issued[col] = 1;

out:
	/* all exit paths must drop the PSS mutex taken above */
	RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
	return (0);
}
1206 1.98.8.2 ad
1207 1.98.8.2 ad
1208 1.98.8.2 ad /*
1209 1.98.8.2 ad * given a parity stripe ID, we want to find out whether both the
1210 1.98.8.2 ad * current disk and the failed disk exist in that parity stripe. If
1211 1.98.8.2 ad * not, we want to skip this whole PS. If so, we want to find the
1212 1.98.8.2 ad * disk offset of the start of the PS on both the current disk and the
1213 1.98.8.2 ad * failed disk.
1214 1.98.8.2 ad *
1215 1.98.8.2 ad * this works by getting a list of disks comprising the indicated
1216 1.98.8.2 ad * parity stripe, and searching the list for the current and failed
1217 1.98.8.2 ad * disks. Once we've decided they both exist in the parity stripe, we
1218 1.98.8.2 ad * need to decide whether each is data or parity, so that we'll know
1219 1.98.8.2 ad * which mapping function to call to get the corresponding disk
1220 1.98.8.2 ad * offsets.
1221 1.98.8.2 ad *
1222 1.98.8.2 ad * this is kind of unpleasant, but doing it this way allows the
1223 1.98.8.2 ad * reconstruction code to use parity stripe IDs rather than physical
1224 1.98.8.2 ad * disks address to march through the failed disk, which greatly
1225 1.98.8.2 ad * simplifies a lot of code, as well as eliminating the need for a
1226 1.98.8.2 ad * reverse-mapping function. I also think it will execute faster,
1227 1.98.8.2 ad * since the calls to the mapping module are kept to a minimum.
1228 1.98.8.2 ad *
1229 1.98.8.2 ad * ASSUMES THAT THE STRIPE IDENTIFIER IDENTIFIES THE DISKS COMPRISING
1230 1.98.8.2 ad * THE STRIPE IN THE CORRECT ORDER
1231 1.98.8.2 ad *
1232 1.98.8.2 ad * raidPtr - raid descriptor
1233 1.98.8.2 ad * psid - parity stripe identifier
1234 1.98.8.2 ad * col - column of disk to find the offsets for
1235 1.98.8.2 ad * spCol - out: col of spare unit for failed unit
1236 1.98.8.2 ad * spOffset - out: offset into disk containing spare unit
1237 1.98.8.2 ad *
1238 1.98.8.2 ad */
1239 1.98.8.2 ad
1240 1.98.8.2 ad
1241 1.98.8.2 ad static int
1242 1.98.8.2 ad ComputePSDiskOffsets(RF_Raid_t *raidPtr, RF_StripeNum_t psid,
1243 1.98.8.2 ad RF_RowCol_t col, RF_SectorNum_t *outDiskOffset,
1244 1.98.8.2 ad RF_SectorNum_t *outFailedDiskSectorOffset,
1245 1.98.8.2 ad RF_RowCol_t *spCol, RF_SectorNum_t *spOffset)
1246 1.98.8.2 ad {
1247 1.98.8.2 ad RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
1248 1.98.8.2 ad RF_RowCol_t fcol = raidPtr->reconControl->fcol;
1249 1.98.8.2 ad RF_RaidAddr_t sosRaidAddress; /* start-of-stripe */
1250 1.98.8.2 ad RF_RowCol_t *diskids;
1251 1.98.8.2 ad u_int i, j, k, i_offset, j_offset;
1252 1.98.8.2 ad RF_RowCol_t pcol;
1253 1.98.8.2 ad int testcol;
1254 1.98.8.2 ad RF_SectorNum_t poffset;
1255 1.98.8.2 ad char i_is_parity = 0, j_is_parity = 0;
1256 1.98.8.2 ad RF_RowCol_t stripeWidth = layoutPtr->numDataCol + layoutPtr->numParityCol;
1257 1.98.8.2 ad
1258 1.98.8.2 ad /* get a listing of the disks comprising that stripe */
1259 1.98.8.2 ad sosRaidAddress = rf_ParityStripeIDToRaidAddress(layoutPtr, psid);
1260 1.98.8.2 ad (layoutPtr->map->IdentifyStripe) (raidPtr, sosRaidAddress, &diskids);
1261 1.98.8.2 ad RF_ASSERT(diskids);
1262 1.98.8.2 ad
1263 1.98.8.2 ad /* reject this entire parity stripe if it does not contain the
1264 1.98.8.2 ad * indicated disk or it does not contain the failed disk */
1265 1.98.8.2 ad
1266 1.98.8.2 ad for (i = 0; i < stripeWidth; i++) {
1267 1.98.8.2 ad if (col == diskids[i])
1268 1.98.8.2 ad break;
1269 1.98.8.2 ad }
1270 1.98.8.2 ad if (i == stripeWidth)
1271 1.98.8.2 ad goto skipit;
1272 1.98.8.2 ad for (j = 0; j < stripeWidth; j++) {
1273 1.98.8.2 ad if (fcol == diskids[j])
1274 1.98.8.2 ad break;
1275 1.98.8.2 ad }
1276 1.98.8.2 ad if (j == stripeWidth) {
1277 1.98.8.2 ad goto skipit;
1278 1.98.8.2 ad }
1279 1.98.8.2 ad /* find out which disk the parity is on */
1280 1.98.8.2 ad (layoutPtr->map->MapParity) (raidPtr, sosRaidAddress, &pcol, &poffset, RF_DONT_REMAP);
1281 1.98.8.2 ad
1282 1.98.8.2 ad /* find out if either the current RU or the failed RU is parity */
1283 1.98.8.2 ad /* also, if the parity occurs in this stripe prior to the data and/or
1284 1.98.8.2 ad * failed col, we need to decrement i and/or j */
1285 1.98.8.2 ad for (k = 0; k < stripeWidth; k++)
1286 1.98.8.2 ad if (diskids[k] == pcol)
1287 1.98.8.2 ad break;
1288 1.98.8.2 ad RF_ASSERT(k < stripeWidth);
1289 1.98.8.2 ad i_offset = i;
1290 1.98.8.2 ad j_offset = j;
1291 1.98.8.2 ad if (k < i)
1292 1.98.8.2 ad i_offset--;
1293 1.98.8.2 ad else
1294 1.98.8.2 ad if (k == i) {
1295 1.98.8.2 ad i_is_parity = 1;
1296 1.98.8.2 ad i_offset = 0;
1297 1.98.8.2 ad } /* set offsets to zero to disable multiply
1298 1.98.8.2 ad * below */
1299 1.98.8.2 ad if (k < j)
1300 1.98.8.2 ad j_offset--;
1301 1.98.8.2 ad else
1302 1.98.8.2 ad if (k == j) {
1303 1.98.8.2 ad j_is_parity = 1;
1304 1.98.8.2 ad j_offset = 0;
1305 1.98.8.2 ad }
1306 1.98.8.2 ad /* at this point, [ij]_is_parity tells us whether the [current,failed]
1307 1.98.8.2 ad * disk is parity at the start of this RU, and, if data, "[ij]_offset"
1308 1.98.8.2 ad * tells us how far into the stripe the [current,failed] disk is. */
1309 1.98.8.2 ad
1310 1.98.8.2 ad /* call the mapping routine to get the offset into the current disk,
1311 1.98.8.2 ad * repeat for failed disk. */
1312 1.98.8.2 ad if (i_is_parity)
1313 1.98.8.2 ad layoutPtr->map->MapParity(raidPtr, sosRaidAddress + i_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outDiskOffset, RF_DONT_REMAP);
1314 1.98.8.2 ad else
1315 1.98.8.2 ad layoutPtr->map->MapSector(raidPtr, sosRaidAddress + i_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outDiskOffset, RF_DONT_REMAP);
1316 1.98.8.2 ad
1317 1.98.8.2 ad RF_ASSERT(col == testcol);
1318 1.98.8.2 ad
1319 1.98.8.2 ad if (j_is_parity)
1320 1.98.8.2 ad layoutPtr->map->MapParity(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outFailedDiskSectorOffset, RF_DONT_REMAP);
1321 1.98.8.2 ad else
1322 1.98.8.2 ad layoutPtr->map->MapSector(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outFailedDiskSectorOffset, RF_DONT_REMAP);
1323 1.98.8.2 ad RF_ASSERT(fcol == testcol);
1324 1.98.8.2 ad
1325 1.98.8.2 ad /* now locate the spare unit for the failed unit */
1326 1.98.8.2 ad #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
1327 1.98.8.2 ad if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
1328 1.98.8.2 ad if (j_is_parity)
1329 1.98.8.2 ad layoutPtr->map->MapParity(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, spCol, spOffset, RF_REMAP);
1330 1.98.8.2 ad else
1331 1.98.8.2 ad layoutPtr->map->MapSector(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, spCol, spOffset, RF_REMAP);
1332 1.98.8.2 ad } else {
1333 1.98.8.2 ad #endif
1334 1.98.8.2 ad *spCol = raidPtr->reconControl->spareCol;
1335 1.98.8.2 ad *spOffset = *outFailedDiskSectorOffset;
1336 1.98.8.2 ad #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
1337 1.98.8.2 ad }
1338 1.98.8.2 ad #endif
1339 1.98.8.2 ad return (0);
1340 1.98.8.2 ad
1341 1.98.8.2 ad skipit:
1342 1.98.8.2 ad Dprintf2("RECON: Skipping psid %ld: nothing needed from r%d c%d\n",
1343 1.98.8.2 ad psid, col);
1344 1.98.8.2 ad return (1);
1345 1.98.8.2 ad }
1346 1.98.8.2 ad /* this is called when a buffer has become ready to write to the replacement disk */
1347 1.98.8.2 ad static int
1348 1.98.8.2 ad IssueNextWriteRequest(RF_Raid_t *raidPtr)
1349 1.98.8.2 ad {
1350 1.98.8.2 ad RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
1351 1.98.8.2 ad RF_SectorCount_t sectorsPerRU = layoutPtr->sectorsPerStripeUnit * layoutPtr->SUsPerRU;
1352 1.98.8.2 ad #if RF_ACC_TRACE > 0
1353 1.98.8.2 ad RF_RowCol_t fcol = raidPtr->reconControl->fcol;
1354 1.98.8.2 ad #endif
1355 1.98.8.2 ad RF_ReconBuffer_t *rbuf;
1356 1.98.8.2 ad RF_DiskQueueData_t *req;
1357 1.98.8.2 ad
1358 1.98.8.2 ad rbuf = rf_GetFullReconBuffer(raidPtr->reconControl);
1359 1.98.8.2 ad RF_ASSERT(rbuf); /* there must be one available, or we wouldn't
1360 1.98.8.2 ad * have gotten the event that sent us here */
1361 1.98.8.2 ad RF_ASSERT(rbuf->pssPtr);
1362 1.98.8.2 ad
1363 1.98.8.2 ad rbuf->pssPtr->writeRbuf = rbuf;
1364 1.98.8.2 ad rbuf->pssPtr = NULL;
1365 1.98.8.2 ad
1366 1.98.8.2 ad Dprintf6("RECON: New write (c %d offs %d) for psid %ld ru %d (failed disk offset %ld) buf %lx\n",
1367 1.98.8.2 ad rbuf->spCol, rbuf->spOffset, rbuf->parityStripeID,
1368 1.98.8.2 ad rbuf->which_ru, rbuf->failedDiskSectorOffset, rbuf->buffer);
1369 1.98.8.2 ad Dprintf6("RECON: new write psid %ld %02x %02x %02x %02x %02x\n",
1370 1.98.8.2 ad rbuf->parityStripeID, rbuf->buffer[0] & 0xff, rbuf->buffer[1] & 0xff,
1371 1.98.8.2 ad rbuf->buffer[2] & 0xff, rbuf->buffer[3] & 0xff, rbuf->buffer[4] & 0xff);
1372 1.98.8.2 ad
1373 1.98.8.2 ad /* should be ok to use a NULL b_proc here b/c all addrs should be in
1374 1.98.8.2 ad * kernel space */
1375 1.98.8.2 ad req = rf_CreateDiskQueueData(RF_IO_TYPE_WRITE, rbuf->spOffset,
1376 1.98.8.2 ad sectorsPerRU, rbuf->buffer,
1377 1.98.8.2 ad rbuf->parityStripeID, rbuf->which_ru,
1378 1.98.8.2 ad ReconWriteDoneProc, (void *) rbuf,
1379 1.98.8.2 ad #if RF_ACC_TRACE > 0
1380 1.98.8.2 ad &raidPtr->recon_tracerecs[fcol],
1381 1.98.8.2 ad #else
1382 1.98.8.2 ad NULL,
1383 1.98.8.2 ad #endif
1384 1.98.8.2 ad (void *) raidPtr, 0, NULL, PR_WAITOK);
1385 1.98.8.2 ad
1386 1.98.8.2 ad rbuf->arg = (void *) req;
1387 1.98.8.2 ad RF_LOCK_MUTEX(raidPtr->reconControl->rb_mutex);
1388 1.98.8.2 ad raidPtr->reconControl->pending_writes++;
1389 1.98.8.2 ad RF_UNLOCK_MUTEX(raidPtr->reconControl->rb_mutex);
1390 1.98.8.2 ad rf_DiskIOEnqueue(&raidPtr->Queues[rbuf->spCol], req, RF_IO_RECON_PRIORITY);
1391 1.98.8.2 ad
1392 1.98.8.2 ad return (0);
1393 1.98.8.2 ad }
1394 1.98.8.2 ad
1395 1.98.8.2 ad /*
1396 1.98.8.2 ad * this gets called upon the completion of a reconstruction read
1397 1.98.8.2 ad * operation the arg is a pointer to the per-disk reconstruction
1398 1.98.8.2 ad * control structure for the process that just finished a read.
1399 1.98.8.2 ad *
1400 1.98.8.2 ad * called at interrupt context in the kernel, so don't do anything
1401 1.98.8.2 ad * illegal here.
1402 1.98.8.2 ad */
1403 1.98.8.2 ad static int
1404 1.98.8.2 ad ReconReadDoneProc(void *arg, int status)
1405 1.98.8.2 ad {
1406 1.98.8.2 ad RF_PerDiskReconCtrl_t *ctrl = (RF_PerDiskReconCtrl_t *) arg;
1407 1.98.8.2 ad RF_Raid_t *raidPtr;
1408 1.98.8.2 ad
1409 1.98.8.2 ad /* Detect that reconCtrl is no longer valid, and if that
1410 1.98.8.2 ad is the case, bail without calling rf_CauseReconEvent().
1411 1.98.8.2 ad There won't be anyone listening for this event anyway */
1412 1.98.8.2 ad
1413 1.98.8.2 ad if (ctrl->reconCtrl == NULL)
1414 1.98.8.2 ad return(0);
1415 1.98.8.2 ad
1416 1.98.8.2 ad raidPtr = ctrl->reconCtrl->reconDesc->raidPtr;
1417 1.98.8.2 ad
1418 1.98.8.2 ad if (status) {
1419 1.98.8.2 ad printf("raid%d: Recon read failed!\n", raidPtr->raidid);
1420 1.98.8.2 ad rf_CauseReconEvent(raidPtr, ctrl->col, NULL, RF_REVENT_READ_FAILED);
1421 1.98.8.2 ad return(0);
1422 1.98.8.2 ad }
1423 1.98.8.2 ad #if RF_ACC_TRACE > 0
1424 1.98.8.2 ad RF_ETIMER_STOP(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1425 1.98.8.2 ad RF_ETIMER_EVAL(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1426 1.98.8.2 ad raidPtr->recon_tracerecs[ctrl->col].specific.recon.recon_fetch_to_return_us =
1427 1.98.8.2 ad RF_ETIMER_VAL_US(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1428 1.98.8.2 ad RF_ETIMER_START(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
1429 1.98.8.2 ad #endif
1430 1.98.8.2 ad rf_CauseReconEvent(raidPtr, ctrl->col, NULL, RF_REVENT_READDONE);
1431 1.98.8.2 ad return (0);
1432 1.98.8.2 ad }
1433 1.98.8.2 ad /* this gets called upon the completion of a reconstruction write operation.
1434 1.98.8.2 ad * the arg is a pointer to the rbuf that was just written
1435 1.98.8.2 ad *
1436 1.98.8.2 ad * called at interrupt context in the kernel, so don't do anything illegal here.
1437 1.98.8.2 ad */
1438 1.98.8.2 ad static int
1439 1.98.8.2 ad ReconWriteDoneProc(void *arg, int status)
1440 1.98.8.2 ad {
1441 1.98.8.2 ad RF_ReconBuffer_t *rbuf = (RF_ReconBuffer_t *) arg;
1442 1.98.8.2 ad
1443 1.98.8.2 ad /* Detect that reconControl is no longer valid, and if that
1444 1.98.8.2 ad is the case, bail without calling rf_CauseReconEvent().
1445 1.98.8.2 ad There won't be anyone listening for this event anyway */
1446 1.98.8.2 ad
1447 1.98.8.2 ad if (rbuf->raidPtr->reconControl == NULL)
1448 1.98.8.2 ad return(0);
1449 1.98.8.2 ad
1450 1.98.8.2 ad Dprintf2("Reconstruction completed on psid %ld ru %d\n", rbuf->parityStripeID, rbuf->which_ru);
1451 1.98.8.2 ad if (status) {
1452 1.98.8.2 ad printf("raid%d: Recon write failed!\n", rbuf->raidPtr->raidid);
1453 1.98.8.2 ad rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, arg, RF_REVENT_WRITE_FAILED);
1454 1.98.8.2 ad return(0);
1455 1.98.8.2 ad }
1456 1.98.8.2 ad rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, arg, RF_REVENT_WRITEDONE);
1457 1.98.8.2 ad return (0);
1458 1.98.8.2 ad }
1459 1.98.8.2 ad
1460 1.98.8.2 ad
1461 1.98.8.2 ad /*
1462 1.98.8.2 ad * computes a new minimum head sep, and wakes up anyone who needs to
1463 1.98.8.2 ad * be woken as a result
1464 1.98.8.2 ad */
1465 1.98.8.2 ad static void
1466 1.98.8.2 ad CheckForNewMinHeadSep(RF_Raid_t *raidPtr, RF_HeadSepLimit_t hsCtr)
1467 1.98.8.2 ad {
1468 1.98.8.2 ad RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl;
1469 1.98.8.2 ad RF_HeadSepLimit_t new_min;
1470 1.98.8.2 ad RF_RowCol_t i;
1471 1.98.8.2 ad RF_CallbackDesc_t *p;
1472 1.98.8.2 ad RF_ASSERT(hsCtr >= reconCtrlPtr->minHeadSepCounter); /* from the definition
1473 1.98.8.2 ad * of a minimum */
1474 1.98.8.2 ad
1475 1.98.8.2 ad
1476 1.98.8.2 ad RF_LOCK_MUTEX(reconCtrlPtr->rb_mutex);
1477 1.98.8.2 ad while(reconCtrlPtr->rb_lock) {
1478 1.98.8.2 ad ltsleep(&reconCtrlPtr->rb_lock, PRIBIO, "reconctlcnmhs", 0, &reconCtrlPtr->rb_mutex);
1479 1.98.8.2 ad }
1480 1.98.8.2 ad reconCtrlPtr->rb_lock = 1;
1481 1.98.8.2 ad RF_UNLOCK_MUTEX(reconCtrlPtr->rb_mutex);
1482 1.98.8.2 ad
1483 1.98.8.2 ad new_min = ~(1L << (8 * sizeof(long) - 1)); /* 0x7FFF....FFF */
1484 1.98.8.2 ad for (i = 0; i < raidPtr->numCol; i++)
1485 1.98.8.2 ad if (i != reconCtrlPtr->fcol) {
1486 1.98.8.2 ad if (reconCtrlPtr->perDiskInfo[i].headSepCounter < new_min)
1487 1.98.8.2 ad new_min = reconCtrlPtr->perDiskInfo[i].headSepCounter;
1488 1.98.8.2 ad }
1489 1.98.8.2 ad /* set the new minimum and wake up anyone who can now run again */
1490 1.98.8.2 ad if (new_min != reconCtrlPtr->minHeadSepCounter) {
1491 1.98.8.2 ad reconCtrlPtr->minHeadSepCounter = new_min;
1492 1.98.8.2 ad Dprintf1("RECON: new min head pos counter val is %ld\n", new_min);
1493 1.98.8.2 ad while (reconCtrlPtr->headSepCBList) {
1494 1.98.8.2 ad if (reconCtrlPtr->headSepCBList->callbackArg.v > new_min)
1495 1.98.8.2 ad break;
1496 1.98.8.2 ad p = reconCtrlPtr->headSepCBList;
1497 1.98.8.2 ad reconCtrlPtr->headSepCBList = p->next;
1498 1.98.8.2 ad p->next = NULL;
1499 1.98.8.2 ad rf_CauseReconEvent(raidPtr, p->col, NULL, RF_REVENT_HEADSEPCLEAR);
1500 1.98.8.2 ad rf_FreeCallbackDesc(p);
1501 1.98.8.2 ad }
1502 1.98.8.2 ad
1503 1.98.8.2 ad }
1504 1.98.8.2 ad RF_LOCK_MUTEX(reconCtrlPtr->rb_mutex);
1505 1.98.8.2 ad reconCtrlPtr->rb_lock = 0;
1506 1.98.8.2 ad wakeup(&reconCtrlPtr->rb_lock);
1507 1.98.8.2 ad RF_UNLOCK_MUTEX(reconCtrlPtr->rb_mutex);
1508 1.98.8.2 ad }
1509 1.98.8.2 ad
/*
 * checks to see that the maximum head separation will not be violated
 * if we initiate a reconstruction I/O on the indicated disk.
 * Limiting the maximum head separation between two disks eliminates
 * the nasty buffer-stall conditions that occur when one disk races
 * ahead of the others and consumes all of the floating recon buffers.
 * This code is complex and unpleasant but it's necessary to avoid
 * some very nasty, albeit fairly rare, reconstruction behavior.
 *
 * returns non-zero if and only if we have to stop working on the
 * indicated disk due to a head-separation delay.
 */
static int
CheckHeadSeparation(RF_Raid_t *raidPtr, RF_PerDiskReconCtrl_t *ctrl,
		    RF_RowCol_t col, RF_HeadSepLimit_t hsCtr,
		    RF_ReconUnitNum_t which_ru)
{
	RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl;
	RF_CallbackDesc_t *cb, *p, *pt;
	int retval = 0;	/* 1 == caller must stall this disk */

	/* if we're too far ahead of the slowest disk, stop working on this
	 * disk until the slower ones catch up. We do this by scheduling a
	 * wakeup callback for the time when the slowest disk has caught up.
	 * We define "caught up" with 20% hysteresis, i.e. the head separation
	 * must have fallen to at most 80% of the max allowable head
	 * separation before we'll wake up.
	 *
	 */
	/* take the recon-buffer sleep lock (rb_lock) before reading
	 * minHeadSepCounter or modifying the callback list */
	RF_LOCK_MUTEX(reconCtrlPtr->rb_mutex);
	while(reconCtrlPtr->rb_lock) {
		ltsleep(&reconCtrlPtr->rb_lock, PRIBIO, "reconctlchs", 0, &reconCtrlPtr->rb_mutex);
	}
	reconCtrlPtr->rb_lock = 1;
	RF_UNLOCK_MUTEX(reconCtrlPtr->rb_mutex);
	/* headSepLimit < 0 disables the head-separation check entirely */
	if ((raidPtr->headSepLimit >= 0) &&
	    ((ctrl->headSepCounter - reconCtrlPtr->minHeadSepCounter) > raidPtr->headSepLimit)) {
		Dprintf5("raid%d: RECON: head sep stall: col %d hsCtr %ld minHSCtr %ld limit %ld\n",
			 raidPtr->raidid, col, ctrl->headSepCounter,
			 reconCtrlPtr->minHeadSepCounter,
			 raidPtr->headSepLimit);
		cb = rf_AllocCallbackDesc();
		/* the minHeadSepCounter value we have to get to before we'll
		 * wake up. build in 20% hysteresis. */
		cb->callbackArg.v = (ctrl->headSepCounter - raidPtr->headSepLimit + raidPtr->headSepLimit / 5);
		cb->col = col;
		cb->next = NULL;

		/* insert this callback descriptor into the sorted list of
		 * pending head-sep callbacks (sorted ascending by
		 * callbackArg.v, the wakeup threshold) */
		p = reconCtrlPtr->headSepCBList;
		if (!p)
			reconCtrlPtr->headSepCBList = cb;
		else
			if (cb->callbackArg.v < p->callbackArg.v) {
				/* new element becomes the head of the list */
				cb->next = reconCtrlPtr->headSepCBList;
				reconCtrlPtr->headSepCBList = cb;
			} else {
				/* walk to the insertion point; pt trails p
				 * by one node so we can link cb between them */
				for (pt = p, p = p->next; p && (p->callbackArg.v < cb->callbackArg.v); pt = p, p = p->next);
				cb->next = p;
				pt->next = cb;
			}
		retval = 1;
#if RF_RECON_STATS > 0
		ctrl->reconCtrl->reconDesc->hsStallCount++;
#endif				/* RF_RECON_STATS > 0 */
	}
	/* drop rb_lock and wake anyone who was waiting for it */
	RF_LOCK_MUTEX(reconCtrlPtr->rb_mutex);
	reconCtrlPtr->rb_lock = 0;
	wakeup(&reconCtrlPtr->rb_lock);
	RF_UNLOCK_MUTEX(reconCtrlPtr->rb_mutex);

	return (retval);
}
1584 1.98.8.2 ad /*
1585 1.98.8.2 ad * checks to see if reconstruction has been either forced or blocked
1586 1.98.8.2 ad * by a user operation. if forced, we skip this RU entirely. else if
1587 1.98.8.2 ad * blocked, put ourselves on the wait list. else return 0.
1588 1.98.8.2 ad *
1589 1.98.8.2 ad * ASSUMES THE PSS MUTEX IS LOCKED UPON ENTRY
1590 1.98.8.2 ad */
1591 1.98.8.2 ad static int
1592 1.98.8.2 ad CheckForcedOrBlockedReconstruction(RF_Raid_t *raidPtr,
1593 1.98.8.2 ad RF_ReconParityStripeStatus_t *pssPtr,
1594 1.98.8.2 ad RF_PerDiskReconCtrl_t *ctrl,
1595 1.98.8.2 ad RF_RowCol_t col,
1596 1.98.8.2 ad RF_StripeNum_t psid,
1597 1.98.8.2 ad RF_ReconUnitNum_t which_ru)
1598 1.98.8.2 ad {
1599 1.98.8.2 ad RF_CallbackDesc_t *cb;
1600 1.98.8.2 ad int retcode = 0;
1601 1.98.8.2 ad
1602 1.98.8.2 ad if ((pssPtr->flags & RF_PSS_FORCED_ON_READ) || (pssPtr->flags & RF_PSS_FORCED_ON_WRITE))
1603 1.98.8.2 ad retcode = RF_PSS_FORCED_ON_WRITE;
1604 1.98.8.2 ad else
1605 1.98.8.2 ad if (pssPtr->flags & RF_PSS_RECON_BLOCKED) {
1606 1.98.8.2 ad Dprintf3("RECON: col %d blocked at psid %ld ru %d\n", col, psid, which_ru);
1607 1.98.8.2 ad cb = rf_AllocCallbackDesc(); /* append ourselves to
1608 1.98.8.2 ad * the blockage-wait
1609 1.98.8.2 ad * list */
1610 1.98.8.2 ad cb->col = col;
1611 1.98.8.2 ad cb->next = pssPtr->blockWaitList;
1612 1.98.8.2 ad pssPtr->blockWaitList = cb;
1613 1.98.8.2 ad retcode = RF_PSS_RECON_BLOCKED;
1614 1.98.8.2 ad }
1615 1.98.8.2 ad if (!retcode)
1616 1.98.8.2 ad pssPtr->flags |= RF_PSS_UNDER_RECON; /* mark this RU as under
1617 1.98.8.2 ad * reconstruction */
1618 1.98.8.2 ad
1619 1.98.8.2 ad return (retcode);
1620 1.98.8.2 ad }
/*
 * if reconstruction is currently ongoing for the indicated stripeID,
 * reconstruction is forced to completion and we return non-zero to
 * indicate that the caller must wait. If not, then reconstruction is
 * blocked on the indicated stripe and the routine returns zero. If
 * and only if we return non-zero, we'll cause the cbFunc to get
 * invoked with the cbArg when the reconstruction has completed.
 */
int
rf_ForceOrBlockRecon(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap,
		     void (*cbFunc)(RF_Raid_t *, void *), void *cbArg)
{
	RF_StripeNum_t stripeID = asmap->stripeID;	/* the stripe ID we're
							 * forcing recon on */
	RF_SectorCount_t sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU;	/* num sects in one RU */
	RF_ReconParityStripeStatus_t *pssPtr, *newpssPtr;	/* a pointer to the parity
								 * stripe status structure */
	RF_StripeNum_t psid;	/* parity stripe id */
	RF_SectorNum_t offset, fd_offset;	/* disk offset, failed-disk
						 * offset */
	RF_RowCol_t *diskids;
	RF_ReconUnitNum_t which_ru;	/* RU within parity stripe */
	RF_RowCol_t fcol, diskno, i;
	RF_ReconBuffer_t *new_rbuf;	/* ptr to newly allocated rbufs */
	RF_DiskQueueData_t *req;/* disk I/O req to be enqueued */
	RF_CallbackDesc_t *cb;
	int nPromoted;

	psid = rf_MapStripeIDToParityStripeID(&raidPtr->Layout, stripeID, &which_ru);

	/* allocate a new PSS in case we need it -- done before taking the
	 * PSS mutex so we never allocate while holding it */
	newpssPtr = rf_AllocPSStatus(raidPtr);

	RF_LOCK_PSS_MUTEX(raidPtr, psid);

	/* look up (creating if absent, pre-marked RECON_BLOCKED) the status
	 * entry for this parity stripe / RU */
	pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_CREATE | RF_PSS_RECON_BLOCKED, newpssPtr);

	if (pssPtr != newpssPtr) {
		/* an entry already existed; release the pre-allocated one */
		rf_FreePSStatus(raidPtr, newpssPtr);
	}

	/* if recon is not ongoing on this PS, just return */
	if (!(pssPtr->flags & RF_PSS_UNDER_RECON)) {
		RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
		return (0);
	}
	/* otherwise, we have to wait for reconstruction to complete on this
	 * RU. */
	/* In order to avoid waiting for a potentially large number of
	 * low-priority accesses to complete, we force a normal-priority (i.e.
	 * not low-priority) reconstruction on this RU. */
	if (!(pssPtr->flags & RF_PSS_FORCED_ON_WRITE) && !(pssPtr->flags & RF_PSS_FORCED_ON_READ)) {
		DDprintf1("Forcing recon on psid %ld\n", psid);
		pssPtr->flags |= RF_PSS_FORCED_ON_WRITE;	/* mark this RU as under
								 * forced recon */
		pssPtr->flags &= ~RF_PSS_RECON_BLOCKED;	/* clear the blockage
							 * that we just set */
		fcol = raidPtr->reconControl->fcol;

		/* get a listing of the disks comprising the indicated stripe */
		(raidPtr->Layout.map->IdentifyStripe) (raidPtr, asmap->raidAddress, &diskids);

		/* For previously issued reads, elevate them to normal
		 * priority. If the I/O has already completed, it won't be
		 * found in the queue, and hence this will be a no-op. For
		 * unissued reads, allocate buffers and issue new reads. The
		 * fact that we've set the FORCED bit means that the regular
		 * recon procs will not re-issue these reqs */
		for (i = 0; i < raidPtr->Layout.numDataCol + raidPtr->Layout.numParityCol; i++)
			if ((diskno = diskids[i]) != fcol) {
				if (pssPtr->issued[diskno]) {
					/* read already in flight (or done):
					 * promote it if still queued */
					nPromoted = rf_DiskIOPromote(&raidPtr->Queues[diskno], psid, which_ru);
					if (rf_reconDebug && nPromoted)
						printf("raid%d: promoted read from col %d\n", raidPtr->raidid, diskno);
				} else {
					new_rbuf = rf_MakeReconBuffer(raidPtr, diskno, RF_RBUF_TYPE_FORCED);	/* create new buf */
					ComputePSDiskOffsets(raidPtr, psid, diskno, &offset, &fd_offset,
					    &new_rbuf->spCol, &new_rbuf->spOffset);	/* find offsets & spare
											 * location */
					new_rbuf->parityStripeID = psid;	/* fill in the buffer */
					new_rbuf->which_ru = which_ru;
					new_rbuf->failedDiskSectorOffset = fd_offset;
					new_rbuf->priority = RF_IO_NORMAL_PRIORITY;

					/* use NULL b_proc b/c all addrs
					 * should be in kernel space */
					req = rf_CreateDiskQueueData(RF_IO_TYPE_READ, offset + which_ru * sectorsPerRU, sectorsPerRU, new_rbuf->buffer,
					    psid, which_ru, (int (*) (void *, int)) ForceReconReadDoneProc, (void *) new_rbuf,
					    NULL, (void *) raidPtr, 0, NULL, PR_WAITOK);

					new_rbuf->arg = req;
					rf_DiskIOEnqueue(&raidPtr->Queues[diskno], req, RF_IO_NORMAL_PRIORITY);	/* enqueue the I/O */
					Dprintf2("raid%d: Issued new read req on col %d\n", raidPtr->raidid, diskno);
				}
			}
		/* if the write is sitting in the disk queue, elevate its
		 * priority */
		if (rf_DiskIOPromote(&raidPtr->Queues[fcol], psid, which_ru))
			printf("raid%d: promoted write to col %d\n",
			       raidPtr->raidid, fcol);
	}
	/* install a callback descriptor to be invoked when recon completes on
	 * this parity stripe. */
	cb = rf_AllocCallbackDesc();
	/* XXX the following is bogus.. These functions don't really match!!
	 * GO */
	cb->callbackFunc = (void (*) (RF_CBParam_t)) cbFunc;
	cb->callbackArg.p = (void *) cbArg;
	cb->next = pssPtr->procWaitList;
	pssPtr->procWaitList = cb;
	DDprintf2("raid%d: Waiting for forced recon on psid %ld\n",
		  raidPtr->raidid, psid);

	RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
	return (1);
}
1737 1.98.8.2 ad /* called upon the completion of a forced reconstruction read.
1738 1.98.8.2 ad * all we do is schedule the FORCEDREADONE event.
1739 1.98.8.2 ad * called at interrupt context in the kernel, so don't do anything illegal here.
1740 1.98.8.2 ad */
1741 1.98.8.2 ad static void
1742 1.98.8.2 ad ForceReconReadDoneProc(void *arg, int status)
1743 1.98.8.2 ad {
1744 1.98.8.2 ad RF_ReconBuffer_t *rbuf = arg;
1745 1.98.8.2 ad
1746 1.98.8.2 ad /* Detect that reconControl is no longer valid, and if that
1747 1.98.8.2 ad is the case, bail without calling rf_CauseReconEvent().
1748 1.98.8.2 ad There won't be anyone listening for this event anyway */
1749 1.98.8.2 ad
1750 1.98.8.2 ad if (rbuf->raidPtr->reconControl == NULL)
1751 1.98.8.2 ad return;
1752 1.98.8.2 ad
1753 1.98.8.2 ad if (status) {
1754 1.98.8.2 ad printf("raid%d: Forced recon read failed!\n", rbuf->raidPtr->raidid);
1755 1.98.8.2 ad rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, (void *) rbuf, RF_REVENT_FORCEDREAD_FAILED);
1756 1.98.8.2 ad return;
1757 1.98.8.2 ad }
1758 1.98.8.2 ad rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, (void *) rbuf, RF_REVENT_FORCEDREADDONE);
1759 1.98.8.2 ad }
1760 1.98.8.2 ad /* releases a block on the reconstruction of the indicated stripe */
1761 1.98.8.2 ad int
1762 1.98.8.2 ad rf_UnblockRecon(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap)
1763 1.98.8.2 ad {
1764 1.98.8.2 ad RF_StripeNum_t stripeID = asmap->stripeID;
1765 1.98.8.2 ad RF_ReconParityStripeStatus_t *pssPtr;
1766 1.98.8.2 ad RF_ReconUnitNum_t which_ru;
1767 1.98.8.2 ad RF_StripeNum_t psid;
1768 1.98.8.2 ad RF_CallbackDesc_t *cb;
1769 1.98.8.2 ad
1770 1.98.8.2 ad psid = rf_MapStripeIDToParityStripeID(&raidPtr->Layout, stripeID, &which_ru);
1771 1.98.8.2 ad RF_LOCK_PSS_MUTEX(raidPtr, psid);
1772 1.98.8.2 ad pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_NONE, NULL);
1773 1.98.8.2 ad
1774 1.98.8.2 ad /* When recon is forced, the pss desc can get deleted before we get
1775 1.98.8.2 ad * back to unblock recon. But, this can _only_ happen when recon is
1776 1.98.8.2 ad * forced. It would be good to put some kind of sanity check here, but
1777 1.98.8.2 ad * how to decide if recon was just forced or not? */
1778 1.98.8.2 ad if (!pssPtr) {
1779 1.98.8.2 ad /* printf("Warning: no pss descriptor upon unblock on psid %ld
1780 1.98.8.2 ad * RU %d\n",psid,which_ru); */
1781 1.98.8.2 ad #if (RF_DEBUG_RECON > 0) || (RF_DEBUG_PSS > 0)
1782 1.98.8.2 ad if (rf_reconDebug || rf_pssDebug)
1783 1.98.8.2 ad printf("Warning: no pss descriptor upon unblock on psid %ld RU %d\n", (long) psid, which_ru);
1784 1.98.8.2 ad #endif
1785 1.98.8.2 ad goto out;
1786 1.98.8.2 ad }
1787 1.98.8.2 ad pssPtr->blockCount--;
1788 1.98.8.2 ad Dprintf3("raid%d: unblocking recon on psid %ld: blockcount is %d\n",
1789 1.98.8.2 ad raidPtr->raidid, psid, pssPtr->blockCount);
1790 1.98.8.2 ad if (pssPtr->blockCount == 0) { /* if recon blockage has been released */
1791 1.98.8.2 ad
1792 1.98.8.2 ad /* unblock recon before calling CauseReconEvent in case
1793 1.98.8.2 ad * CauseReconEvent causes us to try to issue a new read before
1794 1.98.8.2 ad * returning here. */
1795 1.98.8.2 ad pssPtr->flags &= ~RF_PSS_RECON_BLOCKED;
1796 1.98.8.2 ad
1797 1.98.8.2 ad
1798 1.98.8.2 ad while (pssPtr->blockWaitList) {
1799 1.98.8.2 ad /* spin through the block-wait list and
1800 1.98.8.2 ad release all the waiters */
1801 1.98.8.2 ad cb = pssPtr->blockWaitList;
1802 1.98.8.2 ad pssPtr->blockWaitList = cb->next;
1803 1.98.8.2 ad cb->next = NULL;
1804 1.98.8.2 ad rf_CauseReconEvent(raidPtr, cb->col, NULL, RF_REVENT_BLOCKCLEAR);
1805 1.98.8.2 ad rf_FreeCallbackDesc(cb);
1806 1.98.8.2 ad }
1807 1.98.8.2 ad if (!(pssPtr->flags & RF_PSS_UNDER_RECON)) {
1808 1.98.8.2 ad /* if no recon was requested while recon was blocked */
1809 1.98.8.2 ad rf_PSStatusDelete(raidPtr, raidPtr->reconControl->pssTable, pssPtr);
1810 1.98.8.2 ad }
1811 1.98.8.2 ad }
1812 1.98.8.2 ad out:
1813 1.98.8.2 ad RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
1814 1.98.8.2 ad return (0);
1815 1.98.8.2 ad }
1816