/*	$NetBSD: rf_reconutil.c,v 1.17 2003/12/29 04:39:29 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/********************************************
 * rf_reconutil.c -- reconstruction utilities
 ********************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_reconutil.c,v 1.17 2003/12/29 04:39:29 oster Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_desc.h"
#include "rf_reconutil.h"
#include "rf_reconbuffer.h"
#include "rf_general.h"
#include "rf_decluster.h"
#include "rf_raid5_rotatedspare.h"
#include "rf_interdecluster.h"
#include "rf_chaindecluster.h"

/*******************************************************************
 * allocates/frees the reconstruction control information structures
 *******************************************************************/
RF_ReconCtrl_t *
rf_MakeReconControl(reconDesc, fcol, scol)
        RF_RaidReconDesc_t *reconDesc;
        RF_RowCol_t fcol;       /* failed column */
        RF_RowCol_t scol;       /* identifies which spare we're using */
{
        RF_Raid_t *raidPtr = reconDesc->raidPtr;
        RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
        RF_ReconUnitCount_t RUsPerPU = layoutPtr->SUsPerPU / layoutPtr->SUsPerRU;
        RF_ReconUnitCount_t numSpareRUs;
        RF_ReconCtrl_t *reconCtrlPtr;
        RF_ReconBuffer_t *rbuf;
        const RF_LayoutSW_t *lp;
#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
        int retcode;
#endif
        RF_RowCol_t i;

        lp = raidPtr->Layout.map;

        /* make and zero the global reconstruction structure and the per-disk
         * structure */
        RF_Malloc(reconCtrlPtr, sizeof(RF_ReconCtrl_t), (RF_ReconCtrl_t *));

        /* note: this zeros the perDiskInfo */
        RF_Malloc(reconCtrlPtr->perDiskInfo, raidPtr->numCol *
            sizeof(RF_PerDiskReconCtrl_t), (RF_PerDiskReconCtrl_t *));
        reconCtrlPtr->reconDesc = reconDesc;
        reconCtrlPtr->fcol = fcol;
        reconCtrlPtr->spareCol = scol;
        reconCtrlPtr->lastPSID = layoutPtr->numStripe / layoutPtr->SUsPerPU;
        reconCtrlPtr->percentComplete = 0;

        /* initialize each per-disk recon information structure */
        for (i = 0; i < raidPtr->numCol; i++) {
                reconCtrlPtr->perDiskInfo[i].reconCtrl = reconCtrlPtr;
                reconCtrlPtr->perDiskInfo[i].col = i;
                /* make it appear as if we just finished an RU */
                reconCtrlPtr->perDiskInfo[i].curPSID = -1;
                reconCtrlPtr->perDiskInfo[i].ru_count = RUsPerPU - 1;
        }

        /* Get the number of spare units per disk and the sparemap in case
         * spare is distributed */

        if (lp->GetNumSpareRUs) {
                numSpareRUs = lp->GetNumSpareRUs(raidPtr);
        } else {
                numSpareRUs = 0;
        }

#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
        /*
         * Not all distributed sparing archs need dynamic mappings
         */
        if (lp->InstallSpareTable) {
                retcode = rf_InstallSpareTable(raidPtr, fcol);
                if (retcode) {
                        RF_PANIC();     /* XXX fix this */
                }
        }
#endif
        /* make the reconstruction map */
        reconCtrlPtr->reconMap = rf_MakeReconMap(raidPtr, (int) (layoutPtr->SUsPerRU * layoutPtr->sectorsPerStripeUnit),
            raidPtr->sectorsPerDisk, numSpareRUs);
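        /* (the recon map records which reconstruction units on the failed
         * disk have been rebuilt; the second argument above is the size of
         * one RU in sectors) */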

        /* make the per-disk reconstruction buffers */
        for (i = 0; i < raidPtr->numCol; i++) {
                reconCtrlPtr->perDiskInfo[i].rbuf = (i == fcol) ? NULL : rf_MakeReconBuffer(raidPtr, i, RF_RBUF_TYPE_EXCLUSIVE);
        }
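        /* (the failed column gets no buffer: it is never a source of
         * reconstruction reads) */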

        /* initialize the event queue */
        simple_lock_init(&reconCtrlPtr->eq_mutex);

        reconCtrlPtr->eq_cond = 0;
        reconCtrlPtr->eventQueue = NULL;
        reconCtrlPtr->eq_count = 0;

        /* make the floating recon buffers and append them to the free list */
        simple_lock_init(&reconCtrlPtr->rb_mutex);

        reconCtrlPtr->fullBufferList = NULL;
        reconCtrlPtr->floatingRbufs = NULL;
        reconCtrlPtr->committedRbufs = NULL;
        for (i = 0; i < raidPtr->numFloatingReconBufs; i++) {
                rbuf = rf_MakeReconBuffer(raidPtr, fcol,
                    RF_RBUF_TYPE_FLOATING);
                rbuf->next = reconCtrlPtr->floatingRbufs;
                reconCtrlPtr->floatingRbufs = rbuf;
        }
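        /* (floating buffers are not bound to any one surviving disk; they
         * stay on this free list and are handed out on demand by the code
         * in rf_reconbuffer.c) */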

        /* create the parity stripe status table */
        reconCtrlPtr->pssTable = rf_MakeParityStripeStatusTable(raidPtr);

        /* set the initial min head sep counter val */
        reconCtrlPtr->minHeadSepCounter = 0;

        return (reconCtrlPtr);
}

void
rf_FreeReconControl(raidPtr)
        RF_Raid_t *raidPtr;
{
        RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl;
        RF_ReconBuffer_t *t;
        RF_ReconUnitNum_t i;

        RF_ASSERT(reconCtrlPtr);
        for (i = 0; i < raidPtr->numCol; i++)
                if (reconCtrlPtr->perDiskInfo[i].rbuf)
                        rf_FreeReconBuffer(reconCtrlPtr->perDiskInfo[i].rbuf);
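        /* every floating buffer is expected to be back on the free list by
         * the time reconstruction control is torn down; the assert below
         * catches buffers that were never returned */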
        for (i = 0; i < raidPtr->numFloatingReconBufs; i++) {
                t = reconCtrlPtr->floatingRbufs;
                RF_ASSERT(t);
                reconCtrlPtr->floatingRbufs = t->next;
                rf_FreeReconBuffer(t);
        }
        rf_mutex_destroy(&reconCtrlPtr->rb_mutex);
        rf_mutex_destroy(&reconCtrlPtr->eq_mutex);
        rf_FreeReconMap(reconCtrlPtr->reconMap);
        rf_FreeParityStripeStatusTable(raidPtr, reconCtrlPtr->pssTable);
        RF_Free(reconCtrlPtr->perDiskInfo,
            raidPtr->numCol * sizeof(RF_PerDiskReconCtrl_t));
        RF_Free(reconCtrlPtr, sizeof(*reconCtrlPtr));
}


/******************************************************************************
 * computes the default head separation limit
 *****************************************************************************/
RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimit(raidPtr)
        RF_Raid_t *raidPtr;
{
        RF_HeadSepLimit_t hsl;
        const RF_LayoutSW_t *lp;

        lp = raidPtr->Layout.map;
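        /* (a layout that supplies no GetDefaultHeadSepLimit method gets -1,
         * which the reconstruction code treats as "no head separation
         * limit") */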
        if (lp->GetDefaultHeadSepLimit == NULL)
                return (-1);
        hsl = lp->GetDefaultHeadSepLimit(raidPtr);
        return (hsl);
}


/******************************************************************************
 * computes the default number of floating recon buffers
 *****************************************************************************/
int
rf_GetDefaultNumFloatingReconBuffers(raidPtr)
        RF_Raid_t *raidPtr;
{
        const RF_LayoutSW_t *lp;
        int nrb;

        lp = raidPtr->Layout.map;
        if (lp->GetDefaultNumFloatingReconBuffers == NULL)
                return (3 * raidPtr->numCol);
        nrb = lp->GetDefaultNumFloatingReconBuffers(raidPtr);
        return (nrb);
}


/******************************************************************************
 * creates and initializes a reconstruction buffer
 *****************************************************************************/
RF_ReconBuffer_t *
rf_MakeReconBuffer(
    RF_Raid_t * raidPtr,
    RF_RowCol_t col,
    RF_RbufType_t type)
{
        RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
        RF_ReconBuffer_t *t;
        u_int recon_buffer_size = rf_RaidAddressToByte(raidPtr, layoutPtr->SUsPerRU * layoutPtr->sectorsPerStripeUnit);
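        /* (the buffer holds exactly one reconstruction unit: SUsPerRU stripe
         * units of sectorsPerStripeUnit sectors each, converted to bytes) */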

        RF_Malloc(t, sizeof(RF_ReconBuffer_t), (RF_ReconBuffer_t *));
        RF_Malloc(t->buffer, recon_buffer_size, (caddr_t));
        t->raidPtr = raidPtr;
        t->col = col;
        t->priority = RF_IO_RECON_PRIORITY;
        t->type = type;
        t->pssPtr = NULL;
        t->next = NULL;
        return (t);
}
/******************************************************************************
 * frees a reconstruction buffer
 *****************************************************************************/
void
rf_FreeReconBuffer(rbuf)
        RF_ReconBuffer_t *rbuf;
{
        RF_Raid_t *raidPtr = rbuf->raidPtr;
        u_int recon_buffer_size;

        recon_buffer_size = rf_RaidAddressToByte(raidPtr, raidPtr->Layout.SUsPerRU * raidPtr->Layout.sectorsPerStripeUnit);

        RF_Free(rbuf->buffer, recon_buffer_size);
        RF_Free(rbuf, sizeof(*rbuf));
}

#if RF_DEBUG_RECON
/******************************************************************************
 * debug only: sanity check the number of floating recon bufs in use
 *****************************************************************************/
void
rf_CheckFloatingRbufCount(raidPtr, dolock)
        RF_Raid_t *raidPtr;
        int dolock;
{
        RF_ReconParityStripeStatus_t *p;
        RF_PSStatusHeader_t *pssTable;
        RF_ReconBuffer_t *rbuf;
        int i, j, sum = 0;

        if (dolock)
                RF_LOCK_MUTEX(raidPtr->reconControl->rb_mutex);
        pssTable = raidPtr->reconControl->pssTable;

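        /* every floating buffer must be accounted for somewhere: attached to
         * a parity stripe status entry (as its rbuf, its writeRbuf, or one
         * of its XOR buffers), or sitting on the free, committed, or
         * full-buffer lists; sum them all and check the total below */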
        for (i = 0; i < raidPtr->pssTableSize; i++) {
                RF_LOCK_MUTEX(pssTable[i].mutex);
                for (p = pssTable[i].chain; p; p = p->next) {
                        rbuf = (RF_ReconBuffer_t *) p->rbuf;
                        if (rbuf && rbuf->type == RF_RBUF_TYPE_FLOATING)
                                sum++;

                        rbuf = (RF_ReconBuffer_t *) p->writeRbuf;
                        if (rbuf && rbuf->type == RF_RBUF_TYPE_FLOATING)
                                sum++;

                        for (j = 0; j < p->xorBufCount; j++) {
                                rbuf = (RF_ReconBuffer_t *) p->rbufsForXor[j];
                                RF_ASSERT(rbuf);
                                if (rbuf->type == RF_RBUF_TYPE_FLOATING)
                                        sum++;
                        }
                }
                RF_UNLOCK_MUTEX(pssTable[i].mutex);
        }

        for (rbuf = raidPtr->reconControl->floatingRbufs; rbuf;
             rbuf = rbuf->next) {
                if (rbuf->type == RF_RBUF_TYPE_FLOATING)
                        sum++;
        }
        for (rbuf = raidPtr->reconControl->committedRbufs; rbuf;
             rbuf = rbuf->next) {
                if (rbuf->type == RF_RBUF_TYPE_FLOATING)
                        sum++;
        }
        for (rbuf = raidPtr->reconControl->fullBufferList; rbuf;
             rbuf = rbuf->next) {
                if (rbuf->type == RF_RBUF_TYPE_FLOATING)
                        sum++;
        }
        RF_ASSERT(sum == raidPtr->numFloatingReconBufs);

        if (dolock)
                RF_UNLOCK_MUTEX(raidPtr->reconControl->rb_mutex);
}
#endif