/*	$NetBSD: rf_reconutil.c,v 1.14 2003/02/09 10:04:34 jdolecek Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/********************************************
 * rf_reconutil.c -- reconstruction utilities
 ********************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_reconutil.c,v 1.14 2003/02/09 10:04:34 jdolecek Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_desc.h"
#include "rf_reconutil.h"
#include "rf_reconbuffer.h"
#include "rf_general.h"
#include "rf_decluster.h"
#include "rf_raid5_rotatedspare.h"
#include "rf_interdecluster.h"
#include "rf_chaindecluster.h"

/*******************************************************************
 * allocates/frees the reconstruction control information structures
 *******************************************************************/
RF_ReconCtrl_t *
rf_MakeReconControl(reconDesc, frow, fcol, srow, scol)
	RF_RaidReconDesc_t *reconDesc;
	RF_RowCol_t frow;	/* failed row and column */
	RF_RowCol_t fcol;
	RF_RowCol_t srow;	/* identifies which spare we're using */
	RF_RowCol_t scol;
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
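	/* number of reconstruction units per parity unit */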
	RF_ReconUnitCount_t RUsPerPU = layoutPtr->SUsPerPU / layoutPtr->SUsPerRU;
	RF_ReconUnitCount_t numSpareRUs;
	RF_ReconCtrl_t *reconCtrlPtr;
	RF_ReconBuffer_t *rbuf;
	const RF_LayoutSW_t *lp;
#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
	int retcode;
#endif
	int rc;
	RF_RowCol_t i;

	lp = raidPtr->Layout.map;

	/* make and zero the global reconstruction structure and the per-disk
	 * structure */
	RF_Calloc(reconCtrlPtr, 1, sizeof(RF_ReconCtrl_t), (RF_ReconCtrl_t *));

	/* note: this zeros the perDiskInfo */
	RF_Calloc(reconCtrlPtr->perDiskInfo, raidPtr->numCol,
	    sizeof(RF_PerDiskReconCtrl_t), (RF_PerDiskReconCtrl_t *));
	reconCtrlPtr->reconDesc = reconDesc;
	reconCtrlPtr->fcol = fcol;
	reconCtrlPtr->spareRow = srow;
	reconCtrlPtr->spareCol = scol;
	reconCtrlPtr->lastPSID = layoutPtr->numStripe / layoutPtr->SUsPerPU;
	reconCtrlPtr->percentComplete = 0;

	/* initialize each per-disk recon information structure */
	for (i = 0; i < raidPtr->numCol; i++) {
		reconCtrlPtr->perDiskInfo[i].reconCtrl = reconCtrlPtr;
		reconCtrlPtr->perDiskInfo[i].row = frow;
		reconCtrlPtr->perDiskInfo[i].col = i;
		/* make it appear as if we just finished an RU */
		reconCtrlPtr->perDiskInfo[i].curPSID = -1;
		reconCtrlPtr->perDiskInfo[i].ru_count = RUsPerPU - 1;
	}

	/* Get the number of spare units per disk and the spare map, in case
	 * sparing is distributed */

	if (lp->GetNumSpareRUs) {
		numSpareRUs = lp->GetNumSpareRUs(raidPtr);
	} else {
		numSpareRUs = 0;
	}

#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
	/*
	 * Not all distributed sparing architectures need dynamic mappings
	 */
	if (lp->InstallSpareTable) {
		retcode = rf_InstallSpareTable(raidPtr, frow, fcol);
		if (retcode) {
			RF_PANIC();	/* XXX fix this */
		}
	}
#endif
	/* make the reconstruction map */
	reconCtrlPtr->reconMap = rf_MakeReconMap(raidPtr,
	    (int) (layoutPtr->SUsPerRU * layoutPtr->sectorsPerStripeUnit),
	    raidPtr->sectorsPerDisk, numSpareRUs);
	/* make the per-disk reconstruction buffers (none is needed for the
	 * failed column) */
	for (i = 0; i < raidPtr->numCol; i++) {
		reconCtrlPtr->perDiskInfo[i].rbuf = (i == fcol) ? NULL :
		    rf_MakeReconBuffer(raidPtr, frow, i, RF_RBUF_TYPE_EXCLUSIVE);
	}

	/* initialize the event queue */
	rc = rf_mutex_init(&reconCtrlPtr->eq_mutex);
	if (rc) {
		/* XXX deallocate, cleanup */
		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
		return (NULL);
	}
	rc = rf_cond_init(&reconCtrlPtr->eq_cond);
	if (rc) {
		/* XXX deallocate, cleanup */
		rf_print_unable_to_init_cond(__FILE__, __LINE__, rc);
		return (NULL);
	}
	reconCtrlPtr->eventQueue = NULL;
	reconCtrlPtr->eq_count = 0;

	/* make the floating recon buffers and put them on the free list */
	rc = rf_mutex_init(&reconCtrlPtr->rb_mutex);
	if (rc) {
		/* XXX deallocate, cleanup */
		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
		return (NULL);
	}
	reconCtrlPtr->fullBufferList = NULL;
	reconCtrlPtr->floatingRbufs = NULL;
	reconCtrlPtr->committedRbufs = NULL;
	for (i = 0; i < raidPtr->numFloatingReconBufs; i++) {
		rbuf = rf_MakeReconBuffer(raidPtr, frow, fcol,
		    RF_RBUF_TYPE_FLOATING);
		rbuf->next = reconCtrlPtr->floatingRbufs;
		reconCtrlPtr->floatingRbufs = rbuf;
	}

	/* create the parity stripe status table */
	reconCtrlPtr->pssTable = rf_MakeParityStripeStatusTable(raidPtr);

	/* set the initial minimum head separation counter value */
	reconCtrlPtr->minHeadSepCounter = 0;

	return (reconCtrlPtr);
}
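
/*
 * Usage sketch: an illustration only, inferred from rf_FreeReconControl()
 * below (which locates the structure via raidPtr->reconControl[row]); the
 * actual caller and its error handling live elsewhere in the driver.
 *
 *	raidPtr->reconControl[frow] =
 *	    rf_MakeReconControl(reconDesc, frow, fcol, srow, scol);
 *	if (raidPtr->reconControl[frow] == NULL)
 *		bail out of the reconstruction (hypothetical error path);
 *	... run the reconstruction ...
 *	rf_FreeReconControl(raidPtr, frow);
 */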

void
rf_FreeReconControl(raidPtr, row)
	RF_Raid_t *raidPtr;
	RF_RowCol_t row;
{
	RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl[row];
	RF_ReconBuffer_t *t;
	RF_ReconUnitNum_t i;

	RF_ASSERT(reconCtrlPtr);
	for (i = 0; i < raidPtr->numCol; i++)
		if (reconCtrlPtr->perDiskInfo[i].rbuf)
			rf_FreeReconBuffer(reconCtrlPtr->perDiskInfo[i].rbuf);
	for (i = 0; i < raidPtr->numFloatingReconBufs; i++) {
		t = reconCtrlPtr->floatingRbufs;
		RF_ASSERT(t);
		reconCtrlPtr->floatingRbufs = t->next;
		rf_FreeReconBuffer(t);
	}
	rf_mutex_destroy(&reconCtrlPtr->rb_mutex);
	rf_mutex_destroy(&reconCtrlPtr->eq_mutex);
	rf_cond_destroy(&reconCtrlPtr->eq_cond);
	rf_FreeReconMap(reconCtrlPtr->reconMap);
	rf_FreeParityStripeStatusTable(raidPtr, reconCtrlPtr->pssTable);
	RF_Free(reconCtrlPtr->perDiskInfo,
	    raidPtr->numCol * sizeof(RF_PerDiskReconCtrl_t));
	RF_Free(reconCtrlPtr, sizeof(*reconCtrlPtr));
}


/******************************************************************************
 * computes the default head separation limit
 *****************************************************************************/
RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimit(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_HeadSepLimit_t hsl;
	const RF_LayoutSW_t *lp;

	lp = raidPtr->Layout.map;
	if (lp->GetDefaultHeadSepLimit == NULL)
		return (-1);
	hsl = lp->GetDefaultHeadSepLimit(raidPtr);
	return (hsl);
}


/******************************************************************************
 * computes the default number of floating recon buffers
 *****************************************************************************/
int
rf_GetDefaultNumFloatingReconBuffers(raidPtr)
	RF_Raid_t *raidPtr;
{
	const RF_LayoutSW_t *lp;
	int nrb;

	lp = raidPtr->Layout.map;
	if (lp->GetDefaultNumFloatingReconBuffers == NULL)
		return (3 * raidPtr->numCol);
	nrb = lp->GetDefaultNumFloatingReconBuffers(raidPtr);
	return (nrb);
}


/******************************************************************************
 * creates and initializes a reconstruction buffer
 *****************************************************************************/
RF_ReconBuffer_t *
rf_MakeReconBuffer(
    RF_Raid_t * raidPtr,
    RF_RowCol_t row,
    RF_RowCol_t col,
    RF_RbufType_t type)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ReconBuffer_t *t;
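	/* one reconstruction unit's worth of data, in bytes */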
	u_int recon_buffer_size = rf_RaidAddressToByte(raidPtr,
	    layoutPtr->SUsPerRU * layoutPtr->sectorsPerStripeUnit);

	RF_Malloc(t, sizeof(RF_ReconBuffer_t), (RF_ReconBuffer_t *));
	RF_Malloc(t->buffer, recon_buffer_size, (caddr_t));
	t->raidPtr = raidPtr;
	t->row = row;
	t->col = col;
	t->priority = RF_IO_RECON_PRIORITY;
	t->type = type;
	t->pssPtr = NULL;
	t->next = NULL;
	return (t);
}
/******************************************************************************
 * frees a reconstruction buffer
 *****************************************************************************/
void
rf_FreeReconBuffer(rbuf)
	RF_ReconBuffer_t *rbuf;
{
	RF_Raid_t *raidPtr = rbuf->raidPtr;
	u_int recon_buffer_size;

	recon_buffer_size = rf_RaidAddressToByte(raidPtr,
	    raidPtr->Layout.SUsPerRU * raidPtr->Layout.sectorsPerStripeUnit);

	RF_Free(rbuf->buffer, recon_buffer_size);
	RF_Free(rbuf, sizeof(*rbuf));
}

#if RF_DEBUG_RECON
/******************************************************************************
 * debug only: sanity check the number of floating recon bufs in use
 *****************************************************************************/
void
rf_CheckFloatingRbufCount(raidPtr, dolock)
	RF_Raid_t *raidPtr;
	int dolock;
{
	RF_ReconParityStripeStatus_t *p;
	RF_PSStatusHeader_t *pssTable;
	RF_ReconBuffer_t *rbuf;
	int i, j, sum = 0;
	RF_RowCol_t frow = 0;

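	/* find a row that currently has a reconstruction in progress */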
	for (i = 0; i < raidPtr->numRow; i++)
		if (raidPtr->reconControl[i]) {
			frow = i;
			break;
		}
	RF_ASSERT(frow >= 0);

	if (dolock)
		RF_LOCK_MUTEX(raidPtr->reconControl[frow]->rb_mutex);
	pssTable = raidPtr->reconControl[frow]->pssTable;

	for (i = 0; i < raidPtr->pssTableSize; i++) {
		RF_LOCK_MUTEX(pssTable[i].mutex);
		for (p = pssTable[i].chain; p; p = p->next) {
			rbuf = (RF_ReconBuffer_t *) p->rbuf;
			if (rbuf && rbuf->type == RF_RBUF_TYPE_FLOATING)
				sum++;

			rbuf = (RF_ReconBuffer_t *) p->writeRbuf;
			if (rbuf && rbuf->type == RF_RBUF_TYPE_FLOATING)
				sum++;

			for (j = 0; j < p->xorBufCount; j++) {
				rbuf = (RF_ReconBuffer_t *) p->rbufsForXor[j];
				RF_ASSERT(rbuf);
				if (rbuf->type == RF_RBUF_TYPE_FLOATING)
					sum++;
			}
		}
		RF_UNLOCK_MUTEX(pssTable[i].mutex);
	}

	for (rbuf = raidPtr->reconControl[frow]->floatingRbufs; rbuf;
	     rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->committedRbufs; rbuf;
	     rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->fullBufferList; rbuf;
	     rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	RF_ASSERT(sum == raidPtr->numFloatingReconBufs);

	if (dolock)
		RF_UNLOCK_MUTEX(raidPtr->reconControl[frow]->rb_mutex);
}
#endif