/*	$NetBSD: rf_driver.c,v 1.102 2004/06/02 22:58:28 drochner Exp $	*/
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Greg Oster
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland, Khalil Amiri, Claudson Bornstein, William V. Courtright II,
 *         Robby Findler, Daniel Stodolsky, Rachad Youssef, Jim Zelenka
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
65
66 /******************************************************************************
67 *
68 * rf_driver.c -- main setup, teardown, and access routines for the RAID driver
69 *
70 * all routines are prefixed with rf_ (raidframe), to avoid conficts.
71 *
72 ******************************************************************************/


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_driver.c,v 1.102 2004/06/02 22:58:28 drochner Exp $");

#include "opt_raid_diagnostic.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>


#include "rf_archs.h"
#include "rf_threadstuff.h"

#include <sys/errno.h>

#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_aselect.h"
#include "rf_diskqueue.h"
#include "rf_parityscan.h"
#include "rf_alloclist.h"
#include "rf_dagutils.h"
#include "rf_utils.h"
#include "rf_etimer.h"
#include "rf_acctrace.h"
#include "rf_general.h"
#include "rf_desc.h"
#include "rf_states.h"
#include "rf_decluster.h"
#include "rf_map.h"
#include "rf_revent.h"
#include "rf_callback.h"
#include "rf_engine.h"
#include "rf_mcpair.h"
#include "rf_nwayxor.h"
#include "rf_copyback.h"
#include "rf_driver.h"
#include "rf_options.h"
#include "rf_shutdown.h"
#include "rf_kintf.h"

#include <sys/buf.h>

#ifndef RF_ACCESS_DEBUG
#define RF_ACCESS_DEBUG 0
#endif

/* rad == RF_RaidAccessDesc_t */
RF_DECLARE_MUTEX(rf_rad_lock)
#define RF_MAX_FREE_RAD 128
#define RF_MIN_FREE_RAD 32

/* debug variables */
char    rf_panicbuf[2048];      /* a buffer to hold an error msg when we panic */

/* main configuration routines */
static int raidframe_booted = 0;

static void rf_ConfigureDebug(RF_Config_t * cfgPtr);
static void set_debug_option(char *name, long val);
static void rf_UnconfigureArray(void);
static void rf_ShutdownRDFreeList(void *);
static int rf_ConfigureRDFreeList(RF_ShutdownList_t **);

RF_DECLARE_MUTEX(rf_printf_mutex)       /* debug only: avoids interleaved
                                         * printfs by different stripes */

#define SIGNAL_QUIESCENT_COND(_raid_)  wakeup(&((_raid_)->accesses_suspended))
#define WAIT_FOR_QUIESCENCE(_raid_) \
        ltsleep(&((_raid_)->accesses_suspended), PRIBIO, \
               "raidframe quiesce", 0, &((_raid_)->access_suspend_mutex))

static int configureCount = 0;          /* number of active configurations */
static int isconfigged = 0;             /* is basic raidframe (non per-array)
                                         * stuff configged */
RF_DECLARE_LKMGR_STATIC_MUTEX(configureMutex)   /* used to lock the configuration
                                                 * stuff */
static RF_ShutdownList_t *globalShutdown;       /* non array-specific
                                                 * stuff */

static int rf_ConfigureRDFreeList(RF_ShutdownList_t ** listp);

/* called at system boot time */
int
rf_BootRaidframe()
{

        if (raidframe_booted)
                return (EBUSY);
        raidframe_booted = 1;
        lockinit(&configureMutex, PRIBIO, "RAIDframe lock", 0, 0);
        configureCount = 0;
        isconfigged = 0;
        globalShutdown = NULL;
        return (0);
}

/*
 * Called whenever an array is shutdown
 */
static void
rf_UnconfigureArray()
{

        RF_LOCK_LKMGR_MUTEX(configureMutex);
        if (--configureCount == 0) {    /* if no active configurations, shut
                                         * everything down */
                isconfigged = 0;
                rf_ShutdownList(&globalShutdown);

                /*
                 * We must wait until now, because the AllocList module
                 * uses the DebugMem module.
                 */
#if RF_DEBUG_MEM
                if (rf_memDebug)
                        rf_print_unfreed();
#endif
        }
        RF_UNLOCK_LKMGR_MUTEX(configureMutex);
}

/*
 * Called to shut down an array.
 */
int
rf_Shutdown(RF_Raid_t *raidPtr)
{
        RF_VoidPointerListElem_t *tmp;

        if (!raidPtr->valid) {
                RF_ERRORMSG("Attempt to shut down unconfigured RAIDframe driver.  Aborting shutdown\n");
                return (EINVAL);
        }
        /*
         * wait for outstanding IOs to land
         * As described in rf_raid.h, we use the rad_freelist lock
         * to protect the per-array info about outstanding descs
         * since we need to do freelist locking anyway, and this
         * cuts down on the amount of serialization we've got going
         * on.
         */
        RF_LOCK_MUTEX(rf_rad_lock);
        if (raidPtr->waitShutdown) {
                RF_UNLOCK_MUTEX(rf_rad_lock);
                return (EBUSY);
        }
        raidPtr->waitShutdown = 1;
        while (raidPtr->nAccOutstanding) {
                RF_WAIT_COND(raidPtr->outstandingCond, rf_rad_lock);
        }
        RF_UNLOCK_MUTEX(rf_rad_lock);

        /* Wait for any parity re-writes to stop... */
        while (raidPtr->parity_rewrite_in_progress) {
                printf("Waiting for parity re-write to exit...\n");
                tsleep(&raidPtr->parity_rewrite_in_progress, PRIBIO,
                       "rfprwshutdown", 0);
        }

        raidPtr->valid = 0;

        rf_update_component_labels(raidPtr, RF_FINAL_COMPONENT_UPDATE);

        rf_UnconfigureVnodes(raidPtr);

        /* Free the emergency IO buffers */
        while (raidPtr->iobuf != NULL) {
                tmp = raidPtr->iobuf;
                raidPtr->iobuf = raidPtr->iobuf->next;
                free(tmp->p, M_RAIDFRAME);
                rf_FreeVPListElem(tmp);
        }

        /* Free the emergency stripe buffers */
        while (raidPtr->stripebuf != NULL) {
                tmp = raidPtr->stripebuf;
                raidPtr->stripebuf = raidPtr->stripebuf->next;
                free(tmp->p, M_RAIDFRAME);
                rf_FreeVPListElem(tmp);
        }

        rf_ShutdownList(&raidPtr->shutdownList);

        rf_UnconfigureArray();

        return (0);
}


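/*
 * Helper macros for the configuration code below.  DO_INIT_CONFIGURE runs a
 * global (non per-array) setup routine and unwinds the global shutdown list
 * on failure; DO_RAID_INIT_CONFIGURE does the same for a per-array setup
 * routine, and DO_RAID_FAIL tears down a partially-configured array.
 */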
#define DO_INIT_CONFIGURE(f) { \
        rc = f (&globalShutdown); \
        if (rc) { \
                RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
                rf_ShutdownList(&globalShutdown); \
                configureCount--; \
                RF_UNLOCK_LKMGR_MUTEX(configureMutex); \
                return(rc); \
        } \
}

#define DO_RAID_FAIL() { \
        rf_UnconfigureVnodes(raidPtr); \
        rf_ShutdownList(&raidPtr->shutdownList); \
        rf_UnconfigureArray(); \
}

#define DO_RAID_INIT_CONFIGURE(f) { \
        rc = f (&raidPtr->shutdownList, raidPtr, cfgPtr); \
        if (rc) { \
                RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
                DO_RAID_FAIL(); \
                return(rc); \
        } \
}

#define DO_RAID_MUTEX(_m_) { \
        rf_mutex_init((_m_)); \
}

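/*
 * Configure a single RAID set.  The first call also performs the one-time
 * global (non per-array) initialization, guarded by configureMutex.  On
 * success the array is marked valid and a summary is printed; on failure
 * the partially-built state is torn down via DO_RAID_FAIL.
 */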
int
rf_Configure(RF_Raid_t *raidPtr, RF_Config_t *cfgPtr, RF_AutoConfig_t *ac)
{
        RF_RowCol_t col;
        void *tmpbuf;
        RF_VoidPointerListElem_t *vple;
        int rc, i;

        RF_LOCK_LKMGR_MUTEX(configureMutex);
        configureCount++;
        if (isconfigged == 0) {
                rf_mutex_init(&rf_printf_mutex);

                /* initialize globals */

                DO_INIT_CONFIGURE(rf_ConfigureAllocList);

                /*
                 * Yes, this does make debugging general to the whole
                 * system instead of being array specific. Bummer, drag.
                 */
                rf_ConfigureDebug(cfgPtr);
                DO_INIT_CONFIGURE(rf_ConfigureDebugMem);
#if RF_ACC_TRACE > 0
                DO_INIT_CONFIGURE(rf_ConfigureAccessTrace);
#endif
                DO_INIT_CONFIGURE(rf_ConfigureMapModule);
                DO_INIT_CONFIGURE(rf_ConfigureReconEvent);
                DO_INIT_CONFIGURE(rf_ConfigureCallback);
                DO_INIT_CONFIGURE(rf_ConfigureRDFreeList);
                DO_INIT_CONFIGURE(rf_ConfigureNWayXor);
                DO_INIT_CONFIGURE(rf_ConfigureStripeLockFreeList);
                DO_INIT_CONFIGURE(rf_ConfigureMCPair);
                DO_INIT_CONFIGURE(rf_ConfigureDAGs);
                DO_INIT_CONFIGURE(rf_ConfigureDAGFuncs);
                DO_INIT_CONFIGURE(rf_ConfigureReconstruction);
                DO_INIT_CONFIGURE(rf_ConfigureCopyback);
                DO_INIT_CONFIGURE(rf_ConfigureDiskQueueSystem);
                isconfigged = 1;
        }
        RF_UNLOCK_LKMGR_MUTEX(configureMutex);

        DO_RAID_MUTEX(&raidPtr->mutex);
        /* set up the cleanup list.  Do this after ConfigureDebug so that
         * value of memDebug will be set */

        rf_MakeAllocList(raidPtr->cleanupList);
        if (raidPtr->cleanupList == NULL) {
                DO_RAID_FAIL();
                return (ENOMEM);
        }
        rf_ShutdownCreate(&raidPtr->shutdownList,
                          (void (*) (void *)) rf_FreeAllocList,
                          raidPtr->cleanupList);

        raidPtr->numCol = cfgPtr->numCol;
        raidPtr->numSpare = cfgPtr->numSpare;

        raidPtr->status = rf_rs_optimal;
        raidPtr->reconControl = NULL;

        TAILQ_INIT(&(raidPtr->iodone));
        simple_lock_init(&(raidPtr->iodone_lock));

        DO_RAID_INIT_CONFIGURE(rf_ConfigureEngine);
        DO_RAID_INIT_CONFIGURE(rf_ConfigureStripeLocks);

        raidPtr->outstandingCond = 0;

        raidPtr->nAccOutstanding = 0;
        raidPtr->waitShutdown = 0;

        DO_RAID_MUTEX(&raidPtr->access_suspend_mutex);

        raidPtr->waitForReconCond = 0;

        if (ac != NULL) {
                /* We have an AutoConfig structure..  Don't do the
                   normal disk configuration... call the auto config
                   stuff */
                rf_AutoConfigureDisks(raidPtr, cfgPtr, ac);
        } else {
                DO_RAID_INIT_CONFIGURE(rf_ConfigureDisks);
                DO_RAID_INIT_CONFIGURE(rf_ConfigureSpareDisks);
        }
        /* do this after ConfigureDisks & ConfigureSpareDisks to be sure dev
         * no. is set */
        DO_RAID_INIT_CONFIGURE(rf_ConfigureDiskQueues);

        DO_RAID_INIT_CONFIGURE(rf_ConfigureLayout);

        DO_RAID_INIT_CONFIGURE(rf_ConfigurePSStatus);

#if RF_INCLUDE_CHAINDECLUSTER > 0
        for (col = 0; col < raidPtr->numCol; col++) {
                /*
                 * XXX better distribution
                 */
                raidPtr->hist_diskreq[col] = 0;
        }
#endif
        raidPtr->numNewFailures = 0;
        raidPtr->copyback_in_progress = 0;
        raidPtr->parity_rewrite_in_progress = 0;
        raidPtr->adding_hot_spare = 0;
        raidPtr->recon_in_progress = 0;
        raidPtr->maxOutstanding = cfgPtr->maxOutstandingDiskReqs;

        /* autoconfigure and root_partition will actually get filled in
           after the config is done */
        raidPtr->autoconfigure = 0;
        raidPtr->root_partition = 0;
        raidPtr->last_unit = raidPtr->raidid;
        raidPtr->config_order = 0;

        if (rf_keepAccTotals) {
                raidPtr->keep_acc_totals = 1;
        }

        /* Allocate a bunch of buffers to be used in low-memory conditions */
        raidPtr->iobuf = NULL;
        /* XXX next line needs tuning... */
        raidPtr->numEmergencyBuffers = 10 * raidPtr->numCol;
#if DEBUG
        printf("raid%d: allocating %d buffers of %d bytes.\n",
               raidPtr->raidid,
               raidPtr->numEmergencyBuffers,
               (int)(raidPtr->Layout.sectorsPerStripeUnit <<
                     raidPtr->logBytesPerSector));
#endif
        for (i = 0; i < raidPtr->numEmergencyBuffers; i++) {
                tmpbuf = malloc( raidPtr->Layout.sectorsPerStripeUnit <<
                                 raidPtr->logBytesPerSector,
                                 M_RAIDFRAME, M_NOWAIT);
                if (tmpbuf) {
                        vple = rf_AllocVPListElem();
                        vple->p = tmpbuf;
                        vple->next = raidPtr->iobuf;
                        raidPtr->iobuf = vple;
                        raidPtr->iobuf_count++;
                } else {
                        printf("raid%d: failed to allocate emergency buffer!\n",
                               raidPtr->raidid);
                }
        }

        /* XXX next line needs tuning too... */
        raidPtr->numEmergencyStripeBuffers = 10;
        for (i = 0; i < raidPtr->numEmergencyStripeBuffers; i++) {
                tmpbuf = malloc( raidPtr->numCol * (raidPtr->Layout.sectorsPerStripeUnit <<
                                 raidPtr->logBytesPerSector),
                                 M_RAIDFRAME, M_NOWAIT);
                if (tmpbuf) {
                        vple = rf_AllocVPListElem();
                        vple->p = tmpbuf;
                        vple->next = raidPtr->stripebuf;
                        raidPtr->stripebuf = vple;
                        raidPtr->stripebuf_count++;
                } else {
                        printf("raid%d: failed to allocate emergency stripe buffer!\n",
                               raidPtr->raidid);
                }
        }


        raidPtr->valid = 1;

        printf("raid%d: %s\n", raidPtr->raidid,
               raidPtr->Layout.map->configName);
        printf("raid%d: Components:", raidPtr->raidid);

        for (col = 0; col < raidPtr->numCol; col++) {
                printf(" %s", raidPtr->Disks[col].devname);
                if (RF_DEAD_DISK(raidPtr->Disks[col].status)) {
                        printf("[**FAILED**]");
                }
        }
        printf("\n");
        printf("raid%d: Total Sectors: %lu (%lu MB)\n",
               raidPtr->raidid,
               (unsigned long) raidPtr->totalSectors,
               (unsigned long) (raidPtr->totalSectors / 1024 *
                                (1 << raidPtr->logBytesPerSector) / 1024));

        return (0);
}

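/*
 * Pool of RAID access descriptors (rads).  rf_ConfigureRDFreeList sets the
 * pool up at configuration time and registers rf_ShutdownRDFreeList to
 * destroy it when the last array is unconfigured.
 */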
static void
rf_ShutdownRDFreeList(void *ignored)
{
        pool_destroy(&rf_pools.rad);
}

static int
rf_ConfigureRDFreeList(RF_ShutdownList_t **listp)
{

        rf_pool_init(&rf_pools.rad, sizeof(RF_RaidAccessDesc_t),
                     "rf_rad_pl", RF_MIN_FREE_RAD, RF_MAX_FREE_RAD);
        rf_ShutdownCreate(listp, rf_ShutdownRDFreeList, NULL);
        simple_lock_init(&rf_rad_lock);
        return (0);
}

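/*
 * Allocate and initialize an access descriptor for a single RAID access.
 * Returns NULL (and frees the descriptor) if the array is being shut down;
 * otherwise the outstanding-access count is bumped under rf_rad_lock.
 */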
RF_RaidAccessDesc_t *
rf_AllocRaidAccDesc(RF_Raid_t *raidPtr, RF_IoType_t type,
                    RF_RaidAddr_t raidAddress, RF_SectorCount_t numBlocks,
                    caddr_t bufPtr, void *bp, RF_RaidAccessFlags_t flags,
                    const RF_AccessState_t *states)
{
        RF_RaidAccessDesc_t *desc;

        desc = pool_get(&rf_pools.rad, PR_WAITOK);

        RF_LOCK_MUTEX(rf_rad_lock);
        if (raidPtr->waitShutdown) {
                /*
                 * Actually, we're shutting the array down. Free the desc
                 * and return NULL.
                 */

                RF_UNLOCK_MUTEX(rf_rad_lock);
                pool_put(&rf_pools.rad, desc);
                return (NULL);
        }
        raidPtr->nAccOutstanding++;

        RF_UNLOCK_MUTEX(rf_rad_lock);

        desc->raidPtr = (void *) raidPtr;
        desc->type = type;
        desc->raidAddress = raidAddress;
        desc->numBlocks = numBlocks;
        desc->bufPtr = bufPtr;
        desc->bp = bp;
        desc->flags = flags;
        desc->states = states;
        desc->state = 0;
        desc->dagList = NULL;

        desc->status = 0;
#if RF_ACC_TRACE > 0
        memset((char *) &desc->tracerec, 0, sizeof(RF_AccTraceEntry_t));
#endif
        desc->callbackFunc = NULL;
        desc->callbackArg = NULL;
        desc->next = NULL;
        desc->iobufs = NULL;
        desc->stripebufs = NULL;

        return (desc);
}

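/*
 * Release an access descriptor: free any DAGs and emergency buffers still
 * attached to it, return it to the pool, and wake up rf_Shutdown() if it is
 * waiting for the last outstanding access to drain.
 */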
void
rf_FreeRaidAccDesc(RF_RaidAccessDesc_t *desc)
{
        RF_Raid_t *raidPtr = desc->raidPtr;
        RF_DagList_t *dagList, *temp;
        RF_VoidPointerListElem_t *tmp;

        RF_ASSERT(desc);

        /* Cleanup the dagList(s) */
        dagList = desc->dagList;
        while (dagList != NULL) {
                temp = dagList;
                dagList = dagList->next;
                rf_FreeDAGList(temp);
        }

        while (desc->iobufs) {
                tmp = desc->iobufs;
                desc->iobufs = desc->iobufs->next;
                rf_FreeIOBuffer(raidPtr, tmp);
        }

        while (desc->stripebufs) {
                tmp = desc->stripebufs;
                desc->stripebufs = desc->stripebufs->next;
                rf_FreeStripeBuffer(raidPtr, tmp);
        }

        pool_put(&rf_pools.rad, desc);
        RF_LOCK_MUTEX(rf_rad_lock);
        raidPtr->nAccOutstanding--;
        if (raidPtr->waitShutdown) {
                RF_SIGNAL_COND(raidPtr->outstandingCond);
        }
        RF_UNLOCK_MUTEX(rf_rad_lock);
}
/*********************************************************************
 * Main routine for performing an access.
 * Accesses are retried until a DAG cannot be selected.  This occurs
 * when either the DAG library is incomplete or there are too many
 * failures in a parity group.
 *
 * type should be read or write; async_flag should be RF_TRUE or
 * RF_FALSE; bp_in is a buf pointer (void * to facilitate ignoring it
 * outside the kernel).
 ********************************************************************/
int
rf_DoAccess(RF_Raid_t * raidPtr, RF_IoType_t type, int async_flag,
            RF_RaidAddr_t raidAddress, RF_SectorCount_t numBlocks,
            caddr_t bufPtr, void *bp_in, RF_RaidAccessFlags_t flags)
{
        RF_RaidAccessDesc_t *desc;
        caddr_t lbufPtr = bufPtr;
        struct buf *bp = (struct buf *) bp_in;

        raidAddress += rf_raidSectorOffset;

#if RF_ACCESS_DEBUG
        if (rf_accessDebug) {

                printf("logBytes is: %d %d %d\n", raidPtr->raidid,
                       raidPtr->logBytesPerSector,
                       (int) rf_RaidAddressToByte(raidPtr, numBlocks));
                printf("raid%d: %s raidAddr %d (stripeid %d-%d) numBlocks %d (%d bytes) buf 0x%lx\n", raidPtr->raidid,
                       (type == RF_IO_TYPE_READ) ? "READ" : "WRITE", (int) raidAddress,
                       (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress),
                       (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress + numBlocks - 1),
                       (int) numBlocks,
                       (int) rf_RaidAddressToByte(raidPtr, numBlocks),
                       (long) bufPtr);
        }
#endif
        if (raidAddress + numBlocks > raidPtr->totalSectors) {

                printf("DoAccess: raid addr %lu too large to access %lu sectors.  Max legal addr is %lu\n",
                       (u_long) raidAddress, (u_long) numBlocks, (u_long) raidPtr->totalSectors);


                bp->b_flags |= B_ERROR;
                bp->b_resid = bp->b_bcount;
                bp->b_error = ENOSPC;
                biodone(bp);
                return (ENOSPC);
        }
        desc = rf_AllocRaidAccDesc(raidPtr, type, raidAddress,
                                   numBlocks, lbufPtr, bp, flags,
                                   raidPtr->Layout.map->states);

        if (desc == NULL) {
                return (ENOMEM);
        }
#if RF_ACC_TRACE > 0
        RF_ETIMER_START(desc->tracerec.tot_timer);
#endif
        desc->async_flag = async_flag;

        rf_ContinueRaidAccess(desc);

        return (0);
}
#if 0
/* force the array into reconfigured mode without doing reconstruction */
int
rf_SetReconfiguredMode(RF_Raid_t *raidPtr, int col)
{
        if (!(raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
                printf("Can't set reconfigured mode in dedicated-spare array\n");
                RF_PANIC();
        }
        RF_LOCK_MUTEX(raidPtr->mutex);
        raidPtr->numFailures++;
        raidPtr->Disks[col].status = rf_ds_dist_spared;
        raidPtr->status = rf_rs_reconfigured;
        rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
        /* install spare table only if declustering + distributed sparing
         * architecture. */
        if (raidPtr->Layout.map->flags & RF_BD_DECLUSTERED)
                rf_InstallSpareTable(raidPtr, col);
        RF_UNLOCK_MUTEX(raidPtr->mutex);
        return (0);
}
#endif

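/*
 * Mark component fcol as failed.  New requests are suspended while the
 * component is closed and its state updated, then resumed; if initRecon is
 * set, reconstruction of the failed disk is kicked off afterwards.
 */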
int
rf_FailDisk(RF_Raid_t *raidPtr, int fcol, int initRecon)
{

        /* need to suspend IO's here -- if there are DAGs in flight
           and we pull the rug out from under ci_vp, Bad Things
           can happen. */

        rf_SuspendNewRequestsAndWait(raidPtr);

        RF_LOCK_MUTEX(raidPtr->mutex);
        if (raidPtr->Disks[fcol].status != rf_ds_failed) {
                /* must be failing something that is valid, or else it's
                   already marked as failed (in which case we don't
                   want to mark it failed again!) */
                raidPtr->numFailures++;
                raidPtr->Disks[fcol].status = rf_ds_failed;
                raidPtr->status = rf_rs_degraded;
        }
        RF_UNLOCK_MUTEX(raidPtr->mutex);

        rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);

        /* Close the component, so that it's not "locked" if someone
           else wants to use it! */

        rf_close_component(raidPtr, raidPtr->raid_cinfo[fcol].ci_vp,
                           raidPtr->Disks[fcol].auto_configured);

        RF_LOCK_MUTEX(raidPtr->mutex);
        raidPtr->raid_cinfo[fcol].ci_vp = NULL;

        /* Need to mark the component as not being auto_configured
           (in case it was previously). */

        raidPtr->Disks[fcol].auto_configured = 0;
        RF_UNLOCK_MUTEX(raidPtr->mutex);
        /* now we can allow IO to continue -- we'll be suspending it
           again in rf_ReconstructFailedDisk() if we have to.. */

        rf_ResumeNewRequests(raidPtr);

        if (initRecon)
                rf_ReconstructFailedDisk(raidPtr, fcol);
        return (0);
}
/* releases a thread that is waiting for the array to become quiesced.
 * access_suspend_mutex should be locked upon calling this
 */
void
rf_SignalQuiescenceLock(RF_Raid_t *raidPtr)
{
#if RF_DEBUG_QUIESCE
        if (rf_quiesceDebug) {
                printf("raid%d: Signalling quiescence lock\n",
                       raidPtr->raidid);
        }
#endif
        raidPtr->access_suspend_release = 1;

        if (raidPtr->waiting_for_quiescence) {
                SIGNAL_QUIESCENT_COND(raidPtr);
        }
}
/* suspends all new requests to the array.  No effect on accesses that are in flight. */
int
rf_SuspendNewRequestsAndWait(RF_Raid_t *raidPtr)
{
#if RF_DEBUG_QUIESCE
        if (rf_quiesceDebug)
                printf("raid%d: Suspending new reqs\n", raidPtr->raidid);
#endif
        RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
        raidPtr->accesses_suspended++;
        raidPtr->waiting_for_quiescence = (raidPtr->accs_in_flight == 0) ? 0 : 1;

        if (raidPtr->waiting_for_quiescence) {
                raidPtr->access_suspend_release = 0;
                while (!raidPtr->access_suspend_release) {
#if RF_DEBUG_QUIESCE
                        printf("raid%d: Suspending: Waiting for Quiescence\n",
                               raidPtr->raidid);
#endif
                        WAIT_FOR_QUIESCENCE(raidPtr);
                        raidPtr->waiting_for_quiescence = 0;
                }
        }
#if RF_DEBUG_QUIESCE
        printf("raid%d: Quiescence reached..\n", raidPtr->raidid);
#endif

        RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);
        return (raidPtr->waiting_for_quiescence);
}
/* wake up everyone waiting for quiescence to be released */
void
rf_ResumeNewRequests(RF_Raid_t *raidPtr)
{
        RF_CallbackDesc_t *t, *cb;

#if RF_DEBUG_QUIESCE
        if (rf_quiesceDebug)
                printf("Resuming new reqs\n");
#endif

        RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
        raidPtr->accesses_suspended--;
        if (raidPtr->accesses_suspended == 0)
                cb = raidPtr->quiesce_wait_list;
        else
                cb = NULL;
        raidPtr->quiesce_wait_list = NULL;
        RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);

        while (cb) {
                t = cb;
                cb = cb->next;
                (t->callbackFunc) (t->callbackArg);
                rf_FreeCallbackDesc(t);
        }
}
/*****************************************************************************************
 *
 * debug routines
 *
 ****************************************************************************************/

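/*
 * Look up a debug variable by name in rf_debugNames[] and set it to the
 * given value; complains if the name is unknown.
 */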
static void
set_debug_option(char *name, long val)
{
        RF_DebugName_t *p;

        for (p = rf_debugNames; p->name; p++) {
                if (!strcmp(p->name, name)) {
                        *(p->ptr) = val;
                        printf("[Set debug variable %s to %ld]\n", name, val);
                        return;
                }
        }
        RF_ERRORMSG1("Unknown debug string \"%s\"\n", name);
}


/* would like to use sscanf here, but apparently not available in kernel */
/*ARGSUSED*/
static void
rf_ConfigureDebug(RF_Config_t *cfgPtr)
{
        char *val_p, *name_p, *white_p;
        long val;
        int i;

        rf_ResetDebugOptions();
        /* check the index bound before looking at debugVars[i] */
        for (i = 0; i < RF_MAXDBGV && cfgPtr->debugVars[i][0]; i++) {
                name_p = rf_find_non_white(&cfgPtr->debugVars[i][0]);
                white_p = rf_find_white(name_p);        /* skip to start of 2nd
                                                         * word */
                val_p = rf_find_non_white(white_p);
                if (*val_p == '0' && *(val_p + 1) == 'x')
                        val = rf_htoi(val_p + 2);
                else
                        val = rf_atoi(val_p);
                *white_p = '\0';
                set_debug_option(name_p, val);
        }
}

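/*
 * Helpers used when RAIDframe hits a fatal error: the first two format a
 * message into rf_panicbuf for RF_PANIC/RF_ASSERT, the others report
 * mutex and shutdown-list initialization failures.
 */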
void
rf_print_panic_message(int line, char *file)
{
        snprintf(rf_panicbuf, sizeof(rf_panicbuf),
                 "raidframe error at line %d file %s", line, file);
}

#ifdef RAID_DIAGNOSTIC
void
rf_print_assert_panic_message(int line, char *file, char *condition)
{
        snprintf(rf_panicbuf, sizeof(rf_panicbuf),
                 "raidframe error at line %d file %s (failed asserting %s)\n",
                 line, file, condition);
}
#endif

void
rf_print_unable_to_init_mutex(char *file, int line, int rc)
{
        RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
                     file, line, rc);
}

void
rf_print_unable_to_add_shutdown(char *file, int line, int rc)
{
        RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n",
                     file, line, rc);
}