/*	$NetBSD: rf_driver.c,v 1.118 2008/04/28 20:23:56 martin Exp $	*/
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Greg Oster
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland, Khalil Amiri, Claudson Bornstein, William V. Courtright II,
 *         Robby Findler, Daniel Stodolsky, Rachad Youssef, Jim Zelenka
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/******************************************************************************
 *
 * rf_driver.c -- main setup, teardown, and access routines for the RAID driver
 *
 * all routines are prefixed with rf_ (raidframe), to avoid conflicts.
 *
 ******************************************************************************/


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_driver.c,v 1.118 2008/04/28 20:23:56 martin Exp $");

#include "opt_raid_diagnostic.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>


#include "rf_archs.h"
#include "rf_threadstuff.h"

#include <sys/errno.h>

#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_aselect.h"
#include "rf_diskqueue.h"
#include "rf_parityscan.h"
#include "rf_alloclist.h"
#include "rf_dagutils.h"
#include "rf_utils.h"
#include "rf_etimer.h"
#include "rf_acctrace.h"
#include "rf_general.h"
#include "rf_desc.h"
#include "rf_states.h"
#include "rf_decluster.h"
#include "rf_map.h"
#include "rf_revent.h"
#include "rf_callback.h"
#include "rf_engine.h"
#include "rf_mcpair.h"
#include "rf_nwayxor.h"
#include "rf_copyback.h"
#include "rf_driver.h"
#include "rf_options.h"
#include "rf_shutdown.h"
#include "rf_kintf.h"

#include <sys/buf.h>

#ifndef RF_ACCESS_DEBUG
#define RF_ACCESS_DEBUG 0
#endif

/* rad == RF_RaidAccessDesc_t */
RF_DECLARE_MUTEX(rf_rad_lock)
#define RF_MAX_FREE_RAD 128
#define RF_MIN_FREE_RAD  32

/* debug variables */
char rf_panicbuf[2048];         /* a buffer to hold an error msg when we panic */

/* main configuration routines */
static int raidframe_booted = 0;

static void rf_ConfigureDebug(RF_Config_t * cfgPtr);
static void set_debug_option(char *name, long val);
static void rf_UnconfigureArray(void);
static void rf_ShutdownRDFreeList(void *);
static int rf_ConfigureRDFreeList(RF_ShutdownList_t **);

RF_DECLARE_MUTEX(rf_printf_mutex)       /* debug only: avoids interleaved
                                         * printfs by different stripes */

#define SIGNAL_QUIESCENT_COND(_raid_)  wakeup(&((_raid_)->accesses_suspended))
#define WAIT_FOR_QUIESCENCE(_raid_) \
        ltsleep(&((_raid_)->accesses_suspended), PRIBIO, \
                "raidframe quiesce", 0, &((_raid_)->access_suspend_mutex))

static int configureCount = 0;          /* number of active configurations */
static int isconfigged = 0;             /* is basic raidframe (non per-array)
                                         * stuff configured */
RF_DECLARE_LKMGR_STATIC_MUTEX(configureMutex)   /* used to lock the configuration
                                                 * stuff */
static RF_ShutdownList_t *globalShutdown;       /* non array-specific
                                                 * stuff */

static int rf_ConfigureRDFreeList(RF_ShutdownList_t ** listp);
static int rf_AllocEmergBuffers(RF_Raid_t *);
static void rf_FreeEmergBuffers(RF_Raid_t *);

/* called at system boot time */
int
rf_BootRaidframe()
{

        if (raidframe_booted)
                return (EBUSY);
        raidframe_booted = 1;
        mutex_init(&configureMutex, MUTEX_DEFAULT, IPL_NONE);
        configureCount = 0;
        isconfigged = 0;
        globalShutdown = NULL;
        return (0);
}

/*
 * Called whenever an array is shut down.
 */
static void
rf_UnconfigureArray()
{

        RF_LOCK_LKMGR_MUTEX(configureMutex);
        if (--configureCount == 0) {    /* if no active configurations, shut
                                         * everything down */
                isconfigged = 0;
                rf_ShutdownList(&globalShutdown);

                /*
                 * We must wait until now, because the AllocList module
                 * uses the DebugMem module.
                 */
#if RF_DEBUG_MEM
                if (rf_memDebug)
                        rf_print_unfreed();
#endif
        }
        RF_UNLOCK_LKMGR_MUTEX(configureMutex);
}

/*
 * Called to shut down an array.
 */
int
rf_Shutdown(RF_Raid_t *raidPtr)
{

        if (!raidPtr->valid) {
                RF_ERRORMSG("Attempt to shut down unconfigured RAIDframe driver.  Aborting shutdown\n");
                return (EINVAL);
        }
        /*
         * wait for outstanding IOs to land
         * As described in rf_raid.h, we use the rad_freelist lock
         * to protect the per-array info about outstanding descs
         * since we need to do freelist locking anyway, and this
         * cuts down on the amount of serialization we've got going
         * on.
         */
        RF_LOCK_MUTEX(rf_rad_lock);
        if (raidPtr->waitShutdown) {
                RF_UNLOCK_MUTEX(rf_rad_lock);
                return (EBUSY);
        }
        raidPtr->waitShutdown = 1;
        while (raidPtr->nAccOutstanding) {
                RF_WAIT_COND(raidPtr->outstandingCond, rf_rad_lock);
        }
        RF_UNLOCK_MUTEX(rf_rad_lock);

        /* Wait for any parity re-writes to stop... */
        while (raidPtr->parity_rewrite_in_progress) {
                printf("Waiting for parity re-write to exit...\n");
                tsleep(&raidPtr->parity_rewrite_in_progress, PRIBIO,
                       "rfprwshutdown", 0);
        }

        raidPtr->valid = 0;

        rf_update_component_labels(raidPtr, RF_FINAL_COMPONENT_UPDATE);

        rf_UnconfigureVnodes(raidPtr);

        rf_FreeEmergBuffers(raidPtr);

        rf_ShutdownList(&raidPtr->shutdownList);

        rf_UnconfigureArray();

        return (0);
}


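/*
 * Helper macros for rf_Configure(): run one configuration step and, on
 * failure, report the error, undo any global or per-array state built up
 * so far, and return the error code to the caller.
 */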
#define DO_INIT_CONFIGURE(f) { \
        rc = f (&globalShutdown); \
        if (rc) { \
                RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
                rf_ShutdownList(&globalShutdown); \
                configureCount--; \
                RF_UNLOCK_LKMGR_MUTEX(configureMutex); \
                return(rc); \
        } \
}

#define DO_RAID_FAIL() { \
        rf_UnconfigureVnodes(raidPtr); \
        rf_FreeEmergBuffers(raidPtr); \
        rf_ShutdownList(&raidPtr->shutdownList); \
        rf_UnconfigureArray(); \
}

#define DO_RAID_INIT_CONFIGURE(f) { \
        rc = f (&raidPtr->shutdownList, raidPtr, cfgPtr); \
        if (rc) { \
                RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
                DO_RAID_FAIL(); \
                return(rc); \
        } \
}

#define DO_RAID_MUTEX(_m_) { \
        rf_mutex_init((_m_)); \
}

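/*
 * Configure a RAID set: set up the global (once per system) state on the
 * first call, then initialize the per-array state described by cfgPtr.
 * If ac is non-NULL the component disks are taken from the autoconfig
 * information rather than being configured from cfgPtr's disk list.
 * Returns 0 on success, or an errno value on failure.
 */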
int
rf_Configure(RF_Raid_t *raidPtr, RF_Config_t *cfgPtr, RF_AutoConfig_t *ac)
{
        RF_RowCol_t col;
        int rc;

        RF_LOCK_LKMGR_MUTEX(configureMutex);
        configureCount++;
        if (isconfigged == 0) {
                rf_mutex_init(&rf_printf_mutex);

                /* initialize globals */

                DO_INIT_CONFIGURE(rf_ConfigureAllocList);

                /*
                 * Yes, this does make debugging general to the whole
                 * system instead of being array specific.  Bummer, drag.
                 */
                rf_ConfigureDebug(cfgPtr);
                DO_INIT_CONFIGURE(rf_ConfigureDebugMem);
#if RF_ACC_TRACE > 0
                DO_INIT_CONFIGURE(rf_ConfigureAccessTrace);
#endif
                DO_INIT_CONFIGURE(rf_ConfigureMapModule);
                DO_INIT_CONFIGURE(rf_ConfigureReconEvent);
                DO_INIT_CONFIGURE(rf_ConfigureCallback);
                DO_INIT_CONFIGURE(rf_ConfigureRDFreeList);
                DO_INIT_CONFIGURE(rf_ConfigureNWayXor);
                DO_INIT_CONFIGURE(rf_ConfigureStripeLockFreeList);
                DO_INIT_CONFIGURE(rf_ConfigureMCPair);
                DO_INIT_CONFIGURE(rf_ConfigureDAGs);
                DO_INIT_CONFIGURE(rf_ConfigureDAGFuncs);
                DO_INIT_CONFIGURE(rf_ConfigureReconstruction);
                DO_INIT_CONFIGURE(rf_ConfigureCopyback);
                DO_INIT_CONFIGURE(rf_ConfigureDiskQueueSystem);
                DO_INIT_CONFIGURE(rf_ConfigurePSStatus);
                isconfigged = 1;
        }
        RF_UNLOCK_LKMGR_MUTEX(configureMutex);

        DO_RAID_MUTEX(&raidPtr->mutex);
        /* set up the cleanup list.  Do this after ConfigureDebug so that
         * value of memDebug will be set */

        rf_MakeAllocList(raidPtr->cleanupList);
        if (raidPtr->cleanupList == NULL) {
                DO_RAID_FAIL();
                return (ENOMEM);
        }
        rf_ShutdownCreate(&raidPtr->shutdownList,
                          (void (*) (void *)) rf_FreeAllocList,
                          raidPtr->cleanupList);

        raidPtr->numCol = cfgPtr->numCol;
        raidPtr->numSpare = cfgPtr->numSpare;

        raidPtr->status = rf_rs_optimal;
        raidPtr->reconControl = NULL;

        TAILQ_INIT(&(raidPtr->iodone));
        simple_lock_init(&(raidPtr->iodone_lock));

        DO_RAID_INIT_CONFIGURE(rf_ConfigureEngine);
        DO_RAID_INIT_CONFIGURE(rf_ConfigureStripeLocks);

        raidPtr->outstandingCond = 0;

        raidPtr->nAccOutstanding = 0;
        raidPtr->waitShutdown = 0;

        DO_RAID_MUTEX(&raidPtr->access_suspend_mutex);

        raidPtr->waitForReconCond = 0;

        if (ac!=NULL) {
                /* We have an AutoConfig structure..  Don't do the
                   normal disk configuration... call the auto config
                   stuff */
                rf_AutoConfigureDisks(raidPtr, cfgPtr, ac);
        } else {
                DO_RAID_INIT_CONFIGURE(rf_ConfigureDisks);
                DO_RAID_INIT_CONFIGURE(rf_ConfigureSpareDisks);
        }
        /* do this after ConfigureDisks & ConfigureSpareDisks to be sure dev
         * no. is set */
        DO_RAID_INIT_CONFIGURE(rf_ConfigureDiskQueues);

        DO_RAID_INIT_CONFIGURE(rf_ConfigureLayout);

        /* Initialize per-RAID PSS bits */
        rf_InitPSStatus(raidPtr);

#if RF_INCLUDE_CHAINDECLUSTER > 0
        for (col = 0; col < raidPtr->numCol; col++) {
                /*
                 * XXX better distribution
                 */
                raidPtr->hist_diskreq[col] = 0;
        }
#endif
        raidPtr->numNewFailures = 0;
        raidPtr->copyback_in_progress = 0;
        raidPtr->parity_rewrite_in_progress = 0;
        raidPtr->adding_hot_spare = 0;
        raidPtr->recon_in_progress = 0;
        raidPtr->maxOutstanding = cfgPtr->maxOutstandingDiskReqs;

        /* autoconfigure and root_partition will actually get filled in
           after the config is done */
        raidPtr->autoconfigure = 0;
        raidPtr->root_partition = 0;
        raidPtr->last_unit = raidPtr->raidid;
        raidPtr->config_order = 0;

        if (rf_keepAccTotals) {
                raidPtr->keep_acc_totals = 1;
        }

        /* Allocate a bunch of buffers to be used in low-memory conditions */
        raidPtr->iobuf = NULL;

        rc = rf_AllocEmergBuffers(raidPtr);
        if (rc) {
                printf("raid%d: Unable to allocate emergency buffers.\n",
                       raidPtr->raidid);
                DO_RAID_FAIL();
                return(rc);
        }

        raidPtr->valid = 1;

        printf("raid%d: %s\n", raidPtr->raidid,
               raidPtr->Layout.map->configName);
        printf("raid%d: Components:", raidPtr->raidid);

        for (col = 0; col < raidPtr->numCol; col++) {
                printf(" %s", raidPtr->Disks[col].devname);
                if (RF_DEAD_DISK(raidPtr->Disks[col].status)) {
                        printf("[**FAILED**]");
                }
        }
        printf("\n");
        printf("raid%d: Total Sectors: %" PRIu64 " (%" PRIu64 " MB)\n",
               raidPtr->raidid,
               raidPtr->totalSectors,
               (raidPtr->totalSectors / 1024 *
                (1 << raidPtr->logBytesPerSector) / 1024));

        return (0);
}


/*

   Routines to allocate and free the "emergency buffers" for a given
   RAID set.  These emergency buffers will be used when the kernel runs
   out of kernel memory.

 */

static int
rf_AllocEmergBuffers(RF_Raid_t *raidPtr)
{
        void *tmpbuf;
        RF_VoidPointerListElem_t *vple;
        int i;

        /* XXX next line needs tuning... */
        raidPtr->numEmergencyBuffers = 10 * raidPtr->numCol;
#if DEBUG
        printf("raid%d: allocating %d buffers of %d bytes.\n",
               raidPtr->raidid,
               raidPtr->numEmergencyBuffers,
               (int)(raidPtr->Layout.sectorsPerStripeUnit <<
                     raidPtr->logBytesPerSector));
#endif
        for (i = 0; i < raidPtr->numEmergencyBuffers; i++) {
                tmpbuf = malloc( raidPtr->Layout.sectorsPerStripeUnit <<
                                 raidPtr->logBytesPerSector,
                                 M_RAIDFRAME, M_WAITOK);
                if (tmpbuf) {
                        vple = rf_AllocVPListElem();
                        vple->p = tmpbuf;
                        vple->next = raidPtr->iobuf;
                        raidPtr->iobuf = vple;
                        raidPtr->iobuf_count++;
                } else {
                        printf("raid%d: failed to allocate emergency buffer!\n",
                               raidPtr->raidid);
                        return 1;
                }
        }

        /* XXX next line needs tuning too... */
        raidPtr->numEmergencyStripeBuffers = 10;
        for (i = 0; i < raidPtr->numEmergencyStripeBuffers; i++) {
                tmpbuf = malloc( raidPtr->numCol * (raidPtr->Layout.sectorsPerStripeUnit <<
                                 raidPtr->logBytesPerSector),
                                 M_RAIDFRAME, M_WAITOK);
                if (tmpbuf) {
                        vple = rf_AllocVPListElem();
                        vple->p = tmpbuf;
                        vple->next = raidPtr->stripebuf;
                        raidPtr->stripebuf = vple;
                        raidPtr->stripebuf_count++;
                } else {
                        printf("raid%d: failed to allocate emergency stripe buffer!\n",
                               raidPtr->raidid);
                        return 1;
                }
        }

        return (0);
}

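/*
 * Release the emergency I/O and stripe buffers allocated above.  Called
 * when the array is shut down or when configuration fails part-way.
 */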
static void
rf_FreeEmergBuffers(RF_Raid_t *raidPtr)
{
        RF_VoidPointerListElem_t *tmp;

        /* Free the emergency IO buffers */
        while (raidPtr->iobuf != NULL) {
                tmp = raidPtr->iobuf;
                raidPtr->iobuf = raidPtr->iobuf->next;
                free(tmp->p, M_RAIDFRAME);
                rf_FreeVPListElem(tmp);
        }

        /* Free the emergency stripe buffers */
        while (raidPtr->stripebuf != NULL) {
                tmp = raidPtr->stripebuf;
                raidPtr->stripebuf = raidPtr->stripebuf->next;
                free(tmp->p, M_RAIDFRAME);
                rf_FreeVPListElem(tmp);
        }
}


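/* Tear down the pool of access descriptors when RAIDframe shuts down. */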
static void
rf_ShutdownRDFreeList(void *ignored)
{
        pool_destroy(&rf_pools.rad);
}

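/*
 * Create the pool of RF_RaidAccessDesc_t structures and register its
 * teardown routine on the shutdown list.
 */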
static int
rf_ConfigureRDFreeList(RF_ShutdownList_t **listp)
{

        rf_pool_init(&rf_pools.rad, sizeof(RF_RaidAccessDesc_t),
                     "rf_rad_pl", RF_MIN_FREE_RAD, RF_MAX_FREE_RAD);
        rf_ShutdownCreate(listp, rf_ShutdownRDFreeList, NULL);
        simple_lock_init(&rf_rad_lock);
        return (0);
}

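/*
 * Allocate and initialize an access descriptor for a single RAID access.
 * Returns NULL if the array is being shut down; otherwise the caller owns
 * the descriptor and must release it with rf_FreeRaidAccDesc().
 */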
RF_RaidAccessDesc_t *
rf_AllocRaidAccDesc(RF_Raid_t *raidPtr, RF_IoType_t type,
                    RF_RaidAddr_t raidAddress, RF_SectorCount_t numBlocks,
                    void *bufPtr, void *bp, RF_RaidAccessFlags_t flags,
                    const RF_AccessState_t *states)
{
        RF_RaidAccessDesc_t *desc;

        desc = pool_get(&rf_pools.rad, PR_WAITOK);

        RF_LOCK_MUTEX(rf_rad_lock);
        if (raidPtr->waitShutdown) {
                /*
                 * Actually, we're shutting the array down.  Free the desc
                 * and return NULL.
                 */

                RF_UNLOCK_MUTEX(rf_rad_lock);
                pool_put(&rf_pools.rad, desc);
                return (NULL);
        }
        raidPtr->nAccOutstanding++;

        RF_UNLOCK_MUTEX(rf_rad_lock);

        desc->raidPtr = (void *) raidPtr;
        desc->type = type;
        desc->raidAddress = raidAddress;
        desc->numBlocks = numBlocks;
        desc->bufPtr = bufPtr;
        desc->bp = bp;
        desc->flags = flags;
        desc->states = states;
        desc->state = 0;
        desc->dagList = NULL;

        desc->status = 0;
        desc->numRetries = 0;
#if RF_ACC_TRACE > 0
        memset((char *) &desc->tracerec, 0, sizeof(RF_AccTraceEntry_t));
#endif
        desc->callbackFunc = NULL;
        desc->callbackArg = NULL;
        desc->next = NULL;
        desc->iobufs = NULL;
        desc->stripebufs = NULL;

        return (desc);
}

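/*
 * Release an access descriptor and any DAG lists, I/O buffers, or stripe
 * buffers still attached to it, then drop the outstanding-access count
 * (waking rf_Shutdown() if it is waiting for the array to drain).
 */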
void
rf_FreeRaidAccDesc(RF_RaidAccessDesc_t *desc)
{
        RF_Raid_t *raidPtr = desc->raidPtr;
        RF_DagList_t *dagList, *temp;
        RF_VoidPointerListElem_t *tmp;

        RF_ASSERT(desc);

        /* Cleanup the dagList(s) */
        dagList = desc->dagList;
        while(dagList != NULL) {
                temp = dagList;
                dagList = dagList->next;
                rf_FreeDAGList(temp);
        }

        while (desc->iobufs) {
                tmp = desc->iobufs;
                desc->iobufs = desc->iobufs->next;
                rf_FreeIOBuffer(raidPtr, tmp);
        }

        while (desc->stripebufs) {
                tmp = desc->stripebufs;
                desc->stripebufs = desc->stripebufs->next;
                rf_FreeStripeBuffer(raidPtr, tmp);
        }

        pool_put(&rf_pools.rad, desc);
        RF_LOCK_MUTEX(rf_rad_lock);
        raidPtr->nAccOutstanding--;
        if (raidPtr->waitShutdown) {
                RF_SIGNAL_COND(raidPtr->outstandingCond);
        }
        RF_UNLOCK_MUTEX(rf_rad_lock);
}
/*********************************************************************
 * Main routine for performing an access.
 * Accesses are retried until a DAG cannot be selected.  This occurs
 * when either the DAG library is incomplete or there are too many
 * failures in a parity group.
 *
 * type should be read or write.  async_flag should be RF_TRUE or
 * RF_FALSE.  bp_in is a buf pointer; it is void * to facilitate
 * ignoring it outside the kernel.
 ********************************************************************/
int
rf_DoAccess(RF_Raid_t * raidPtr, RF_IoType_t type, int async_flag,
            RF_RaidAddr_t raidAddress, RF_SectorCount_t numBlocks,
            void *bufPtr, struct buf *bp, RF_RaidAccessFlags_t flags)
{
        RF_RaidAccessDesc_t *desc;
        void *lbufPtr = bufPtr;

        raidAddress += rf_raidSectorOffset;

#if RF_ACCESS_DEBUG
        if (rf_accessDebug) {

                printf("logBytes is: %d %d %d\n", raidPtr->raidid,
                       raidPtr->logBytesPerSector,
                       (int) rf_RaidAddressToByte(raidPtr, numBlocks));
                printf("raid%d: %s raidAddr %d (stripeid %d-%d) numBlocks %d (%d bytes) buf 0x%lx\n", raidPtr->raidid,
                       (type == RF_IO_TYPE_READ) ? "READ" : "WRITE", (int) raidAddress,
                       (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress),
                       (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress + numBlocks - 1),
                       (int) numBlocks,
                       (int) rf_RaidAddressToByte(raidPtr, numBlocks),
                       (long) bufPtr);
        }
#endif

        desc = rf_AllocRaidAccDesc(raidPtr, type, raidAddress,
            numBlocks, lbufPtr, bp, flags, raidPtr->Layout.map->states);

        if (desc == NULL) {
                return (ENOMEM);
        }
#if RF_ACC_TRACE > 0
        RF_ETIMER_START(desc->tracerec.tot_timer);
#endif
        desc->async_flag = async_flag;

        rf_ContinueRaidAccess(desc);

        return (0);
}
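
/*
 * Usage sketch (illustrative only, not compiled here): a kernel caller
 * such as the block-I/O entry point is expected to translate a struct buf
 * into a RAIDframe address range and then issue the access roughly as
 * follows.  The raidAddr/num_blocks derivation and the flag shown are
 * assumptions for illustration, not a definition of the interface.
 *
 *      rc = rf_DoAccess(raidPtr, RF_IO_TYPE_READ, 0, raidAddr,
 *          num_blocks, bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
 */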
#if 0
/* force the array into reconfigured mode without doing reconstruction */
int
rf_SetReconfiguredMode(RF_Raid_t *raidPtr, int col)
{
        if (!(raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
                printf("Can't set reconfigured mode in dedicated-spare array\n");
                RF_PANIC();
        }
        RF_LOCK_MUTEX(raidPtr->mutex);
        raidPtr->numFailures++;
        raidPtr->Disks[col].status = rf_ds_dist_spared;
        raidPtr->status = rf_rs_reconfigured;
        rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
        /* install spare table only if declustering + distributed sparing
         * architecture. */
        if (raidPtr->Layout.map->flags & RF_BD_DECLUSTERED)
                rf_InstallSpareTable(raidPtr, col);
        RF_UNLOCK_MUTEX(raidPtr->mutex);
        return (0);
}
#endif

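/*
 * Mark component fcol of the array as failed, update the component labels,
 * and close the underlying device.  If initRecon is set, reconstruction of
 * the failed component is started before returning.
 */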
int
rf_FailDisk(RF_Raid_t *raidPtr, int fcol, int initRecon)
{

        /* need to suspend IO's here -- if there are DAGs in flight
           and we pull the rug out from under ci_vp, Bad Things
           can happen.  */

        rf_SuspendNewRequestsAndWait(raidPtr);

        RF_LOCK_MUTEX(raidPtr->mutex);
        if (raidPtr->Disks[fcol].status != rf_ds_failed) {
                /* must be failing something that is valid, or else it's
                   already marked as failed (in which case we don't
                   want to mark it failed again!) */
                raidPtr->numFailures++;
                raidPtr->Disks[fcol].status = rf_ds_failed;
                raidPtr->status = rf_rs_degraded;
        }
        RF_UNLOCK_MUTEX(raidPtr->mutex);

        rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);

        /* Close the component, so that it's not "locked" if someone
           else wants to use it! */

        rf_close_component(raidPtr, raidPtr->raid_cinfo[fcol].ci_vp,
                           raidPtr->Disks[fcol].auto_configured);

        RF_LOCK_MUTEX(raidPtr->mutex);
        raidPtr->raid_cinfo[fcol].ci_vp = NULL;

        /* Need to mark the component as not being auto_configured
           (in case it was previously). */

        raidPtr->Disks[fcol].auto_configured = 0;
        RF_UNLOCK_MUTEX(raidPtr->mutex);
        /* now we can allow IO to continue -- we'll be suspending it
           again in rf_ReconstructFailedDisk() if we have to.. */

        rf_ResumeNewRequests(raidPtr);

        if (initRecon)
                rf_ReconstructFailedDisk(raidPtr, fcol);
        return (0);
}
/* releases a thread that is waiting for the array to become quiesced.
 * access_suspend_mutex should be locked upon calling this
 */
void
rf_SignalQuiescenceLock(RF_Raid_t *raidPtr)
{
#if RF_DEBUG_QUIESCE
        if (rf_quiesceDebug) {
                printf("raid%d: Signalling quiescence lock\n",
                       raidPtr->raidid);
        }
#endif
        raidPtr->access_suspend_release = 1;

        if (raidPtr->waiting_for_quiescence) {
                SIGNAL_QUIESCENT_COND(raidPtr);
        }
}
/* suspends all new requests to the array.  No effect on accesses that are in flight.  */
int
rf_SuspendNewRequestsAndWait(RF_Raid_t *raidPtr)
{
#if RF_DEBUG_QUIESCE
        if (rf_quiesceDebug)
                printf("raid%d: Suspending new reqs\n", raidPtr->raidid);
#endif
        RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
        raidPtr->accesses_suspended++;
        raidPtr->waiting_for_quiescence = (raidPtr->accs_in_flight == 0) ? 0 : 1;

        if (raidPtr->waiting_for_quiescence) {
                raidPtr->access_suspend_release = 0;
                while (!raidPtr->access_suspend_release) {
#if RF_DEBUG_QUIESCE
                        printf("raid%d: Suspending: Waiting for Quiescence\n",
                               raidPtr->raidid);
#endif
                        WAIT_FOR_QUIESCENCE(raidPtr);
                        raidPtr->waiting_for_quiescence = 0;
                }
        }
#if RF_DEBUG_QUIESCE
        printf("raid%d: Quiescence reached..\n", raidPtr->raidid);
#endif

        RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);
        return (raidPtr->waiting_for_quiescence);
}
/* wake up everyone waiting for quiescence to be released */
void
rf_ResumeNewRequests(RF_Raid_t *raidPtr)
{
        RF_CallbackDesc_t *t, *cb;

#if RF_DEBUG_QUIESCE
        if (rf_quiesceDebug)
                printf("raid%d: Resuming new requests\n", raidPtr->raidid);
#endif

        RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
        raidPtr->accesses_suspended--;
        if (raidPtr->accesses_suspended == 0)
                cb = raidPtr->quiesce_wait_list;
        else
                cb = NULL;
        raidPtr->quiesce_wait_list = NULL;
        RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);

        while (cb) {
                t = cb;
                cb = cb->next;
                (t->callbackFunc) (t->callbackArg);
                rf_FreeCallbackDesc(t);
        }
}
/*****************************************************************************************
 *
 * debug routines
 *
 ****************************************************************************************/

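/*
 * Look up name in the rf_debugNames[] table and, if found, set the
 * corresponding debug variable to val.
 */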
static void
set_debug_option(char *name, long val)
{
        RF_DebugName_t *p;

        for (p = rf_debugNames; p->name; p++) {
                if (!strcmp(p->name, name)) {
                        *(p->ptr) = val;
                        printf("[Set debug variable %s to %ld]\n", name, val);
                        return;
                }
        }
        RF_ERRORMSG1("Unknown debug string \"%s\"\n", name);
}


/* would like to use sscanf here, but apparently not available in kernel */
/*ARGSUSED*/
static void
rf_ConfigureDebug(RF_Config_t *cfgPtr)
{
        char *val_p, *name_p, *white_p;
        long val;
        int i;

        rf_ResetDebugOptions();
        for (i = 0; i < RF_MAXDBGV && cfgPtr->debugVars[i][0]; i++) {
                name_p = rf_find_non_white(&cfgPtr->debugVars[i][0]);
                white_p = rf_find_white(name_p);        /* skip to start of 2nd
                                                         * word */
                val_p = rf_find_non_white(white_p);
                if (*val_p == '0' && *(val_p + 1) == 'x')
                        val = rf_htoi(val_p + 2);
                else
                        val = rf_atoi(val_p);
                *white_p = '\0';
                set_debug_option(name_p, val);
        }
}

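/*
 * Format the source location of a fatal RAIDframe error into rf_panicbuf
 * just before the driver panics.
 */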
void
rf_print_panic_message(int line, const char *file)
{
        snprintf(rf_panicbuf, sizeof(rf_panicbuf),
            "raidframe error at line %d file %s", line, file);
}

#ifdef RAID_DIAGNOSTIC
void
rf_print_assert_panic_message(int line, const char *file, const char *condition)
{
        snprintf(rf_panicbuf, sizeof(rf_panicbuf),
            "raidframe error at line %d file %s (failed asserting %s)\n",
            line, file, condition);
}
#endif

void
rf_print_unable_to_init_mutex(const char *file, int line, int rc)
{
        RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
                     file, line, rc);
}

void
rf_print_unable_to_add_shutdown(const char *file, int line, int rc)
{
        RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n",
                     file, line, rc);
}
