rf_netbsdkintf.c revision 1.183 1 /* $NetBSD: rf_netbsdkintf.c,v 1.183 2004/11/16 16:45:51 oster Exp $ */
2 /*-
3 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Greg Oster; Jason R. Thorpe.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1990, 1993
40 * The Regents of the University of California. All rights reserved.
41 *
42 * This code is derived from software contributed to Berkeley by
43 * the Systems Programming Group of the University of Utah Computer
44 * Science Department.
45 *
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
48 * are met:
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. Neither the name of the University nor the names of its contributors
55 * may be used to endorse or promote products derived from this software
56 * without specific prior written permission.
57 *
58 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 * SUCH DAMAGE.
69 *
70 * from: Utah $Hdr: cd.c 1.6 90/11/28$
71 *
72 * @(#)cd.c 8.2 (Berkeley) 11/16/93
73 */
74
75 /*
76 * Copyright (c) 1988 University of Utah.
77 *
78 * This code is derived from software contributed to Berkeley by
79 * the Systems Programming Group of the University of Utah Computer
80 * Science Department.
81 *
82 * Redistribution and use in source and binary forms, with or without
83 * modification, are permitted provided that the following conditions
84 * are met:
85 * 1. Redistributions of source code must retain the above copyright
86 * notice, this list of conditions and the following disclaimer.
87 * 2. Redistributions in binary form must reproduce the above copyright
88 * notice, this list of conditions and the following disclaimer in the
89 * documentation and/or other materials provided with the distribution.
90 * 3. All advertising materials mentioning features or use of this software
91 * must display the following acknowledgement:
92 * This product includes software developed by the University of
93 * California, Berkeley and its contributors.
94 * 4. Neither the name of the University nor the names of its contributors
95 * may be used to endorse or promote products derived from this software
96 * without specific prior written permission.
97 *
98 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
99 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
100 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
101 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
102 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
103 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
104 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
105 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
106 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
107 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
108 * SUCH DAMAGE.
109 *
110 * from: Utah $Hdr: cd.c 1.6 90/11/28$
111 *
112 * @(#)cd.c 8.2 (Berkeley) 11/16/93
113 */
114
115 /*
116 * Copyright (c) 1995 Carnegie-Mellon University.
117 * All rights reserved.
118 *
119 * Authors: Mark Holland, Jim Zelenka
120 *
121 * Permission to use, copy, modify and distribute this software and
122 * its documentation is hereby granted, provided that both the copyright
123 * notice and this permission notice appear in all copies of the
124 * software, derivative works or modified versions, and any portions
125 * thereof, and that both notices appear in supporting documentation.
126 *
127 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
128 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
129 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
130 *
131 * Carnegie Mellon requests users of this software to return to
132 *
133 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
134 * School of Computer Science
135 * Carnegie Mellon University
136 * Pittsburgh PA 15213-3890
137 *
138 * any improvements or extensions that they make and grant Carnegie the
139 * rights to redistribute these changes.
140 */
141
142 /***********************************************************
143 *
144 * rf_kintf.c -- the kernel interface routines for RAIDframe
145 *
146 ***********************************************************/
147
148 #include <sys/cdefs.h>
149 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.183 2004/11/16 16:45:51 oster Exp $");
150
151 #include <sys/param.h>
152 #include <sys/errno.h>
153 #include <sys/pool.h>
154 #include <sys/proc.h>
155 #include <sys/queue.h>
156 #include <sys/disk.h>
157 #include <sys/device.h>
158 #include <sys/stat.h>
159 #include <sys/ioctl.h>
160 #include <sys/fcntl.h>
161 #include <sys/systm.h>
162 #include <sys/namei.h>
163 #include <sys/vnode.h>
164 #include <sys/disklabel.h>
165 #include <sys/conf.h>
166 #include <sys/lock.h>
167 #include <sys/buf.h>
168 #include <sys/bufq.h>
169 #include <sys/user.h>
170 #include <sys/reboot.h>
171
172 #include <dev/raidframe/raidframevar.h>
173 #include <dev/raidframe/raidframeio.h>
174 #include "raid.h"
175 #include "opt_raid_autoconfig.h"
176 #include "rf_raid.h"
177 #include "rf_copyback.h"
178 #include "rf_dag.h"
179 #include "rf_dagflags.h"
180 #include "rf_desc.h"
181 #include "rf_diskqueue.h"
182 #include "rf_etimer.h"
183 #include "rf_general.h"
184 #include "rf_kintf.h"
185 #include "rf_options.h"
186 #include "rf_driver.h"
187 #include "rf_parityscan.h"
188 #include "rf_threadstuff.h"
189
190 #ifdef DEBUG
191 int rf_kdebug_level = 0;
192 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
193 #else /* DEBUG */
194 #define db1_printf(a) { }
195 #endif /* DEBUG */
196
197 static RF_Raid_t **raidPtrs; /* global raid device descriptors */
198
199 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
200
201 static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a
202 * spare table */
203 static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from
204 * installation process */
205
206 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
207
208 /* prototypes */
209 static void KernelWakeupFunc(struct buf * bp);
210 static void InitBP(struct buf * bp, struct vnode *, unsigned rw_flag,
211 dev_t dev, RF_SectorNum_t startSect,
212 RF_SectorCount_t numSect, caddr_t buf,
213 void (*cbFunc) (struct buf *), void *cbArg,
214 int logBytesPerSector, struct proc * b_proc);
215 static void raidinit(RF_Raid_t *);
216
217 void raidattach(int);
218
219 dev_type_open(raidopen);
220 dev_type_close(raidclose);
221 dev_type_read(raidread);
222 dev_type_write(raidwrite);
223 dev_type_ioctl(raidioctl);
224 dev_type_strategy(raidstrategy);
225 dev_type_dump(raiddump);
226 dev_type_size(raidsize);
227
/* Block-device switch: raid units are disk-class (D_DISK) block devices. */
const struct bdevsw raid_bdevsw = {
	raidopen, raidclose, raidstrategy, raidioctl,
	raiddump, raidsize, D_DISK
};
232
/* Character-device switch: raw I/O via raidread/raidwrite (physio). */
const struct cdevsw raid_cdevsw = {
	raidopen, raidclose, raidread, raidwrite, raidioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};
237
238 /*
239 * Pilfered from ccd.c
240 */
241
/*
 * Per-component-I/O wrapper, allocated from the rf_pools.cbuf pool.
 * rf_buf is the buf actually issued to the component disk; it MUST be
 * the first member (apparently so the containing raidbuf can be
 * recovered from a struct buf pointer -- confirm against pool users).
 */
struct raidbuf {
	struct buf rf_buf;	/* new I/O buf.  MUST BE FIRST!!! */
	struct buf *rf_obp;	/* ptr. to original I/O buf */
	RF_DiskQueueData_t *req;/* the request that this was part of.. */
};
247
248 /* XXX Not sure if the following should be replacing the raidPtrs above,
249 or if it should be used in conjunction with that...
250 */
251
/*
 * Per-unit software state, allocated as an array parallel to raidPtrs[].
 * The RAIDF_LOCKED/RAIDF_WANTED bits in sc_flags implement the
 * raidlock()/raidunlock() unit lock.
 */
struct raid_softc {
	int sc_flags;		/* flags (RAIDF_*) */
	int sc_cflags;		/* configuration flags */
	size_t sc_size;		/* size of the raid device */
	char sc_xname[20];	/* XXX external name */
	struct disk sc_dkdev;	/* generic disk device info */
	struct bufq_state buf_queue;	/* used for the device queue */
};
260 /* sc_flags */
261 #define RAIDF_INITED 0x01 /* unit has been initialized */
262 #define RAIDF_WLABEL 0x02 /* label area is writable */
263 #define RAIDF_LABELLING 0x04 /* unit is currently being labelled */
264 #define RAIDF_WANTED 0x40 /* someone is waiting to obtain a lock */
265 #define RAIDF_LOCKED 0x80 /* unit is locked */
266
267 #define raidunit(x) DISKUNIT(x)
268 int numraid = 0;
269
270 /*
271 * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
272 * Be aware that large numbers can allow the driver to consume a lot of
273 * kernel memory, especially on writes, and in degraded mode reads.
274 *
275 * For example: with a stripe width of 64 blocks (32k) and 5 disks,
276 * a single 64K write will typically require 64K for the old data,
277 * 64K for the old parity, and 64K for the new parity, for a total
278 * of 192K (if the parity buffer is not re-used immediately).
279 * Even it if is used immediately, that's still 128K, which when multiplied
280 * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
281 *
282 * Now in degraded mode, for example, a 64K read on the above setup may
283 * require data reconstruction, which will require *all* of the 4 remaining
284 * disks to participate -- 4 * 32K/disk == 128K again.
285 */
286
287 #ifndef RAIDOUTSTANDING
288 #define RAIDOUTSTANDING 6
289 #endif
290
291 #define RAIDLABELDEV(dev) \
292 (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
293
294 /* declared here, and made public, for the benefit of KVM stuff.. */
295 struct raid_softc *raid_softc;
296
297 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
298 struct disklabel *);
299 static void raidgetdisklabel(dev_t);
300 static void raidmakedisklabel(struct raid_softc *);
301
302 static int raidlock(struct raid_softc *);
303 static void raidunlock(struct raid_softc *);
304
305 static void rf_markalldirty(RF_Raid_t *);
306
307 struct device *raidrootdev;
308
309 void rf_ReconThread(struct rf_recon_req *);
310 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
311 void rf_CopybackThread(RF_Raid_t *raidPtr);
312 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
313 int rf_autoconfig(struct device *self);
314 void rf_buildroothack(RF_ConfigSet_t *);
315
316 RF_AutoConfig_t *rf_find_raid_components(void);
317 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
318 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
319 static int rf_reasonable_label(RF_ComponentLabel_t *);
320 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
321 int rf_set_autoconfig(RF_Raid_t *, int);
322 int rf_set_rootpartition(RF_Raid_t *, int);
323 void rf_release_all_vps(RF_ConfigSet_t *);
324 void rf_cleanup_config_set(RF_ConfigSet_t *);
325 int rf_have_enough_components(RF_ConfigSet_t *);
326 int rf_auto_config_set(RF_ConfigSet_t *, int *);
327
328 static int raidautoconfig = 0; /* Debugging, mostly. Set to 0 to not
329 allow autoconfig to take place.
330 Note that this is overridden by having
331 RAID_AUTOCONFIG as an option in the
332 kernel config file. */
333
334 struct RF_Pools_s rf_pools;
335
336 void
337 raidattach(int num)
338 {
339 int raidID;
340 int i, rc;
341
342 #ifdef DEBUG
343 printf("raidattach: Asked for %d units\n", num);
344 #endif
345
346 if (num <= 0) {
347 #ifdef DIAGNOSTIC
348 panic("raidattach: count <= 0");
349 #endif
350 return;
351 }
352 /* This is where all the initialization stuff gets done. */
353
354 numraid = num;
355
356 /* Make some space for requested number of units... */
357
358 RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
359 if (raidPtrs == NULL) {
360 panic("raidPtrs is NULL!!");
361 }
362
363 /* Initialize the component buffer pool. */
364 rf_pool_init(&rf_pools.cbuf, sizeof(struct raidbuf),
365 "raidpl", num * RAIDOUTSTANDING,
366 2 * num * RAIDOUTSTANDING);
367
368 rf_mutex_init(&rf_sparet_wait_mutex);
369
370 rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
371
372 for (i = 0; i < num; i++)
373 raidPtrs[i] = NULL;
374 rc = rf_BootRaidframe();
375 if (rc == 0)
376 printf("Kernelized RAIDframe activated\n");
377 else
378 panic("Serious error booting RAID!!");
379
380 /* put together some datastructures like the CCD device does.. This
381 * lets us lock the device and what-not when it gets opened. */
382
383 raid_softc = (struct raid_softc *)
384 malloc(num * sizeof(struct raid_softc),
385 M_RAIDFRAME, M_NOWAIT);
386 if (raid_softc == NULL) {
387 printf("WARNING: no memory for RAIDframe driver\n");
388 return;
389 }
390
391 memset(raid_softc, 0, num * sizeof(struct raid_softc));
392
393 raidrootdev = (struct device *)malloc(num * sizeof(struct device),
394 M_RAIDFRAME, M_NOWAIT);
395 if (raidrootdev == NULL) {
396 panic("No memory for RAIDframe driver!!?!?!");
397 }
398
399 for (raidID = 0; raidID < num; raidID++) {
400 bufq_alloc(&raid_softc[raidID].buf_queue, BUFQ_FCFS);
401
402 raidrootdev[raidID].dv_class = DV_DISK;
403 raidrootdev[raidID].dv_cfdata = NULL;
404 raidrootdev[raidID].dv_unit = raidID;
405 raidrootdev[raidID].dv_parent = NULL;
406 raidrootdev[raidID].dv_flags = 0;
407 snprintf(raidrootdev[raidID].dv_xname,
408 sizeof(raidrootdev[raidID].dv_xname), "raid%d", raidID);
409
410 RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
411 (RF_Raid_t *));
412 if (raidPtrs[raidID] == NULL) {
413 printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
414 numraid = raidID;
415 return;
416 }
417 }
418
419 #ifdef RAID_AUTOCONFIG
420 raidautoconfig = 1;
421 #endif
422
423 /*
424 * Register a finalizer which will be used to auto-config RAID
425 * sets once all real hardware devices have been found.
426 */
427 if (config_finalize_register(NULL, rf_autoconfig) != 0)
428 printf("WARNING: unable to register RAIDframe finalizer\n");
429 }
430
431 int
432 rf_autoconfig(struct device *self)
433 {
434 RF_AutoConfig_t *ac_list;
435 RF_ConfigSet_t *config_sets;
436
437 if (raidautoconfig == 0)
438 return (0);
439
440 /* XXX This code can only be run once. */
441 raidautoconfig = 0;
442
443 /* 1. locate all RAID components on the system */
444 #ifdef DEBUG
445 printf("Searching for RAID components...\n");
446 #endif
447 ac_list = rf_find_raid_components();
448
449 /* 2. Sort them into their respective sets. */
450 config_sets = rf_create_auto_sets(ac_list);
451
452 /*
453 * 3. Evaluate each set andconfigure the valid ones.
454 * This gets done in rf_buildroothack().
455 */
456 rf_buildroothack(config_sets);
457
458 return (1);
459 }
460
461 void
462 rf_buildroothack(RF_ConfigSet_t *config_sets)
463 {
464 RF_ConfigSet_t *cset;
465 RF_ConfigSet_t *next_cset;
466 int retcode;
467 int raidID;
468 int rootID;
469 int num_root;
470
471 rootID = 0;
472 num_root = 0;
473 cset = config_sets;
474 while(cset != NULL ) {
475 next_cset = cset->next;
476 if (rf_have_enough_components(cset) &&
477 cset->ac->clabel->autoconfigure==1) {
478 retcode = rf_auto_config_set(cset,&raidID);
479 if (!retcode) {
480 if (cset->rootable) {
481 rootID = raidID;
482 num_root++;
483 }
484 } else {
485 /* The autoconfig didn't work :( */
486 #if DEBUG
487 printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
488 #endif
489 rf_release_all_vps(cset);
490 }
491 } else {
492 /* we're not autoconfiguring this set...
493 release the associated resources */
494 rf_release_all_vps(cset);
495 }
496 /* cleanup */
497 rf_cleanup_config_set(cset);
498 cset = next_cset;
499 }
500
501 /* we found something bootable... */
502
503 if (num_root == 1) {
504 booted_device = &raidrootdev[rootID];
505 } else if (num_root > 1) {
506 /* we can't guess.. require the user to answer... */
507 boothowto |= RB_ASKNAME;
508 }
509 }
510
511
512 int
513 raidsize(dev_t dev)
514 {
515 struct raid_softc *rs;
516 struct disklabel *lp;
517 int part, unit, omask, size;
518
519 unit = raidunit(dev);
520 if (unit >= numraid)
521 return (-1);
522 rs = &raid_softc[unit];
523
524 if ((rs->sc_flags & RAIDF_INITED) == 0)
525 return (-1);
526
527 part = DISKPART(dev);
528 omask = rs->sc_dkdev.dk_openmask & (1 << part);
529 lp = rs->sc_dkdev.dk_label;
530
531 if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
532 return (-1);
533
534 if (lp->d_partitions[part].p_fstype != FS_SWAP)
535 size = -1;
536 else
537 size = lp->d_partitions[part].p_size *
538 (lp->d_secsize / DEV_BSIZE);
539
540 if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
541 return (-1);
542
543 return (size);
544
545 }
546
547 int
548 raiddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
549 {
550 /* Not implemented. */
551 return ENXIO;
552 }
553 /* ARGSUSED */
554 int
555 raidopen(dev_t dev, int flags, int fmt, struct proc *p)
556 {
557 int unit = raidunit(dev);
558 struct raid_softc *rs;
559 struct disklabel *lp;
560 int part, pmask;
561 int error = 0;
562
563 if (unit >= numraid)
564 return (ENXIO);
565 rs = &raid_softc[unit];
566
567 if ((error = raidlock(rs)) != 0)
568 return (error);
569 lp = rs->sc_dkdev.dk_label;
570
571 part = DISKPART(dev);
572 pmask = (1 << part);
573
574 if ((rs->sc_flags & RAIDF_INITED) &&
575 (rs->sc_dkdev.dk_openmask == 0))
576 raidgetdisklabel(dev);
577
578 /* make sure that this partition exists */
579
580 if (part != RAW_PART) {
581 if (((rs->sc_flags & RAIDF_INITED) == 0) ||
582 ((part >= lp->d_npartitions) ||
583 (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
584 error = ENXIO;
585 raidunlock(rs);
586 return (error);
587 }
588 }
589 /* Prevent this unit from being unconfigured while open. */
590 switch (fmt) {
591 case S_IFCHR:
592 rs->sc_dkdev.dk_copenmask |= pmask;
593 break;
594
595 case S_IFBLK:
596 rs->sc_dkdev.dk_bopenmask |= pmask;
597 break;
598 }
599
600 if ((rs->sc_dkdev.dk_openmask == 0) &&
601 ((rs->sc_flags & RAIDF_INITED) != 0)) {
602 /* First one... mark things as dirty... Note that we *MUST*
603 have done a configure before this. I DO NOT WANT TO BE
604 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
605 THAT THEY BELONG TOGETHER!!!!! */
606 /* XXX should check to see if we're only open for reading
607 here... If so, we needn't do this, but then need some
608 other way of keeping track of what's happened.. */
609
610 rf_markalldirty( raidPtrs[unit] );
611 }
612
613
614 rs->sc_dkdev.dk_openmask =
615 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
616
617 raidunlock(rs);
618
619 return (error);
620
621
622 }
623 /* ARGSUSED */
624 int
625 raidclose(dev_t dev, int flags, int fmt, struct proc *p)
626 {
627 int unit = raidunit(dev);
628 struct raid_softc *rs;
629 int error = 0;
630 int part;
631
632 if (unit >= numraid)
633 return (ENXIO);
634 rs = &raid_softc[unit];
635
636 if ((error = raidlock(rs)) != 0)
637 return (error);
638
639 part = DISKPART(dev);
640
641 /* ...that much closer to allowing unconfiguration... */
642 switch (fmt) {
643 case S_IFCHR:
644 rs->sc_dkdev.dk_copenmask &= ~(1 << part);
645 break;
646
647 case S_IFBLK:
648 rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
649 break;
650 }
651 rs->sc_dkdev.dk_openmask =
652 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
653
654 if ((rs->sc_dkdev.dk_openmask == 0) &&
655 ((rs->sc_flags & RAIDF_INITED) != 0)) {
656 /* Last one... device is not unconfigured yet.
657 Device shutdown has taken care of setting the
658 clean bits if RAIDF_INITED is not set
659 mark things as clean... */
660
661 rf_update_component_labels(raidPtrs[unit],
662 RF_FINAL_COMPONENT_UPDATE);
663 if (doing_shutdown) {
664 /* last one, and we're going down, so
665 lights out for this RAID set too. */
666 error = rf_Shutdown(raidPtrs[unit]);
667
668 /* It's no longer initialized... */
669 rs->sc_flags &= ~RAIDF_INITED;
670
671 /* Detach the disk. */
672 disk_detach(&rs->sc_dkdev);
673 }
674 }
675
676 raidunlock(rs);
677 return (0);
678
679 }
680
681 void
682 raidstrategy(struct buf *bp)
683 {
684 int s;
685
686 unsigned int raidID = raidunit(bp->b_dev);
687 RF_Raid_t *raidPtr;
688 struct raid_softc *rs = &raid_softc[raidID];
689 int wlabel;
690
691 if ((rs->sc_flags & RAIDF_INITED) ==0) {
692 bp->b_error = ENXIO;
693 bp->b_flags |= B_ERROR;
694 bp->b_resid = bp->b_bcount;
695 biodone(bp);
696 return;
697 }
698 if (raidID >= numraid || !raidPtrs[raidID]) {
699 bp->b_error = ENODEV;
700 bp->b_flags |= B_ERROR;
701 bp->b_resid = bp->b_bcount;
702 biodone(bp);
703 return;
704 }
705 raidPtr = raidPtrs[raidID];
706 if (!raidPtr->valid) {
707 bp->b_error = ENODEV;
708 bp->b_flags |= B_ERROR;
709 bp->b_resid = bp->b_bcount;
710 biodone(bp);
711 return;
712 }
713 if (bp->b_bcount == 0) {
714 db1_printf(("b_bcount is zero..\n"));
715 biodone(bp);
716 return;
717 }
718
719 /*
720 * Do bounds checking and adjust transfer. If there's an
721 * error, the bounds check will flag that for us.
722 */
723
724 wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
725 if (DISKPART(bp->b_dev) != RAW_PART)
726 if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
727 db1_printf(("Bounds check failed!!:%d %d\n",
728 (int) bp->b_blkno, (int) wlabel));
729 biodone(bp);
730 return;
731 }
732 s = splbio();
733
734 bp->b_resid = 0;
735
736 /* stuff it onto our queue */
737 BUFQ_PUT(&rs->buf_queue, bp);
738
739 raidstart(raidPtrs[raidID]);
740
741 splx(s);
742 }
743 /* ARGSUSED */
744 int
745 raidread(dev_t dev, struct uio *uio, int flags)
746 {
747 int unit = raidunit(dev);
748 struct raid_softc *rs;
749
750 if (unit >= numraid)
751 return (ENXIO);
752 rs = &raid_softc[unit];
753
754 if ((rs->sc_flags & RAIDF_INITED) == 0)
755 return (ENXIO);
756
757 return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
758
759 }
760 /* ARGSUSED */
761 int
762 raidwrite(dev_t dev, struct uio *uio, int flags)
763 {
764 int unit = raidunit(dev);
765 struct raid_softc *rs;
766
767 if (unit >= numraid)
768 return (ENXIO);
769 rs = &raid_softc[unit];
770
771 if ((rs->sc_flags & RAIDF_INITED) == 0)
772 return (ENXIO);
773
774 return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
775
776 }
777
778 int
779 raidioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
780 {
781 int unit = raidunit(dev);
782 int error = 0;
783 int part, pmask;
784 struct raid_softc *rs;
785 RF_Config_t *k_cfg, *u_cfg;
786 RF_Raid_t *raidPtr;
787 RF_RaidDisk_t *diskPtr;
788 RF_AccTotals_t *totals;
789 RF_DeviceConfig_t *d_cfg, **ucfgp;
790 u_char *specific_buf;
791 int retcode = 0;
792 int column;
793 int raidid;
794 struct rf_recon_req *rrcopy, *rr;
795 RF_ComponentLabel_t *clabel;
796 RF_ComponentLabel_t ci_label;
797 RF_ComponentLabel_t **clabel_ptr;
798 RF_SingleComponent_t *sparePtr,*componentPtr;
799 RF_SingleComponent_t hot_spare;
800 RF_SingleComponent_t component;
801 RF_ProgressInfo_t progressInfo, **progressInfoPtr;
802 int i, j, d;
803 #ifdef __HAVE_OLD_DISKLABEL
804 struct disklabel newlabel;
805 #endif
806
807 if (unit >= numraid)
808 return (ENXIO);
809 rs = &raid_softc[unit];
810 raidPtr = raidPtrs[unit];
811
812 db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
813 (int) DISKPART(dev), (int) unit, (int) cmd));
814
815 /* Must be open for writes for these commands... */
816 switch (cmd) {
817 case DIOCSDINFO:
818 case DIOCWDINFO:
819 #ifdef __HAVE_OLD_DISKLABEL
820 case ODIOCWDINFO:
821 case ODIOCSDINFO:
822 #endif
823 case DIOCWLABEL:
824 if ((flag & FWRITE) == 0)
825 return (EBADF);
826 }
827
828 /* Must be initialized for these... */
829 switch (cmd) {
830 case DIOCGDINFO:
831 case DIOCSDINFO:
832 case DIOCWDINFO:
833 #ifdef __HAVE_OLD_DISKLABEL
834 case ODIOCGDINFO:
835 case ODIOCWDINFO:
836 case ODIOCSDINFO:
837 case ODIOCGDEFLABEL:
838 #endif
839 case DIOCGPART:
840 case DIOCWLABEL:
841 case DIOCGDEFLABEL:
842 case RAIDFRAME_SHUTDOWN:
843 case RAIDFRAME_REWRITEPARITY:
844 case RAIDFRAME_GET_INFO:
845 case RAIDFRAME_RESET_ACCTOTALS:
846 case RAIDFRAME_GET_ACCTOTALS:
847 case RAIDFRAME_KEEP_ACCTOTALS:
848 case RAIDFRAME_GET_SIZE:
849 case RAIDFRAME_FAIL_DISK:
850 case RAIDFRAME_COPYBACK:
851 case RAIDFRAME_CHECK_RECON_STATUS:
852 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
853 case RAIDFRAME_GET_COMPONENT_LABEL:
854 case RAIDFRAME_SET_COMPONENT_LABEL:
855 case RAIDFRAME_ADD_HOT_SPARE:
856 case RAIDFRAME_REMOVE_HOT_SPARE:
857 case RAIDFRAME_INIT_LABELS:
858 case RAIDFRAME_REBUILD_IN_PLACE:
859 case RAIDFRAME_CHECK_PARITY:
860 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
861 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
862 case RAIDFRAME_CHECK_COPYBACK_STATUS:
863 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
864 case RAIDFRAME_SET_AUTOCONFIG:
865 case RAIDFRAME_SET_ROOT:
866 case RAIDFRAME_DELETE_COMPONENT:
867 case RAIDFRAME_INCORPORATE_HOT_SPARE:
868 if ((rs->sc_flags & RAIDF_INITED) == 0)
869 return (ENXIO);
870 }
871
872 switch (cmd) {
873
874 /* configure the system */
875 case RAIDFRAME_CONFIGURE:
876
877 if (raidPtr->valid) {
878 /* There is a valid RAID set running on this unit! */
879 printf("raid%d: Device already configured!\n",unit);
880 return(EINVAL);
881 }
882
883 /* copy-in the configuration information */
884 /* data points to a pointer to the configuration structure */
885
886 u_cfg = *((RF_Config_t **) data);
887 RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
888 if (k_cfg == NULL) {
889 return (ENOMEM);
890 }
891 retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
892 if (retcode) {
893 RF_Free(k_cfg, sizeof(RF_Config_t));
894 db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
895 retcode));
896 return (retcode);
897 }
898 /* allocate a buffer for the layout-specific data, and copy it
899 * in */
900 if (k_cfg->layoutSpecificSize) {
901 if (k_cfg->layoutSpecificSize > 10000) {
902 /* sanity check */
903 RF_Free(k_cfg, sizeof(RF_Config_t));
904 return (EINVAL);
905 }
906 RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
907 (u_char *));
908 if (specific_buf == NULL) {
909 RF_Free(k_cfg, sizeof(RF_Config_t));
910 return (ENOMEM);
911 }
912 retcode = copyin(k_cfg->layoutSpecific, specific_buf,
913 k_cfg->layoutSpecificSize);
914 if (retcode) {
915 RF_Free(k_cfg, sizeof(RF_Config_t));
916 RF_Free(specific_buf,
917 k_cfg->layoutSpecificSize);
918 db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
919 retcode));
920 return (retcode);
921 }
922 } else
923 specific_buf = NULL;
924 k_cfg->layoutSpecific = specific_buf;
925
926 /* should do some kind of sanity check on the configuration.
927 * Store the sum of all the bytes in the last byte? */
928
929 /* configure the system */
930
931 /*
932 * Clear the entire RAID descriptor, just to make sure
933 * there is no stale data left in the case of a
934 * reconfiguration
935 */
936 memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
937 raidPtr->raidid = unit;
938
939 retcode = rf_Configure(raidPtr, k_cfg, NULL);
940
941 if (retcode == 0) {
942
943 /* allow this many simultaneous IO's to
944 this RAID device */
945 raidPtr->openings = RAIDOUTSTANDING;
946
947 raidinit(raidPtr);
948 rf_markalldirty(raidPtr);
949 }
950 /* free the buffers. No return code here. */
951 if (k_cfg->layoutSpecificSize) {
952 RF_Free(specific_buf, k_cfg->layoutSpecificSize);
953 }
954 RF_Free(k_cfg, sizeof(RF_Config_t));
955
956 return (retcode);
957
958 /* shutdown the system */
959 case RAIDFRAME_SHUTDOWN:
960
961 if ((error = raidlock(rs)) != 0)
962 return (error);
963
964 /*
965 * If somebody has a partition mounted, we shouldn't
966 * shutdown.
967 */
968
969 part = DISKPART(dev);
970 pmask = (1 << part);
971 if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
972 ((rs->sc_dkdev.dk_bopenmask & pmask) &&
973 (rs->sc_dkdev.dk_copenmask & pmask))) {
974 raidunlock(rs);
975 return (EBUSY);
976 }
977
978 retcode = rf_Shutdown(raidPtr);
979
980 /* It's no longer initialized... */
981 rs->sc_flags &= ~RAIDF_INITED;
982
983 /* Detach the disk. */
984 disk_detach(&rs->sc_dkdev);
985
986 raidunlock(rs);
987
988 return (retcode);
989 case RAIDFRAME_GET_COMPONENT_LABEL:
990 clabel_ptr = (RF_ComponentLabel_t **) data;
991 /* need to read the component label for the disk indicated
992 by row,column in clabel */
993
	/* For practice, let's get it directly from disk, rather
995 than from the in-core copy */
996 RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
997 (RF_ComponentLabel_t *));
998 if (clabel == NULL)
999 return (ENOMEM);
1000
1001 memset((char *) clabel, 0, sizeof(RF_ComponentLabel_t));
1002
1003 retcode = copyin( *clabel_ptr, clabel,
1004 sizeof(RF_ComponentLabel_t));
1005
1006 if (retcode) {
1007 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1008 return(retcode);
1009 }
1010
1011 clabel->row = 0; /* Don't allow looking at anything else.*/
1012
1013 column = clabel->column;
1014
1015 if ((column < 0) || (column >= raidPtr->numCol +
1016 raidPtr->numSpare)) {
1017 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1018 return(EINVAL);
1019 }
1020
1021 raidread_component_label(raidPtr->Disks[column].dev,
1022 raidPtr->raid_cinfo[column].ci_vp,
1023 clabel );
1024
1025 retcode = copyout(clabel, *clabel_ptr,
1026 sizeof(RF_ComponentLabel_t));
1027 RF_Free(clabel, sizeof(RF_ComponentLabel_t));
1028 return (retcode);
1029
1030 case RAIDFRAME_SET_COMPONENT_LABEL:
1031 clabel = (RF_ComponentLabel_t *) data;
1032
1033 /* XXX check the label for valid stuff... */
1034 /* Note that some things *should not* get modified --
1035 the user should be re-initing the labels instead of
1036 trying to patch things.
1037 */
1038
1039 raidid = raidPtr->raidid;
1040 #if DEBUG
1041 printf("raid%d: Got component label:\n", raidid);
1042 printf("raid%d: Version: %d\n", raidid, clabel->version);
1043 printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1044 printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1045 printf("raid%d: Column: %d\n", raidid, clabel->column);
1046 printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1047 printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1048 printf("raid%d: Status: %d\n", raidid, clabel->status);
1049 #endif
1050 clabel->row = 0;
1051 column = clabel->column;
1052
1053 if ((column < 0) || (column >= raidPtr->numCol)) {
1054 return(EINVAL);
1055 }
1056
1057 /* XXX this isn't allowed to do anything for now :-) */
1058
1059 /* XXX and before it is, we need to fill in the rest
1060 of the fields!?!?!?! */
1061 #if 0
1062 raidwrite_component_label(
1063 raidPtr->Disks[column].dev,
1064 raidPtr->raid_cinfo[column].ci_vp,
1065 clabel );
1066 #endif
1067 return (0);
1068
1069 case RAIDFRAME_INIT_LABELS:
1070 clabel = (RF_ComponentLabel_t *) data;
1071 /*
1072 we only want the serial number from
1073 the above. We get all the rest of the information
1074 from the config that was used to create this RAID
1075 set.
1076 */
1077
1078 raidPtr->serial_number = clabel->serial_number;
1079
1080 raid_init_component_label(raidPtr, &ci_label);
1081 ci_label.serial_number = clabel->serial_number;
1082 ci_label.row = 0; /* we dont' pretend to support more */
1083
1084 for(column=0;column<raidPtr->numCol;column++) {
1085 diskPtr = &raidPtr->Disks[column];
1086 if (!RF_DEAD_DISK(diskPtr->status)) {
1087 ci_label.partitionSize = diskPtr->partitionSize;
1088 ci_label.column = column;
1089 raidwrite_component_label(
1090 raidPtr->Disks[column].dev,
1091 raidPtr->raid_cinfo[column].ci_vp,
1092 &ci_label );
1093 }
1094 }
1095
1096 return (retcode);
1097 case RAIDFRAME_SET_AUTOCONFIG:
1098 d = rf_set_autoconfig(raidPtr, *(int *) data);
1099 printf("raid%d: New autoconfig value is: %d\n",
1100 raidPtr->raidid, d);
1101 *(int *) data = d;
1102 return (retcode);
1103
1104 case RAIDFRAME_SET_ROOT:
1105 d = rf_set_rootpartition(raidPtr, *(int *) data);
1106 printf("raid%d: New rootpartition value is: %d\n",
1107 raidPtr->raidid, d);
1108 *(int *) data = d;
1109 return (retcode);
1110
1111 /* initialize all parity */
1112 case RAIDFRAME_REWRITEPARITY:
1113
1114 if (raidPtr->Layout.map->faultsTolerated == 0) {
1115 /* Parity for RAID 0 is trivially correct */
1116 raidPtr->parity_good = RF_RAID_CLEAN;
1117 return(0);
1118 }
1119
1120 if (raidPtr->parity_rewrite_in_progress == 1) {
1121 /* Re-write is already in progress! */
1122 return(EINVAL);
1123 }
1124
1125 retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1126 rf_RewriteParityThread,
1127 raidPtr,"raid_parity");
1128 return (retcode);
1129
1130
1131 case RAIDFRAME_ADD_HOT_SPARE:
1132 sparePtr = (RF_SingleComponent_t *) data;
1133 memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
1134 retcode = rf_add_hot_spare(raidPtr, &hot_spare);
1135 return(retcode);
1136
1137 case RAIDFRAME_REMOVE_HOT_SPARE:
1138 return(retcode);
1139
1140 case RAIDFRAME_DELETE_COMPONENT:
1141 componentPtr = (RF_SingleComponent_t *)data;
1142 memcpy( &component, componentPtr,
1143 sizeof(RF_SingleComponent_t));
1144 retcode = rf_delete_component(raidPtr, &component);
1145 return(retcode);
1146
1147 case RAIDFRAME_INCORPORATE_HOT_SPARE:
1148 componentPtr = (RF_SingleComponent_t *)data;
1149 memcpy( &component, componentPtr,
1150 sizeof(RF_SingleComponent_t));
1151 retcode = rf_incorporate_hot_spare(raidPtr, &component);
1152 return(retcode);
1153
1154 case RAIDFRAME_REBUILD_IN_PLACE:
1155
1156 if (raidPtr->Layout.map->faultsTolerated == 0) {
1157 /* Can't do this on a RAID 0!! */
1158 return(EINVAL);
1159 }
1160
1161 if (raidPtr->recon_in_progress == 1) {
1162 /* a reconstruct is already in progress! */
1163 return(EINVAL);
1164 }
1165
1166 componentPtr = (RF_SingleComponent_t *) data;
1167 memcpy( &component, componentPtr,
1168 sizeof(RF_SingleComponent_t));
1169 component.row = 0; /* we don't support any more */
1170 column = component.column;
1171
1172 if ((column < 0) || (column >= raidPtr->numCol)) {
1173 return(EINVAL);
1174 }
1175
1176 RF_LOCK_MUTEX(raidPtr->mutex);
1177 if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
1178 (raidPtr->numFailures > 0)) {
1179 /* XXX 0 above shouldn't be constant!!! */
1180 /* some component other than this has failed.
1181 Let's not make things worse than they already
1182 are... */
1183 printf("raid%d: Unable to reconstruct to disk at:\n",
1184 raidPtr->raidid);
1185 printf("raid%d: Col: %d Too many failures.\n",
1186 raidPtr->raidid, column);
1187 RF_UNLOCK_MUTEX(raidPtr->mutex);
1188 return (EINVAL);
1189 }
1190 if (raidPtr->Disks[column].status ==
1191 rf_ds_reconstructing) {
1192 printf("raid%d: Unable to reconstruct to disk at:\n",
1193 raidPtr->raidid);
1194 printf("raid%d: Col: %d Reconstruction already occuring!\n", raidPtr->raidid, column);
1195
1196 RF_UNLOCK_MUTEX(raidPtr->mutex);
1197 return (EINVAL);
1198 }
1199 if (raidPtr->Disks[column].status == rf_ds_spared) {
1200 RF_UNLOCK_MUTEX(raidPtr->mutex);
1201 return (EINVAL);
1202 }
1203 RF_UNLOCK_MUTEX(raidPtr->mutex);
1204
1205 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1206 if (rrcopy == NULL)
1207 return(ENOMEM);
1208
1209 rrcopy->raidPtr = (void *) raidPtr;
1210 rrcopy->col = column;
1211
1212 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1213 rf_ReconstructInPlaceThread,
1214 rrcopy,"raid_reconip");
1215 return(retcode);
1216
1217 case RAIDFRAME_GET_INFO:
1218 if (!raidPtr->valid)
1219 return (ENODEV);
1220 ucfgp = (RF_DeviceConfig_t **) data;
1221 RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1222 (RF_DeviceConfig_t *));
1223 if (d_cfg == NULL)
1224 return (ENOMEM);
1225 memset((char *) d_cfg, 0, sizeof(RF_DeviceConfig_t));
1226 d_cfg->rows = 1; /* there is only 1 row now */
1227 d_cfg->cols = raidPtr->numCol;
1228 d_cfg->ndevs = raidPtr->numCol;
1229 if (d_cfg->ndevs >= RF_MAX_DISKS) {
1230 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1231 return (ENOMEM);
1232 }
1233 d_cfg->nspares = raidPtr->numSpare;
1234 if (d_cfg->nspares >= RF_MAX_DISKS) {
1235 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1236 return (ENOMEM);
1237 }
1238 d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1239 d = 0;
1240 for (j = 0; j < d_cfg->cols; j++) {
1241 d_cfg->devs[d] = raidPtr->Disks[j];
1242 d++;
1243 }
1244 for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1245 d_cfg->spares[i] = raidPtr->Disks[j];
1246 }
1247 retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
1248 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1249
1250 return (retcode);
1251
1252 case RAIDFRAME_CHECK_PARITY:
1253 *(int *) data = raidPtr->parity_good;
1254 return (0);
1255
1256 case RAIDFRAME_RESET_ACCTOTALS:
1257 memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1258 return (0);
1259
1260 case RAIDFRAME_GET_ACCTOTALS:
1261 totals = (RF_AccTotals_t *) data;
1262 *totals = raidPtr->acc_totals;
1263 return (0);
1264
1265 case RAIDFRAME_KEEP_ACCTOTALS:
1266 raidPtr->keep_acc_totals = *(int *)data;
1267 return (0);
1268
1269 case RAIDFRAME_GET_SIZE:
1270 *(int *) data = raidPtr->totalSectors;
1271 return (0);
1272
1273 /* fail a disk & optionally start reconstruction */
1274 case RAIDFRAME_FAIL_DISK:
1275
1276 if (raidPtr->Layout.map->faultsTolerated == 0) {
1277 /* Can't do this on a RAID 0!! */
1278 return(EINVAL);
1279 }
1280
1281 rr = (struct rf_recon_req *) data;
1282 rr->row = 0;
1283 if (rr->col < 0 || rr->col >= raidPtr->numCol)
1284 return (EINVAL);
1285
1286
1287 RF_LOCK_MUTEX(raidPtr->mutex);
1288 if ((raidPtr->Disks[rr->col].status ==
1289 rf_ds_optimal) && (raidPtr->numFailures > 0)) {
1290 /* some other component has failed. Let's not make
1291 things worse. XXX wrong for RAID6 */
1292 RF_UNLOCK_MUTEX(raidPtr->mutex);
1293 return (EINVAL);
1294 }
1295 if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
1296 /* Can't fail a spared disk! */
1297 RF_UNLOCK_MUTEX(raidPtr->mutex);
1298 return (EINVAL);
1299 }
1300 RF_UNLOCK_MUTEX(raidPtr->mutex);
1301
1302 /* make a copy of the recon request so that we don't rely on
1303 * the user's buffer */
1304 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1305 if (rrcopy == NULL)
1306 return(ENOMEM);
1307 memcpy(rrcopy, rr, sizeof(*rr));
1308 rrcopy->raidPtr = (void *) raidPtr;
1309
1310 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1311 rf_ReconThread,
1312 rrcopy,"raid_recon");
1313 return (0);
1314
1315 /* invoke a copyback operation after recon on whatever disk
1316 * needs it, if any */
1317 case RAIDFRAME_COPYBACK:
1318
1319 if (raidPtr->Layout.map->faultsTolerated == 0) {
1320 /* This makes no sense on a RAID 0!! */
1321 return(EINVAL);
1322 }
1323
1324 if (raidPtr->copyback_in_progress == 1) {
1325 /* Copyback is already in progress! */
1326 return(EINVAL);
1327 }
1328
1329 retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1330 rf_CopybackThread,
1331 raidPtr,"raid_copyback");
1332 return (retcode);
1333
1334 /* return the percentage completion of reconstruction */
1335 case RAIDFRAME_CHECK_RECON_STATUS:
1336 if (raidPtr->Layout.map->faultsTolerated == 0) {
1337 /* This makes no sense on a RAID 0, so tell the
1338 user it's done. */
1339 *(int *) data = 100;
1340 return(0);
1341 }
1342 if (raidPtr->status != rf_rs_reconstructing)
1343 *(int *) data = 100;
1344 else {
1345 if (raidPtr->reconControl->numRUsTotal > 0) {
1346 *(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
1347 } else {
1348 *(int *) data = 0;
1349 }
1350 }
1351 return (0);
1352 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1353 progressInfoPtr = (RF_ProgressInfo_t **) data;
1354 if (raidPtr->status != rf_rs_reconstructing) {
1355 progressInfo.remaining = 0;
1356 progressInfo.completed = 100;
1357 progressInfo.total = 100;
1358 } else {
1359 progressInfo.total =
1360 raidPtr->reconControl->numRUsTotal;
1361 progressInfo.completed =
1362 raidPtr->reconControl->numRUsComplete;
1363 progressInfo.remaining = progressInfo.total -
1364 progressInfo.completed;
1365 }
1366 retcode = copyout(&progressInfo, *progressInfoPtr,
1367 sizeof(RF_ProgressInfo_t));
1368 return (retcode);
1369
1370 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1371 if (raidPtr->Layout.map->faultsTolerated == 0) {
1372 /* This makes no sense on a RAID 0, so tell the
1373 user it's done. */
1374 *(int *) data = 100;
1375 return(0);
1376 }
1377 if (raidPtr->parity_rewrite_in_progress == 1) {
1378 *(int *) data = 100 *
1379 raidPtr->parity_rewrite_stripes_done /
1380 raidPtr->Layout.numStripe;
1381 } else {
1382 *(int *) data = 100;
1383 }
1384 return (0);
1385
1386 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1387 progressInfoPtr = (RF_ProgressInfo_t **) data;
1388 if (raidPtr->parity_rewrite_in_progress == 1) {
1389 progressInfo.total = raidPtr->Layout.numStripe;
1390 progressInfo.completed =
1391 raidPtr->parity_rewrite_stripes_done;
1392 progressInfo.remaining = progressInfo.total -
1393 progressInfo.completed;
1394 } else {
1395 progressInfo.remaining = 0;
1396 progressInfo.completed = 100;
1397 progressInfo.total = 100;
1398 }
1399 retcode = copyout(&progressInfo, *progressInfoPtr,
1400 sizeof(RF_ProgressInfo_t));
1401 return (retcode);
1402
1403 case RAIDFRAME_CHECK_COPYBACK_STATUS:
1404 if (raidPtr->Layout.map->faultsTolerated == 0) {
1405 /* This makes no sense on a RAID 0 */
1406 *(int *) data = 100;
1407 return(0);
1408 }
1409 if (raidPtr->copyback_in_progress == 1) {
1410 *(int *) data = 100 * raidPtr->copyback_stripes_done /
1411 raidPtr->Layout.numStripe;
1412 } else {
1413 *(int *) data = 100;
1414 }
1415 return (0);
1416
1417 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1418 progressInfoPtr = (RF_ProgressInfo_t **) data;
1419 if (raidPtr->copyback_in_progress == 1) {
1420 progressInfo.total = raidPtr->Layout.numStripe;
1421 progressInfo.completed =
1422 raidPtr->copyback_stripes_done;
1423 progressInfo.remaining = progressInfo.total -
1424 progressInfo.completed;
1425 } else {
1426 progressInfo.remaining = 0;
1427 progressInfo.completed = 100;
1428 progressInfo.total = 100;
1429 }
1430 retcode = copyout(&progressInfo, *progressInfoPtr,
1431 sizeof(RF_ProgressInfo_t));
1432 return (retcode);
1433
1434 /* the sparetable daemon calls this to wait for the kernel to
1435 * need a spare table. this ioctl does not return until a
1436 * spare table is needed. XXX -- calling mpsleep here in the
1437 * ioctl code is almost certainly wrong and evil. -- XXX XXX
1438 * -- I should either compute the spare table in the kernel,
1439 * or have a different -- XXX XXX -- interface (a different
1440 * character device) for delivering the table -- XXX */
1441 #if 0
1442 case RAIDFRAME_SPARET_WAIT:
1443 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1444 while (!rf_sparet_wait_queue)
1445 mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1446 waitreq = rf_sparet_wait_queue;
1447 rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1448 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1449
1450 /* structure assignment */
1451 *((RF_SparetWait_t *) data) = *waitreq;
1452
1453 RF_Free(waitreq, sizeof(*waitreq));
1454 return (0);
1455
1456 /* wakes up a process waiting on SPARET_WAIT and puts an error
1457 * code in it that will cause the dameon to exit */
1458 case RAIDFRAME_ABORT_SPARET_WAIT:
1459 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1460 waitreq->fcol = -1;
1461 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1462 waitreq->next = rf_sparet_wait_queue;
1463 rf_sparet_wait_queue = waitreq;
1464 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1465 wakeup(&rf_sparet_wait_queue);
1466 return (0);
1467
1468 /* used by the spare table daemon to deliver a spare table
1469 * into the kernel */
1470 case RAIDFRAME_SEND_SPARET:
1471
1472 /* install the spare table */
1473 retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1474
1475 /* respond to the requestor. the return status of the spare
1476 * table installation is passed in the "fcol" field */
1477 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1478 waitreq->fcol = retcode;
1479 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1480 waitreq->next = rf_sparet_resp_queue;
1481 rf_sparet_resp_queue = waitreq;
1482 wakeup(&rf_sparet_resp_queue);
1483 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1484
1485 return (retcode);
1486 #endif
1487
1488 default:
1489 break; /* fall through to the os-specific code below */
1490
1491 }
1492
1493 if (!raidPtr->valid)
1494 return (EINVAL);
1495
1496 /*
1497 * Add support for "regular" device ioctls here.
1498 */
1499
1500 switch (cmd) {
1501 case DIOCGDINFO:
1502 *(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1503 break;
1504 #ifdef __HAVE_OLD_DISKLABEL
1505 case ODIOCGDINFO:
1506 newlabel = *(rs->sc_dkdev.dk_label);
1507 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1508 return ENOTTY;
1509 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1510 break;
1511 #endif
1512
1513 case DIOCGPART:
1514 ((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1515 ((struct partinfo *) data)->part =
1516 &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1517 break;
1518
1519 case DIOCWDINFO:
1520 case DIOCSDINFO:
1521 #ifdef __HAVE_OLD_DISKLABEL
1522 case ODIOCWDINFO:
1523 case ODIOCSDINFO:
1524 #endif
1525 {
1526 struct disklabel *lp;
1527 #ifdef __HAVE_OLD_DISKLABEL
1528 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1529 memset(&newlabel, 0, sizeof newlabel);
1530 memcpy(&newlabel, data, sizeof (struct olddisklabel));
1531 lp = &newlabel;
1532 } else
1533 #endif
1534 lp = (struct disklabel *)data;
1535
1536 if ((error = raidlock(rs)) != 0)
1537 return (error);
1538
1539 rs->sc_flags |= RAIDF_LABELLING;
1540
1541 error = setdisklabel(rs->sc_dkdev.dk_label,
1542 lp, 0, rs->sc_dkdev.dk_cpulabel);
1543 if (error == 0) {
1544 if (cmd == DIOCWDINFO
1545 #ifdef __HAVE_OLD_DISKLABEL
1546 || cmd == ODIOCWDINFO
1547 #endif
1548 )
1549 error = writedisklabel(RAIDLABELDEV(dev),
1550 raidstrategy, rs->sc_dkdev.dk_label,
1551 rs->sc_dkdev.dk_cpulabel);
1552 }
1553 rs->sc_flags &= ~RAIDF_LABELLING;
1554
1555 raidunlock(rs);
1556
1557 if (error)
1558 return (error);
1559 break;
1560 }
1561
1562 case DIOCWLABEL:
1563 if (*(int *) data != 0)
1564 rs->sc_flags |= RAIDF_WLABEL;
1565 else
1566 rs->sc_flags &= ~RAIDF_WLABEL;
1567 break;
1568
1569 case DIOCGDEFLABEL:
1570 raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
1571 break;
1572
1573 #ifdef __HAVE_OLD_DISKLABEL
1574 case ODIOCGDEFLABEL:
1575 raidgetdefaultlabel(raidPtr, rs, &newlabel);
1576 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1577 return ENOTTY;
1578 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1579 break;
1580 #endif
1581
1582 default:
1583 retcode = ENOTTY;
1584 }
1585 return (retcode);
1586
1587 }
1588
1589
1590 /* raidinit -- complete the rest of the initialization for the
1591 RAIDframe device. */
1592
1593
1594 static void
1595 raidinit(RF_Raid_t *raidPtr)
1596 {
1597 struct raid_softc *rs;
1598 int unit;
1599
1600 unit = raidPtr->raidid;
1601
1602 rs = &raid_softc[unit];
1603
1604 /* XXX should check return code first... */
1605 rs->sc_flags |= RAIDF_INITED;
1606
1607 /* XXX doesn't check bounds. */
1608 snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
1609
1610 rs->sc_dkdev.dk_name = rs->sc_xname;
1611
1612 /* disk_attach actually creates space for the CPU disklabel, among
1613 * other things, so it's critical to call this *BEFORE* we try putzing
1614 * with disklabels. */
1615
1616 disk_attach(&rs->sc_dkdev);
1617
1618 /* XXX There may be a weird interaction here between this, and
1619 * protectedSectors, as used in RAIDframe. */
1620
1621 rs->sc_size = raidPtr->totalSectors;
1622 }
1623 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
1624 /* wake up the daemon & tell it to get us a spare table
1625 * XXX
1626 * the entries in the queues should be tagged with the raidPtr
1627 * so that in the extremely rare case that two recons happen at once,
1628 * we know for which device were requesting a spare table
1629 * XXX
1630 *
1631 * XXX This code is not currently used. GO
1632 */
int
rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
{
	int retcode;

	/* Queue our request and wake the sparetable daemon blocked in
	 * the (currently disabled) RAIDFRAME_SPARET_WAIT ioctl. */
	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
	req->next = rf_sparet_wait_queue;
	rf_sparet_wait_queue = req;
	wakeup(&rf_sparet_wait_queue);

	/* NOTE(review): the original comment said "mpsleep unlocks the
	 * mutex", but this tsleep() call does not release
	 * rf_sparet_wait_mutex -- presumably tolerable only because this
	 * code path is unused (see the XXX above); confirm before
	 * re-enabling the sparetable daemon interface. */
	while (!rf_sparet_resp_queue) {
		tsleep(&rf_sparet_resp_queue, PRIBIO,
		       "raidframe getsparetable", 0);
	}
	/* Dequeue the daemon's response from the head of the resp queue. */
	req = rf_sparet_resp_queue;
	rf_sparet_resp_queue = req->next;
	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);

	/* The installation status travels back in the "fcol" field. */
	retcode = req->fcol;
	RF_Free(req, sizeof(*req));	/* this is not the same req as we
					 * alloc'd */
	return (retcode);
}
1657 #endif
1658
1659 /* a wrapper around rf_DoAccess that extracts appropriate info from the
1660 * bp & passes it down.
1661 * any calls originating in the kernel must use non-blocking I/O
1662 * do some extra sanity checking to return "appropriate" error values for
1663 * certain conditions (to make some standard utilities work)
1664 *
1665 * Formerly known as: rf_DoAccessKernel
1666 */
void
raidstart(RF_Raid_t *raidPtr)
{
	RF_SectorCount_t num_blocks, pb, sum;
	RF_RaidAddr_t raid_addr;
	struct partition *pp;
	daddr_t blocknum;
	int unit;
	struct raid_softc *rs;
	int do_async;
	struct buf *bp;
	int rc;

	unit = raidPtr->raidid;
	rs = &raid_softc[unit];

	/* quick check to see if anything has died recently */
	RF_LOCK_MUTEX(raidPtr->mutex);
	if (raidPtr->numNewFailures > 0) {
		/* The mutex is deliberately dropped across the label
		 * update and re-taken afterwards. */
		RF_UNLOCK_MUTEX(raidPtr->mutex);
		rf_update_component_labels(raidPtr,
					   RF_NORMAL_COMPONENT_UPDATE);
		RF_LOCK_MUTEX(raidPtr->mutex);
		raidPtr->numNewFailures--;
	}

	/* Check to see if we're at the limit... */
	/* Loop invariant: the mutex is held at the top of each iteration
	 * and released immediately below; every "continue" re-takes it. */
	while (raidPtr->openings > 0) {
		RF_UNLOCK_MUTEX(raidPtr->mutex);

		/* get the next item, if any, from the queue */
		if ((bp = BUFQ_GET(&rs->buf_queue)) == NULL) {
			/* nothing more to do */
			return;
		}

		/* Ok, for the bp we have here, bp->b_blkno is relative to the
		 * partition.. Need to make it absolute to the underlying
		 * device.. */

		blocknum = bp->b_blkno;
		if (DISKPART(bp->b_dev) != RAW_PART) {
			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
			blocknum += pp->p_offset;
		}

		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
			    (int) blocknum));

		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));

		/* *THIS* is where we adjust what block we're going to...
		 * but DO NOT TOUCH bp->b_blkno!!! */
		raid_addr = blocknum;

		/* Compute the sector count of the request; pb is 1 iff the
		 * byte count is not a whole number of sectors.  The "sum <"
		 * comparisons below catch arithmetic wrap-around. */
		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
		sum = raid_addr + num_blocks + pb;
		if (1 || rf_debugKernelAccess) {
			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
				    (int) raid_addr, (int) sum, (int) num_blocks,
				    (int) pb, (int) bp->b_resid));
		}
		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
		    || (sum < num_blocks) || (sum < pb)) {
			/* Request runs past the end of the set (or the
			 * arithmetic overflowed): fail it with ENOSPC. */
			bp->b_error = ENOSPC;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			RF_LOCK_MUTEX(raidPtr->mutex);
			continue;
		}
		/*
		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
		 */

		if (bp->b_bcount & raidPtr->sectorMask) {
			/* Byte count is not sector-aligned: reject. */
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			RF_LOCK_MUTEX(raidPtr->mutex);
			continue;

		}
		db1_printf(("Calling DoAccess..\n"));


		/* Consume one opening for this request. */
		RF_LOCK_MUTEX(raidPtr->mutex);
		raidPtr->openings--;
		RF_UNLOCK_MUTEX(raidPtr->mutex);

		/*
		 * Everything is async.
		 */
		do_async = 1;

		disk_busy(&rs->sc_dkdev);

		/* XXX we're still at splbio() here... do we *really*
		   need to be? */

		/* don't ever condition on bp->b_flags & B_WRITE.
		 * always condition on B_READ instead */

		rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
				 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
				 do_async, raid_addr, num_blocks,
				 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);

		if (rc) {
			/* rf_DoAccess() refused the request synchronously:
			 * complete the buf with the error and keep going. */
			bp->b_error = rc;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			/* continue loop */
		}

		RF_LOCK_MUTEX(raidPtr->mutex);
	}
	RF_UNLOCK_MUTEX(raidPtr->mutex);
}
1790
1791
1792
1793
1794 /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
1795
int
rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
{
	int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
	struct buf *bp;
	struct raidbuf *raidbp = NULL;

	/* Save a backpointer; KernelWakeupFunc() uses req->queue to find
	 * the queue at completion time. */
	req->queue = queue;

#if DIAGNOSTIC
	if (queue->raidPtr->raidid >= numraid) {
		printf("Invalid unit number: %d %d\n", queue->raidPtr->raidid,
		       numraid);
		panic("Invalid Unit number in rf_DispatchKernelIO");
	}
#endif

	bp = req->bp;
#if 1
	/* XXX when there is a physical disk failure, someone is passing us a
	 * buffer that contains old stuff!! Attempt to deal with this problem
	 * without taking a performance hit... (not sure where the real bug
	 * is. It's buried in RAIDframe somewhere) :-( GO ) */

	if (bp->b_flags & B_ERROR) {
		bp->b_flags &= ~B_ERROR;
	}
	if (bp->b_error != 0) {
		bp->b_error = 0;
	}
#endif
	/* Allocate a shadow buf for the component I/O.  PR_NOWAIT: we may
	 * not sleep here, so a pool shortage fails the request. */
	raidbp = pool_get(&rf_pools.cbuf, PR_NOWAIT);
	if (raidbp == NULL) {
		bp->b_flags |= B_ERROR;
		bp->b_error = ENOMEM;
		return (ENOMEM);
	}
	BUF_INIT(&raidbp->rf_buf);

	/*
	 * context for raidiodone
	 */
	raidbp->rf_obp = bp;
	raidbp->req = req;

	BIO_COPYPRIO(&raidbp->rf_buf, bp);

	switch (req->type) {
	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
		/* XXX need to do something extra here.. */
		/* I'm leaving this in, as I've never actually seen it used,
		 * and I'd like folks to report it... GO */
		/* NOTE(review): the extra parentheses below are harmless
		 * (a parenthesized expression), but look like a leftover
		 * from a db1_printf-style macro. */
		printf(("WAKEUP CALLED\n"));
		queue->numOutstanding++;

		/* XXX need to glue the original buffer into this?? */

		/* Complete immediately via the normal completion path. */
		KernelWakeupFunc(&raidbp->rf_buf);
		break;

	case RF_IO_TYPE_READ:
	case RF_IO_TYPE_WRITE:
#if RF_ACC_TRACE > 0
		if (req->tracerec) {
			RF_ETIMER_START(req->tracerec->timer);
		}
#endif
		/* Fill in the shadow buf; KernelWakeupFunc() is installed
		 * as the iodone callback. */
		InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
		       op | bp->b_flags, queue->rf_cinfo->ci_dev,
		       req->sectorOffset, req->numSector,
		       req->buf, KernelWakeupFunc, (void *) req,
		       queue->raidPtr->logBytesPerSector, req->b_proc);

		if (rf_debugKernelAccess) {
			db1_printf(("dispatch: bp->b_blkno = %ld\n",
				    (long) bp->b_blkno));
		}
		queue->numOutstanding++;
		queue->last_deq_sector = req->sectorOffset;
		/* acc wouldn't have been let in if there were any pending
		 * reqs at any other priority */
		queue->curPriority = req->priority;

		db1_printf(("Going for %c to unit %d col %d\n",
			    req->type, queue->raidPtr->raidid,
			    queue->col));
		db1_printf(("sector %d count %d (%d bytes) %d\n",
			    (int) req->sectorOffset, (int) req->numSector,
			    (int) (req->numSector <<
				   queue->raidPtr->logBytesPerSector),
			    (int) queue->raidPtr->logBytesPerSector));
		/* Account the pending write on the component vnode before
		 * handing the buf to the underlying driver. */
		if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
			raidbp->rf_buf.b_vp->v_numoutput++;
		}
		VOP_STRATEGY(raidbp->rf_buf.b_vp, &raidbp->rf_buf);

		break;

	default:
		panic("bad req->type in rf_DispatchKernelIO");
	}
	db1_printf(("Exiting from DispatchKernelIO\n"));

	return (0);
}
1901 /* this is the callback function associated with a I/O invoked from
1902 kernel code.
1903 */
static void
KernelWakeupFunc(struct buf *vbp)
{
	RF_DiskQueueData_t *req = NULL;
	RF_DiskQueue_t *queue;
	/* NOTE(review): this cast assumes the struct buf is the first
	 * member of struct raidbuf -- confirm against its declaration. */
	struct raidbuf *raidbp = (struct raidbuf *) vbp;
	struct buf *bp;
	int s;

	/* Block disk interrupts while unwinding the completion. */
	s = splbio();
	db1_printf(("recovering the request queue:\n"));
	req = raidbp->req;

	bp = raidbp->rf_obp;

	queue = (RF_DiskQueue_t *) req->queue;

	/* Propagate a component-level error to the original buf,
	 * defaulting to EIO if no specific errno was recorded. */
	if (raidbp->rf_buf.b_flags & B_ERROR) {
		bp->b_flags |= B_ERROR;
		bp->b_error = raidbp->rf_buf.b_error ?
			raidbp->rf_buf.b_error : EIO;
	}

	/* XXX methinks this could be wrong... */
#if 1
	bp->b_resid = raidbp->rf_buf.b_resid;
#endif
#if RF_ACC_TRACE > 0
	if (req->tracerec) {
		RF_ETIMER_STOP(req->tracerec->timer);
		RF_ETIMER_EVAL(req->tracerec->timer);
		RF_LOCK_MUTEX(rf_tracing_mutex);
		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->num_phys_ios++;
		RF_UNLOCK_MUTEX(rf_tracing_mutex);
	}
#endif
	bp->b_bcount = raidbp->rf_buf.b_bcount;	/* XXXX ?? */

	/* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
	 * ballistic, and mark the component as hosed... */

	if (bp->b_flags & B_ERROR) {
		/* Mark the disk as dead */
		/* but only mark it once... */
		/* and only if it wouldn't leave this RAID set
		   completely broken */
		if ((queue->raidPtr->Disks[queue->col].status ==
		     rf_ds_optimal) && (queue->raidPtr->numFailures <
					queue->raidPtr->Layout.map->faultsTolerated)) {
			printf("raid%d: IO Error. Marking %s as failed.\n",
			       queue->raidPtr->raidid,
			       queue->raidPtr->Disks[queue->col].devname);
			queue->raidPtr->Disks[queue->col].status =
			    rf_ds_failed;
			queue->raidPtr->status = rf_rs_degraded;
			queue->raidPtr->numFailures++;
			queue->raidPtr->numNewFailures++;
		} else {	/* Disk is already dead... */
			/* printf("Disk already marked as dead!\n"); */
		}

	}

	/* The shadow buf is finished with; return it to the pool. */
	pool_put(&rf_pools.cbuf, raidbp);

	/* Fill in the error value */

	req->error = (bp->b_flags & B_ERROR) ? bp->b_error : 0;

	simple_lock(&queue->raidPtr->iodone_lock);

	/* Drop this one on the "finished" queue... */
	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);

	/* Let the raidio thread know there is work to be done. */
	wakeup(&(queue->raidPtr->iodone));

	simple_unlock(&queue->raidPtr->iodone_lock);

	splx(s);
}
1987
1988
1989
1990 /*
1991 * initialize a buf structure for doing an I/O in the kernel.
1992 */
1993 static void
1994 InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
1995 RF_SectorNum_t startSect, RF_SectorCount_t numSect, caddr_t buf,
1996 void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
1997 struct proc *b_proc)
1998 {
1999 /* bp->b_flags = B_PHYS | rw_flag; */
2000 bp->b_flags = B_CALL | rw_flag; /* XXX need B_PHYS here too??? */
2001 bp->b_bcount = numSect << logBytesPerSector;
2002 bp->b_bufsize = bp->b_bcount;
2003 bp->b_error = 0;
2004 bp->b_dev = dev;
2005 bp->b_data = buf;
2006 bp->b_blkno = startSect;
2007 bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
2008 if (bp->b_bcount == 0) {
2009 panic("bp->b_bcount is zero in InitBP!!");
2010 }
2011 bp->b_proc = b_proc;
2012 bp->b_iodone = cbFunc;
2013 bp->b_vp = b_vp;
2014
2015 }
2016
2017 static void
2018 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
2019 struct disklabel *lp)
2020 {
2021 memset(lp, 0, sizeof(*lp));
2022
2023 /* fabricate a label... */
2024 lp->d_secperunit = raidPtr->totalSectors;
2025 lp->d_secsize = raidPtr->bytesPerSector;
2026 lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
2027 lp->d_ntracks = 4 * raidPtr->numCol;
2028 lp->d_ncylinders = raidPtr->totalSectors /
2029 (lp->d_nsectors * lp->d_ntracks);
2030 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
2031
2032 strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
2033 lp->d_type = DTYPE_RAID;
2034 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
2035 lp->d_rpm = 3600;
2036 lp->d_interleave = 1;
2037 lp->d_flags = 0;
2038
2039 lp->d_partitions[RAW_PART].p_offset = 0;
2040 lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
2041 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
2042 lp->d_npartitions = RAW_PART + 1;
2043
2044 lp->d_magic = DISKMAGIC;
2045 lp->d_magic2 = DISKMAGIC;
2046 lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
2047
2048 }
2049 /*
2050 * Read the disklabel from the raid device. If one is not present, fake one
2051 * up.
2052 */
static void
raidgetdisklabel(dev_t dev)
{
	int unit = raidunit(dev);
	struct raid_softc *rs = &raid_softc[unit];
	const char *errstring;
	struct disklabel *lp = rs->sc_dkdev.dk_label;
	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
	RF_Raid_t *raidPtr;

	db1_printf(("Getting the disklabel...\n"));

	memset(clp, 0, sizeof(*clp));

	raidPtr = raidPtrs[unit];

	/* Start from a fabricated default label... */
	raidgetdefaultlabel(raidPtr, rs, lp);

	/*
	 * Call the generic disklabel extraction routine.
	 */
	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
	if (errstring)
		/* ...and if none was found on disk, synthesize one. */
		raidmakedisklabel(rs);
	else {
		int i;
		struct partition *pp;

		/*
		 * Sanity check whether the found disklabel is valid.
		 *
		 * This is necessary since the total size of the raid device
		 * may vary when an interleave is changed even though exactly
		 * the same components are used, and an old disklabel may be
		 * used if that is found.
		 */
		/* NOTE(review): d_secperunit is printed with %d although it
		 * is unsigned -- harmless for realistic sizes; confirm. */
		if (lp->d_secperunit != rs->sc_size)
			printf("raid%d: WARNING: %s: "
			    "total sector size in disklabel (%d) != "
			    "the size of raid (%ld)\n", unit, rs->sc_xname,
			    lp->d_secperunit, (long) rs->sc_size);
		for (i = 0; i < lp->d_npartitions; i++) {
			pp = &lp->d_partitions[i];
			if (pp->p_offset + pp->p_size > rs->sc_size)
				printf("raid%d: WARNING: %s: end of partition `%c' "
				       "exceeds the size of raid (%ld)\n",
				       unit, rs->sc_xname, 'a' + i, (long) rs->sc_size);
		}
	}

}
2105 /*
2106 * Take care of things one might want to take care of in the event
2107 * that a disklabel isn't present.
2108 */
2109 static void
2110 raidmakedisklabel(struct raid_softc *rs)
2111 {
2112 struct disklabel *lp = rs->sc_dkdev.dk_label;
2113 db1_printf(("Making a label..\n"));
2114
2115 /*
2116 * For historical reasons, if there's no disklabel present
2117 * the raw partition must be marked FS_BSDFFS.
2118 */
2119
2120 lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
2121
2122 strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
2123
2124 lp->d_checksum = dkcksum(lp);
2125 }
2126 /*
2127 * Lookup the provided name in the filesystem. If the file exists,
2128 * is a valid block device, and isn't being used by anyone else,
2129 * set *vpp to the file's vnode.
2130 * You'll find the original of this in ccd.c
2131 */
2132 int
2133 raidlookup(char *path, struct proc *p, struct vnode **vpp)
2134 {
2135 struct nameidata nd;
2136 struct vnode *vp;
2137 struct vattr va;
2138 int error;
2139
2140 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
2141 if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
2142 return (error);
2143 }
2144 vp = nd.ni_vp;
2145 if (vp->v_usecount > 1) {
2146 VOP_UNLOCK(vp, 0);
2147 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2148 return (EBUSY);
2149 }
2150 if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
2151 VOP_UNLOCK(vp, 0);
2152 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2153 return (error);
2154 }
2155 /* XXX: eventually we should handle VREG, too. */
2156 if (va.va_type != VBLK) {
2157 VOP_UNLOCK(vp, 0);
2158 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2159 return (ENOTBLK);
2160 }
2161 VOP_UNLOCK(vp, 0);
2162 *vpp = vp;
2163 return (0);
2164 }
2165 /*
2166 * Wait interruptibly for an exclusive lock.
2167 *
2168 * XXX
2169 * Several drivers do this; it should be abstracted and made MP-safe.
2170 * (Hmm... where have we seen this warning before :-> GO )
2171 */
2172 static int
2173 raidlock(struct raid_softc *rs)
2174 {
2175 int error;
2176
2177 while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2178 rs->sc_flags |= RAIDF_WANTED;
2179 if ((error =
2180 tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
2181 return (error);
2182 }
2183 rs->sc_flags |= RAIDF_LOCKED;
2184 return (0);
2185 }
2186 /*
2187 * Unlock and wake up any waiters.
2188 */
2189 static void
2190 raidunlock(struct raid_softc *rs)
2191 {
2192
2193 rs->sc_flags &= ~RAIDF_LOCKED;
2194 if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2195 rs->sc_flags &= ~RAIDF_WANTED;
2196 wakeup(rs);
2197 }
2198 }
2199
2200
2201 #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2202 #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
2203
2204 int
2205 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
2206 {
2207 RF_ComponentLabel_t clabel;
2208 raidread_component_label(dev, b_vp, &clabel);
2209 clabel.mod_counter = mod_counter;
2210 clabel.clean = RF_RAID_CLEAN;
2211 raidwrite_component_label(dev, b_vp, &clabel);
2212 return(0);
2213 }
2214
2215
2216 int
2217 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
2218 {
2219 RF_ComponentLabel_t clabel;
2220 raidread_component_label(dev, b_vp, &clabel);
2221 clabel.mod_counter = mod_counter;
2222 clabel.clean = RF_RAID_DIRTY;
2223 raidwrite_component_label(dev, b_vp, &clabel);
2224 return(0);
2225 }
2226
2227 /* ARGSUSED */
2228 int
2229 raidread_component_label(dev_t dev, struct vnode *b_vp,
2230 RF_ComponentLabel_t *clabel)
2231 {
2232 struct buf *bp;
2233 const struct bdevsw *bdev;
2234 int error;
2235
2236 /* XXX should probably ensure that we don't try to do this if
2237 someone has changed rf_protected_sectors. */
2238
2239 if (b_vp == NULL) {
2240 /* For whatever reason, this component is not valid.
2241 Don't try to read a component label from it. */
2242 return(EINVAL);
2243 }
2244
2245 /* get a block of the appropriate size... */
2246 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2247 bp->b_dev = dev;
2248
2249 /* get our ducks in a row for the read */
2250 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2251 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2252 bp->b_flags |= B_READ;
2253 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2254
2255 bdev = bdevsw_lookup(bp->b_dev);
2256 if (bdev == NULL)
2257 return (ENXIO);
2258 (*bdev->d_strategy)(bp);
2259
2260 error = biowait(bp);
2261
2262 if (!error) {
2263 memcpy(clabel, bp->b_data,
2264 sizeof(RF_ComponentLabel_t));
2265 }
2266
2267 brelse(bp);
2268 return(error);
2269 }
2270 /* ARGSUSED */
2271 int
2272 raidwrite_component_label(dev_t dev, struct vnode *b_vp,
2273 RF_ComponentLabel_t *clabel)
2274 {
2275 struct buf *bp;
2276 const struct bdevsw *bdev;
2277 int error;
2278
2279 /* get a block of the appropriate size... */
2280 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2281 bp->b_dev = dev;
2282
2283 /* get our ducks in a row for the write */
2284 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2285 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2286 bp->b_flags |= B_WRITE;
2287 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2288
2289 memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
2290
2291 memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
2292
2293 bdev = bdevsw_lookup(bp->b_dev);
2294 if (bdev == NULL)
2295 return (ENXIO);
2296 (*bdev->d_strategy)(bp);
2297 error = biowait(bp);
2298 brelse(bp);
2299 if (error) {
2300 #if 1
2301 printf("Failed to write RAID component info!\n");
2302 #endif
2303 }
2304
2305 return(error);
2306 }
2307
/*
 * Bump the array's modification counter and mark every usable
 * component (and every in-service spare) dirty on disk, so that an
 * unclean shutdown can be detected later.
 */
void
rf_markalldirty(RF_Raid_t *raidPtr)
{
	RF_ComponentLabel_t clabel;
	int sparecol;
	int c;
	int j;
	int scol = -1;

	raidPtr->mod_counter++;
	for (c = 0; c < raidPtr->numCol; c++) {
		/* we don't want to touch (at all) a disk that has
		   failed */
		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
			raidread_component_label(
				raidPtr->Disks[c].dev,
				raidPtr->raid_cinfo[c].ci_vp,
				&clabel);
			if (clabel.status == rf_ds_spared) {
				/* XXX do something special...
				   but whatever you do, don't
				   try to access it!! */
			} else {
				raidmarkdirty(
				    raidPtr->Disks[c].dev,
				    raidPtr->raid_cinfo[c].ci_vp,
				    raidPtr->mod_counter);
			}
		}
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			/* find the data column this spare stands in for */
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}

			raidread_component_label(
				raidPtr->Disks[sparecol].dev,
				raidPtr->raid_cinfo[sparecol].ci_vp,
				&clabel);
			/* make sure status is noted */

			/* NOTE(review): the label rebuilt here is never
			   written back -- raidmarkdirty() below re-reads
			   the on-disk label into its own local copy, so
			   these row/column updates appear to be lost.
			   Verify intent before relying on them. */
			raid_init_component_label(raidPtr, &clabel);

			clabel.row = 0;
			clabel.column = scol;
			/* Note: we *don't* change status from rf_ds_used_spare
			   to rf_ds_optimal */
			/* clabel.status = rf_ds_optimal; */

			raidmarkdirty(raidPtr->Disks[sparecol].dev,
				      raidPtr->raid_cinfo[sparecol].ci_vp,
				      raidPtr->mod_counter);
		}
	}
}
2378
2379
/*
 * Rewrite the component labels of every optimal component and every
 * in-service spare with a fresh mod_counter.  If this is the final
 * update (RF_FINAL_COMPONENT_UPDATE, e.g. at shutdown) and parity is
 * known good, the components are also marked clean.
 */
void
rf_update_component_labels(RF_Raid_t *raidPtr, int final)
{
	RF_ComponentLabel_t clabel;
	int sparecol;
	int c;
	int j;
	int scol;

	scol = -1;

	/* XXX should do extra checks to make sure things really are clean,
	   rather than blindly setting the clean bit... */

	raidPtr->mod_counter++;

	for (c = 0; c < raidPtr->numCol; c++) {
		if (raidPtr->Disks[c].status == rf_ds_optimal) {
			raidread_component_label(
				raidPtr->Disks[c].dev,
				raidPtr->raid_cinfo[c].ci_vp,
				&clabel);
			/* make sure status is noted */
			clabel.status = rf_ds_optimal;
			/* bump the counter */
			clabel.mod_counter = raidPtr->mod_counter;

			raidwrite_component_label(
				raidPtr->Disks[c].dev,
				raidPtr->raid_cinfo[c].ci_vp,
				&clabel);
			if (final == RF_FINAL_COMPONENT_UPDATE) {
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean(
						      raidPtr->Disks[c].dev,
						      raidPtr->raid_cinfo[c].ci_vp,
						      raidPtr->mod_counter);
				}
			}
		}
		/* else we don't touch it.. */
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		/* Need to ensure that the reconstruct actually completed! */
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			/* find the data column this spare stands in for */
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}

			/* XXX shouldn't *really* need this... */
			raidread_component_label(
				raidPtr->Disks[sparecol].dev,
				raidPtr->raid_cinfo[sparecol].ci_vp,
				&clabel);
			/* make sure status is noted */

			/* rebuild the label from current array state and
			   record the column the spare replaced */
			raid_init_component_label(raidPtr, &clabel);

			clabel.mod_counter = raidPtr->mod_counter;
			clabel.column = scol;
			clabel.status = rf_ds_optimal;

			raidwrite_component_label(
				raidPtr->Disks[sparecol].dev,
				raidPtr->raid_cinfo[sparecol].ci_vp,
				&clabel);
			if (final == RF_FINAL_COMPONENT_UPDATE) {
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean( raidPtr->Disks[sparecol].dev,
						       raidPtr->raid_cinfo[sparecol].ci_vp,
						       raidPtr->mod_counter);
				}
			}
		}
	}
}
2470
2471 void
2472 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
2473 {
2474 struct proc *p;
2475
2476 p = raidPtr->engine_thread;
2477
2478 if (vp != NULL) {
2479 if (auto_configured == 1) {
2480 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2481 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2482 vput(vp);
2483
2484 } else {
2485 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2486 }
2487 }
2488 }
2489
2490
2491 void
2492 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
2493 {
2494 int r,c;
2495 struct vnode *vp;
2496 int acd;
2497
2498
2499 /* We take this opportunity to close the vnodes like we should.. */
2500
2501 for (c = 0; c < raidPtr->numCol; c++) {
2502 vp = raidPtr->raid_cinfo[c].ci_vp;
2503 acd = raidPtr->Disks[c].auto_configured;
2504 rf_close_component(raidPtr, vp, acd);
2505 raidPtr->raid_cinfo[c].ci_vp = NULL;
2506 raidPtr->Disks[c].auto_configured = 0;
2507 }
2508
2509 for (r = 0; r < raidPtr->numSpare; r++) {
2510 vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
2511 acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
2512 rf_close_component(raidPtr, vp, acd);
2513 raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
2514 raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
2515 }
2516 }
2517
2518
/*
 * Kernel thread body: fail the component named in the request and,
 * if RF_FDFLAGS_RECON is set, start reconstruction to a spare.
 * Frees the request and exits the thread when done.
 */
void
rf_ReconThread(struct rf_recon_req *req)
{
	int s;
	RF_Raid_t *raidPtr;

	s = splbio();
	raidPtr = (RF_Raid_t *) req->raidPtr;
	raidPtr->recon_in_progress = 1;

	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));

	/* the request was allocated by our creator; we own and free it */
	RF_Free(req, sizeof(*req));

	raidPtr->recon_in_progress = 0;
	splx(s);

	/* That's all... */
	kthread_exit(0);	/* does not return */
}
2540
/*
 * Kernel thread body: rewrite all parity on the array.  On success
 * the array is flagged clean; anyone blocked in shutdown waiting for
 * the rewrite is woken before the thread exits.
 */
void
rf_RewriteParityThread(RF_Raid_t *raidPtr)
{
	int retcode;
	int s;

	raidPtr->parity_rewrite_in_progress = 1;
	s = splbio();
	retcode = rf_RewriteParity(raidPtr);
	splx(s);
	if (retcode) {
		printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
	} else {
		/* set the clean bit!  If we shutdown correctly,
		   the clean bit on each component label will get
		   set */
		raidPtr->parity_good = RF_RAID_CLEAN;
	}
	raidPtr->parity_rewrite_in_progress = 0;

	/* Anyone waiting for us to stop?  If so, inform them... */
	if (raidPtr->waitShutdown) {
		wakeup(&raidPtr->parity_rewrite_in_progress);
	}

	/* That's all... */
	kthread_exit(0);	/* does not return */
}
2569
2570
/*
 * Kernel thread body: copy reconstructed data from the spare back to
 * the replaced component, then exit.
 */
void
rf_CopybackThread(RF_Raid_t *raidPtr)
{
	int s;

	raidPtr->copyback_in_progress = 1;
	s = splbio();
	rf_CopybackReconstructedData(raidPtr);
	splx(s);
	raidPtr->copyback_in_progress = 0;

	/* That's all... */
	kthread_exit(0);	/* does not return */
}
2585
2586
/*
 * Kernel thread body: rebuild the requested column in place (onto the
 * same component), free the request, and exit.
 */
void
rf_ReconstructInPlaceThread(struct rf_recon_req *req)
{
	int s;
	RF_Raid_t *raidPtr;

	s = splbio();
	raidPtr = req->raidPtr;
	raidPtr->recon_in_progress = 1;
	rf_ReconstructInPlace(raidPtr, req->col);
	/* the request was allocated by our creator; we own and free it */
	RF_Free(req, sizeof(*req));
	raidPtr->recon_in_progress = 0;
	splx(s);

	/* That's all... */
	kthread_exit(0);	/* does not return */
}
2604
2605 RF_AutoConfig_t *
2606 rf_find_raid_components()
2607 {
2608 struct vnode *vp;
2609 struct disklabel label;
2610 struct device *dv;
2611 dev_t dev;
2612 int bmajor;
2613 int error;
2614 int i;
2615 int good_one;
2616 RF_ComponentLabel_t *clabel;
2617 RF_AutoConfig_t *ac_list;
2618 RF_AutoConfig_t *ac;
2619
2620
2621 /* initialize the AutoConfig list */
2622 ac_list = NULL;
2623
2624 /* we begin by trolling through *all* the devices on the system */
2625
2626 for (dv = alldevs.tqh_first; dv != NULL;
2627 dv = dv->dv_list.tqe_next) {
2628
2629 /* we are only interested in disks... */
2630 if (dv->dv_class != DV_DISK)
2631 continue;
2632
2633 /* we don't care about floppies... */
2634 if (!strcmp(dv->dv_cfdata->cf_name,"fd")) {
2635 continue;
2636 }
2637
2638 /* we don't care about CD's... */
2639 if (!strcmp(dv->dv_cfdata->cf_name,"cd")) {
2640 continue;
2641 }
2642
2643 /* hdfd is the Atari/Hades floppy driver */
2644 if (!strcmp(dv->dv_cfdata->cf_name,"hdfd")) {
2645 continue;
2646 }
2647 /* fdisa is the Atari/Milan floppy driver */
2648 if (!strcmp(dv->dv_cfdata->cf_name,"fdisa")) {
2649 continue;
2650 }
2651
2652 /* need to find the device_name_to_block_device_major stuff */
2653 bmajor = devsw_name2blk(dv->dv_xname, NULL, 0);
2654
2655 /* get a vnode for the raw partition of this disk */
2656
2657 dev = MAKEDISKDEV(bmajor, dv->dv_unit, RAW_PART);
2658 if (bdevvp(dev, &vp))
2659 panic("RAID can't alloc vnode");
2660
2661 error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2662
2663 if (error) {
2664 /* "Who cares." Continue looking
2665 for something that exists*/
2666 vput(vp);
2667 continue;
2668 }
2669
2670 /* Ok, the disk exists. Go get the disklabel. */
2671 error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED, 0);
2672 if (error) {
2673 /*
2674 * XXX can't happen - open() would
2675 * have errored out (or faked up one)
2676 */
2677 if (error != ENOTTY)
2678 printf("RAIDframe: can't get label for dev "
2679 "%s (%d)\n", dv->dv_xname, error);
2680 }
2681
2682 /* don't need this any more. We'll allocate it again
2683 a little later if we really do... */
2684 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2685 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2686 vput(vp);
2687
2688 if (error)
2689 continue;
2690
2691 for (i=0; i < label.d_npartitions; i++) {
2692 /* We only support partitions marked as RAID */
2693 if (label.d_partitions[i].p_fstype != FS_RAID)
2694 continue;
2695
2696 dev = MAKEDISKDEV(bmajor, dv->dv_unit, i);
2697 if (bdevvp(dev, &vp))
2698 panic("RAID can't alloc vnode");
2699
2700 error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2701 if (error) {
2702 /* Whatever... */
2703 vput(vp);
2704 continue;
2705 }
2706
2707 good_one = 0;
2708
2709 clabel = (RF_ComponentLabel_t *)
2710 malloc(sizeof(RF_ComponentLabel_t),
2711 M_RAIDFRAME, M_NOWAIT);
2712 if (clabel == NULL) {
2713 /* XXX CLEANUP HERE */
2714 printf("RAID auto config: out of memory!\n");
2715 return(NULL); /* XXX probably should panic? */
2716 }
2717
2718 if (!raidread_component_label(dev, vp, clabel)) {
2719 /* Got the label. Does it look reasonable? */
2720 if (rf_reasonable_label(clabel) &&
2721 (clabel->partitionSize <=
2722 label.d_partitions[i].p_size)) {
2723 #if DEBUG
2724 printf("Component on: %s%c: %d\n",
2725 dv->dv_xname, 'a'+i,
2726 label.d_partitions[i].p_size);
2727 rf_print_component_label(clabel);
2728 #endif
2729 /* if it's reasonable, add it,
2730 else ignore it. */
2731 ac = (RF_AutoConfig_t *)
2732 malloc(sizeof(RF_AutoConfig_t),
2733 M_RAIDFRAME,
2734 M_NOWAIT);
2735 if (ac == NULL) {
2736 /* XXX should panic?? */
2737 return(NULL);
2738 }
2739
2740 snprintf(ac->devname,
2741 sizeof(ac->devname), "%s%c",
2742 dv->dv_xname, 'a'+i);
2743 ac->dev = dev;
2744 ac->vp = vp;
2745 ac->clabel = clabel;
2746 ac->next = ac_list;
2747 ac_list = ac;
2748 good_one = 1;
2749 }
2750 }
2751 if (!good_one) {
2752 /* cleanup */
2753 free(clabel, M_RAIDFRAME);
2754 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2755 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2756 vput(vp);
2757 }
2758 }
2759 }
2760 return(ac_list);
2761 }
2762
2763 static int
2764 rf_reasonable_label(RF_ComponentLabel_t *clabel)
2765 {
2766
2767 if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
2768 (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
2769 ((clabel->clean == RF_RAID_CLEAN) ||
2770 (clabel->clean == RF_RAID_DIRTY)) &&
2771 clabel->row >=0 &&
2772 clabel->column >= 0 &&
2773 clabel->num_rows > 0 &&
2774 clabel->num_columns > 0 &&
2775 clabel->row < clabel->num_rows &&
2776 clabel->column < clabel->num_columns &&
2777 clabel->blockSize > 0 &&
2778 clabel->numBlocks > 0) {
2779 /* label looks reasonable enough... */
2780 return(1);
2781 }
2782 return(0);
2783 }
2784
2785
2786 #if DEBUG
/*
 * Dump the interesting fields of a component label to the console.
 * Debug aid only (compiled under #if DEBUG).
 */
void
rf_print_component_label(RF_ComponentLabel_t *clabel)
{
	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
	       clabel->row, clabel->column,
	       clabel->num_rows, clabel->num_columns);
	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
	       clabel->version, clabel->serial_number,
	       clabel->mod_counter);
	printf("   Clean: %s Status: %d\n",
	       clabel->clean ? "Yes" : "No", clabel->status );
	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
	       (char) clabel->parityConfig, clabel->blockSize,
	       clabel->numBlocks);
	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
	printf("   Contains root partition: %s\n",
	       clabel->root_partition ? "Yes" : "No" );
	printf("   Last configured as: raid%d\n", clabel->last_unit );
#if 0
	printf("   Config order: %d\n", clabel->config_order);
#endif

}
2812 #endif
2813
2814 RF_ConfigSet_t *
2815 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
2816 {
2817 RF_AutoConfig_t *ac;
2818 RF_ConfigSet_t *config_sets;
2819 RF_ConfigSet_t *cset;
2820 RF_AutoConfig_t *ac_next;
2821
2822
2823 config_sets = NULL;
2824
2825 /* Go through the AutoConfig list, and figure out which components
2826 belong to what sets. */
2827 ac = ac_list;
2828 while(ac!=NULL) {
2829 /* we're going to putz with ac->next, so save it here
2830 for use at the end of the loop */
2831 ac_next = ac->next;
2832
2833 if (config_sets == NULL) {
2834 /* will need at least this one... */
2835 config_sets = (RF_ConfigSet_t *)
2836 malloc(sizeof(RF_ConfigSet_t),
2837 M_RAIDFRAME, M_NOWAIT);
2838 if (config_sets == NULL) {
2839 panic("rf_create_auto_sets: No memory!");
2840 }
2841 /* this one is easy :) */
2842 config_sets->ac = ac;
2843 config_sets->next = NULL;
2844 config_sets->rootable = 0;
2845 ac->next = NULL;
2846 } else {
2847 /* which set does this component fit into? */
2848 cset = config_sets;
2849 while(cset!=NULL) {
2850 if (rf_does_it_fit(cset, ac)) {
2851 /* looks like it matches... */
2852 ac->next = cset->ac;
2853 cset->ac = ac;
2854 break;
2855 }
2856 cset = cset->next;
2857 }
2858 if (cset==NULL) {
2859 /* didn't find a match above... new set..*/
2860 cset = (RF_ConfigSet_t *)
2861 malloc(sizeof(RF_ConfigSet_t),
2862 M_RAIDFRAME, M_NOWAIT);
2863 if (cset == NULL) {
2864 panic("rf_create_auto_sets: No memory!");
2865 }
2866 cset->ac = ac;
2867 ac->next = NULL;
2868 cset->next = config_sets;
2869 cset->rootable = 0;
2870 config_sets = cset;
2871 }
2872 }
2873 ac = ac_next;
2874 }
2875
2876
2877 return(config_sets);
2878 }
2879
2880 static int
2881 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
2882 {
2883 RF_ComponentLabel_t *clabel1, *clabel2;
2884
2885 /* If this one matches the *first* one in the set, that's good
2886 enough, since the other members of the set would have been
2887 through here too... */
2888 /* note that we are not checking partitionSize here..
2889
2890 Note that we are also not checking the mod_counters here.
2891 If everything else matches execpt the mod_counter, that's
2892 good enough for this test. We will deal with the mod_counters
2893 a little later in the autoconfiguration process.
2894
2895 (clabel1->mod_counter == clabel2->mod_counter) &&
2896
2897 The reason we don't check for this is that failed disks
2898 will have lower modification counts. If those disks are
2899 not added to the set they used to belong to, then they will
2900 form their own set, which may result in 2 different sets,
2901 for example, competing to be configured at raid0, and
2902 perhaps competing to be the root filesystem set. If the
2903 wrong ones get configured, or both attempt to become /,
2904 weird behaviour and or serious lossage will occur. Thus we
2905 need to bring them into the fold here, and kick them out at
2906 a later point.
2907
2908 */
2909
2910 clabel1 = cset->ac->clabel;
2911 clabel2 = ac->clabel;
2912 if ((clabel1->version == clabel2->version) &&
2913 (clabel1->serial_number == clabel2->serial_number) &&
2914 (clabel1->num_rows == clabel2->num_rows) &&
2915 (clabel1->num_columns == clabel2->num_columns) &&
2916 (clabel1->sectPerSU == clabel2->sectPerSU) &&
2917 (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
2918 (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
2919 (clabel1->parityConfig == clabel2->parityConfig) &&
2920 (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
2921 (clabel1->blockSize == clabel2->blockSize) &&
2922 (clabel1->numBlocks == clabel2->numBlocks) &&
2923 (clabel1->autoconfigure == clabel2->autoconfigure) &&
2924 (clabel1->root_partition == clabel2->root_partition) &&
2925 (clabel1->last_unit == clabel2->last_unit) &&
2926 (clabel1->config_order == clabel2->config_order)) {
2927 /* if it get's here, it almost *has* to be a match */
2928 } else {
2929 /* it's not consistent with somebody in the set..
2930 punt */
2931 return(0);
2932 }
2933 /* all was fine.. it must fit... */
2934 return(1);
2935 }
2936
/*
 * Decide whether a configuration set has enough live components to be
 * brought on line.  Returns 1 if the set is configurable, 0 if too
 * many components are missing for the set's parity configuration.
 */
int
rf_have_enough_components(RF_ConfigSet_t *cset)
{
	RF_AutoConfig_t *ac;
	RF_AutoConfig_t *auto_config;
	RF_ComponentLabel_t *clabel;
	int c;
	int num_cols;
	int num_missing;
	int mod_counter;
	int mod_counter_found;
	int even_pair_failed;
	char parity_type;


	/* check to see that we have enough 'live' components
	   of this set.  If so, we can configure it if necessary */

	num_cols = cset->ac->clabel->num_columns;
	parity_type = cset->ac->clabel->parityConfig;

	/* XXX Check for duplicate components!?!?!? */

	/* Determine what the mod_counter is supposed to be for this set. */

	/* The authoritative mod_counter is the highest one present;
	   components carrying a lower count are stale. */
	mod_counter_found = 0;
	mod_counter = 0;
	ac = cset->ac;
	while(ac!=NULL) {
		if (mod_counter_found==0) {
			mod_counter = ac->clabel->mod_counter;
			mod_counter_found = 1;
		} else {
			if (ac->clabel->mod_counter > mod_counter) {
				mod_counter = ac->clabel->mod_counter;
			}
		}
		ac = ac->next;
	}

	num_missing = 0;
	auto_config = cset->ac;

	/* For each expected column, look for a component with the
	   current mod_counter; anything else counts as missing. */
	even_pair_failed = 0;
	for(c=0; c<num_cols; c++) {
		ac = auto_config;
		while(ac!=NULL) {
			if ((ac->clabel->column == c) &&
			    (ac->clabel->mod_counter == mod_counter)) {
				/* it's this one... */
#if DEBUG
				printf("Found: %s at %d\n",
				       ac->devname,c);
#endif
				break;
			}
			ac=ac->next;
		}
		if (ac==NULL) {
				/* Didn't find one here! */
				/* special case for RAID 1, especially
				   where there are more than 2
				   components (where RAIDframe treats
				   things a little differently :( ) */
			if (parity_type == '1') {
				if (c%2 == 0) { /* even component */
					even_pair_failed = 1;
				} else { /* odd component.  If
					    we're failed, and
					    so is the even
					    component, it's
					    "Good Night, Charlie" */
					if (even_pair_failed == 1) {
						return(0);
					}
				}
			} else {
				/* normal accounting */
				num_missing++;
			}
		}
		if ((parity_type == '1') && (c%2 == 1)) {
				/* Just did an even component, and we didn't
				   bail.. reset the even_pair_failed flag,
				   and go on to the next component.... */
			even_pair_failed = 0;
		}
	}

	clabel = cset->ac->clabel;

	/* tolerate zero missing for RAID 0, at most one for RAID 4/5 */
	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
		/* XXX this needs to be made *much* more general */
		/* Too many failures */
		return(0);
	}
	/* otherwise, all is well, and we've got enough to take a kick
	   at autoconfiguring this set */
	return(1);
}
3039
/*
 * Build an RF_Config_t for an auto-detected set from its component
 * labels, so the set can be handed to the normal configuration path.
 * Component device names are filled in per column from the ac list.
 */
void
rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
			RF_Raid_t *raidPtr)
{
	RF_ComponentLabel_t *clabel;
	int i;

	clabel = ac->clabel;

	/* 1. Fill in the common stuff */
	config->numRow = clabel->num_rows = 1;
	config->numCol = clabel->num_columns;
	config->numSpare = 0;	/* XXX should this be set here? */
	config->sectPerSU = clabel->sectPerSU;
	config->SUsPerPU = clabel->SUsPerPU;
	config->SUsPerRU = clabel->SUsPerRU;
	config->parityConfig = clabel->parityConfig;
	/* XXX... */
	strcpy(config->diskQueueType,"fifo");
	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
	config->layoutSpecificSize = 0;	/* XXX ?? */

	while(ac!=NULL) {
		/* row/col values will be in range due to the checks
		   in reasonable_label() */
		strcpy(config->devnames[0][ac->clabel->column],
		       ac->devname);
		ac = ac->next;
	}

	/* no per-configuration debug variables */
	for(i=0;i<RF_MAXDBGV;i++) {
		config->debugVars[i][0] = 0;
	}
}
3074
3075 int
3076 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
3077 {
3078 RF_ComponentLabel_t clabel;
3079 struct vnode *vp;
3080 dev_t dev;
3081 int column;
3082 int sparecol;
3083
3084 raidPtr->autoconfigure = new_value;
3085
3086 for(column=0; column<raidPtr->numCol; column++) {
3087 if (raidPtr->Disks[column].status == rf_ds_optimal) {
3088 dev = raidPtr->Disks[column].dev;
3089 vp = raidPtr->raid_cinfo[column].ci_vp;
3090 raidread_component_label(dev, vp, &clabel);
3091 clabel.autoconfigure = new_value;
3092 raidwrite_component_label(dev, vp, &clabel);
3093 }
3094 }
3095 for(column = 0; column < raidPtr->numSpare ; column++) {
3096 sparecol = raidPtr->numCol + column;
3097 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3098 dev = raidPtr->Disks[sparecol].dev;
3099 vp = raidPtr->raid_cinfo[sparecol].ci_vp;
3100 raidread_component_label(dev, vp, &clabel);
3101 clabel.autoconfigure = new_value;
3102 raidwrite_component_label(dev, vp, &clabel);
3103 }
3104 }
3105 return(new_value);
3106 }
3107
3108 int
3109 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
3110 {
3111 RF_ComponentLabel_t clabel;
3112 struct vnode *vp;
3113 dev_t dev;
3114 int column;
3115 int sparecol;
3116
3117 raidPtr->root_partition = new_value;
3118 for(column=0; column<raidPtr->numCol; column++) {
3119 if (raidPtr->Disks[column].status == rf_ds_optimal) {
3120 dev = raidPtr->Disks[column].dev;
3121 vp = raidPtr->raid_cinfo[column].ci_vp;
3122 raidread_component_label(dev, vp, &clabel);
3123 clabel.root_partition = new_value;
3124 raidwrite_component_label(dev, vp, &clabel);
3125 }
3126 }
3127 for(column = 0; column < raidPtr->numSpare ; column++) {
3128 sparecol = raidPtr->numCol + column;
3129 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3130 dev = raidPtr->Disks[sparecol].dev;
3131 vp = raidPtr->raid_cinfo[sparecol].ci_vp;
3132 raidread_component_label(dev, vp, &clabel);
3133 clabel.root_partition = new_value;
3134 raidwrite_component_label(dev, vp, &clabel);
3135 }
3136 }
3137 return(new_value);
3138 }
3139
3140 void
3141 rf_release_all_vps(RF_ConfigSet_t *cset)
3142 {
3143 RF_AutoConfig_t *ac;
3144
3145 ac = cset->ac;
3146 while(ac!=NULL) {
3147 /* Close the vp, and give it back */
3148 if (ac->vp) {
3149 vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3150 VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
3151 vput(ac->vp);
3152 ac->vp = NULL;
3153 }
3154 ac = ac->next;
3155 }
3156 }
3157
3158
3159 void
3160 rf_cleanup_config_set(RF_ConfigSet_t *cset)
3161 {
3162 RF_AutoConfig_t *ac;
3163 RF_AutoConfig_t *next_ac;
3164
3165 ac = cset->ac;
3166 while(ac!=NULL) {
3167 next_ac = ac->next;
3168 /* nuke the label */
3169 free(ac->clabel, M_RAIDFRAME);
3170 /* cleanup the config structure */
3171 free(ac, M_RAIDFRAME);
3172 /* "next.." */
3173 ac = next_ac;
3174 }
3175 /* and, finally, nuke the config set */
3176 free(cset, M_RAIDFRAME);
3177 }
3178
3179
/*
 * Initialize a component label from the current state of the array.
 * Per-component fields (e.g. the column) are filled in by the callers
 * after this returns.
 */
void
raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
{
	/* current version number */
	clabel->version = RF_COMPONENT_LABEL_VERSION;
	clabel->serial_number = raidPtr->serial_number;
	clabel->mod_counter = raidPtr->mod_counter;
	clabel->num_rows = 1;
	clabel->num_columns = raidPtr->numCol;
	clabel->clean = RF_RAID_DIRTY; /* not clean */
	clabel->status = rf_ds_optimal; /* "It's good!" */

	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;

	clabel->blockSize = raidPtr->bytesPerSector;
	clabel->numBlocks = raidPtr->sectorsPerDisk;

	/* XXX not portable */
	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
	clabel->maxOutstanding = raidPtr->maxOutstanding;
	clabel->autoconfigure = raidPtr->autoconfigure;
	clabel->root_partition = raidPtr->root_partition;
	clabel->last_unit = raidPtr->raidid;
	clabel->config_order = raidPtr->config_order;
}
3207
3208 int
3209 rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
3210 {
3211 RF_Raid_t *raidPtr;
3212 RF_Config_t *config;
3213 int raidID;
3214 int retcode;
3215
3216 #if DEBUG
3217 printf("RAID autoconfigure\n");
3218 #endif
3219
3220 retcode = 0;
3221 *unit = -1;
3222
3223 /* 1. Create a config structure */
3224
3225 config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
3226 M_RAIDFRAME,
3227 M_NOWAIT);
3228 if (config==NULL) {
3229 printf("Out of mem!?!?\n");
3230 /* XXX do something more intelligent here. */
3231 return(1);
3232 }
3233
3234 memset(config, 0, sizeof(RF_Config_t));
3235
3236 /*
3237 2. Figure out what RAID ID this one is supposed to live at
3238 See if we can get the same RAID dev that it was configured
3239 on last time..
3240 */
3241
3242 raidID = cset->ac->clabel->last_unit;
3243 if ((raidID < 0) || (raidID >= numraid)) {
3244 /* let's not wander off into lala land. */
3245 raidID = numraid - 1;
3246 }
3247 if (raidPtrs[raidID]->valid != 0) {
3248
3249 /*
3250 Nope... Go looking for an alternative...
3251 Start high so we don't immediately use raid0 if that's
3252 not taken.
3253 */
3254
3255 for(raidID = numraid - 1; raidID >= 0; raidID--) {
3256 if (raidPtrs[raidID]->valid == 0) {
3257 /* can use this one! */
3258 break;
3259 }
3260 }
3261 }
3262
3263 if (raidID < 0) {
3264 /* punt... */
3265 printf("Unable to auto configure this set!\n");
3266 printf("(Out of RAID devs!)\n");
3267 return(1);
3268 }
3269
3270 #if DEBUG
3271 printf("Configuring raid%d:\n",raidID);
3272 #endif
3273
3274 raidPtr = raidPtrs[raidID];
3275
3276 /* XXX all this stuff should be done SOMEWHERE ELSE! */
3277 raidPtr->raidid = raidID;
3278 raidPtr->openings = RAIDOUTSTANDING;
3279
3280 /* 3. Build the configuration structure */
3281 rf_create_configuration(cset->ac, config, raidPtr);
3282
3283 /* 4. Do the configuration */
3284 retcode = rf_Configure(raidPtr, config, cset->ac);
3285
3286 if (retcode == 0) {
3287
3288 raidinit(raidPtrs[raidID]);
3289
3290 rf_markalldirty(raidPtrs[raidID]);
3291 raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
3292 if (cset->ac->clabel->root_partition==1) {
3293 /* everything configured just fine. Make a note
3294 that this set is eligible to be root. */
3295 cset->rootable = 1;
3296 /* XXX do this here? */
3297 raidPtrs[raidID]->root_partition = 1;
3298 }
3299 }
3300
3301 /* 5. Cleanup */
3302 free(config, M_RAIDFRAME);
3303
3304 *unit = raidID;
3305 return(retcode);
3306 }
3307
/*
 * Credit a completed access back to the disk statistics of its raid
 * unit; the transfer size is the requested byte count minus whatever
 * was left untransferred.
 */
void
rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
{
	struct buf *bp;

	bp = (struct buf *)desc->bp;
	disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
		    (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
}
3317
/*
 * Convenience wrapper: initialize a pool of `size'-byte items named
 * `w_chan', pre-allocate `min' items, and keep the pool between `min'
 * (low water) and `max' (high water) items.
 */
void
rf_pool_init(struct pool *p, size_t size, char *w_chan,
	     size_t min, size_t max)
{
	pool_init(p, size, 0, 0, 0, w_chan, NULL);
	pool_sethiwat(p, max);
	pool_prime(p, min);
	pool_setlowat(p, min);
}
3327