1 /* $NetBSD: rf_netbsdkintf.c,v 1.170 2003/12/31 03:02:57 oster Exp $ */
2 /*-
3 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Greg Oster; Jason R. Thorpe.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1990, 1993
40 * The Regents of the University of California. All rights reserved.
41 *
42 * This code is derived from software contributed to Berkeley by
43 * the Systems Programming Group of the University of Utah Computer
44 * Science Department.
45 *
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
48 * are met:
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. Neither the name of the University nor the names of its contributors
55 * may be used to endorse or promote products derived from this software
56 * without specific prior written permission.
57 *
58 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 * SUCH DAMAGE.
69 *
70 * from: Utah $Hdr: cd.c 1.6 90/11/28$
71 *
72 * @(#)cd.c 8.2 (Berkeley) 11/16/93
73 */
74
75 /*
76 * Copyright (c) 1988 University of Utah.
77 *
78 * This code is derived from software contributed to Berkeley by
79 * the Systems Programming Group of the University of Utah Computer
80 * Science Department.
81 *
82 * Redistribution and use in source and binary forms, with or without
83 * modification, are permitted provided that the following conditions
84 * are met:
85 * 1. Redistributions of source code must retain the above copyright
86 * notice, this list of conditions and the following disclaimer.
87 * 2. Redistributions in binary form must reproduce the above copyright
88 * notice, this list of conditions and the following disclaimer in the
89 * documentation and/or other materials provided with the distribution.
90 * 3. All advertising materials mentioning features or use of this software
91 * must display the following acknowledgement:
92 * This product includes software developed by the University of
93 * California, Berkeley and its contributors.
94 * 4. Neither the name of the University nor the names of its contributors
95 * may be used to endorse or promote products derived from this software
96 * without specific prior written permission.
97 *
98 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
99 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
100 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
101 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
102 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
103 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
104 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
105 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
106 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
107 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
108 * SUCH DAMAGE.
109 *
110 * from: Utah $Hdr: cd.c 1.6 90/11/28$
111 *
112 * @(#)cd.c 8.2 (Berkeley) 11/16/93
113 */
114
115 /*
116 * Copyright (c) 1995 Carnegie-Mellon University.
117 * All rights reserved.
118 *
119 * Authors: Mark Holland, Jim Zelenka
120 *
121 * Permission to use, copy, modify and distribute this software and
122 * its documentation is hereby granted, provided that both the copyright
123 * notice and this permission notice appear in all copies of the
124 * software, derivative works or modified versions, and any portions
125 * thereof, and that both notices appear in supporting documentation.
126 *
127 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
128 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
129 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
130 *
131 * Carnegie Mellon requests users of this software to return to
132 *
133 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
134 * School of Computer Science
135 * Carnegie Mellon University
136 * Pittsburgh PA 15213-3890
137 *
138 * any improvements or extensions that they make and grant Carnegie the
139 * rights to redistribute these changes.
140 */
141
142 /***********************************************************
143 *
144 * rf_kintf.c -- the kernel interface routines for RAIDframe
145 *
146 ***********************************************************/
147
148 #include <sys/cdefs.h>
149 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.170 2003/12/31 03:02:57 oster Exp $");
150
151 #include <sys/param.h>
152 #include <sys/errno.h>
153 #include <sys/pool.h>
154 #include <sys/proc.h>
155 #include <sys/queue.h>
156 #include <sys/disk.h>
157 #include <sys/device.h>
158 #include <sys/stat.h>
159 #include <sys/ioctl.h>
160 #include <sys/fcntl.h>
161 #include <sys/systm.h>
162 #include <sys/namei.h>
163 #include <sys/vnode.h>
164 #include <sys/disklabel.h>
165 #include <sys/conf.h>
166 #include <sys/lock.h>
167 #include <sys/buf.h>
168 #include <sys/user.h>
169 #include <sys/reboot.h>
170
171 #include <dev/raidframe/raidframevar.h>
172 #include <dev/raidframe/raidframeio.h>
173 #include "raid.h"
174 #include "opt_raid_autoconfig.h"
175 #include "rf_raid.h"
176 #include "rf_copyback.h"
177 #include "rf_dag.h"
178 #include "rf_dagflags.h"
179 #include "rf_desc.h"
180 #include "rf_diskqueue.h"
181 #include "rf_etimer.h"
182 #include "rf_general.h"
183 #include "rf_kintf.h"
184 #include "rf_options.h"
185 #include "rf_driver.h"
186 #include "rf_parityscan.h"
187 #include "rf_threadstuff.h"
188
189 #ifdef DEBUG
190 int rf_kdebug_level = 0;
191 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
192 #else /* DEBUG */
193 #define db1_printf(a) { }
194 #endif /* DEBUG */
195
196 static RF_Raid_t **raidPtrs; /* global raid device descriptors */
197
198 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
199
200 static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a
201 * spare table */
202 static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from
203 * installation process */
204
205 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
206
207 /* prototypes */
208 static void KernelWakeupFunc(struct buf * bp);
209 static void InitBP(struct buf * bp, struct vnode *, unsigned rw_flag,
210 dev_t dev, RF_SectorNum_t startSect,
211 RF_SectorCount_t numSect, caddr_t buf,
212 void (*cbFunc) (struct buf *), void *cbArg,
213 int logBytesPerSector, struct proc * b_proc);
214 static void raidinit(RF_Raid_t *);
215
216 void raidattach(int);
217
218 dev_type_open(raidopen);
219 dev_type_close(raidclose);
220 dev_type_read(raidread);
221 dev_type_write(raidwrite);
222 dev_type_ioctl(raidioctl);
223 dev_type_strategy(raidstrategy);
224 dev_type_dump(raiddump);
225 dev_type_size(raidsize);
226
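/*
 * Block and character device switch tables for the raid devices.  The
 * dev_type_* declarations above provide the prototypes for these entry
 * points.
 */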
227 const struct bdevsw raid_bdevsw = {
228 raidopen, raidclose, raidstrategy, raidioctl,
229 raiddump, raidsize, D_DISK
230 };
231
232 const struct cdevsw raid_cdevsw = {
233 raidopen, raidclose, raidread, raidwrite, raidioctl,
234 nostop, notty, nopoll, nommap, nokqfilter, D_DISK
235 };
236
237 /*
238 * Pilfered from ccd.c
239 */
240
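/*
 * Note: KernelWakeupFunc() recovers the raidbuf from the struct buf it
 * is handed by simply casting the pointer back, which only works
 * because rf_buf is the first member of struct raidbuf.
 */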
241 struct raidbuf {
242 struct buf rf_buf; /* new I/O buf. MUST BE FIRST!!! */
243 struct buf *rf_obp; /* ptr. to original I/O buf */
244 RF_DiskQueueData_t *req;/* the request that this was part of.. */
245 };
246
247 /* component buffer pool */
248 struct pool raidframe_cbufpool;
249
250 /* XXX Not sure if the following should be replacing the raidPtrs above,
251 or if it should be used in conjunction with that...
252 */
253
254 struct raid_softc {
255 int sc_flags; /* flags */
256 int sc_cflags; /* configuration flags */
257 size_t sc_size; /* size of the raid device */
258 char sc_xname[20]; /* XXX external name */
259 struct disk sc_dkdev; /* generic disk device info */
260 struct bufq_state buf_queue; /* used for the device queue */
261 };
262 /* sc_flags */
263 #define RAIDF_INITED 0x01 /* unit has been initialized */
264 #define RAIDF_WLABEL 0x02 /* label area is writable */
265 #define RAIDF_LABELLING 0x04 /* unit is currently being labelled */
266 #define RAIDF_WANTED 0x40 /* someone is waiting to obtain a lock */
267 #define RAIDF_LOCKED 0x80 /* unit is locked */
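/*
 * RAIDF_LOCKED and RAIDF_WANTED implement the simple sleep lock
 * provided by raidlock()/raidunlock(), which serializes configuration
 * changes done from open/close/ioctl on a unit.
 */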
268
269 #define raidunit(x) DISKUNIT(x)
270 int numraid = 0;
271
272 /*
273 * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
274 * Be aware that large numbers can allow the driver to consume a lot of
275 * kernel memory, especially on writes, and in degraded mode reads.
276 *
277 * For example: with a stripe width of 64 blocks (32k) and 5 disks,
278 * a single 64K write will typically require 64K for the old data,
279 * 64K for the old parity, and 64K for the new parity, for a total
280 * of 192K (if the parity buffer is not re-used immediately).
281 * Even if it is used immediately, that's still 128K, which when multiplied
282 * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
283 *
284 * Now in degraded mode, for example, a 64K read on the above setup may
285 * require data reconstruction, which will require *all* of the 4 remaining
286 * disks to participate -- 4 * 32K/disk == 128K again.
287 */
288
289 #ifndef RAIDOUTSTANDING
290 #define RAIDOUTSTANDING 6
291 #endif
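/*
 * With the default of 6 outstanding requests, the 64K-write example
 * above works out to roughly 6 * 128K-192K of old-data/parity buffers
 * on top of 6 * 64K of incoming data.
 */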
292
293 #define RAIDLABELDEV(dev) \
294 (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
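/*
 * RAIDLABELDEV() maps a raid device node to the raw partition of the
 * same unit; the label read/write paths (e.g. the DIOCWDINFO handling
 * in raidioctl()) hand this to the disklabel routines.
 */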
295
296 /* declared here, and made public, for the benefit of KVM stuff.. */
297 struct raid_softc *raid_softc;
298
299 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
300 struct disklabel *);
301 static void raidgetdisklabel(dev_t);
302 static void raidmakedisklabel(struct raid_softc *);
303
304 static int raidlock(struct raid_softc *);
305 static void raidunlock(struct raid_softc *);
306
307 static void rf_markalldirty(RF_Raid_t *);
308
309 struct device *raidrootdev;
310
311 void rf_ReconThread(struct rf_recon_req *);
312 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
313 void rf_CopybackThread(RF_Raid_t *raidPtr);
314 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
315 int rf_autoconfig(struct device *self);
316 void rf_buildroothack(RF_ConfigSet_t *);
317
318 RF_AutoConfig_t *rf_find_raid_components(void);
319 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
320 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
321 static int rf_reasonable_label(RF_ComponentLabel_t *);
322 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
323 int rf_set_autoconfig(RF_Raid_t *, int);
324 int rf_set_rootpartition(RF_Raid_t *, int);
325 void rf_release_all_vps(RF_ConfigSet_t *);
326 void rf_cleanup_config_set(RF_ConfigSet_t *);
327 int rf_have_enough_components(RF_ConfigSet_t *);
328 int rf_auto_config_set(RF_ConfigSet_t *, int *);
329
330 static int raidautoconfig = 0; /* Debugging, mostly. Set to 0 to not
331 allow autoconfig to take place.
332 Note that this is overridden by having
333 RAID_AUTOCONFIG as an option in the
334 kernel config file. */
335
336 void
337 raidattach(int num)
338 {
339 int raidID;
340 int i, rc;
341
342 #ifdef DEBUG
343 printf("raidattach: Asked for %d units\n", num);
344 #endif
345
346 if (num <= 0) {
347 #ifdef DIAGNOSTIC
348 panic("raidattach: count <= 0");
349 #endif
350 return;
351 }
352 /* This is where all the initialization stuff gets done. */
353
354 numraid = num;
355
356 /* Make some space for requested number of units... */
357
358 RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
359 if (raidPtrs == NULL) {
360 panic("raidPtrs is NULL!!");
361 }
362
363 /* Initialize the component buffer pool. */
364 pool_init(&raidframe_cbufpool, sizeof(struct raidbuf), 0,
365 0, 0, "raidpl", NULL);
366
367 rf_mutex_init(&rf_sparet_wait_mutex);
368
369 rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
370
371 for (i = 0; i < num; i++)
372 raidPtrs[i] = NULL;
373 rc = rf_BootRaidframe();
374 if (rc == 0)
375 printf("Kernelized RAIDframe activated\n");
376 else
377 panic("Serious error booting RAID!!");
378
379 /* put together some datastructures like the CCD device does.. This
380 * lets us lock the device and what-not when it gets opened. */
381
382 raid_softc = (struct raid_softc *)
383 malloc(num * sizeof(struct raid_softc),
384 M_RAIDFRAME, M_NOWAIT);
385 if (raid_softc == NULL) {
386 printf("WARNING: no memory for RAIDframe driver\n");
387 return;
388 }
389
390 memset(raid_softc, 0, num * sizeof(struct raid_softc));
391
392 raidrootdev = (struct device *)malloc(num * sizeof(struct device),
393 M_RAIDFRAME, M_NOWAIT);
394 if (raidrootdev == NULL) {
395 panic("No memory for RAIDframe driver!!?!?!");
396 }
397
398 for (raidID = 0; raidID < num; raidID++) {
399 bufq_alloc(&raid_softc[raidID].buf_queue, BUFQ_FCFS);
400
401 raidrootdev[raidID].dv_class = DV_DISK;
402 raidrootdev[raidID].dv_cfdata = NULL;
403 raidrootdev[raidID].dv_unit = raidID;
404 raidrootdev[raidID].dv_parent = NULL;
405 raidrootdev[raidID].dv_flags = 0;
406 sprintf(raidrootdev[raidID].dv_xname,"raid%d",raidID);
407
408 RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
409 (RF_Raid_t *));
410 if (raidPtrs[raidID] == NULL) {
411 printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
412 numraid = raidID;
413 return;
414 }
415 }
416
417 #ifdef RAID_AUTOCONFIG
418 raidautoconfig = 1;
419 #endif
420
421 /*
422 * Register a finalizer which will be used to auto-config RAID
423 * sets once all real hardware devices have been found.
424 */
425 if (config_finalize_register(NULL, rf_autoconfig) != 0)
426 printf("WARNING: unable to register RAIDframe finalizer\n");
427 }
428
429 int
430 rf_autoconfig(struct device *self)
431 {
432 RF_AutoConfig_t *ac_list;
433 RF_ConfigSet_t *config_sets;
434
435 if (raidautoconfig == 0)
436 return (0);
437
438 /* XXX This code can only be run once. */
439 raidautoconfig = 0;
440
441 /* 1. locate all RAID components on the system */
442 #ifdef DEBUG
443 printf("Searching for RAID components...\n");
444 #endif
445 ac_list = rf_find_raid_components();
446
447 /* 2. Sort them into their respective sets. */
448 config_sets = rf_create_auto_sets(ac_list);
449
450 /*
451 * 3. Evaluate each set and configure the valid ones.
452 * This gets done in rf_buildroothack().
453 */
454 rf_buildroothack(config_sets);
455
456 return (1);
457 }
458
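/*
 * Configure each complete, autoconfigurable set found at boot.  If
 * exactly one configured set is marked rootable, point booted_device
 * at its raidrootdev entry; if more than one qualifies, set RB_ASKNAME
 * so the user is asked for the root device.
 */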
459 void
460 rf_buildroothack(RF_ConfigSet_t *config_sets)
461 {
462 RF_ConfigSet_t *cset;
463 RF_ConfigSet_t *next_cset;
464 int retcode;
465 int raidID;
466 int rootID;
467 int num_root;
468
469 rootID = 0;
470 num_root = 0;
471 cset = config_sets;
472 while(cset != NULL ) {
473 next_cset = cset->next;
474 if (rf_have_enough_components(cset) &&
475 cset->ac->clabel->autoconfigure==1) {
476 retcode = rf_auto_config_set(cset,&raidID);
477 if (!retcode) {
478 if (cset->rootable) {
479 rootID = raidID;
480 num_root++;
481 }
482 } else {
483 /* The autoconfig didn't work :( */
484 #if DEBUG
485 printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
486 #endif
487 rf_release_all_vps(cset);
488 }
489 } else {
490 /* we're not autoconfiguring this set...
491 release the associated resources */
492 rf_release_all_vps(cset);
493 }
494 /* cleanup */
495 rf_cleanup_config_set(cset);
496 cset = next_cset;
497 }
498
499 /* we found something bootable... */
500
501 if (num_root == 1) {
502 booted_device = &raidrootdev[rootID];
503 } else if (num_root > 1) {
504 /* we can't guess.. require the user to answer... */
505 boothowto |= RB_ASKNAME;
506 }
507 }
508
509
510 int
511 raidsize(dev_t dev)
512 {
513 struct raid_softc *rs;
514 struct disklabel *lp;
515 int part, unit, omask, size;
516
517 unit = raidunit(dev);
518 if (unit >= numraid)
519 return (-1);
520 rs = &raid_softc[unit];
521
522 if ((rs->sc_flags & RAIDF_INITED) == 0)
523 return (-1);
524
525 part = DISKPART(dev);
526 omask = rs->sc_dkdev.dk_openmask & (1 << part);
527 lp = rs->sc_dkdev.dk_label;
528
529 if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
530 return (-1);
531
532 if (lp->d_partitions[part].p_fstype != FS_SWAP)
533 size = -1;
534 else
535 size = lp->d_partitions[part].p_size *
536 (lp->d_secsize / DEV_BSIZE);
537
538 if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
539 return (-1);
540
541 return (size);
542
543 }
544
545 int
546 raiddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
547 {
548 /* Not implemented. */
549 return ENXIO;
550 }
551 /* ARGSUSED */
552 int
553 raidopen(dev_t dev, int flags, int fmt, struct proc *p)
554 {
555 int unit = raidunit(dev);
556 struct raid_softc *rs;
557 struct disklabel *lp;
558 int part, pmask;
559 int error = 0;
560
561 if (unit >= numraid)
562 return (ENXIO);
563 rs = &raid_softc[unit];
564
565 if ((error = raidlock(rs)) != 0)
566 return (error);
567 lp = rs->sc_dkdev.dk_label;
568
569 part = DISKPART(dev);
570 pmask = (1 << part);
571
572 if ((rs->sc_flags & RAIDF_INITED) &&
573 (rs->sc_dkdev.dk_openmask == 0))
574 raidgetdisklabel(dev);
575
576 /* make sure that this partition exists */
577
578 if (part != RAW_PART) {
579 if (((rs->sc_flags & RAIDF_INITED) == 0) ||
580 ((part >= lp->d_npartitions) ||
581 (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
582 error = ENXIO;
583 raidunlock(rs);
584 return (error);
585 }
586 }
587 /* Prevent this unit from being unconfigured while open. */
588 switch (fmt) {
589 case S_IFCHR:
590 rs->sc_dkdev.dk_copenmask |= pmask;
591 break;
592
593 case S_IFBLK:
594 rs->sc_dkdev.dk_bopenmask |= pmask;
595 break;
596 }
597
598 if ((rs->sc_dkdev.dk_openmask == 0) &&
599 ((rs->sc_flags & RAIDF_INITED) != 0)) {
600 /* First one... mark things as dirty... Note that we *MUST*
601 have done a configure before this. I DO NOT WANT TO BE
602 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
603 THAT THEY BELONG TOGETHER!!!!! */
604 /* XXX should check to see if we're only open for reading
605 here... If so, we needn't do this, but then need some
606 other way of keeping track of what's happened.. */
607
608 rf_markalldirty( raidPtrs[unit] );
609 }
610
611
612 rs->sc_dkdev.dk_openmask =
613 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
614
615 raidunlock(rs);
616
617 return (error);
618
619
620 }
621 /* ARGSUSED */
622 int
623 raidclose(dev_t dev, int flags, int fmt, struct proc *p)
624 {
625 int unit = raidunit(dev);
626 struct raid_softc *rs;
627 int error = 0;
628 int part;
629
630 if (unit >= numraid)
631 return (ENXIO);
632 rs = &raid_softc[unit];
633
634 if ((error = raidlock(rs)) != 0)
635 return (error);
636
637 part = DISKPART(dev);
638
639 /* ...that much closer to allowing unconfiguration... */
640 switch (fmt) {
641 case S_IFCHR:
642 rs->sc_dkdev.dk_copenmask &= ~(1 << part);
643 break;
644
645 case S_IFBLK:
646 rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
647 break;
648 }
649 rs->sc_dkdev.dk_openmask =
650 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
651
652 if ((rs->sc_dkdev.dk_openmask == 0) &&
653 ((rs->sc_flags & RAIDF_INITED) != 0)) {
654 /* Last one... the device is not unconfigured yet.
655 Mark things as clean here; if RAIDF_INITED were
656 not set, device shutdown would already have taken
657 care of setting the clean bits. */
658
659 rf_update_component_labels(raidPtrs[unit],
660 RF_FINAL_COMPONENT_UPDATE);
661 if (doing_shutdown) {
662 /* last one, and we're going down, so
663 lights out for this RAID set too. */
664 error = rf_Shutdown(raidPtrs[unit]);
665
666 /* It's no longer initialized... */
667 rs->sc_flags &= ~RAIDF_INITED;
668
669 /* Detach the disk. */
670 disk_detach(&rs->sc_dkdev);
671 }
672 }
673
674 raidunlock(rs);
675 return (0);
676
677 }
678
679 void
680 raidstrategy(struct buf *bp)
681 {
682 int s;
683
684 unsigned int raidID = raidunit(bp->b_dev);
685 RF_Raid_t *raidPtr;
686 struct raid_softc *rs;
687 int wlabel;
688 if (raidID >= numraid || !raidPtrs[raidID]) {
689 bp->b_error = ENODEV;
690 bp->b_flags |= B_ERROR;
691 bp->b_resid = bp->b_bcount;
692 biodone(bp);
693 return;
694 }
695 rs = &raid_softc[raidID];
696 if ((rs->sc_flags & RAIDF_INITED) == 0) {
697 bp->b_error = ENXIO;
698 bp->b_flags |= B_ERROR;
699 bp->b_resid = bp->b_bcount;
700 biodone(bp);
701 return;
702 }
703 raidPtr = raidPtrs[raidID];
704 if (!raidPtr->valid) {
705 bp->b_error = ENODEV;
706 bp->b_flags |= B_ERROR;
707 bp->b_resid = bp->b_bcount;
708 biodone(bp);
709 return;
710 }
711 if (bp->b_bcount == 0) {
712 db1_printf(("b_bcount is zero..\n"));
713 biodone(bp);
714 return;
715 }
716
717 /*
718 * Do bounds checking and adjust transfer. If there's an
719 * error, the bounds check will flag that for us.
720 */
721
722 wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
723 if (DISKPART(bp->b_dev) != RAW_PART)
724 if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
725 db1_printf(("Bounds check failed!!:%d %d\n",
726 (int) bp->b_blkno, (int) wlabel));
727 biodone(bp);
728 return;
729 }
730 s = splbio();
731
732 bp->b_resid = 0;
733
734 /* stuff it onto our queue */
735 BUFQ_PUT(&rs->buf_queue, bp);
736
737 raidstart(raidPtrs[raidID]);
738
739 splx(s);
740 }
741 /* ARGSUSED */
742 int
743 raidread(dev_t dev, struct uio *uio, int flags)
744 {
745 int unit = raidunit(dev);
746 struct raid_softc *rs;
747
748 if (unit >= numraid)
749 return (ENXIO);
750 rs = &raid_softc[unit];
751
752 if ((rs->sc_flags & RAIDF_INITED) == 0)
753 return (ENXIO);
754
755 return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
756
757 }
758 /* ARGSUSED */
759 int
760 raidwrite(dev_t dev, struct uio *uio, int flags)
761 {
762 int unit = raidunit(dev);
763 struct raid_softc *rs;
764
765 if (unit >= numraid)
766 return (ENXIO);
767 rs = &raid_softc[unit];
768
769 if ((rs->sc_flags & RAIDF_INITED) == 0)
770 return (ENXIO);
771
772 return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
773
774 }
775
776 int
777 raidioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
778 {
779 int unit = raidunit(dev);
780 int error = 0;
781 int part, pmask;
782 struct raid_softc *rs;
783 RF_Config_t *k_cfg, *u_cfg;
784 RF_Raid_t *raidPtr;
785 RF_RaidDisk_t *diskPtr;
786 RF_AccTotals_t *totals;
787 RF_DeviceConfig_t *d_cfg, **ucfgp;
788 u_char *specific_buf;
789 int retcode = 0;
790 int column;
791 int raidid;
792 struct rf_recon_req *rrcopy, *rr;
793 RF_ComponentLabel_t *clabel;
794 RF_ComponentLabel_t ci_label;
795 RF_ComponentLabel_t **clabel_ptr;
796 RF_SingleComponent_t *sparePtr,*componentPtr;
797 RF_SingleComponent_t hot_spare;
798 RF_SingleComponent_t component;
799 RF_ProgressInfo_t progressInfo, **progressInfoPtr;
800 int i, j, d;
801 #ifdef __HAVE_OLD_DISKLABEL
802 struct disklabel newlabel;
803 #endif
804
805 if (unit >= numraid)
806 return (ENXIO);
807 rs = &raid_softc[unit];
808 raidPtr = raidPtrs[unit];
809
810 db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
811 (int) DISKPART(dev), (int) unit, (int) cmd));
812
813 /* Must be open for writes for these commands... */
814 switch (cmd) {
815 case DIOCSDINFO:
816 case DIOCWDINFO:
817 #ifdef __HAVE_OLD_DISKLABEL
818 case ODIOCWDINFO:
819 case ODIOCSDINFO:
820 #endif
821 case DIOCWLABEL:
822 if ((flag & FWRITE) == 0)
823 return (EBADF);
824 }
825
826 /* Must be initialized for these... */
827 switch (cmd) {
828 case DIOCGDINFO:
829 case DIOCSDINFO:
830 case DIOCWDINFO:
831 #ifdef __HAVE_OLD_DISKLABEL
832 case ODIOCGDINFO:
833 case ODIOCWDINFO:
834 case ODIOCSDINFO:
835 case ODIOCGDEFLABEL:
836 #endif
837 case DIOCGPART:
838 case DIOCWLABEL:
839 case DIOCGDEFLABEL:
840 case RAIDFRAME_SHUTDOWN:
841 case RAIDFRAME_REWRITEPARITY:
842 case RAIDFRAME_GET_INFO:
843 case RAIDFRAME_RESET_ACCTOTALS:
844 case RAIDFRAME_GET_ACCTOTALS:
845 case RAIDFRAME_KEEP_ACCTOTALS:
846 case RAIDFRAME_GET_SIZE:
847 case RAIDFRAME_FAIL_DISK:
848 case RAIDFRAME_COPYBACK:
849 case RAIDFRAME_CHECK_RECON_STATUS:
850 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
851 case RAIDFRAME_GET_COMPONENT_LABEL:
852 case RAIDFRAME_SET_COMPONENT_LABEL:
853 case RAIDFRAME_ADD_HOT_SPARE:
854 case RAIDFRAME_REMOVE_HOT_SPARE:
855 case RAIDFRAME_INIT_LABELS:
856 case RAIDFRAME_REBUILD_IN_PLACE:
857 case RAIDFRAME_CHECK_PARITY:
858 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
859 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
860 case RAIDFRAME_CHECK_COPYBACK_STATUS:
861 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
862 case RAIDFRAME_SET_AUTOCONFIG:
863 case RAIDFRAME_SET_ROOT:
864 case RAIDFRAME_DELETE_COMPONENT:
865 case RAIDFRAME_INCORPORATE_HOT_SPARE:
866 if ((rs->sc_flags & RAIDF_INITED) == 0)
867 return (ENXIO);
868 }
869
870 switch (cmd) {
871
872 /* configure the system */
873 case RAIDFRAME_CONFIGURE:
874
875 if (raidPtr->valid) {
876 /* There is a valid RAID set running on this unit! */
877 printf("raid%d: Device already configured!\n",unit);
878 return(EINVAL);
879 }
880
881 /* copy-in the configuration information */
882 /* data points to a pointer to the configuration structure */
883
884 u_cfg = *((RF_Config_t **) data);
885 RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
886 if (k_cfg == NULL) {
887 return (ENOMEM);
888 }
889 retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
890 if (retcode) {
891 RF_Free(k_cfg, sizeof(RF_Config_t));
892 db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
893 retcode));
894 return (retcode);
895 }
896 /* allocate a buffer for the layout-specific data, and copy it
897 * in */
898 if (k_cfg->layoutSpecificSize) {
899 if (k_cfg->layoutSpecificSize > 10000) {
900 /* sanity check */
901 RF_Free(k_cfg, sizeof(RF_Config_t));
902 return (EINVAL);
903 }
904 RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
905 (u_char *));
906 if (specific_buf == NULL) {
907 RF_Free(k_cfg, sizeof(RF_Config_t));
908 return (ENOMEM);
909 }
910 retcode = copyin(k_cfg->layoutSpecific, specific_buf,
911 k_cfg->layoutSpecificSize);
912 if (retcode) {
913 RF_Free(k_cfg, sizeof(RF_Config_t));
914 RF_Free(specific_buf,
915 k_cfg->layoutSpecificSize);
916 db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
917 retcode));
918 return (retcode);
919 }
920 } else
921 specific_buf = NULL;
922 k_cfg->layoutSpecific = specific_buf;
923
924 /* should do some kind of sanity check on the configuration.
925 * Store the sum of all the bytes in the last byte? */
926
927 /* configure the system */
928
929 /*
930 * Clear the entire RAID descriptor, just to make sure
931 * there is no stale data left in the case of a
932 * reconfiguration
933 */
934 memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
935 raidPtr->raidid = unit;
936
937 retcode = rf_Configure(raidPtr, k_cfg, NULL);
938
939 if (retcode == 0) {
940
941 /* allow this many simultaneous IO's to
942 this RAID device */
943 raidPtr->openings = RAIDOUTSTANDING;
944
945 raidinit(raidPtr);
946 rf_markalldirty(raidPtr);
947 }
948 /* free the buffers. No return code here. */
949 if (k_cfg->layoutSpecificSize) {
950 RF_Free(specific_buf, k_cfg->layoutSpecificSize);
951 }
952 RF_Free(k_cfg, sizeof(RF_Config_t));
953
954 return (retcode);
955
956 /* shutdown the system */
957 case RAIDFRAME_SHUTDOWN:
958
959 if ((error = raidlock(rs)) != 0)
960 return (error);
961
962 /*
963 * If somebody has a partition mounted, we shouldn't
964 * shutdown.
965 */
966
967 part = DISKPART(dev);
968 pmask = (1 << part);
969 if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
970 ((rs->sc_dkdev.dk_bopenmask & pmask) &&
971 (rs->sc_dkdev.dk_copenmask & pmask))) {
972 raidunlock(rs);
973 return (EBUSY);
974 }
975
976 retcode = rf_Shutdown(raidPtr);
977
978 /* It's no longer initialized... */
979 rs->sc_flags &= ~RAIDF_INITED;
980
981 /* Detach the disk. */
982 disk_detach(&rs->sc_dkdev);
983
984 raidunlock(rs);
985
986 return (retcode);
987 case RAIDFRAME_GET_COMPONENT_LABEL:
988 clabel_ptr = (RF_ComponentLabel_t **) data;
989 /* need to read the component label for the disk indicated
990 by row,column in clabel */
991
992 /* For practice, let's get it directly from disk, rather
993 than from the in-core copy */
994 RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
995 (RF_ComponentLabel_t *));
996 if (clabel == NULL)
997 return (ENOMEM);
998
999 memset((char *) clabel, 0, sizeof(RF_ComponentLabel_t));
1000
1001 retcode = copyin( *clabel_ptr, clabel,
1002 sizeof(RF_ComponentLabel_t));
1003
1004 if (retcode) {
1005 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1006 return(retcode);
1007 }
1008
1009 clabel->row = 0; /* Don't allow looking at anything else.*/
1010
1011 column = clabel->column;
1012
1013 if ((column < 0) || (column >= raidPtr->numCol +
1014 raidPtr->numSpare)) {
1015 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1016 return(EINVAL);
1017 }
1018
1019 raidread_component_label(raidPtr->Disks[column].dev,
1020 raidPtr->raid_cinfo[column].ci_vp,
1021 clabel );
1022
1023 retcode = copyout(clabel, *clabel_ptr,
1024 sizeof(RF_ComponentLabel_t));
1025 RF_Free(clabel, sizeof(RF_ComponentLabel_t));
1026 return (retcode);
1027
1028 case RAIDFRAME_SET_COMPONENT_LABEL:
1029 clabel = (RF_ComponentLabel_t *) data;
1030
1031 /* XXX check the label for valid stuff... */
1032 /* Note that some things *should not* get modified --
1033 the user should be re-initing the labels instead of
1034 trying to patch things.
1035 */
1036
1037 raidid = raidPtr->raidid;
1038 printf("raid%d: Got component label:\n", raidid);
1039 printf("raid%d: Version: %d\n", raidid, clabel->version);
1040 printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1041 printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1042 printf("raid%d: Column: %d\n", raidid, clabel->column);
1043 printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1044 printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1045 printf("raid%d: Status: %d\n", raidid, clabel->status);
1046
1047 clabel->row = 0;
1048 column = clabel->column;
1049
1050 if ((column < 0) || (column >= raidPtr->numCol)) {
1051 return(EINVAL);
1052 }
1053
1054 /* XXX this isn't allowed to do anything for now :-) */
1055
1056 /* XXX and before it is, we need to fill in the rest
1057 of the fields!?!?!?! */
1058 #if 0
1059 raidwrite_component_label(
1060 raidPtr->Disks[column].dev,
1061 raidPtr->raid_cinfo[column].ci_vp,
1062 clabel );
1063 #endif
1064 return (0);
1065
1066 case RAIDFRAME_INIT_LABELS:
1067 clabel = (RF_ComponentLabel_t *) data;
1068 /*
1069 we only want the serial number from
1070 the above. We get all the rest of the information
1071 from the config that was used to create this RAID
1072 set.
1073 */
1074
1075 raidPtr->serial_number = clabel->serial_number;
1076
1077 raid_init_component_label(raidPtr, &ci_label);
1078 ci_label.serial_number = clabel->serial_number;
1079 ci_label.row = 0; /* we don't pretend to support more */
1080
1081 for(column=0;column<raidPtr->numCol;column++) {
1082 diskPtr = &raidPtr->Disks[column];
1083 if (!RF_DEAD_DISK(diskPtr->status)) {
1084 ci_label.partitionSize = diskPtr->partitionSize;
1085 ci_label.column = column;
1086 raidwrite_component_label(
1087 raidPtr->Disks[column].dev,
1088 raidPtr->raid_cinfo[column].ci_vp,
1089 &ci_label );
1090 }
1091 }
1092
1093 return (retcode);
1094 case RAIDFRAME_SET_AUTOCONFIG:
1095 d = rf_set_autoconfig(raidPtr, *(int *) data);
1096 printf("raid%d: New autoconfig value is: %d\n",
1097 raidPtr->raidid, d);
1098 *(int *) data = d;
1099 return (retcode);
1100
1101 case RAIDFRAME_SET_ROOT:
1102 d = rf_set_rootpartition(raidPtr, *(int *) data);
1103 printf("raid%d: New rootpartition value is: %d\n",
1104 raidPtr->raidid, d);
1105 *(int *) data = d;
1106 return (retcode);
1107
1108 /* initialize all parity */
1109 case RAIDFRAME_REWRITEPARITY:
1110
1111 if (raidPtr->Layout.map->faultsTolerated == 0) {
1112 /* Parity for RAID 0 is trivially correct */
1113 raidPtr->parity_good = RF_RAID_CLEAN;
1114 return(0);
1115 }
1116
1117 if (raidPtr->parity_rewrite_in_progress == 1) {
1118 /* Re-write is already in progress! */
1119 return(EINVAL);
1120 }
1121
1122 retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1123 rf_RewriteParityThread,
1124 raidPtr,"raid_parity");
1125 return (retcode);
1126
1127
1128 case RAIDFRAME_ADD_HOT_SPARE:
1129 sparePtr = (RF_SingleComponent_t *) data;
1130 memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
1131 retcode = rf_add_hot_spare(raidPtr, &hot_spare);
1132 return(retcode);
1133
1134 case RAIDFRAME_REMOVE_HOT_SPARE:
1135 return(retcode);
1136
1137 case RAIDFRAME_DELETE_COMPONENT:
1138 componentPtr = (RF_SingleComponent_t *)data;
1139 memcpy( &component, componentPtr,
1140 sizeof(RF_SingleComponent_t));
1141 retcode = rf_delete_component(raidPtr, &component);
1142 return(retcode);
1143
1144 case RAIDFRAME_INCORPORATE_HOT_SPARE:
1145 componentPtr = (RF_SingleComponent_t *)data;
1146 memcpy( &component, componentPtr,
1147 sizeof(RF_SingleComponent_t));
1148 retcode = rf_incorporate_hot_spare(raidPtr, &component);
1149 return(retcode);
1150
1151 case RAIDFRAME_REBUILD_IN_PLACE:
1152
1153 if (raidPtr->Layout.map->faultsTolerated == 0) {
1154 /* Can't do this on a RAID 0!! */
1155 return(EINVAL);
1156 }
1157
1158 if (raidPtr->recon_in_progress == 1) {
1159 /* a reconstruct is already in progress! */
1160 return(EINVAL);
1161 }
1162
1163 componentPtr = (RF_SingleComponent_t *) data;
1164 memcpy( &component, componentPtr,
1165 sizeof(RF_SingleComponent_t));
1166 component.row = 0; /* we don't support any more */
1167 column = component.column;
1168
1169 if ((column < 0) || (column >= raidPtr->numCol)) {
1170 return(EINVAL);
1171 }
1172
1173 RF_LOCK_MUTEX(raidPtr->mutex);
1174 if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
1175 (raidPtr->numFailures > 0)) {
1176 /* XXX 0 above shouldn't be constant!!! */
1177 /* some component other than this has failed.
1178 Let's not make things worse than they already
1179 are... */
1180 printf("raid%d: Unable to reconstruct to disk at:\n",
1181 raidPtr->raidid);
1182 printf("raid%d: Col: %d Too many failures.\n",
1183 raidPtr->raidid, column);
1184 RF_UNLOCK_MUTEX(raidPtr->mutex);
1185 return (EINVAL);
1186 }
1187 if (raidPtr->Disks[column].status ==
1188 rf_ds_reconstructing) {
1189 printf("raid%d: Unable to reconstruct to disk at:\n",
1190 raidPtr->raidid);
1191 printf("raid%d: Col: %d Reconstruction already occurring!\n", raidPtr->raidid, column);
1192
1193 RF_UNLOCK_MUTEX(raidPtr->mutex);
1194 return (EINVAL);
1195 }
1196 if (raidPtr->Disks[column].status == rf_ds_spared) {
1197 RF_UNLOCK_MUTEX(raidPtr->mutex);
1198 return (EINVAL);
1199 }
1200 RF_UNLOCK_MUTEX(raidPtr->mutex);
1201
1202 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1203 if (rrcopy == NULL)
1204 return(ENOMEM);
1205
1206 rrcopy->raidPtr = (void *) raidPtr;
1207 rrcopy->col = column;
1208
1209 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1210 rf_ReconstructInPlaceThread,
1211 rrcopy,"raid_reconip");
1212 return(retcode);
1213
1214 case RAIDFRAME_GET_INFO:
1215 if (!raidPtr->valid)
1216 return (ENODEV);
1217 ucfgp = (RF_DeviceConfig_t **) data;
1218 RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1219 (RF_DeviceConfig_t *));
1220 if (d_cfg == NULL)
1221 return (ENOMEM);
1222 memset((char *) d_cfg, 0, sizeof(RF_DeviceConfig_t));
1223 d_cfg->rows = 1; /* there is only 1 row now */
1224 d_cfg->cols = raidPtr->numCol;
1225 d_cfg->ndevs = raidPtr->numCol;
1226 if (d_cfg->ndevs >= RF_MAX_DISKS) {
1227 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1228 return (ENOMEM);
1229 }
1230 d_cfg->nspares = raidPtr->numSpare;
1231 if (d_cfg->nspares >= RF_MAX_DISKS) {
1232 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1233 return (ENOMEM);
1234 }
1235 d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1236 d = 0;
1237 for (j = 0; j < d_cfg->cols; j++) {
1238 d_cfg->devs[d] = raidPtr->Disks[j];
1239 d++;
1240 }
1241 for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1242 d_cfg->spares[i] = raidPtr->Disks[j];
1243 }
1244 retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
1245 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1246
1247 return (retcode);
1248
1249 case RAIDFRAME_CHECK_PARITY:
1250 *(int *) data = raidPtr->parity_good;
1251 return (0);
1252
1253 case RAIDFRAME_RESET_ACCTOTALS:
1254 memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1255 return (0);
1256
1257 case RAIDFRAME_GET_ACCTOTALS:
1258 totals = (RF_AccTotals_t *) data;
1259 *totals = raidPtr->acc_totals;
1260 return (0);
1261
1262 case RAIDFRAME_KEEP_ACCTOTALS:
1263 raidPtr->keep_acc_totals = *(int *)data;
1264 return (0);
1265
1266 case RAIDFRAME_GET_SIZE:
1267 *(int *) data = raidPtr->totalSectors;
1268 return (0);
1269
1270 /* fail a disk & optionally start reconstruction */
1271 case RAIDFRAME_FAIL_DISK:
1272
1273 if (raidPtr->Layout.map->faultsTolerated == 0) {
1274 /* Can't do this on a RAID 0!! */
1275 return(EINVAL);
1276 }
1277
1278 rr = (struct rf_recon_req *) data;
1279 rr->row = 0;
1280 if (rr->col < 0 || rr->col >= raidPtr->numCol)
1281 return (EINVAL);
1282
1283
1284 RF_LOCK_MUTEX(raidPtr->mutex);
1285 if ((raidPtr->Disks[rr->col].status ==
1286 rf_ds_optimal) && (raidPtr->numFailures > 0)) {
1287 /* some other component has failed. Let's not make
1288 things worse. XXX wrong for RAID6 */
1289 RF_UNLOCK_MUTEX(raidPtr->mutex);
1290 return (EINVAL);
1291 }
1292 if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
1293 /* Can't fail a spared disk! */
1294 RF_UNLOCK_MUTEX(raidPtr->mutex);
1295 return (EINVAL);
1296 }
1297 RF_UNLOCK_MUTEX(raidPtr->mutex);
1298
1299 /* make a copy of the recon request so that we don't rely on
1300 * the user's buffer */
1301 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1302 if (rrcopy == NULL)
1303 return(ENOMEM);
1304 memcpy(rrcopy, rr, sizeof(*rr));
1305 rrcopy->raidPtr = (void *) raidPtr;
1306
1307 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1308 rf_ReconThread,
1309 rrcopy,"raid_recon");
1310 return (0);
1311
1312 /* invoke a copyback operation after recon on whatever disk
1313 * needs it, if any */
1314 case RAIDFRAME_COPYBACK:
1315
1316 if (raidPtr->Layout.map->faultsTolerated == 0) {
1317 /* This makes no sense on a RAID 0!! */
1318 return(EINVAL);
1319 }
1320
1321 if (raidPtr->copyback_in_progress == 1) {
1322 /* Copyback is already in progress! */
1323 return(EINVAL);
1324 }
1325
1326 retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1327 rf_CopybackThread,
1328 raidPtr,"raid_copyback");
1329 return (retcode);
1330
1331 /* return the percentage completion of reconstruction */
1332 case RAIDFRAME_CHECK_RECON_STATUS:
1333 if (raidPtr->Layout.map->faultsTolerated == 0) {
1334 /* This makes no sense on a RAID 0, so tell the
1335 user it's done. */
1336 *(int *) data = 100;
1337 return(0);
1338 }
1339 if (raidPtr->status != rf_rs_reconstructing)
1340 *(int *) data = 100;
1341 else
1342 *(int *) data = raidPtr->reconControl->percentComplete;
1343 return (0);
1344 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1345 progressInfoPtr = (RF_ProgressInfo_t **) data;
1346 if (raidPtr->status != rf_rs_reconstructing) {
1347 progressInfo.remaining = 0;
1348 progressInfo.completed = 100;
1349 progressInfo.total = 100;
1350 } else {
1351 progressInfo.total =
1352 raidPtr->reconControl->numRUsTotal;
1353 progressInfo.completed =
1354 raidPtr->reconControl->numRUsComplete;
1355 progressInfo.remaining = progressInfo.total -
1356 progressInfo.completed;
1357 }
1358 retcode = copyout(&progressInfo, *progressInfoPtr,
1359 sizeof(RF_ProgressInfo_t));
1360 return (retcode);
1361
1362 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1363 if (raidPtr->Layout.map->faultsTolerated == 0) {
1364 /* This makes no sense on a RAID 0, so tell the
1365 user it's done. */
1366 *(int *) data = 100;
1367 return(0);
1368 }
1369 if (raidPtr->parity_rewrite_in_progress == 1) {
1370 *(int *) data = 100 *
1371 raidPtr->parity_rewrite_stripes_done /
1372 raidPtr->Layout.numStripe;
1373 } else {
1374 *(int *) data = 100;
1375 }
1376 return (0);
1377
1378 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1379 progressInfoPtr = (RF_ProgressInfo_t **) data;
1380 if (raidPtr->parity_rewrite_in_progress == 1) {
1381 progressInfo.total = raidPtr->Layout.numStripe;
1382 progressInfo.completed =
1383 raidPtr->parity_rewrite_stripes_done;
1384 progressInfo.remaining = progressInfo.total -
1385 progressInfo.completed;
1386 } else {
1387 progressInfo.remaining = 0;
1388 progressInfo.completed = 100;
1389 progressInfo.total = 100;
1390 }
1391 retcode = copyout(&progressInfo, *progressInfoPtr,
1392 sizeof(RF_ProgressInfo_t));
1393 return (retcode);
1394
1395 case RAIDFRAME_CHECK_COPYBACK_STATUS:
1396 if (raidPtr->Layout.map->faultsTolerated == 0) {
1397 /* This makes no sense on a RAID 0 */
1398 *(int *) data = 100;
1399 return(0);
1400 }
1401 if (raidPtr->copyback_in_progress == 1) {
1402 *(int *) data = 100 * raidPtr->copyback_stripes_done /
1403 raidPtr->Layout.numStripe;
1404 } else {
1405 *(int *) data = 100;
1406 }
1407 return (0);
1408
1409 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1410 progressInfoPtr = (RF_ProgressInfo_t **) data;
1411 if (raidPtr->copyback_in_progress == 1) {
1412 progressInfo.total = raidPtr->Layout.numStripe;
1413 progressInfo.completed =
1414 raidPtr->copyback_stripes_done;
1415 progressInfo.remaining = progressInfo.total -
1416 progressInfo.completed;
1417 } else {
1418 progressInfo.remaining = 0;
1419 progressInfo.completed = 100;
1420 progressInfo.total = 100;
1421 }
1422 retcode = copyout(&progressInfo, *progressInfoPtr,
1423 sizeof(RF_ProgressInfo_t));
1424 return (retcode);
1425
1426 /* the sparetable daemon calls this to wait for the kernel to
1427 * need a spare table. this ioctl does not return until a
1428 * spare table is needed. XXX -- calling mpsleep here in the
1429 * ioctl code is almost certainly wrong and evil. -- XXX XXX
1430 * -- I should either compute the spare table in the kernel,
1431 * or have a different -- XXX XXX -- interface (a different
1432 * character device) for delivering the table -- XXX */
1433 #if 0
1434 case RAIDFRAME_SPARET_WAIT:
1435 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1436 while (!rf_sparet_wait_queue)
1437 mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1438 waitreq = rf_sparet_wait_queue;
1439 rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1440 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1441
1442 /* structure assignment */
1443 *((RF_SparetWait_t *) data) = *waitreq;
1444
1445 RF_Free(waitreq, sizeof(*waitreq));
1446 return (0);
1447
1448 /* wakes up a process waiting on SPARET_WAIT and puts an error
1449 * code in it that will cause the daemon to exit */
1450 case RAIDFRAME_ABORT_SPARET_WAIT:
1451 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1452 waitreq->fcol = -1;
1453 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1454 waitreq->next = rf_sparet_wait_queue;
1455 rf_sparet_wait_queue = waitreq;
1456 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1457 wakeup(&rf_sparet_wait_queue);
1458 return (0);
1459
1460 /* used by the spare table daemon to deliver a spare table
1461 * into the kernel */
1462 case RAIDFRAME_SEND_SPARET:
1463
1464 /* install the spare table */
1465 retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1466
1467 /* respond to the requestor. the return status of the spare
1468 * table installation is passed in the "fcol" field */
1469 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1470 waitreq->fcol = retcode;
1471 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1472 waitreq->next = rf_sparet_resp_queue;
1473 rf_sparet_resp_queue = waitreq;
1474 wakeup(&rf_sparet_resp_queue);
1475 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1476
1477 return (retcode);
1478 #endif
1479
1480 default:
1481 break; /* fall through to the os-specific code below */
1482
1483 }
1484
1485 if (!raidPtr->valid)
1486 return (EINVAL);
1487
1488 /*
1489 * Add support for "regular" device ioctls here.
1490 */
1491
1492 switch (cmd) {
1493 case DIOCGDINFO:
1494 *(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1495 break;
1496 #ifdef __HAVE_OLD_DISKLABEL
1497 case ODIOCGDINFO:
1498 newlabel = *(rs->sc_dkdev.dk_label);
1499 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1500 return ENOTTY;
1501 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1502 break;
1503 #endif
1504
1505 case DIOCGPART:
1506 ((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1507 ((struct partinfo *) data)->part =
1508 &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1509 break;
1510
1511 case DIOCWDINFO:
1512 case DIOCSDINFO:
1513 #ifdef __HAVE_OLD_DISKLABEL
1514 case ODIOCWDINFO:
1515 case ODIOCSDINFO:
1516 #endif
1517 {
1518 struct disklabel *lp;
1519 #ifdef __HAVE_OLD_DISKLABEL
1520 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1521 memset(&newlabel, 0, sizeof newlabel);
1522 memcpy(&newlabel, data, sizeof (struct olddisklabel));
1523 lp = &newlabel;
1524 } else
1525 #endif
1526 lp = (struct disklabel *)data;
1527
1528 if ((error = raidlock(rs)) != 0)
1529 return (error);
1530
1531 rs->sc_flags |= RAIDF_LABELLING;
1532
1533 error = setdisklabel(rs->sc_dkdev.dk_label,
1534 lp, 0, rs->sc_dkdev.dk_cpulabel);
1535 if (error == 0) {
1536 if (cmd == DIOCWDINFO
1537 #ifdef __HAVE_OLD_DISKLABEL
1538 || cmd == ODIOCWDINFO
1539 #endif
1540 )
1541 error = writedisklabel(RAIDLABELDEV(dev),
1542 raidstrategy, rs->sc_dkdev.dk_label,
1543 rs->sc_dkdev.dk_cpulabel);
1544 }
1545 rs->sc_flags &= ~RAIDF_LABELLING;
1546
1547 raidunlock(rs);
1548
1549 if (error)
1550 return (error);
1551 break;
1552 }
1553
1554 case DIOCWLABEL:
1555 if (*(int *) data != 0)
1556 rs->sc_flags |= RAIDF_WLABEL;
1557 else
1558 rs->sc_flags &= ~RAIDF_WLABEL;
1559 break;
1560
1561 case DIOCGDEFLABEL:
1562 raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
1563 break;
1564
1565 #ifdef __HAVE_OLD_DISKLABEL
1566 case ODIOCGDEFLABEL:
1567 raidgetdefaultlabel(raidPtr, rs, &newlabel);
1568 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1569 return ENOTTY;
1570 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1571 break;
1572 #endif
1573
1574 default:
1575 retcode = ENOTTY;
1576 }
1577 return (retcode);
1578
1579 }
1580
1581
1582 /* raidinit -- complete the rest of the initialization for the
1583 RAIDframe device. */
1584
1585
1586 static void
1587 raidinit(RF_Raid_t *raidPtr)
1588 {
1589 struct raid_softc *rs;
1590 int unit;
1591
1592 unit = raidPtr->raidid;
1593
1594 rs = &raid_softc[unit];
1595
1596 /* XXX should check return code first... */
1597 rs->sc_flags |= RAIDF_INITED;
1598
1599 sprintf(rs->sc_xname, "raid%d", unit); /* XXX doesn't check bounds. */
1600
1601 rs->sc_dkdev.dk_name = rs->sc_xname;
1602
1603 /* disk_attach actually creates space for the CPU disklabel, among
1604 * other things, so it's critical to call this *BEFORE* we try putzing
1605 * with disklabels. */
1606
1607 disk_attach(&rs->sc_dkdev);
1608
1609 /* XXX There may be a weird interaction here between this, and
1610 * protectedSectors, as used in RAIDframe. */
1611
1612 rs->sc_size = raidPtr->totalSectors;
1613
1614 }
1615 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
1616 /* wake up the daemon & tell it to get us a spare table
1617 * XXX
1618 * the entries in the queues should be tagged with the raidPtr
1619 * so that in the extremely rare case that two recons happen at once,
1620 * we know for which device we're requesting a spare table
1621 * XXX
1622 *
1623 * XXX This code is not currently used. GO
1624 */
1625 int
1626 rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
1627 {
1628 int retcode;
1629
1630 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1631 req->next = rf_sparet_wait_queue;
1632 rf_sparet_wait_queue = req;
1633 wakeup(&rf_sparet_wait_queue);
1634
1635 /* mpsleep unlocks the mutex */
1636 while (!rf_sparet_resp_queue) {
1637 tsleep(&rf_sparet_resp_queue, PRIBIO,
1638 "raidframe getsparetable", 0);
1639 }
1640 req = rf_sparet_resp_queue;
1641 rf_sparet_resp_queue = req->next;
1642 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1643
1644 retcode = req->fcol;
1645 RF_Free(req, sizeof(*req)); /* this is not the same req as we
1646 * alloc'd */
1647 return (retcode);
1648 }
1649 #endif
1650
1651 /* a wrapper around rf_DoAccess that extracts appropriate info from the
1652 * bp & passes it down.
1653 * any calls originating in the kernel must use non-blocking I/O
1654 * do some extra sanity checking to return "appropriate" error values for
1655 * certain conditions (to make some standard utilities work)
1656 *
1657 * Formerly known as: rf_DoAccessKernel
1658 */
1659 void
1660 raidstart(RF_Raid_t *raidPtr)
1661 {
1662 RF_SectorCount_t num_blocks, pb, sum;
1663 RF_RaidAddr_t raid_addr;
1664 struct partition *pp;
1665 daddr_t blocknum;
1666 int unit;
1667 struct raid_softc *rs;
1668 int do_async;
1669 struct buf *bp;
1670
1671 unit = raidPtr->raidid;
1672 rs = &raid_softc[unit];
1673
1674 /* quick check to see if anything has died recently */
1675 RF_LOCK_MUTEX(raidPtr->mutex);
1676 if (raidPtr->numNewFailures > 0) {
1677 RF_UNLOCK_MUTEX(raidPtr->mutex);
1678 rf_update_component_labels(raidPtr,
1679 RF_NORMAL_COMPONENT_UPDATE);
1680 RF_LOCK_MUTEX(raidPtr->mutex);
1681 raidPtr->numNewFailures--;
1682 }
1683
1684 /* Check to see if we're at the limit... */
1685 while (raidPtr->openings > 0) {
1686 RF_UNLOCK_MUTEX(raidPtr->mutex);
1687
1688 /* get the next item, if any, from the queue */
1689 if ((bp = BUFQ_GET(&rs->buf_queue)) == NULL) {
1690 /* nothing more to do */
1691 return;
1692 }
1693
1694 /* Ok, for the bp we have here, bp->b_blkno is relative to the
1695 * partition.. Need to make it absolute to the underlying
1696 * device.. */
1697
1698 blocknum = bp->b_blkno;
1699 if (DISKPART(bp->b_dev) != RAW_PART) {
1700 pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
1701 blocknum += pp->p_offset;
1702 }
1703
1704 db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
1705 (int) blocknum));
1706
1707 db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
1708 db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
1709
1710 /* *THIS* is where we adjust what block we're going to...
1711 * but DO NOT TOUCH bp->b_blkno!!! */
1712 raid_addr = blocknum;
1713
1714 num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
1715 pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
1716 sum = raid_addr + num_blocks + pb;
1717 if (1 || rf_debugKernelAccess) {
1718 db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
1719 (int) raid_addr, (int) sum, (int) num_blocks,
1720 (int) pb, (int) bp->b_resid));
1721 }
1722 if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
1723 || (sum < num_blocks) || (sum < pb)) {
1724 bp->b_error = ENOSPC;
1725 bp->b_flags |= B_ERROR;
1726 bp->b_resid = bp->b_bcount;
1727 biodone(bp);
1728 RF_LOCK_MUTEX(raidPtr->mutex);
1729 continue;
1730 }
1731 /*
1732 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
1733 */
1734
1735 if (bp->b_bcount & raidPtr->sectorMask) {
1736 bp->b_error = EINVAL;
1737 bp->b_flags |= B_ERROR;
1738 bp->b_resid = bp->b_bcount;
1739 biodone(bp);
1740 RF_LOCK_MUTEX(raidPtr->mutex);
1741 continue;
1742
1743 }
1744 db1_printf(("Calling DoAccess..\n"));
1745
1746
1747 RF_LOCK_MUTEX(raidPtr->mutex);
1748 raidPtr->openings--;
1749 RF_UNLOCK_MUTEX(raidPtr->mutex);
1750
1751 /*
1752 * Everything is async.
1753 */
1754 do_async = 1;
1755
1756 disk_busy(&rs->sc_dkdev);
1757
1758 /* XXX we're still at splbio() here... do we *really*
1759 need to be? */
1760
1761 /* don't ever condition on bp->b_flags & B_WRITE.
1762 * always condition on B_READ instead */
1763
1764 bp->b_error = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
1765 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
1766 do_async, raid_addr, num_blocks,
1767 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
1768
1769 if (bp->b_error) {
1770 bp->b_flags |= B_ERROR;
1771 }
1772
1773 RF_LOCK_MUTEX(raidPtr->mutex);
1774 }
1775 RF_UNLOCK_MUTEX(raidPtr->mutex);
1776 }
1777
1778
1779
1780
1781 /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
1782
1783 int
1784 rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
1785 {
1786 int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
1787 struct buf *bp;
1788 struct raidbuf *raidbp = NULL;
1789
1790 req->queue = queue;
1791
1792 #if DIAGNOSTIC
1793 if (queue->raidPtr->raidid >= numraid) {
1794 printf("Invalid unit number: %d %d\n", queue->raidPtr->raidid,
1795 numraid);
1796 panic("Invalid Unit number in rf_DispatchKernelIO");
1797 }
1798 #endif
1799
1800 bp = req->bp;
1801 #if 1
1802 /* XXX when there is a physical disk failure, someone is passing us a
1803 * buffer that contains old stuff!! Attempt to deal with this problem
1804 * without taking a performance hit... (not sure where the real bug
1805 * is. It's buried in RAIDframe somewhere) :-( GO ) */
1806
1807 if (bp->b_flags & B_ERROR) {
1808 bp->b_flags &= ~B_ERROR;
1809 }
1810 if (bp->b_error != 0) {
1811 bp->b_error = 0;
1812 }
1813 #endif
1814 raidbp = pool_get(&raidframe_cbufpool, PR_NOWAIT);
1815 if (raidbp == NULL) {
1816 bp->b_flags |= B_ERROR;
1817 bp->b_error = ENOMEM;
1818 return (ENOMEM);
1819 }
1820 BUF_INIT(&raidbp->rf_buf);
1821
1822 /*
1823 * context for raidiodone
1824 */
1825 raidbp->rf_obp = bp;
1826 raidbp->req = req;
1827
1828 switch (req->type) {
1829 case RF_IO_TYPE_NOP: /* used primarily to unlock a locked queue */
1830 /* XXX need to do something extra here.. */
1831 /* I'm leaving this in, as I've never actually seen it used,
1832 * and I'd like folks to report it... GO */
1833 printf("WAKEUP CALLED\n");
1834 queue->numOutstanding++;
1835
1836 /* XXX need to glue the original buffer into this?? */
1837
1838 KernelWakeupFunc(&raidbp->rf_buf);
1839 break;
1840
1841 case RF_IO_TYPE_READ:
1842 case RF_IO_TYPE_WRITE:
1843
1844 if (req->tracerec) {
1845 RF_ETIMER_START(req->tracerec->timer);
1846 }
1847 InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
1848 op | bp->b_flags, queue->rf_cinfo->ci_dev,
1849 req->sectorOffset, req->numSector,
1850 req->buf, KernelWakeupFunc, (void *) req,
1851 queue->raidPtr->logBytesPerSector, req->b_proc);
1852
1853 if (rf_debugKernelAccess) {
1854 db1_printf(("dispatch: bp->b_blkno = %ld\n",
1855 (long) bp->b_blkno));
1856 }
1857 queue->numOutstanding++;
1858 queue->last_deq_sector = req->sectorOffset;
1859 /* acc wouldn't have been let in if there were any pending
1860 * reqs at any other priority */
1861 queue->curPriority = req->priority;
1862
1863 db1_printf(("Going for %c to unit %d col %d\n",
1864 req->type, queue->raidPtr->raidid,
1865 queue->col));
1866 db1_printf(("sector %d count %d (%d bytes) %d\n",
1867 (int) req->sectorOffset, (int) req->numSector,
1868 (int) (req->numSector <<
1869 queue->raidPtr->logBytesPerSector),
1870 (int) queue->raidPtr->logBytesPerSector));
1871 if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
1872 raidbp->rf_buf.b_vp->v_numoutput++;
1873 }
1874 VOP_STRATEGY(&raidbp->rf_buf);
1875
1876 break;
1877
1878 default:
1879 panic("bad req->type in rf_DispatchKernelIO");
1880 }
1881 db1_printf(("Exiting from DispatchKernelIO\n"));
1882
1883 return (0);
1884 }
1885 /* this is the callback function associated with an I/O invoked from
1886 kernel code.
1887 */
1888 static void
1889 KernelWakeupFunc(struct buf *vbp)
1890 {
1891 RF_DiskQueueData_t *req = NULL;
1892 RF_DiskQueue_t *queue;
1893 struct raidbuf *raidbp = (struct raidbuf *) vbp;
1894 struct buf *bp;
1895 int s;
1896
1897 s = splbio();
1898 db1_printf(("recovering the request queue:\n"));
1899 req = raidbp->req;
1900
1901 bp = raidbp->rf_obp;
1902
1903 queue = (RF_DiskQueue_t *) req->queue;
1904
1905 if (raidbp->rf_buf.b_flags & B_ERROR) {
1906 bp->b_flags |= B_ERROR;
1907 bp->b_error = raidbp->rf_buf.b_error ?
1908 raidbp->rf_buf.b_error : EIO;
1909 }
1910
1911 /* XXX methinks this could be wrong... */
1912 #if 1
1913 bp->b_resid = raidbp->rf_buf.b_resid;
1914 #endif
1915
1916 if (req->tracerec) {
1917 RF_ETIMER_STOP(req->tracerec->timer);
1918 RF_ETIMER_EVAL(req->tracerec->timer);
1919 RF_LOCK_MUTEX(rf_tracing_mutex);
1920 req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1921 req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1922 req->tracerec->num_phys_ios++;
1923 RF_UNLOCK_MUTEX(rf_tracing_mutex);
1924 }
1925 bp->b_bcount = raidbp->rf_buf.b_bcount; /* XXXX ?? */
1926
1927 /* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
1928 * ballistic, and mark the component as hosed... */
1929
1930 if (bp->b_flags & B_ERROR) {
1931 /* Mark the disk as dead */
1932 /* but only mark it once... */
1933 if (queue->raidPtr->Disks[queue->col].status ==
1934 rf_ds_optimal) {
1935 printf("raid%d: IO Error. Marking %s as failed.\n",
1936 queue->raidPtr->raidid,
1937 queue->raidPtr->Disks[queue->col].devname);
1938 queue->raidPtr->Disks[queue->col].status =
1939 rf_ds_failed;
1940 queue->raidPtr->status = rf_rs_degraded;
1941 queue->raidPtr->numFailures++;
1942 queue->raidPtr->numNewFailures++;
1943 } else { /* Disk is already dead... */
1944 /* printf("Disk already marked as dead!\n"); */
1945 }
1946
1947 }
1948
1949 pool_put(&raidframe_cbufpool, raidbp);
1950
1951 /* Fill in the error value */
1952
1953 req->error = (bp->b_flags & B_ERROR) ? bp->b_error : 0;
1954
1955 simple_lock(&queue->raidPtr->iodone_lock);
1956
1957 /* Drop this one on the "finished" queue... */
1958 TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
1959
1960 /* Let the raidio thread know there is work to be done. */
1961 wakeup(&(queue->raidPtr->iodone));
1962
1963 simple_unlock(&queue->raidPtr->iodone_lock);
1964
1965 splx(s);
1966 }
1967
1968
1969
1970 /*
1971 * initialize a buf structure for doing an I/O in the kernel.
1972 */
1973 static void
1974 InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
1975 RF_SectorNum_t startSect, RF_SectorCount_t numSect, caddr_t buf,
1976 void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
1977 struct proc *b_proc)
1978 {
1979 /* bp->b_flags = B_PHYS | rw_flag; */
1980 bp->b_flags = B_CALL | rw_flag; /* XXX need B_PHYS here too??? */
1981 bp->b_bcount = numSect << logBytesPerSector;
1982 bp->b_bufsize = bp->b_bcount;
1983 bp->b_error = 0;
1984 bp->b_dev = dev;
1985 bp->b_data = buf;
1986 bp->b_blkno = startSect;
1987 bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
1988 if (bp->b_bcount == 0) {
1989 panic("bp->b_bcount is zero in InitBP!!");
1990 }
1991 bp->b_proc = b_proc;
1992 bp->b_iodone = cbFunc;
1993 bp->b_vp = b_vp;
1994
1995 }
1996
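/*
 * Fabricate a default disklabel that describes the whole RAID set,
 * for use when no real label can be read from the device.
 */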
1997 static void
1998 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
1999 struct disklabel *lp)
2000 {
2001 memset(lp, 0, sizeof(*lp));
2002
2003 /* fabricate a label... */
2004 lp->d_secperunit = raidPtr->totalSectors;
2005 lp->d_secsize = raidPtr->bytesPerSector;
2006 lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
2007 lp->d_ntracks = 4 * raidPtr->numCol;
2008 lp->d_ncylinders = raidPtr->totalSectors /
2009 (lp->d_nsectors * lp->d_ntracks);
2010 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
2011
2012 strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
2013 lp->d_type = DTYPE_RAID;
2014 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
2015 lp->d_rpm = 3600;
2016 lp->d_interleave = 1;
2017 lp->d_flags = 0;
2018
2019 lp->d_partitions[RAW_PART].p_offset = 0;
2020 lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
2021 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
2022 lp->d_npartitions = RAW_PART + 1;
2023
2024 lp->d_magic = DISKMAGIC;
2025 lp->d_magic2 = DISKMAGIC;
2026 lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
2027
2028 }
2029 /*
2030 * Read the disklabel from the raid device. If one is not present, fake one
2031 * up.
2032 */
2033 static void
2034 raidgetdisklabel(dev_t dev)
2035 {
2036 int unit = raidunit(dev);
2037 struct raid_softc *rs = &raid_softc[unit];
2038 const char *errstring;
2039 struct disklabel *lp = rs->sc_dkdev.dk_label;
2040 struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
2041 RF_Raid_t *raidPtr;
2042
2043 db1_printf(("Getting the disklabel...\n"));
2044
2045 memset(clp, 0, sizeof(*clp));
2046
2047 raidPtr = raidPtrs[unit];
2048
2049 raidgetdefaultlabel(raidPtr, rs, lp);
2050
2051 /*
2052 * Call the generic disklabel extraction routine.
2053 */
2054 errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
2055 rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
2056 if (errstring)
2057 raidmakedisklabel(rs);
2058 else {
2059 int i;
2060 struct partition *pp;
2061
2062 /*
2063 * Sanity check whether the found disklabel is valid.
2064 *
2065 		 * This is necessary since the total size of the raid device
2066 		 * may vary when an interleave is changed even though exactly
2067 		 * the same components are used, and an old disklabel may be
2068 		 * used if one is found.
2069 */
2070 if (lp->d_secperunit != rs->sc_size)
2071 printf("raid%d: WARNING: %s: "
2072 "total sector size in disklabel (%d) != "
2073 "the size of raid (%ld)\n", unit, rs->sc_xname,
2074 lp->d_secperunit, (long) rs->sc_size);
2075 for (i = 0; i < lp->d_npartitions; i++) {
2076 pp = &lp->d_partitions[i];
2077 if (pp->p_offset + pp->p_size > rs->sc_size)
2078 printf("raid%d: WARNING: %s: end of partition `%c' "
2079 "exceeds the size of raid (%ld)\n",
2080 unit, rs->sc_xname, 'a' + i, (long) rs->sc_size);
2081 }
2082 }
2083
2084 }
2085 /*
2086 * Take care of things one might want to take care of in the event
2087 * that a disklabel isn't present.
2088 */
2089 static void
2090 raidmakedisklabel(struct raid_softc *rs)
2091 {
2092 struct disklabel *lp = rs->sc_dkdev.dk_label;
2093 db1_printf(("Making a label..\n"));
2094
2095 /*
2096 * For historical reasons, if there's no disklabel present
2097 * the raw partition must be marked FS_BSDFFS.
2098 */
2099
2100 lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
2101
2102 strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
2103
2104 lp->d_checksum = dkcksum(lp);
2105 }
2106 /*
2107 * Lookup the provided name in the filesystem. If the file exists,
2108 * is a valid block device, and isn't being used by anyone else,
2109 * set *vpp to the file's vnode.
2110 * You'll find the original of this in ccd.c
2111 */
2112 int
2113 raidlookup(char *path, struct proc *p, struct vnode **vpp)
2114 {
2115 struct nameidata nd;
2116 struct vnode *vp;
2117 struct vattr va;
2118 int error;
2119
2120 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
2121 if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
2122 return (error);
2123 }
2124 vp = nd.ni_vp;
2125 if (vp->v_usecount > 1) {
2126 VOP_UNLOCK(vp, 0);
2127 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2128 return (EBUSY);
2129 }
2130 if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
2131 VOP_UNLOCK(vp, 0);
2132 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2133 return (error);
2134 }
2135 /* XXX: eventually we should handle VREG, too. */
2136 if (va.va_type != VBLK) {
2137 VOP_UNLOCK(vp, 0);
2138 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2139 return (ENOTBLK);
2140 }
2141 VOP_UNLOCK(vp, 0);
2142 *vpp = vp;
2143 return (0);
2144 }
2145 /*
2146 * Wait interruptibly for an exclusive lock.
2147 *
2148 * XXX
2149 * Several drivers do this; it should be abstracted and made MP-safe.
2150 * (Hmm... where have we seen this warning before :-> GO )
2151 */
2152 static int
2153 raidlock(struct raid_softc *rs)
2154 {
2155 int error;
2156
2157 while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2158 rs->sc_flags |= RAIDF_WANTED;
2159 if ((error =
2160 tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
2161 return (error);
2162 }
2163 rs->sc_flags |= RAIDF_LOCKED;
2164 return (0);
2165 }
2166 /*
2167 * Unlock and wake up any waiters.
2168 */
2169 static void
2170 raidunlock(struct raid_softc *rs)
2171 {
2172
2173 rs->sc_flags &= ~RAIDF_LOCKED;
2174 if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2175 rs->sc_flags &= ~RAIDF_WANTED;
2176 wakeup(rs);
2177 }
2178 }
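/*
 * Callers typically bracket softc manipulation roughly like this:
 *
 *	if ((error = raidlock(rs)) != 0)
 *		return (error);
 *	... fiddle with the softc ...
 *	raidunlock(rs);
 */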
2179
2180
2181 #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2182 #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
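/*
 * The component label is kept RF_COMPONENT_INFO_OFFSET (16384) bytes
 * into each component; with a DEV_BSIZE of 512 that works out to
 * block 16384 / 512 = 32, and the label itself occupies
 * RF_COMPONENT_INFO_SIZE (1024) bytes there.
 */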
2183
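/*
 * raidmarkclean() and raidmarkdirty() rewrite the component label on
 * the given component with the supplied mod_counter and with the clean
 * bit set or cleared, respectively.
 */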
2184 int
2185 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
2186 {
2187 RF_ComponentLabel_t clabel;
2188 raidread_component_label(dev, b_vp, &clabel);
2189 clabel.mod_counter = mod_counter;
2190 clabel.clean = RF_RAID_CLEAN;
2191 raidwrite_component_label(dev, b_vp, &clabel);
2192 return(0);
2193 }
2194
2195
2196 int
2197 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
2198 {
2199 RF_ComponentLabel_t clabel;
2200 raidread_component_label(dev, b_vp, &clabel);
2201 clabel.mod_counter = mod_counter;
2202 clabel.clean = RF_RAID_DIRTY;
2203 raidwrite_component_label(dev, b_vp, &clabel);
2204 return(0);
2205 }
2206
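/*
 * Read or write the component label using a temporary buffer from
 * geteblk() and the component's block device strategy routine.
 */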
2207 /* ARGSUSED */
2208 int
2209 raidread_component_label(dev_t dev, struct vnode *b_vp,
2210 RF_ComponentLabel_t *clabel)
2211 {
2212 struct buf *bp;
2213 const struct bdevsw *bdev;
2214 int error;
2215
2216 /* XXX should probably ensure that we don't try to do this if
2217 someone has changed rf_protected_sectors. */
2218
2219 if (b_vp == NULL) {
2220 /* For whatever reason, this component is not valid.
2221 Don't try to read a component label from it. */
2222 return(EINVAL);
2223 }
2224
2225 /* get a block of the appropriate size... */
2226 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2227 bp->b_dev = dev;
2228
2229 /* get our ducks in a row for the read */
2230 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2231 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2232 bp->b_flags |= B_READ;
2233 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2234
2235 bdev = bdevsw_lookup(bp->b_dev);
2236 if (bdev == NULL)
2237 return (ENXIO);
2238 (*bdev->d_strategy)(bp);
2239
2240 error = biowait(bp);
2241
2242 if (!error) {
2243 memcpy(clabel, bp->b_data,
2244 sizeof(RF_ComponentLabel_t));
2245 }
2246
2247 brelse(bp);
2248 return(error);
2249 }
2250 /* ARGSUSED */
2251 int
2252 raidwrite_component_label(dev_t dev, struct vnode *b_vp,
2253 RF_ComponentLabel_t *clabel)
2254 {
2255 struct buf *bp;
2256 const struct bdevsw *bdev;
2257 int error;
2258
2259 /* get a block of the appropriate size... */
2260 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2261 bp->b_dev = dev;
2262
2263 /* get our ducks in a row for the write */
2264 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2265 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2266 bp->b_flags |= B_WRITE;
2267 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2268
2269 memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
2270
2271 memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
2272
2273 bdev = bdevsw_lookup(bp->b_dev);
2274 if (bdev == NULL)
2275 return (ENXIO);
2276 (*bdev->d_strategy)(bp);
2277 error = biowait(bp);
2278 brelse(bp);
2279 if (error) {
2280 #if 1
2281 printf("Failed to write RAID component info!\n");
2282 #endif
2283 }
2284
2285 return(error);
2286 }
2287
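/*
 * Bump the mod counter and mark the component labels on every
 * non-failed component (and any in-use spares) as dirty.
 */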
2288 void
2289 rf_markalldirty(RF_Raid_t *raidPtr)
2290 {
2291 RF_ComponentLabel_t clabel;
2292 int sparecol;
2293 int c;
2294 int j;
2295 int scol = -1;
2296
2297 raidPtr->mod_counter++;
2298 for (c = 0; c < raidPtr->numCol; c++) {
2299 /* we don't want to touch (at all) a disk that has
2300 failed */
2301 if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
2302 raidread_component_label(
2303 raidPtr->Disks[c].dev,
2304 raidPtr->raid_cinfo[c].ci_vp,
2305 &clabel);
2306 if (clabel.status == rf_ds_spared) {
2307 /* XXX do something special...
2308 but whatever you do, don't
2309 try to access it!! */
2310 } else {
2311 raidmarkdirty(
2312 raidPtr->Disks[c].dev,
2313 raidPtr->raid_cinfo[c].ci_vp,
2314 raidPtr->mod_counter);
2315 }
2316 }
2317 }
2318
2319 for( c = 0; c < raidPtr->numSpare ; c++) {
2320 sparecol = raidPtr->numCol + c;
2321 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2322 /*
2323
2324 we claim this disk is "optimal" if it's
2325 rf_ds_used_spare, as that means it should be
2326 directly substitutable for the disk it replaced.
2327 We note that too...
2328
2329 */
2330
2331 for(j=0;j<raidPtr->numCol;j++) {
2332 if (raidPtr->Disks[j].spareCol == sparecol) {
2333 scol = j;
2334 break;
2335 }
2336 }
2337
2338 raidread_component_label(
2339 raidPtr->Disks[sparecol].dev,
2340 raidPtr->raid_cinfo[sparecol].ci_vp,
2341 &clabel);
2342 /* make sure status is noted */
2343
2344 raid_init_component_label(raidPtr, &clabel);
2345
2346 clabel.row = 0;
2347 clabel.column = scol;
2348 /* Note: we *don't* change status from rf_ds_used_spare
2349 to rf_ds_optimal */
2350 /* clabel.status = rf_ds_optimal; */
2351
2352 raidmarkdirty(raidPtr->Disks[sparecol].dev,
2353 raidPtr->raid_cinfo[sparecol].ci_vp,
2354 raidPtr->mod_counter);
2355 }
2356 }
2357 }
2358
2359
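/*
 * Rewrite the component labels on all optimal components and in-use
 * spares with the current mod counter; on a final update with good
 * parity, also set the clean bit via raidmarkclean().
 */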
2360 void
2361 rf_update_component_labels(RF_Raid_t *raidPtr, int final)
2362 {
2363 RF_ComponentLabel_t clabel;
2364 int sparecol;
2365 int c;
2366 int j;
2367 int scol;
2368
2369 scol = -1;
2370
2371 /* XXX should do extra checks to make sure things really are clean,
2372 rather than blindly setting the clean bit... */
2373
2374 raidPtr->mod_counter++;
2375
2376 for (c = 0; c < raidPtr->numCol; c++) {
2377 if (raidPtr->Disks[c].status == rf_ds_optimal) {
2378 raidread_component_label(
2379 raidPtr->Disks[c].dev,
2380 raidPtr->raid_cinfo[c].ci_vp,
2381 &clabel);
2382 /* make sure status is noted */
2383 clabel.status = rf_ds_optimal;
2384 /* bump the counter */
2385 clabel.mod_counter = raidPtr->mod_counter;
2386
2387 raidwrite_component_label(
2388 raidPtr->Disks[c].dev,
2389 raidPtr->raid_cinfo[c].ci_vp,
2390 &clabel);
2391 if (final == RF_FINAL_COMPONENT_UPDATE) {
2392 if (raidPtr->parity_good == RF_RAID_CLEAN) {
2393 raidmarkclean(
2394 raidPtr->Disks[c].dev,
2395 raidPtr->raid_cinfo[c].ci_vp,
2396 raidPtr->mod_counter);
2397 }
2398 }
2399 }
2400 /* else we don't touch it.. */
2401 }
2402
2403 for( c = 0; c < raidPtr->numSpare ; c++) {
2404 sparecol = raidPtr->numCol + c;
2405 /* Need to ensure that the reconstruct actually completed! */
2406 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2407 /*
2408
2409 we claim this disk is "optimal" if it's
2410 rf_ds_used_spare, as that means it should be
2411 directly substitutable for the disk it replaced.
2412 We note that too...
2413
2414 */
2415
2416 for(j=0;j<raidPtr->numCol;j++) {
2417 if (raidPtr->Disks[j].spareCol == sparecol) {
2418 scol = j;
2419 break;
2420 }
2421 }
2422
2423 /* XXX shouldn't *really* need this... */
2424 raidread_component_label(
2425 raidPtr->Disks[sparecol].dev,
2426 raidPtr->raid_cinfo[sparecol].ci_vp,
2427 &clabel);
2428 /* make sure status is noted */
2429
2430 raid_init_component_label(raidPtr, &clabel);
2431
2432 clabel.mod_counter = raidPtr->mod_counter;
2433 clabel.column = scol;
2434 clabel.status = rf_ds_optimal;
2435
2436 raidwrite_component_label(
2437 raidPtr->Disks[sparecol].dev,
2438 raidPtr->raid_cinfo[sparecol].ci_vp,
2439 &clabel);
2440 if (final == RF_FINAL_COMPONENT_UPDATE) {
2441 if (raidPtr->parity_good == RF_RAID_CLEAN) {
2442 raidmarkclean( raidPtr->Disks[sparecol].dev,
2443 raidPtr->raid_cinfo[sparecol].ci_vp,
2444 raidPtr->mod_counter);
2445 }
2446 }
2447 }
2448 }
2449 }
2450
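/*
 * Close the vnode for a component.  Auto-configured components were
 * opened with NOCRED at autoconfiguration time and are closed the same
 * way; manually configured ones are closed with the engine thread's
 * credentials.
 */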
2451 void
2452 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
2453 {
2454 struct proc *p;
2455
2456 p = raidPtr->engine_thread;
2457
2458 if (vp != NULL) {
2459 if (auto_configured == 1) {
2460 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2461 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2462 vput(vp);
2463
2464 } else {
2465 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2466 }
2467 }
2468 }
2469
2470
2471 void
2472 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
2473 {
2474 int r,c;
2475 struct vnode *vp;
2476 int acd;
2477
2478
2479 /* We take this opportunity to close the vnodes like we should.. */
2480
2481 for (c = 0; c < raidPtr->numCol; c++) {
2482 vp = raidPtr->raid_cinfo[c].ci_vp;
2483 acd = raidPtr->Disks[c].auto_configured;
2484 rf_close_component(raidPtr, vp, acd);
2485 raidPtr->raid_cinfo[c].ci_vp = NULL;
2486 raidPtr->Disks[c].auto_configured = 0;
2487 }
2488
2489 for (r = 0; r < raidPtr->numSpare; r++) {
2490 vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
2491 acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
2492 rf_close_component(raidPtr, vp, acd);
2493 raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
2494 raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
2495 }
2496 }
2497
2498
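/*
 * The following functions are the bodies of kernel threads spawned to
 * handle reconstruction, parity rewriting, copyback, and in-place
 * reconstruction without blocking the caller.  Each does its work at
 * splbio(), clears the corresponding *_in_progress flag, and exits via
 * kthread_exit().
 */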
2499 void
2500 rf_ReconThread(struct rf_recon_req *req)
2501 {
2502 int s;
2503 RF_Raid_t *raidPtr;
2504
2505 s = splbio();
2506 raidPtr = (RF_Raid_t *) req->raidPtr;
2507 raidPtr->recon_in_progress = 1;
2508
2509 rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
2510 ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2511
2512 RF_Free(req, sizeof(*req));
2513
2514 raidPtr->recon_in_progress = 0;
2515 splx(s);
2516
2517 /* That's all... */
2518 kthread_exit(0); /* does not return */
2519 }
2520
2521 void
2522 rf_RewriteParityThread(RF_Raid_t *raidPtr)
2523 {
2524 int retcode;
2525 int s;
2526
2527 raidPtr->parity_rewrite_in_progress = 1;
2528 s = splbio();
2529 retcode = rf_RewriteParity(raidPtr);
2530 splx(s);
2531 if (retcode) {
2532 printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
2533 } else {
2534 /* set the clean bit! If we shutdown correctly,
2535 the clean bit on each component label will get
2536 set */
2537 raidPtr->parity_good = RF_RAID_CLEAN;
2538 }
2539 raidPtr->parity_rewrite_in_progress = 0;
2540
2541 /* Anyone waiting for us to stop? If so, inform them... */
2542 if (raidPtr->waitShutdown) {
2543 wakeup(&raidPtr->parity_rewrite_in_progress);
2544 }
2545
2546 /* That's all... */
2547 kthread_exit(0); /* does not return */
2548 }
2549
2550
2551 void
2552 rf_CopybackThread(RF_Raid_t *raidPtr)
2553 {
2554 int s;
2555
2556 raidPtr->copyback_in_progress = 1;
2557 s = splbio();
2558 rf_CopybackReconstructedData(raidPtr);
2559 splx(s);
2560 raidPtr->copyback_in_progress = 0;
2561
2562 /* That's all... */
2563 kthread_exit(0); /* does not return */
2564 }
2565
2566
2567 void
2568 rf_ReconstructInPlaceThread(struct rf_recon_req *req)
2569 {
2570 int s;
2571 RF_Raid_t *raidPtr;
2572
2573 s = splbio();
2574 raidPtr = req->raidPtr;
2575 raidPtr->recon_in_progress = 1;
2576 rf_ReconstructInPlace(raidPtr, req->col);
2577 RF_Free(req, sizeof(*req));
2578 raidPtr->recon_in_progress = 0;
2579 splx(s);
2580
2581 /* That's all... */
2582 kthread_exit(0); /* does not return */
2583 }
2584
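/*
 * Walk all disk devices in the system, read their disklabels, and for
 * every FS_RAID partition attempt to read a component label.  Labels
 * that look reasonable are collected into an RF_AutoConfig_t list for
 * later sorting into configuration sets.
 */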
2585 RF_AutoConfig_t *
2586 rf_find_raid_components(void)
2587 {
2588 struct vnode *vp;
2589 struct disklabel label;
2590 struct device *dv;
2591 dev_t dev;
2592 int bmajor;
2593 int error;
2594 int i;
2595 int good_one;
2596 RF_ComponentLabel_t *clabel;
2597 RF_AutoConfig_t *ac_list;
2598 RF_AutoConfig_t *ac;
2599
2600
2601 /* initialize the AutoConfig list */
2602 ac_list = NULL;
2603
2604 /* we begin by trolling through *all* the devices on the system */
2605
2606 for (dv = alldevs.tqh_first; dv != NULL;
2607 dv = dv->dv_list.tqe_next) {
2608
2609 /* we are only interested in disks... */
2610 if (dv->dv_class != DV_DISK)
2611 continue;
2612
2613 /* we don't care about floppies... */
2614 if (!strcmp(dv->dv_cfdata->cf_name,"fd")) {
2615 continue;
2616 }
2617
2618 /* we don't care about CD's... */
2619 if (!strcmp(dv->dv_cfdata->cf_name,"cd")) {
2620 continue;
2621 }
2622
2623 /* hdfd is the Atari/Hades floppy driver */
2624 if (!strcmp(dv->dv_cfdata->cf_name,"hdfd")) {
2625 continue;
2626 }
2627 /* fdisa is the Atari/Milan floppy driver */
2628 if (!strcmp(dv->dv_cfdata->cf_name,"fdisa")) {
2629 continue;
2630 }
2631
2632 /* need to find the device_name_to_block_device_major stuff */
2633 bmajor = devsw_name2blk(dv->dv_xname, NULL, 0);
2634
2635 /* get a vnode for the raw partition of this disk */
2636
2637 dev = MAKEDISKDEV(bmajor, dv->dv_unit, RAW_PART);
2638 if (bdevvp(dev, &vp))
2639 panic("RAID can't alloc vnode");
2640
2641 error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2642
2643 if (error) {
2644 /* "Who cares." Continue looking
2645 			   for something that exists */
2646 vput(vp);
2647 continue;
2648 }
2649
2650 /* Ok, the disk exists. Go get the disklabel. */
2651 error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED, 0);
2652 if (error) {
2653 /*
2654 * XXX can't happen - open() would
2655 * have errored out (or faked up one)
2656 */
2657 printf("can't get label for dev %s%c (%d)!?!?\n",
2658 dv->dv_xname, 'a' + RAW_PART, error);
2659 }
2660
2661 /* don't need this any more. We'll allocate it again
2662 a little later if we really do... */
2663 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2664 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2665 vput(vp);
2666
2667 for (i=0; i < label.d_npartitions; i++) {
2668 /* We only support partitions marked as RAID */
2669 if (label.d_partitions[i].p_fstype != FS_RAID)
2670 continue;
2671
2672 dev = MAKEDISKDEV(bmajor, dv->dv_unit, i);
2673 if (bdevvp(dev, &vp))
2674 panic("RAID can't alloc vnode");
2675
2676 error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2677 if (error) {
2678 /* Whatever... */
2679 vput(vp);
2680 continue;
2681 }
2682
2683 good_one = 0;
2684
2685 clabel = (RF_ComponentLabel_t *)
2686 malloc(sizeof(RF_ComponentLabel_t),
2687 M_RAIDFRAME, M_NOWAIT);
2688 if (clabel == NULL) {
2689 /* XXX CLEANUP HERE */
2690 printf("RAID auto config: out of memory!\n");
2691 return(NULL); /* XXX probably should panic? */
2692 }
2693
2694 if (!raidread_component_label(dev, vp, clabel)) {
2695 /* Got the label. Does it look reasonable? */
2696 if (rf_reasonable_label(clabel) &&
2697 (clabel->partitionSize <=
2698 label.d_partitions[i].p_size)) {
2699 #if DEBUG
2700 printf("Component on: %s%c: %d\n",
2701 dv->dv_xname, 'a'+i,
2702 label.d_partitions[i].p_size);
2703 rf_print_component_label(clabel);
2704 #endif
2705 /* if it's reasonable, add it,
2706 else ignore it. */
2707 ac = (RF_AutoConfig_t *)
2708 malloc(sizeof(RF_AutoConfig_t),
2709 M_RAIDFRAME,
2710 M_NOWAIT);
2711 if (ac == NULL) {
2712 /* XXX should panic?? */
2713 return(NULL);
2714 }
2715
2716 sprintf(ac->devname, "%s%c",
2717 dv->dv_xname, 'a'+i);
2718 ac->dev = dev;
2719 ac->vp = vp;
2720 ac->clabel = clabel;
2721 ac->next = ac_list;
2722 ac_list = ac;
2723 good_one = 1;
2724 }
2725 }
2726 if (!good_one) {
2727 /* cleanup */
2728 free(clabel, M_RAIDFRAME);
2729 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2730 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2731 vput(vp);
2732 }
2733 }
2734 }
2735 return(ac_list);
2736 }
2737
2738 static int
2739 rf_reasonable_label(RF_ComponentLabel_t *clabel)
2740 {
2741
2742 if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
2743 (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
2744 ((clabel->clean == RF_RAID_CLEAN) ||
2745 (clabel->clean == RF_RAID_DIRTY)) &&
2746 clabel->row >=0 &&
2747 clabel->column >= 0 &&
2748 clabel->num_rows > 0 &&
2749 clabel->num_columns > 0 &&
2750 clabel->row < clabel->num_rows &&
2751 clabel->column < clabel->num_columns &&
2752 clabel->blockSize > 0 &&
2753 clabel->numBlocks > 0) {
2754 /* label looks reasonable enough... */
2755 return(1);
2756 }
2757 return(0);
2758 }
2759
2760
2761 #if DEBUG
2762 void
2763 rf_print_component_label(RF_ComponentLabel_t *clabel)
2764 {
2765 printf(" Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
2766 clabel->row, clabel->column,
2767 clabel->num_rows, clabel->num_columns);
2768 printf(" Version: %d Serial Number: %d Mod Counter: %d\n",
2769 clabel->version, clabel->serial_number,
2770 clabel->mod_counter);
2771 printf(" Clean: %s Status: %d\n",
2772 clabel->clean ? "Yes" : "No", clabel->status );
2773 printf(" sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
2774 clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
2775 printf(" RAID Level: %c blocksize: %d numBlocks: %d\n",
2776 (char) clabel->parityConfig, clabel->blockSize,
2777 clabel->numBlocks);
2778 printf(" Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
2779 printf(" Contains root partition: %s\n",
2780 clabel->root_partition ? "Yes" : "No" );
2781 printf(" Last configured as: raid%d\n", clabel->last_unit );
2782 #if 0
2783 printf(" Config order: %d\n", clabel->config_order);
2784 #endif
2785
2786 }
2787 #endif
2788
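/*
 * Sort the flat list of auto-detected components into configuration
 * sets: each component is added to the first existing set it fits
 * (as judged by rf_does_it_fit()), or a new set is created for it.
 */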
2789 RF_ConfigSet_t *
2790 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
2791 {
2792 RF_AutoConfig_t *ac;
2793 RF_ConfigSet_t *config_sets;
2794 RF_ConfigSet_t *cset;
2795 RF_AutoConfig_t *ac_next;
2796
2797
2798 config_sets = NULL;
2799
2800 /* Go through the AutoConfig list, and figure out which components
2801 belong to what sets. */
2802 ac = ac_list;
2803 while(ac!=NULL) {
2804 /* we're going to putz with ac->next, so save it here
2805 for use at the end of the loop */
2806 ac_next = ac->next;
2807
2808 if (config_sets == NULL) {
2809 /* will need at least this one... */
2810 config_sets = (RF_ConfigSet_t *)
2811 malloc(sizeof(RF_ConfigSet_t),
2812 M_RAIDFRAME, M_NOWAIT);
2813 if (config_sets == NULL) {
2814 panic("rf_create_auto_sets: No memory!");
2815 }
2816 /* this one is easy :) */
2817 config_sets->ac = ac;
2818 config_sets->next = NULL;
2819 config_sets->rootable = 0;
2820 ac->next = NULL;
2821 } else {
2822 /* which set does this component fit into? */
2823 cset = config_sets;
2824 while(cset!=NULL) {
2825 if (rf_does_it_fit(cset, ac)) {
2826 /* looks like it matches... */
2827 ac->next = cset->ac;
2828 cset->ac = ac;
2829 break;
2830 }
2831 cset = cset->next;
2832 }
2833 if (cset==NULL) {
2834 /* didn't find a match above... new set..*/
2835 cset = (RF_ConfigSet_t *)
2836 malloc(sizeof(RF_ConfigSet_t),
2837 M_RAIDFRAME, M_NOWAIT);
2838 if (cset == NULL) {
2839 panic("rf_create_auto_sets: No memory!");
2840 }
2841 cset->ac = ac;
2842 ac->next = NULL;
2843 cset->next = config_sets;
2844 cset->rootable = 0;
2845 config_sets = cset;
2846 }
2847 }
2848 ac = ac_next;
2849 }
2850
2851
2852 return(config_sets);
2853 }
2854
2855 static int
2856 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
2857 {
2858 RF_ComponentLabel_t *clabel1, *clabel2;
2859
2860 /* If this one matches the *first* one in the set, that's good
2861 enough, since the other members of the set would have been
2862 through here too... */
2863 /* note that we are not checking partitionSize here..
2864
2865 Note that we are also not checking the mod_counters here.
2866 	   If everything else matches except the mod_counter, that's
2867 good enough for this test. We will deal with the mod_counters
2868 a little later in the autoconfiguration process.
2869
2870 (clabel1->mod_counter == clabel2->mod_counter) &&
2871
2872 The reason we don't check for this is that failed disks
2873 will have lower modification counts. If those disks are
2874 not added to the set they used to belong to, then they will
2875 form their own set, which may result in 2 different sets,
2876 for example, competing to be configured at raid0, and
2877 perhaps competing to be the root filesystem set. If the
2878 wrong ones get configured, or both attempt to become /,
2879 	   weird behaviour and/or serious lossage will occur. Thus we
2880 need to bring them into the fold here, and kick them out at
2881 a later point.
2882
2883 */
2884
2885 clabel1 = cset->ac->clabel;
2886 clabel2 = ac->clabel;
2887 if ((clabel1->version == clabel2->version) &&
2888 (clabel1->serial_number == clabel2->serial_number) &&
2889 (clabel1->num_rows == clabel2->num_rows) &&
2890 (clabel1->num_columns == clabel2->num_columns) &&
2891 (clabel1->sectPerSU == clabel2->sectPerSU) &&
2892 (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
2893 (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
2894 (clabel1->parityConfig == clabel2->parityConfig) &&
2895 (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
2896 (clabel1->blockSize == clabel2->blockSize) &&
2897 (clabel1->numBlocks == clabel2->numBlocks) &&
2898 (clabel1->autoconfigure == clabel2->autoconfigure) &&
2899 (clabel1->root_partition == clabel2->root_partition) &&
2900 (clabel1->last_unit == clabel2->last_unit) &&
2901 (clabel1->config_order == clabel2->config_order)) {
2902 		/* if it gets here, it almost *has* to be a match */
2903 } else {
2904 /* it's not consistent with somebody in the set..
2905 punt */
2906 return(0);
2907 }
2908 /* all was fine.. it must fit... */
2909 return(1);
2910 }
2911
2912 int
2913 rf_have_enough_components(RF_ConfigSet_t *cset)
2914 {
2915 RF_AutoConfig_t *ac;
2916 RF_AutoConfig_t *auto_config;
2917 RF_ComponentLabel_t *clabel;
2918 int c;
2919 int num_cols;
2920 int num_missing;
2921 int mod_counter;
2922 int mod_counter_found;
2923 int even_pair_failed;
2924 char parity_type;
2925
2926
2927 /* check to see that we have enough 'live' components
2928 of this set. If so, we can configure it if necessary */
2929
2930 num_cols = cset->ac->clabel->num_columns;
2931 parity_type = cset->ac->clabel->parityConfig;
2932
2933 /* XXX Check for duplicate components!?!?!? */
2934
2935 /* Determine what the mod_counter is supposed to be for this set. */
2936
2937 mod_counter_found = 0;
2938 mod_counter = 0;
2939 ac = cset->ac;
2940 while(ac!=NULL) {
2941 if (mod_counter_found==0) {
2942 mod_counter = ac->clabel->mod_counter;
2943 mod_counter_found = 1;
2944 } else {
2945 if (ac->clabel->mod_counter > mod_counter) {
2946 mod_counter = ac->clabel->mod_counter;
2947 }
2948 }
2949 ac = ac->next;
2950 }
2951
2952 num_missing = 0;
2953 auto_config = cset->ac;
2954
2955 even_pair_failed = 0;
2956 for(c=0; c<num_cols; c++) {
2957 ac = auto_config;
2958 while(ac!=NULL) {
2959 if ((ac->clabel->column == c) &&
2960 (ac->clabel->mod_counter == mod_counter)) {
2961 /* it's this one... */
2962 #if DEBUG
2963 printf("Found: %s at %d\n",
2964 ac->devname,c);
2965 #endif
2966 break;
2967 }
2968 ac=ac->next;
2969 }
2970 if (ac==NULL) {
2971 /* Didn't find one here! */
2972 /* special case for RAID 1, especially
2973 where there are more than 2
2974 components (where RAIDframe treats
2975 things a little differently :( ) */
2976 if (parity_type == '1') {
2977 if (c%2 == 0) { /* even component */
2978 even_pair_failed = 1;
2979 } else { /* odd component. If
2980 we're failed, and
2981 so is the even
2982 component, it's
2983 "Good Night, Charlie" */
2984 if (even_pair_failed == 1) {
2985 return(0);
2986 }
2987 }
2988 } else {
2989 /* normal accounting */
2990 num_missing++;
2991 }
2992 }
2993 if ((parity_type == '1') && (c%2 == 1)) {
2994 			/* Just finished an even/odd pair without
2995 			   bailing.. reset the even_pair_failed flag,
2996 			   and go on to the next pair.... */
2997 even_pair_failed = 0;
2998 }
2999 }
3000
3001 clabel = cset->ac->clabel;
3002
3003 if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
3004 ((clabel->parityConfig == '4') && (num_missing > 1)) ||
3005 ((clabel->parityConfig == '5') && (num_missing > 1))) {
3006 /* XXX this needs to be made *much* more general */
3007 /* Too many failures */
3008 return(0);
3009 }
3010 /* otherwise, all is well, and we've got enough to take a kick
3011 at autoconfiguring this set */
3012 return(1);
3013 }
3014
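/*
 * Build an RF_Config_t for an auto-detected set, taking the geometry
 * from the first component label and the device names from each
 * component in the list.
 */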
3015 void
3016 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
3017 RF_Raid_t *raidPtr)
3018 {
3019 RF_ComponentLabel_t *clabel;
3020 int i;
3021
3022 clabel = ac->clabel;
3023
3024 /* 1. Fill in the common stuff */
3025 config->numRow = clabel->num_rows = 1;
3026 config->numCol = clabel->num_columns;
3027 config->numSpare = 0; /* XXX should this be set here? */
3028 config->sectPerSU = clabel->sectPerSU;
3029 config->SUsPerPU = clabel->SUsPerPU;
3030 config->SUsPerRU = clabel->SUsPerRU;
3031 config->parityConfig = clabel->parityConfig;
3032 /* XXX... */
3033 strcpy(config->diskQueueType,"fifo");
3034 config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3035 config->layoutSpecificSize = 0; /* XXX ?? */
3036
3037 while(ac!=NULL) {
3038 /* row/col values will be in range due to the checks
3039 		   in rf_reasonable_label() */
3040 strcpy(config->devnames[0][ac->clabel->column],
3041 ac->devname);
3042 ac = ac->next;
3043 }
3044
3045 for(i=0;i<RF_MAXDBGV;i++) {
3046 config->debugVars[i][0] = 0;
3047 }
3048 }
3049
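/*
 * rf_set_autoconfig() and rf_set_rootpartition() update the in-core
 * setting and then push the new value out to the component labels of
 * all optimal components and in-use spares, so the change persists
 * across reboots.
 */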
3050 int
3051 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
3052 {
3053 RF_ComponentLabel_t clabel;
3054 struct vnode *vp;
3055 dev_t dev;
3056 int column;
3057 int sparecol;
3058
3059 raidPtr->autoconfigure = new_value;
3060
3061 for(column=0; column<raidPtr->numCol; column++) {
3062 if (raidPtr->Disks[column].status == rf_ds_optimal) {
3063 dev = raidPtr->Disks[column].dev;
3064 vp = raidPtr->raid_cinfo[column].ci_vp;
3065 raidread_component_label(dev, vp, &clabel);
3066 clabel.autoconfigure = new_value;
3067 raidwrite_component_label(dev, vp, &clabel);
3068 }
3069 }
3070 for(column = 0; column < raidPtr->numSpare ; column++) {
3071 sparecol = raidPtr->numCol + column;
3072 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3073 dev = raidPtr->Disks[sparecol].dev;
3074 vp = raidPtr->raid_cinfo[sparecol].ci_vp;
3075 raidread_component_label(dev, vp, &clabel);
3076 clabel.autoconfigure = new_value;
3077 raidwrite_component_label(dev, vp, &clabel);
3078 }
3079 }
3080 return(new_value);
3081 }
3082
3083 int
3084 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
3085 {
3086 RF_ComponentLabel_t clabel;
3087 struct vnode *vp;
3088 dev_t dev;
3089 int column;
3090 int sparecol;
3091
3092 raidPtr->root_partition = new_value;
3093 for(column=0; column<raidPtr->numCol; column++) {
3094 if (raidPtr->Disks[column].status == rf_ds_optimal) {
3095 dev = raidPtr->Disks[column].dev;
3096 vp = raidPtr->raid_cinfo[column].ci_vp;
3097 raidread_component_label(dev, vp, &clabel);
3098 clabel.root_partition = new_value;
3099 raidwrite_component_label(dev, vp, &clabel);
3100 }
3101 }
3102 for(column = 0; column < raidPtr->numSpare ; column++) {
3103 sparecol = raidPtr->numCol + column;
3104 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3105 dev = raidPtr->Disks[sparecol].dev;
3106 vp = raidPtr->raid_cinfo[sparecol].ci_vp;
3107 raidread_component_label(dev, vp, &clabel);
3108 clabel.root_partition = new_value;
3109 raidwrite_component_label(dev, vp, &clabel);
3110 }
3111 }
3112 return(new_value);
3113 }
3114
3115 void
3116 rf_release_all_vps(RF_ConfigSet_t *cset)
3117 {
3118 RF_AutoConfig_t *ac;
3119
3120 ac = cset->ac;
3121 while(ac!=NULL) {
3122 /* Close the vp, and give it back */
3123 if (ac->vp) {
3124 vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3125 VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
3126 vput(ac->vp);
3127 ac->vp = NULL;
3128 }
3129 ac = ac->next;
3130 }
3131 }
3132
3133
3134 void
3135 rf_cleanup_config_set(RF_ConfigSet_t *cset)
3136 {
3137 RF_AutoConfig_t *ac;
3138 RF_AutoConfig_t *next_ac;
3139
3140 ac = cset->ac;
3141 while(ac!=NULL) {
3142 next_ac = ac->next;
3143 /* nuke the label */
3144 free(ac->clabel, M_RAIDFRAME);
3145 /* cleanup the config structure */
3146 free(ac, M_RAIDFRAME);
3147 /* "next.." */
3148 ac = next_ac;
3149 }
3150 /* and, finally, nuke the config set */
3151 free(cset, M_RAIDFRAME);
3152 }
3153
3154
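/*
 * Initialize a component label from the current in-core state of the
 * RAID set; per-component fields such as the column are filled in by
 * the callers.
 */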
3155 void
3156 raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
3157 {
3158 /* current version number */
3159 clabel->version = RF_COMPONENT_LABEL_VERSION;
3160 clabel->serial_number = raidPtr->serial_number;
3161 clabel->mod_counter = raidPtr->mod_counter;
3162 clabel->num_rows = 1;
3163 clabel->num_columns = raidPtr->numCol;
3164 clabel->clean = RF_RAID_DIRTY; /* not clean */
3165 clabel->status = rf_ds_optimal; /* "It's good!" */
3166
3167 clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
3168 clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
3169 clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
3170
3171 clabel->blockSize = raidPtr->bytesPerSector;
3172 clabel->numBlocks = raidPtr->sectorsPerDisk;
3173
3174 /* XXX not portable */
3175 clabel->parityConfig = raidPtr->Layout.map->parityConfig;
3176 clabel->maxOutstanding = raidPtr->maxOutstanding;
3177 clabel->autoconfigure = raidPtr->autoconfigure;
3178 clabel->root_partition = raidPtr->root_partition;
3179 clabel->last_unit = raidPtr->raidid;
3180 clabel->config_order = raidPtr->config_order;
3181 }
3182
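/*
 * Configure a single auto-detected set: build an RF_Config_t, pick a
 * unit number (preferring the last_unit recorded in the label), run
 * rf_Configure(), and note whether the set is eligible to contain the
 * root filesystem.
 */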
3183 int
3184 rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
3185 {
3186 RF_Raid_t *raidPtr;
3187 RF_Config_t *config;
3188 int raidID;
3189 int retcode;
3190
3191 #if DEBUG
3192 printf("RAID autoconfigure\n");
3193 #endif
3194
3195 retcode = 0;
3196 *unit = -1;
3197
3198 /* 1. Create a config structure */
3199
3200 config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
3201 M_RAIDFRAME,
3202 M_NOWAIT);
3203 if (config==NULL) {
3204 printf("Out of mem!?!?\n");
3205 /* XXX do something more intelligent here. */
3206 return(1);
3207 }
3208
3209 memset(config, 0, sizeof(RF_Config_t));
3210
3211 /*
3212 2. Figure out what RAID ID this one is supposed to live at
3213 See if we can get the same RAID dev that it was configured
3214 on last time..
3215 */
3216
3217 raidID = cset->ac->clabel->last_unit;
3218 if ((raidID < 0) || (raidID >= numraid)) {
3219 /* let's not wander off into lala land. */
3220 raidID = numraid - 1;
3221 }
3222 if (raidPtrs[raidID]->valid != 0) {
3223
3224 /*
3225 Nope... Go looking for an alternative...
3226 Start high so we don't immediately use raid0 if that's
3227 not taken.
3228 */
3229
3230 for(raidID = numraid - 1; raidID >= 0; raidID--) {
3231 if (raidPtrs[raidID]->valid == 0) {
3232 /* can use this one! */
3233 break;
3234 }
3235 }
3236 }
3237
3238 if (raidID < 0) {
3239 /* punt... */
3240 printf("Unable to auto configure this set!\n");
3241 printf("(Out of RAID devs!)\n");
3242 return(1);
3243 }
3244
3245 #if DEBUG
3246 printf("Configuring raid%d:\n",raidID);
3247 #endif
3248
3249 raidPtr = raidPtrs[raidID];
3250
3251 /* XXX all this stuff should be done SOMEWHERE ELSE! */
3252 raidPtr->raidid = raidID;
3253 raidPtr->openings = RAIDOUTSTANDING;
3254
3255 /* 3. Build the configuration structure */
3256 rf_create_configuration(cset->ac, config, raidPtr);
3257
3258 /* 4. Do the configuration */
3259 retcode = rf_Configure(raidPtr, config, cset->ac);
3260
3261 if (retcode == 0) {
3262
3263 raidinit(raidPtrs[raidID]);
3264
3265 rf_markalldirty(raidPtrs[raidID]);
3266 raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
3267 if (cset->ac->clabel->root_partition==1) {
3268 /* everything configured just fine. Make a note
3269 that this set is eligible to be root. */
3270 cset->rootable = 1;
3271 /* XXX do this here? */
3272 raidPtrs[raidID]->root_partition = 1;
3273 }
3274 }
3275
3276 /* 5. Cleanup */
3277 free(config, M_RAIDFRAME);
3278
3279 *unit = raidID;
3280 return(retcode);
3281 }
3282
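/*
 * Inform the disk(9) layer that the I/O described by desc has
 * completed, crediting the number of bytes actually transferred.
 */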
3283 void
3284 rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
3285 {
3286 struct buf *bp;
3287
3288 bp = (struct buf *)desc->bp;
3289 disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
3290 (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
3291 }
3292