rf_netbsdkintf.c revision 1.104.2.13 1 /* $NetBSD: rf_netbsdkintf.c,v 1.104.2.13 2002/08/01 02:45:36 nathanw Exp $ */
2 /*-
3 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Greg Oster; Jason R. Thorpe.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1988 University of Utah.
40 * Copyright (c) 1990, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * This code is derived from software contributed to Berkeley by
44 * the Systems Programming Group of the University of Utah Computer
45 * Science Department.
46 *
47 * Redistribution and use in source and binary forms, with or without
48 * modification, are permitted provided that the following conditions
49 * are met:
50 * 1. Redistributions of source code must retain the above copyright
51 * notice, this list of conditions and the following disclaimer.
52 * 2. Redistributions in binary form must reproduce the above copyright
53 * notice, this list of conditions and the following disclaimer in the
54 * documentation and/or other materials provided with the distribution.
55 * 3. All advertising materials mentioning features or use of this software
56 * must display the following acknowledgement:
57 * This product includes software developed by the University of
58 * California, Berkeley and its contributors.
59 * 4. Neither the name of the University nor the names of its contributors
60 * may be used to endorse or promote products derived from this software
61 * without specific prior written permission.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE.
74 *
75 * from: Utah $Hdr: cd.c 1.6 90/11/28$
76 *
77 * @(#)cd.c 8.2 (Berkeley) 11/16/93
78 */
79
80
81
82
83 /*
84 * Copyright (c) 1995 Carnegie-Mellon University.
85 * All rights reserved.
86 *
87 * Authors: Mark Holland, Jim Zelenka
88 *
89 * Permission to use, copy, modify and distribute this software and
90 * its documentation is hereby granted, provided that both the copyright
91 * notice and this permission notice appear in all copies of the
92 * software, derivative works or modified versions, and any portions
93 * thereof, and that both notices appear in supporting documentation.
94 *
95 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
96 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
97 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
98 *
99 * Carnegie Mellon requests users of this software to return to
100 *
101 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
102 * School of Computer Science
103 * Carnegie Mellon University
104 * Pittsburgh PA 15213-3890
105 *
106 * any improvements or extensions that they make and grant Carnegie the
107 * rights to redistribute these changes.
108 */
109
110 /***********************************************************
111 *
112 * rf_kintf.c -- the kernel interface routines for RAIDframe
113 *
114 ***********************************************************/
115
116 #include <sys/cdefs.h>
117 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.104.2.13 2002/08/01 02:45:36 nathanw Exp $");
118
119 #include <sys/param.h>
120 #include <sys/errno.h>
121 #include <sys/pool.h>
122 #include <sys/proc.h>
123 #include <sys/queue.h>
124 #include <sys/disk.h>
125 #include <sys/device.h>
126 #include <sys/stat.h>
127 #include <sys/ioctl.h>
128 #include <sys/fcntl.h>
129 #include <sys/systm.h>
130 #include <sys/namei.h>
131 #include <sys/vnode.h>
132 #include <sys/disklabel.h>
133 #include <sys/conf.h>
134 #include <sys/lock.h>
135 #include <sys/buf.h>
136 #include <sys/user.h>
137 #include <sys/reboot.h>
138
139 #include <dev/raidframe/raidframevar.h>
140 #include <dev/raidframe/raidframeio.h>
141 #include "raid.h"
142 #include "opt_raid_autoconfig.h"
143 #include "rf_raid.h"
144 #include "rf_copyback.h"
145 #include "rf_dag.h"
146 #include "rf_dagflags.h"
147 #include "rf_desc.h"
148 #include "rf_diskqueue.h"
149 #include "rf_acctrace.h"
150 #include "rf_etimer.h"
151 #include "rf_general.h"
152 #include "rf_debugMem.h"
153 #include "rf_kintf.h"
154 #include "rf_options.h"
155 #include "rf_driver.h"
156 #include "rf_parityscan.h"
157 #include "rf_debugprint.h"
158 #include "rf_threadstuff.h"
159
160 int rf_kdebug_level = 0;
161
162 #ifdef DEBUG
163 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
164 #else /* DEBUG */
165 #define db1_printf(a) { }
166 #endif /* DEBUG */
167
168 static RF_Raid_t **raidPtrs; /* global raid device descriptors */
169
170 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
171
172 static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a
173 * spare table */
174 static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from
175 * installation process */
176
177 /* prototypes */
178 static void KernelWakeupFunc(struct buf * bp);
179 static void InitBP(struct buf * bp, struct vnode *, unsigned rw_flag,
180 dev_t dev, RF_SectorNum_t startSect,
181 RF_SectorCount_t numSect, caddr_t buf,
182 void (*cbFunc) (struct buf *), void *cbArg,
183 int logBytesPerSector, struct proc * b_proc);
184 static void raidinit(RF_Raid_t *);
185
186 void raidattach(int);
187 int raidsize(dev_t);
188 int raidopen(dev_t, int, int, struct proc *);
189 int raidclose(dev_t, int, int, struct proc *);
190 int raidioctl(dev_t, u_long, caddr_t, int, struct proc *);
191 int raidwrite(dev_t, struct uio *, int);
192 int raidread(dev_t, struct uio *, int);
193 void raidstrategy(struct buf *);
194 int raiddump(dev_t, daddr_t, caddr_t, size_t);
195
196 /*
197 * Pilfered from ccd.c
198 */
199
200 struct raidbuf {
201 struct buf rf_buf; /* new I/O buf. MUST BE FIRST!!! */
202 struct buf *rf_obp; /* ptr. to original I/O buf */
203 int rf_flags; /* misc. flags */
204 RF_DiskQueueData_t *req;/* the request that this was part of.. */
205 };
206
207 /* component buffer pool */
208 struct pool raidframe_cbufpool;
209
210 #define RAIDGETBUF(rs) pool_get(&raidframe_cbufpool, PR_NOWAIT)
211 #define RAIDPUTBUF(rs, cbp) pool_put(&raidframe_cbufpool, cbp)
212
213 /* XXX Not sure if the following should be replacing the raidPtrs above,
214 or if it should be used in conjunction with that...
215 */
216
217 struct raid_softc {
218 int sc_flags; /* flags */
219 int sc_cflags; /* configuration flags */
220 size_t sc_size; /* size of the raid device */
221 char sc_xname[20]; /* XXX external name */
222 struct disk sc_dkdev; /* generic disk device info */
223 struct bufq_state buf_queue; /* used for the device queue */
224 };
225 /* sc_flags */
226 #define RAIDF_INITED 0x01 /* unit has been initialized */
227 #define RAIDF_WLABEL 0x02 /* label area is writable */
228 #define RAIDF_LABELLING 0x04 /* unit is currently being labelled */
229 #define RAIDF_WANTED 0x40 /* someone is waiting to obtain a lock */
230 #define RAIDF_LOCKED 0x80 /* unit is locked */
231
232 #define raidunit(x) DISKUNIT(x)
233 int numraid = 0;
234
235 /*
236 * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
237 * Be aware that large numbers can allow the driver to consume a lot of
238 * kernel memory, especially on writes, and in degraded mode reads.
239 *
240 * For example: with a stripe width of 64 blocks (32k) and 5 disks,
241 * a single 64K write will typically require 64K for the old data,
242 * 64K for the old parity, and 64K for the new parity, for a total
243 * of 192K (if the parity buffer is not re-used immediately).
244 * Even it if is used immediately, that's still 128K, which when multiplied
245 * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
246 *
247 * Now in degraded mode, for example, a 64K read on the above setup may
248 * require data reconstruction, which will require *all* of the 4 remaining
249 * disks to participate -- 4 * 32K/disk == 128K again.
250 */
251
252 #ifndef RAIDOUTSTANDING
253 #define RAIDOUTSTANDING 6
254 #endif
255
256 #define RAIDLABELDEV(dev) \
257 (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
258
259 /* declared here, and made public, for the benefit of KVM stuff.. */
260 struct raid_softc *raid_softc;
261
262 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
263 struct disklabel *);
264 static void raidgetdisklabel(dev_t);
265 static void raidmakedisklabel(struct raid_softc *);
266
267 static int raidlock(struct raid_softc *);
268 static void raidunlock(struct raid_softc *);
269
270 static void rf_markalldirty(RF_Raid_t *);
271 void rf_mountroot_hook(struct device *);
272
273 struct device *raidrootdev;
274
275 void rf_ReconThread(struct rf_recon_req *);
276 /* XXX what I want is: */
277 /*void rf_ReconThread(RF_Raid_t *raidPtr); */
278 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
279 void rf_CopybackThread(RF_Raid_t *raidPtr);
280 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
281 void rf_buildroothack(void *);
282
283 RF_AutoConfig_t *rf_find_raid_components(void);
284 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
285 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
286 static int rf_reasonable_label(RF_ComponentLabel_t *);
287 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
288 int rf_set_autoconfig(RF_Raid_t *, int);
289 int rf_set_rootpartition(RF_Raid_t *, int);
290 void rf_release_all_vps(RF_ConfigSet_t *);
291 void rf_cleanup_config_set(RF_ConfigSet_t *);
292 int rf_have_enough_components(RF_ConfigSet_t *);
293 int rf_auto_config_set(RF_ConfigSet_t *, int *);
294
295 static int raidautoconfig = 0; /* Debugging, mostly. Set to 0 to not
296 allow autoconfig to take place.
297 Note that this is overridden by having
298 RAID_AUTOCONFIG as an option in the
299 kernel config file. */
300
301 void
302 raidattach(num)
303 int num;
304 {
305 int raidID;
306 int i, rc;
307 RF_AutoConfig_t *ac_list; /* autoconfig list */
308 RF_ConfigSet_t *config_sets;
309
310 #ifdef DEBUG
311 printf("raidattach: Asked for %d units\n", num);
312 #endif
313
314 if (num <= 0) {
315 #ifdef DIAGNOSTIC
316 panic("raidattach: count <= 0");
317 #endif
318 return;
319 }
320 /* This is where all the initialization stuff gets done. */
321
322 numraid = num;
323
324 /* Make some space for requested number of units... */
325
326 RF_Calloc(raidPtrs, num, sizeof(RF_Raid_t *), (RF_Raid_t **));
327 if (raidPtrs == NULL) {
328 panic("raidPtrs is NULL!!\n");
329 }
330
331 /* Initialize the component buffer pool. */
332 pool_init(&raidframe_cbufpool, sizeof(struct raidbuf), 0,
333 0, 0, "raidpl", NULL);
334
335 rc = rf_mutex_init(&rf_sparet_wait_mutex);
336 if (rc) {
337 RF_PANIC();
338 }
339
340 rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
341
342 for (i = 0; i < num; i++)
343 raidPtrs[i] = NULL;
344 rc = rf_BootRaidframe();
345 if (rc == 0)
346 printf("Kernelized RAIDframe activated\n");
347 else
348 panic("Serious error booting RAID!!\n");
349
350 /* put together some datastructures like the CCD device does.. This
351 * lets us lock the device and what-not when it gets opened. */
352
353 raid_softc = (struct raid_softc *)
354 malloc(num * sizeof(struct raid_softc),
355 M_RAIDFRAME, M_NOWAIT);
356 if (raid_softc == NULL) {
357 printf("WARNING: no memory for RAIDframe driver\n");
358 return;
359 }
360
361 memset(raid_softc, 0, num * sizeof(struct raid_softc));
362
363 raidrootdev = (struct device *)malloc(num * sizeof(struct device),
364 M_RAIDFRAME, M_NOWAIT);
365 if (raidrootdev == NULL) {
366 panic("No memory for RAIDframe driver!!?!?!\n");
367 }
368
369 for (raidID = 0; raidID < num; raidID++) {
370 bufq_alloc(&raid_softc[raidID].buf_queue, BUFQ_FCFS);
371
372 raidrootdev[raidID].dv_class = DV_DISK;
373 raidrootdev[raidID].dv_cfdata = NULL;
374 raidrootdev[raidID].dv_unit = raidID;
375 raidrootdev[raidID].dv_parent = NULL;
376 raidrootdev[raidID].dv_flags = 0;
377 sprintf(raidrootdev[raidID].dv_xname,"raid%d",raidID);
378
379 RF_Calloc(raidPtrs[raidID], 1, sizeof(RF_Raid_t),
380 (RF_Raid_t *));
381 if (raidPtrs[raidID] == NULL) {
382 printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
383 numraid = raidID;
384 return;
385 }
386 }
387
388 #ifdef RAID_AUTOCONFIG
389 raidautoconfig = 1;
390 #endif
391
392 if (raidautoconfig) {
393 /* 1. locate all RAID components on the system */
394
395 #if DEBUG
396 printf("Searching for raid components...\n");
397 #endif
398 ac_list = rf_find_raid_components();
399
400 /* 2. sort them into their respective sets */
401
402 config_sets = rf_create_auto_sets(ac_list);
403
404 /* 3. evaluate each set and configure the valid ones
405 This gets done in rf_buildroothack() */
406
407 /* schedule the creation of the thread to do the
408 "/ on RAID" stuff */
409
410 kthread_create(rf_buildroothack,config_sets);
411
412 #if 0
413 mountroothook_establish(rf_mountroot_hook, &raidrootdev[0]);
414 #endif
415 }
416
417 }
418
419 void
420 rf_buildroothack(arg)
421 void *arg;
422 {
423 RF_ConfigSet_t *config_sets = arg;
424 RF_ConfigSet_t *cset;
425 RF_ConfigSet_t *next_cset;
426 int retcode;
427 int raidID;
428 int rootID;
429 int num_root;
430
431 rootID = 0;
432 num_root = 0;
433 cset = config_sets;
434 while(cset != NULL ) {
435 next_cset = cset->next;
436 if (rf_have_enough_components(cset) &&
437 cset->ac->clabel->autoconfigure==1) {
438 retcode = rf_auto_config_set(cset,&raidID);
439 if (!retcode) {
440 if (cset->rootable) {
441 rootID = raidID;
442 num_root++;
443 }
444 } else {
445 /* The autoconfig didn't work :( */
446 #if DEBUG
447 printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
448 #endif
449 rf_release_all_vps(cset);
450 }
451 } else {
452 /* we're not autoconfiguring this set...
453 release the associated resources */
454 rf_release_all_vps(cset);
455 }
456 /* cleanup */
457 rf_cleanup_config_set(cset);
458 cset = next_cset;
459 }
460
461 /* we found something bootable... */
462
463 if (num_root == 1) {
464 booted_device = &raidrootdev[rootID];
465 } else if (num_root > 1) {
466 /* we can't guess.. require the user to answer... */
467 boothowto |= RB_ASKNAME;
468 }
469 }
470
471
472 int
473 raidsize(dev)
474 dev_t dev;
475 {
476 struct raid_softc *rs;
477 struct disklabel *lp;
478 int part, unit, omask, size;
479
480 unit = raidunit(dev);
481 if (unit >= numraid)
482 return (-1);
483 rs = &raid_softc[unit];
484
485 if ((rs->sc_flags & RAIDF_INITED) == 0)
486 return (-1);
487
488 part = DISKPART(dev);
489 omask = rs->sc_dkdev.dk_openmask & (1 << part);
490 lp = rs->sc_dkdev.dk_label;
491
492 if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
493 return (-1);
494
495 if (lp->d_partitions[part].p_fstype != FS_SWAP)
496 size = -1;
497 else
498 size = lp->d_partitions[part].p_size *
499 (lp->d_secsize / DEV_BSIZE);
500
501 if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
502 return (-1);
503
504 return (size);
505
506 }
507
508 int
509 raiddump(dev, blkno, va, size)
510 dev_t dev;
511 daddr_t blkno;
512 caddr_t va;
513 size_t size;
514 {
515 /* Not implemented. */
516 return ENXIO;
517 }
518 /* ARGSUSED */
519 int
520 raidopen(dev, flags, fmt, p)
521 dev_t dev;
522 int flags, fmt;
523 struct proc *p;
524 {
525 int unit = raidunit(dev);
526 struct raid_softc *rs;
527 struct disklabel *lp;
528 int part, pmask;
529 int error = 0;
530
531 if (unit >= numraid)
532 return (ENXIO);
533 rs = &raid_softc[unit];
534
535 if ((error = raidlock(rs)) != 0)
536 return (error);
537 lp = rs->sc_dkdev.dk_label;
538
539 part = DISKPART(dev);
540 pmask = (1 << part);
541
542 db1_printf(("Opening raid device number: %d partition: %d\n",
543 unit, part));
544
545
546 if ((rs->sc_flags & RAIDF_INITED) &&
547 (rs->sc_dkdev.dk_openmask == 0))
548 raidgetdisklabel(dev);
549
550 /* make sure that this partition exists */
551
552 if (part != RAW_PART) {
553 db1_printf(("Not a raw partition..\n"));
554 if (((rs->sc_flags & RAIDF_INITED) == 0) ||
555 ((part >= lp->d_npartitions) ||
556 (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
557 error = ENXIO;
558 raidunlock(rs);
559 db1_printf(("Bailing out...\n"));
560 return (error);
561 }
562 }
563 /* Prevent this unit from being unconfigured while open. */
564 switch (fmt) {
565 case S_IFCHR:
566 rs->sc_dkdev.dk_copenmask |= pmask;
567 break;
568
569 case S_IFBLK:
570 rs->sc_dkdev.dk_bopenmask |= pmask;
571 break;
572 }
573
574 if ((rs->sc_dkdev.dk_openmask == 0) &&
575 ((rs->sc_flags & RAIDF_INITED) != 0)) {
576 /* First one... mark things as dirty... Note that we *MUST*
577 have done a configure before this. I DO NOT WANT TO BE
578 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
579 THAT THEY BELONG TOGETHER!!!!! */
580 /* XXX should check to see if we're only open for reading
581 here... If so, we needn't do this, but then need some
582 other way of keeping track of what's happened.. */
583
584 rf_markalldirty( raidPtrs[unit] );
585 }
586
587
588 rs->sc_dkdev.dk_openmask =
589 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
590
591 raidunlock(rs);
592
593 return (error);
594
595
596 }
597 /* ARGSUSED */
598 int
599 raidclose(dev, flags, fmt, p)
600 dev_t dev;
601 int flags, fmt;
602 struct proc *p;
603 {
604 int unit = raidunit(dev);
605 struct raid_softc *rs;
606 int error = 0;
607 int part;
608
609 if (unit >= numraid)
610 return (ENXIO);
611 rs = &raid_softc[unit];
612
613 if ((error = raidlock(rs)) != 0)
614 return (error);
615
616 part = DISKPART(dev);
617
618 /* ...that much closer to allowing unconfiguration... */
619 switch (fmt) {
620 case S_IFCHR:
621 rs->sc_dkdev.dk_copenmask &= ~(1 << part);
622 break;
623
624 case S_IFBLK:
625 rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
626 break;
627 }
628 rs->sc_dkdev.dk_openmask =
629 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
630
631 if ((rs->sc_dkdev.dk_openmask == 0) &&
632 ((rs->sc_flags & RAIDF_INITED) != 0)) {
633 /* Last one... device is not unconfigured yet.
634 Device shutdown has taken care of setting the
635 clean bits if RAIDF_INITED is not set
636 mark things as clean... */
637 #if 0
638 printf("Last one on raid%d. Updating status.\n",unit);
639 #endif
640 rf_update_component_labels(raidPtrs[unit],
641 RF_FINAL_COMPONENT_UPDATE);
642 if (doing_shutdown) {
643 /* last one, and we're going down, so
644 lights out for this RAID set too. */
645 error = rf_Shutdown(raidPtrs[unit]);
646
647 /* It's no longer initialized... */
648 rs->sc_flags &= ~RAIDF_INITED;
649
650 /* Detach the disk. */
651 disk_detach(&rs->sc_dkdev);
652 }
653 }
654
655 raidunlock(rs);
656 return (0);
657
658 }
659
660 void
661 raidstrategy(bp)
662 struct buf *bp;
663 {
664 int s;
665
666 unsigned int raidID = raidunit(bp->b_dev);
667 RF_Raid_t *raidPtr;
668 struct raid_softc *rs = &raid_softc[raidID];
669 struct disklabel *lp;
670 int wlabel;
671
672 if ((rs->sc_flags & RAIDF_INITED) ==0) {
673 bp->b_error = ENXIO;
674 bp->b_flags |= B_ERROR;
675 bp->b_resid = bp->b_bcount;
676 biodone(bp);
677 return;
678 }
679 if (raidID >= numraid || !raidPtrs[raidID]) {
680 bp->b_error = ENODEV;
681 bp->b_flags |= B_ERROR;
682 bp->b_resid = bp->b_bcount;
683 biodone(bp);
684 return;
685 }
686 raidPtr = raidPtrs[raidID];
687 if (!raidPtr->valid) {
688 bp->b_error = ENODEV;
689 bp->b_flags |= B_ERROR;
690 bp->b_resid = bp->b_bcount;
691 biodone(bp);
692 return;
693 }
694 if (bp->b_bcount == 0) {
695 db1_printf(("b_bcount is zero..\n"));
696 biodone(bp);
697 return;
698 }
699 lp = rs->sc_dkdev.dk_label;
700
701 /*
702 * Do bounds checking and adjust transfer. If there's an
703 * error, the bounds check will flag that for us.
704 */
705
706 wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
707 if (DISKPART(bp->b_dev) != RAW_PART)
708 if (bounds_check_with_label(bp, lp, wlabel) <= 0) {
709 db1_printf(("Bounds check failed!!:%d %d\n",
710 (int) bp->b_blkno, (int) wlabel));
711 biodone(bp);
712 return;
713 }
714 s = splbio();
715
716 bp->b_resid = 0;
717
718 /* stuff it onto our queue */
719 BUFQ_PUT(&rs->buf_queue, bp);
720
721 raidstart(raidPtrs[raidID]);
722
723 splx(s);
724 }
725 /* ARGSUSED */
726 int
727 raidread(dev, uio, flags)
728 dev_t dev;
729 struct uio *uio;
730 int flags;
731 {
732 int unit = raidunit(dev);
733 struct raid_softc *rs;
734 int part;
735
736 if (unit >= numraid)
737 return (ENXIO);
738 rs = &raid_softc[unit];
739
740 if ((rs->sc_flags & RAIDF_INITED) == 0)
741 return (ENXIO);
742 part = DISKPART(dev);
743
744 db1_printf(("raidread: unit: %d partition: %d\n", unit, part));
745
746 return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
747
748 }
749 /* ARGSUSED */
750 int
751 raidwrite(dev, uio, flags)
752 dev_t dev;
753 struct uio *uio;
754 int flags;
755 {
756 int unit = raidunit(dev);
757 struct raid_softc *rs;
758
759 if (unit >= numraid)
760 return (ENXIO);
761 rs = &raid_softc[unit];
762
763 if ((rs->sc_flags & RAIDF_INITED) == 0)
764 return (ENXIO);
765 db1_printf(("raidwrite\n"));
766 return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
767
768 }
769
770 int
771 raidioctl(dev, cmd, data, flag, p)
772 dev_t dev;
773 u_long cmd;
774 caddr_t data;
775 int flag;
776 struct proc *p;
777 {
778 int unit = raidunit(dev);
779 int error = 0;
780 int part, pmask;
781 struct raid_softc *rs;
782 RF_Config_t *k_cfg, *u_cfg;
783 RF_Raid_t *raidPtr;
784 RF_RaidDisk_t *diskPtr;
785 RF_AccTotals_t *totals;
786 RF_DeviceConfig_t *d_cfg, **ucfgp;
787 u_char *specific_buf;
788 int retcode = 0;
789 int row;
790 int column;
791 int raidid;
792 struct rf_recon_req *rrcopy, *rr;
793 RF_ComponentLabel_t *clabel;
794 RF_ComponentLabel_t ci_label;
795 RF_ComponentLabel_t **clabel_ptr;
796 RF_SingleComponent_t *sparePtr,*componentPtr;
797 RF_SingleComponent_t hot_spare;
798 RF_SingleComponent_t component;
799 RF_ProgressInfo_t progressInfo, **progressInfoPtr;
800 int i, j, d;
801 #ifdef __HAVE_OLD_DISKLABEL
802 struct disklabel newlabel;
803 #endif
804
805 if (unit >= numraid)
806 return (ENXIO);
807 rs = &raid_softc[unit];
808 raidPtr = raidPtrs[unit];
809
810 db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
811 (int) DISKPART(dev), (int) unit, (int) cmd));
812
813 /* Must be open for writes for these commands... */
814 switch (cmd) {
815 case DIOCSDINFO:
816 case DIOCWDINFO:
817 #ifdef __HAVE_OLD_DISKLABEL
818 case ODIOCWDINFO:
819 case ODIOCSDINFO:
820 #endif
821 case DIOCWLABEL:
822 if ((flag & FWRITE) == 0)
823 return (EBADF);
824 }
825
826 /* Must be initialized for these... */
827 switch (cmd) {
828 case DIOCGDINFO:
829 case DIOCSDINFO:
830 case DIOCWDINFO:
831 #ifdef __HAVE_OLD_DISKLABEL
832 case ODIOCGDINFO:
833 case ODIOCWDINFO:
834 case ODIOCSDINFO:
835 case ODIOCGDEFLABEL:
836 #endif
837 case DIOCGPART:
838 case DIOCWLABEL:
839 case DIOCGDEFLABEL:
840 case RAIDFRAME_SHUTDOWN:
841 case RAIDFRAME_REWRITEPARITY:
842 case RAIDFRAME_GET_INFO:
843 case RAIDFRAME_RESET_ACCTOTALS:
844 case RAIDFRAME_GET_ACCTOTALS:
845 case RAIDFRAME_KEEP_ACCTOTALS:
846 case RAIDFRAME_GET_SIZE:
847 case RAIDFRAME_FAIL_DISK:
848 case RAIDFRAME_COPYBACK:
849 case RAIDFRAME_CHECK_RECON_STATUS:
850 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
851 case RAIDFRAME_GET_COMPONENT_LABEL:
852 case RAIDFRAME_SET_COMPONENT_LABEL:
853 case RAIDFRAME_ADD_HOT_SPARE:
854 case RAIDFRAME_REMOVE_HOT_SPARE:
855 case RAIDFRAME_INIT_LABELS:
856 case RAIDFRAME_REBUILD_IN_PLACE:
857 case RAIDFRAME_CHECK_PARITY:
858 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
859 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
860 case RAIDFRAME_CHECK_COPYBACK_STATUS:
861 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
862 case RAIDFRAME_SET_AUTOCONFIG:
863 case RAIDFRAME_SET_ROOT:
864 case RAIDFRAME_DELETE_COMPONENT:
865 case RAIDFRAME_INCORPORATE_HOT_SPARE:
866 if ((rs->sc_flags & RAIDF_INITED) == 0)
867 return (ENXIO);
868 }
869
870 switch (cmd) {
871
872 /* configure the system */
873 case RAIDFRAME_CONFIGURE:
874
875 if (raidPtr->valid) {
876 /* There is a valid RAID set running on this unit! */
877 printf("raid%d: Device already configured!\n",unit);
878 return(EINVAL);
879 }
880
881 /* copy-in the configuration information */
882 /* data points to a pointer to the configuration structure */
883
884 u_cfg = *((RF_Config_t **) data);
885 RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
886 if (k_cfg == NULL) {
887 return (ENOMEM);
888 }
889 retcode = copyin((caddr_t) u_cfg, (caddr_t) k_cfg,
890 sizeof(RF_Config_t));
891 if (retcode) {
892 RF_Free(k_cfg, sizeof(RF_Config_t));
893 db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
894 retcode));
895 return (retcode);
896 }
897 /* allocate a buffer for the layout-specific data, and copy it
898 * in */
899 if (k_cfg->layoutSpecificSize) {
900 if (k_cfg->layoutSpecificSize > 10000) {
901 /* sanity check */
902 RF_Free(k_cfg, sizeof(RF_Config_t));
903 return (EINVAL);
904 }
905 RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
906 (u_char *));
907 if (specific_buf == NULL) {
908 RF_Free(k_cfg, sizeof(RF_Config_t));
909 return (ENOMEM);
910 }
911 retcode = copyin(k_cfg->layoutSpecific,
912 (caddr_t) specific_buf,
913 k_cfg->layoutSpecificSize);
914 if (retcode) {
915 RF_Free(k_cfg, sizeof(RF_Config_t));
916 RF_Free(specific_buf,
917 k_cfg->layoutSpecificSize);
918 db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
919 retcode));
920 return (retcode);
921 }
922 } else
923 specific_buf = NULL;
924 k_cfg->layoutSpecific = specific_buf;
925
926 /* should do some kind of sanity check on the configuration.
927 * Store the sum of all the bytes in the last byte? */
928
929 /* configure the system */
930
931 /*
932 * Clear the entire RAID descriptor, just to make sure
933 * there is no stale data left in the case of a
934 * reconfiguration
935 */
936 memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
937 raidPtr->raidid = unit;
938
939 retcode = rf_Configure(raidPtr, k_cfg, NULL);
940
941 if (retcode == 0) {
942
943 /* allow this many simultaneous IO's to
944 this RAID device */
945 raidPtr->openings = RAIDOUTSTANDING;
946
947 raidinit(raidPtr);
948 rf_markalldirty(raidPtr);
949 }
950 /* free the buffers. No return code here. */
951 if (k_cfg->layoutSpecificSize) {
952 RF_Free(specific_buf, k_cfg->layoutSpecificSize);
953 }
954 RF_Free(k_cfg, sizeof(RF_Config_t));
955
956 return (retcode);
957
958 /* shutdown the system */
959 case RAIDFRAME_SHUTDOWN:
960
961 if ((error = raidlock(rs)) != 0)
962 return (error);
963
964 /*
965 * If somebody has a partition mounted, we shouldn't
966 * shutdown.
967 */
968
969 part = DISKPART(dev);
970 pmask = (1 << part);
971 if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
972 ((rs->sc_dkdev.dk_bopenmask & pmask) &&
973 (rs->sc_dkdev.dk_copenmask & pmask))) {
974 raidunlock(rs);
975 return (EBUSY);
976 }
977
978 retcode = rf_Shutdown(raidPtr);
979
980 /* It's no longer initialized... */
981 rs->sc_flags &= ~RAIDF_INITED;
982
983 /* Detach the disk. */
984 disk_detach(&rs->sc_dkdev);
985
986 raidunlock(rs);
987
988 return (retcode);
989 case RAIDFRAME_GET_COMPONENT_LABEL:
990 clabel_ptr = (RF_ComponentLabel_t **) data;
991 /* need to read the component label for the disk indicated
992 by row,column in clabel */
993
994 /* For practice, let's get it directly fromdisk, rather
995 than from the in-core copy */
996 RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
997 (RF_ComponentLabel_t *));
998 if (clabel == NULL)
999 return (ENOMEM);
1000
1001 memset((char *) clabel, 0, sizeof(RF_ComponentLabel_t));
1002
1003 retcode = copyin( *clabel_ptr, clabel,
1004 sizeof(RF_ComponentLabel_t));
1005
1006 if (retcode) {
1007 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1008 return(retcode);
1009 }
1010
1011 row = clabel->row;
1012 column = clabel->column;
1013
1014 if ((row < 0) || (row >= raidPtr->numRow) ||
1015 (column < 0) || (column >= raidPtr->numCol +
1016 raidPtr->numSpare)) {
1017 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1018 return(EINVAL);
1019 }
1020
1021 raidread_component_label(raidPtr->Disks[row][column].dev,
1022 raidPtr->raid_cinfo[row][column].ci_vp,
1023 clabel );
1024
1025 retcode = copyout((caddr_t) clabel,
1026 (caddr_t) *clabel_ptr,
1027 sizeof(RF_ComponentLabel_t));
1028 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1029 return (retcode);
1030
1031 case RAIDFRAME_SET_COMPONENT_LABEL:
1032 clabel = (RF_ComponentLabel_t *) data;
1033
1034 /* XXX check the label for valid stuff... */
1035 /* Note that some things *should not* get modified --
1036 the user should be re-initing the labels instead of
1037 trying to patch things.
1038 */
1039
1040 raidid = raidPtr->raidid;
1041 printf("raid%d: Got component label:\n", raidid);
1042 printf("raid%d: Version: %d\n", raidid, clabel->version);
1043 printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1044 printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1045 printf("raid%d: Row: %d\n", raidid, clabel->row);
1046 printf("raid%d: Column: %d\n", raidid, clabel->column);
1047 printf("raid%d: Num Rows: %d\n", raidid, clabel->num_rows);
1048 printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1049 printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1050 printf("raid%d: Status: %d\n", raidid, clabel->status);
1051
1052 row = clabel->row;
1053 column = clabel->column;
1054
1055 if ((row < 0) || (row >= raidPtr->numRow) ||
1056 (column < 0) || (column >= raidPtr->numCol)) {
1057 return(EINVAL);
1058 }
1059
1060 /* XXX this isn't allowed to do anything for now :-) */
1061
1062 /* XXX and before it is, we need to fill in the rest
1063 of the fields!?!?!?! */
1064 #if 0
1065 raidwrite_component_label(
1066 raidPtr->Disks[row][column].dev,
1067 raidPtr->raid_cinfo[row][column].ci_vp,
1068 clabel );
1069 #endif
1070 return (0);
1071
1072 case RAIDFRAME_INIT_LABELS:
1073 clabel = (RF_ComponentLabel_t *) data;
1074 /*
1075 we only want the serial number from
1076 the above. We get all the rest of the information
1077 from the config that was used to create this RAID
1078 set.
1079 */
1080
1081 raidPtr->serial_number = clabel->serial_number;
1082
1083 raid_init_component_label(raidPtr, &ci_label);
1084 ci_label.serial_number = clabel->serial_number;
1085
1086 for(row=0;row<raidPtr->numRow;row++) {
1087 ci_label.row = row;
1088 for(column=0;column<raidPtr->numCol;column++) {
1089 diskPtr = &raidPtr->Disks[row][column];
1090 if (!RF_DEAD_DISK(diskPtr->status)) {
1091 ci_label.partitionSize = diskPtr->partitionSize;
1092 ci_label.column = column;
1093 raidwrite_component_label(
1094 raidPtr->Disks[row][column].dev,
1095 raidPtr->raid_cinfo[row][column].ci_vp,
1096 &ci_label );
1097 }
1098 }
1099 }
1100
1101 return (retcode);
1102 case RAIDFRAME_SET_AUTOCONFIG:
1103 d = rf_set_autoconfig(raidPtr, *(int *) data);
1104 printf("raid%d: New autoconfig value is: %d\n",
1105 raidPtr->raidid, d);
1106 *(int *) data = d;
1107 return (retcode);
1108
1109 case RAIDFRAME_SET_ROOT:
1110 d = rf_set_rootpartition(raidPtr, *(int *) data);
1111 printf("raid%d: New rootpartition value is: %d\n",
1112 raidPtr->raidid, d);
1113 *(int *) data = d;
1114 return (retcode);
1115
1116 /* initialize all parity */
1117 case RAIDFRAME_REWRITEPARITY:
1118
1119 if (raidPtr->Layout.map->faultsTolerated == 0) {
1120 /* Parity for RAID 0 is trivially correct */
1121 raidPtr->parity_good = RF_RAID_CLEAN;
1122 return(0);
1123 }
1124
1125 if (raidPtr->parity_rewrite_in_progress == 1) {
1126 /* Re-write is already in progress! */
1127 return(EINVAL);
1128 }
1129
1130 retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1131 rf_RewriteParityThread,
1132 raidPtr,"raid_parity");
1133 return (retcode);
1134
1135
1136 case RAIDFRAME_ADD_HOT_SPARE:
1137 sparePtr = (RF_SingleComponent_t *) data;
1138 memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
1139 retcode = rf_add_hot_spare(raidPtr, &hot_spare);
1140 return(retcode);
1141
1142 case RAIDFRAME_REMOVE_HOT_SPARE:
1143 return(retcode);
1144
1145 case RAIDFRAME_DELETE_COMPONENT:
1146 componentPtr = (RF_SingleComponent_t *)data;
1147 memcpy( &component, componentPtr,
1148 sizeof(RF_SingleComponent_t));
1149 retcode = rf_delete_component(raidPtr, &component);
1150 return(retcode);
1151
1152 case RAIDFRAME_INCORPORATE_HOT_SPARE:
1153 componentPtr = (RF_SingleComponent_t *)data;
1154 memcpy( &component, componentPtr,
1155 sizeof(RF_SingleComponent_t));
1156 retcode = rf_incorporate_hot_spare(raidPtr, &component);
1157 return(retcode);
1158
1159 case RAIDFRAME_REBUILD_IN_PLACE:
1160
1161 if (raidPtr->Layout.map->faultsTolerated == 0) {
1162 /* Can't do this on a RAID 0!! */
1163 return(EINVAL);
1164 }
1165
1166 if (raidPtr->recon_in_progress == 1) {
1167 /* a reconstruct is already in progress! */
1168 return(EINVAL);
1169 }
1170
1171 componentPtr = (RF_SingleComponent_t *) data;
1172 memcpy( &component, componentPtr,
1173 sizeof(RF_SingleComponent_t));
1174 row = component.row;
1175 column = component.column;
1176 printf("raid%d: Rebuild: %d %d\n", raidPtr->raidid,
1177 row, column);
1178 if ((row < 0) || (row >= raidPtr->numRow) ||
1179 (column < 0) || (column >= raidPtr->numCol)) {
1180 return(EINVAL);
1181 }
1182
1183 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1184 if (rrcopy == NULL)
1185 return(ENOMEM);
1186
1187 rrcopy->raidPtr = (void *) raidPtr;
1188 rrcopy->row = row;
1189 rrcopy->col = column;
1190
1191 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1192 rf_ReconstructInPlaceThread,
1193 rrcopy,"raid_reconip");
1194 return(retcode);
1195
1196 case RAIDFRAME_GET_INFO:
1197 if (!raidPtr->valid)
1198 return (ENODEV);
1199 ucfgp = (RF_DeviceConfig_t **) data;
1200 RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1201 (RF_DeviceConfig_t *));
1202 if (d_cfg == NULL)
1203 return (ENOMEM);
1204 memset((char *) d_cfg, 0, sizeof(RF_DeviceConfig_t));
1205 d_cfg->rows = raidPtr->numRow;
1206 d_cfg->cols = raidPtr->numCol;
1207 d_cfg->ndevs = raidPtr->numRow * raidPtr->numCol;
1208 if (d_cfg->ndevs >= RF_MAX_DISKS) {
1209 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1210 return (ENOMEM);
1211 }
1212 d_cfg->nspares = raidPtr->numSpare;
1213 if (d_cfg->nspares >= RF_MAX_DISKS) {
1214 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1215 return (ENOMEM);
1216 }
1217 d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1218 d = 0;
1219 for (i = 0; i < d_cfg->rows; i++) {
1220 for (j = 0; j < d_cfg->cols; j++) {
1221 d_cfg->devs[d] = raidPtr->Disks[i][j];
1222 d++;
1223 }
1224 }
1225 for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1226 d_cfg->spares[i] = raidPtr->Disks[0][j];
1227 }
1228 retcode = copyout((caddr_t) d_cfg, (caddr_t) * ucfgp,
1229 sizeof(RF_DeviceConfig_t));
1230 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1231
1232 return (retcode);
1233
1234 case RAIDFRAME_CHECK_PARITY:
1235 *(int *) data = raidPtr->parity_good;
1236 return (0);
1237
1238 case RAIDFRAME_RESET_ACCTOTALS:
1239 memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1240 return (0);
1241
1242 case RAIDFRAME_GET_ACCTOTALS:
1243 totals = (RF_AccTotals_t *) data;
1244 *totals = raidPtr->acc_totals;
1245 return (0);
1246
1247 case RAIDFRAME_KEEP_ACCTOTALS:
1248 raidPtr->keep_acc_totals = *(int *)data;
1249 return (0);
1250
1251 case RAIDFRAME_GET_SIZE:
1252 *(int *) data = raidPtr->totalSectors;
1253 return (0);
1254
1255 /* fail a disk & optionally start reconstruction */
1256 case RAIDFRAME_FAIL_DISK:
1257
1258 if (raidPtr->Layout.map->faultsTolerated == 0) {
1259 /* Can't do this on a RAID 0!! */
1260 return(EINVAL);
1261 }
1262
1263 rr = (struct rf_recon_req *) data;
1264
1265 if (rr->row < 0 || rr->row >= raidPtr->numRow
1266 || rr->col < 0 || rr->col >= raidPtr->numCol)
1267 return (EINVAL);
1268
1269 printf("raid%d: Failing the disk: row: %d col: %d\n",
1270 unit, rr->row, rr->col);
1271
1272 /* make a copy of the recon request so that we don't rely on
1273 * the user's buffer */
1274 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1275 if (rrcopy == NULL)
1276 return(ENOMEM);
1277 memcpy(rrcopy, rr, sizeof(*rr));
1278 rrcopy->raidPtr = (void *) raidPtr;
1279
1280 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1281 rf_ReconThread,
1282 rrcopy,"raid_recon");
1283 return (0);
1284
1285 /* invoke a copyback operation after recon on whatever disk
1286 * needs it, if any */
1287 case RAIDFRAME_COPYBACK:
1288
1289 if (raidPtr->Layout.map->faultsTolerated == 0) {
1290 /* This makes no sense on a RAID 0!! */
1291 return(EINVAL);
1292 }
1293
1294 if (raidPtr->copyback_in_progress == 1) {
1295 /* Copyback is already in progress! */
1296 return(EINVAL);
1297 }
1298
1299 retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1300 rf_CopybackThread,
1301 raidPtr,"raid_copyback");
1302 return (retcode);
1303
1304 /* return the percentage completion of reconstruction */
1305 case RAIDFRAME_CHECK_RECON_STATUS:
1306 if (raidPtr->Layout.map->faultsTolerated == 0) {
1307 /* This makes no sense on a RAID 0, so tell the
1308 user it's done. */
1309 *(int *) data = 100;
1310 return(0);
1311 }
1312 row = 0; /* XXX we only consider a single row... */
1313 if (raidPtr->status[row] != rf_rs_reconstructing)
1314 *(int *) data = 100;
1315 else
1316 *(int *) data = raidPtr->reconControl[row]->percentComplete;
1317 return (0);
1318 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1319 progressInfoPtr = (RF_ProgressInfo_t **) data;
1320 row = 0; /* XXX we only consider a single row... */
1321 if (raidPtr->status[row] != rf_rs_reconstructing) {
1322 progressInfo.remaining = 0;
1323 progressInfo.completed = 100;
1324 progressInfo.total = 100;
1325 } else {
1326 progressInfo.total =
1327 raidPtr->reconControl[row]->numRUsTotal;
1328 progressInfo.completed =
1329 raidPtr->reconControl[row]->numRUsComplete;
1330 progressInfo.remaining = progressInfo.total -
1331 progressInfo.completed;
1332 }
1333 retcode = copyout((caddr_t) &progressInfo,
1334 (caddr_t) *progressInfoPtr,
1335 sizeof(RF_ProgressInfo_t));
1336 return (retcode);
1337
1338 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1339 if (raidPtr->Layout.map->faultsTolerated == 0) {
1340 /* This makes no sense on a RAID 0, so tell the
1341 user it's done. */
1342 *(int *) data = 100;
1343 return(0);
1344 }
1345 if (raidPtr->parity_rewrite_in_progress == 1) {
1346 *(int *) data = 100 *
1347 raidPtr->parity_rewrite_stripes_done /
1348 raidPtr->Layout.numStripe;
1349 } else {
1350 *(int *) data = 100;
1351 }
1352 return (0);
1353
1354 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1355 progressInfoPtr = (RF_ProgressInfo_t **) data;
1356 if (raidPtr->parity_rewrite_in_progress == 1) {
1357 progressInfo.total = raidPtr->Layout.numStripe;
1358 progressInfo.completed =
1359 raidPtr->parity_rewrite_stripes_done;
1360 progressInfo.remaining = progressInfo.total -
1361 progressInfo.completed;
1362 } else {
1363 progressInfo.remaining = 0;
1364 progressInfo.completed = 100;
1365 progressInfo.total = 100;
1366 }
1367 retcode = copyout((caddr_t) &progressInfo,
1368 (caddr_t) *progressInfoPtr,
1369 sizeof(RF_ProgressInfo_t));
1370 return (retcode);
1371
1372 case RAIDFRAME_CHECK_COPYBACK_STATUS:
1373 if (raidPtr->Layout.map->faultsTolerated == 0) {
1374 /* This makes no sense on a RAID 0 */
1375 *(int *) data = 100;
1376 return(0);
1377 }
1378 if (raidPtr->copyback_in_progress == 1) {
1379 *(int *) data = 100 * raidPtr->copyback_stripes_done /
1380 raidPtr->Layout.numStripe;
1381 } else {
1382 *(int *) data = 100;
1383 }
1384 return (0);
1385
1386 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1387 progressInfoPtr = (RF_ProgressInfo_t **) data;
1388 if (raidPtr->copyback_in_progress == 1) {
1389 progressInfo.total = raidPtr->Layout.numStripe;
1390 progressInfo.completed =
1391 raidPtr->copyback_stripes_done;
1392 progressInfo.remaining = progressInfo.total -
1393 progressInfo.completed;
1394 } else {
1395 progressInfo.remaining = 0;
1396 progressInfo.completed = 100;
1397 progressInfo.total = 100;
1398 }
1399 retcode = copyout((caddr_t) &progressInfo,
1400 (caddr_t) *progressInfoPtr,
1401 sizeof(RF_ProgressInfo_t));
1402 return (retcode);
1403
1404 /* the sparetable daemon calls this to wait for the kernel to
1405 * need a spare table. this ioctl does not return until a
1406 * spare table is needed. XXX -- calling mpsleep here in the
1407 * ioctl code is almost certainly wrong and evil. -- XXX XXX
1408 * -- I should either compute the spare table in the kernel,
1409 * or have a different -- XXX XXX -- interface (a different
1410 * character device) for delivering the table -- XXX */
1411 #if 0
1412 case RAIDFRAME_SPARET_WAIT:
1413 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1414 while (!rf_sparet_wait_queue)
1415 mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1416 waitreq = rf_sparet_wait_queue;
1417 rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1418 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1419
1420 /* structure assignment */
1421 *((RF_SparetWait_t *) data) = *waitreq;
1422
1423 RF_Free(waitreq, sizeof(*waitreq));
1424 return (0);
1425
1426 /* wakes up a process waiting on SPARET_WAIT and puts an error
1427 * code in it that will cause the dameon to exit */
1428 case RAIDFRAME_ABORT_SPARET_WAIT:
1429 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1430 waitreq->fcol = -1;
1431 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1432 waitreq->next = rf_sparet_wait_queue;
1433 rf_sparet_wait_queue = waitreq;
1434 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1435 wakeup(&rf_sparet_wait_queue);
1436 return (0);
1437
1438 /* used by the spare table daemon to deliver a spare table
1439 * into the kernel */
1440 case RAIDFRAME_SEND_SPARET:
1441
1442 /* install the spare table */
1443 retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1444
1445 /* respond to the requestor. the return status of the spare
1446 * table installation is passed in the "fcol" field */
1447 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1448 waitreq->fcol = retcode;
1449 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1450 waitreq->next = rf_sparet_resp_queue;
1451 rf_sparet_resp_queue = waitreq;
1452 wakeup(&rf_sparet_resp_queue);
1453 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1454
1455 return (retcode);
1456 #endif
1457
1458 default:
1459 break; /* fall through to the os-specific code below */
1460
1461 }
1462
1463 if (!raidPtr->valid)
1464 return (EINVAL);
1465
1466 /*
1467 * Add support for "regular" device ioctls here.
1468 */
1469
1470 switch (cmd) {
1471 case DIOCGDINFO:
1472 *(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1473 break;
1474 #ifdef __HAVE_OLD_DISKLABEL
1475 case ODIOCGDINFO:
1476 newlabel = *(rs->sc_dkdev.dk_label);
1477 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1478 return ENOTTY;
1479 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1480 break;
1481 #endif
1482
1483 case DIOCGPART:
1484 ((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1485 ((struct partinfo *) data)->part =
1486 &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1487 break;
1488
1489 case DIOCWDINFO:
1490 case DIOCSDINFO:
1491 #ifdef __HAVE_OLD_DISKLABEL
1492 case ODIOCWDINFO:
1493 case ODIOCSDINFO:
1494 #endif
1495 {
1496 struct disklabel *lp;
1497 #ifdef __HAVE_OLD_DISKLABEL
1498 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1499 memset(&newlabel, 0, sizeof newlabel);
1500 memcpy(&newlabel, data, sizeof (struct olddisklabel));
1501 lp = &newlabel;
1502 } else
1503 #endif
1504 lp = (struct disklabel *)data;
1505
1506 if ((error = raidlock(rs)) != 0)
1507 return (error);
1508
1509 rs->sc_flags |= RAIDF_LABELLING;
1510
1511 error = setdisklabel(rs->sc_dkdev.dk_label,
1512 lp, 0, rs->sc_dkdev.dk_cpulabel);
1513 if (error == 0) {
1514 if (cmd == DIOCWDINFO
1515 #ifdef __HAVE_OLD_DISKLABEL
1516 || cmd == ODIOCWDINFO
1517 #endif
1518 )
1519 error = writedisklabel(RAIDLABELDEV(dev),
1520 raidstrategy, rs->sc_dkdev.dk_label,
1521 rs->sc_dkdev.dk_cpulabel);
1522 }
1523 rs->sc_flags &= ~RAIDF_LABELLING;
1524
1525 raidunlock(rs);
1526
1527 if (error)
1528 return (error);
1529 break;
1530 }
1531
1532 case DIOCWLABEL:
1533 if (*(int *) data != 0)
1534 rs->sc_flags |= RAIDF_WLABEL;
1535 else
1536 rs->sc_flags &= ~RAIDF_WLABEL;
1537 break;
1538
1539 case DIOCGDEFLABEL:
1540 raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
1541 break;
1542
1543 #ifdef __HAVE_OLD_DISKLABEL
1544 case ODIOCGDEFLABEL:
1545 raidgetdefaultlabel(raidPtr, rs, &newlabel);
1546 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1547 return ENOTTY;
1548 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1549 break;
1550 #endif
1551
1552 default:
1553 retcode = ENOTTY;
1554 }
1555 return (retcode);
1556
1557 }
1558
1559
1560 /* raidinit -- complete the rest of the initialization for the
1561 RAIDframe device. */
1562
1563
1564 static void
1565 raidinit(raidPtr)
1566 RF_Raid_t *raidPtr;
1567 {
1568 struct raid_softc *rs;
1569 int unit;
1570
1571 unit = raidPtr->raidid;
1572
1573 rs = &raid_softc[unit];
1574
1575 /* XXX should check return code first... */
1576 rs->sc_flags |= RAIDF_INITED;
1577
1578 sprintf(rs->sc_xname, "raid%d", unit); /* XXX doesn't check bounds. */
1579
1580 rs->sc_dkdev.dk_name = rs->sc_xname;
1581
1582 /* disk_attach actually creates space for the CPU disklabel, among
1583 * other things, so it's critical to call this *BEFORE* we try putzing
1584 * with disklabels. */
1585
1586 disk_attach(&rs->sc_dkdev);
1587
1588 /* XXX There may be a weird interaction here between this, and
1589 * protectedSectors, as used in RAIDframe. */
1590
1591 rs->sc_size = raidPtr->totalSectors;
1592
1593 }
1594
1595 /* wake up the daemon & tell it to get us a spare table
1596 * XXX
1597 * the entries in the queues should be tagged with the raidPtr
1598 * so that in the extremely rare case that two recons happen at once,
1599 * we know for which device were requesting a spare table
1600 * XXX
1601 *
1602 * XXX This code is not currently used. GO
1603 */
1604 int
1605 rf_GetSpareTableFromDaemon(req)
1606 RF_SparetWait_t *req;
1607 {
1608 int retcode;
1609
1610 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1611 req->next = rf_sparet_wait_queue;
1612 rf_sparet_wait_queue = req;
1613 wakeup(&rf_sparet_wait_queue);
1614
1615 /* mpsleep unlocks the mutex */
1616 while (!rf_sparet_resp_queue) {
1617 tsleep(&rf_sparet_resp_queue, PRIBIO,
1618 "raidframe getsparetable", 0);
1619 }
1620 req = rf_sparet_resp_queue;
1621 rf_sparet_resp_queue = req->next;
1622 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1623
1624 retcode = req->fcol;
1625 RF_Free(req, sizeof(*req)); /* this is not the same req as we
1626 * alloc'd */
1627 return (retcode);
1628 }
1629
1630 /* a wrapper around rf_DoAccess that extracts appropriate info from the
1631 * bp & passes it down.
1632 * any calls originating in the kernel must use non-blocking I/O
1633 * do some extra sanity checking to return "appropriate" error values for
1634 * certain conditions (to make some standard utilities work)
1635 *
1636 * Formerly known as: rf_DoAccessKernel
1637 */
1638 void
1639 raidstart(raidPtr)
1640 RF_Raid_t *raidPtr;
1641 {
1642 RF_SectorCount_t num_blocks, pb, sum;
1643 RF_RaidAddr_t raid_addr;
1644 int retcode;
1645 struct partition *pp;
1646 daddr_t blocknum;
1647 int unit;
1648 struct raid_softc *rs;
1649 int do_async;
1650 struct buf *bp;
1651
1652 unit = raidPtr->raidid;
1653 rs = &raid_softc[unit];
1654
1655 /* quick check to see if anything has died recently */
1656 RF_LOCK_MUTEX(raidPtr->mutex);
1657 if (raidPtr->numNewFailures > 0) {
1658 rf_update_component_labels(raidPtr,
1659 RF_NORMAL_COMPONENT_UPDATE);
1660 raidPtr->numNewFailures--;
1661 }
1662
1663 /* Check to see if we're at the limit... */
1664 while (raidPtr->openings > 0) {
1665 RF_UNLOCK_MUTEX(raidPtr->mutex);
1666
1667 /* get the next item, if any, from the queue */
1668 if ((bp = BUFQ_GET(&rs->buf_queue)) == NULL) {
1669 /* nothing more to do */
1670 return;
1671 }
1672
1673 /* Ok, for the bp we have here, bp->b_blkno is relative to the
1674 * partition.. Need to make it absolute to the underlying
1675 * device.. */
1676
1677 blocknum = bp->b_blkno;
1678 if (DISKPART(bp->b_dev) != RAW_PART) {
1679 pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
1680 blocknum += pp->p_offset;
1681 }
1682
1683 db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
1684 (int) blocknum));
1685
1686 db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
1687 db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
1688
1689 /* *THIS* is where we adjust what block we're going to...
1690 * but DO NOT TOUCH bp->b_blkno!!! */
1691 raid_addr = blocknum;
1692
1693 num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
1694 pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
1695 sum = raid_addr + num_blocks + pb;
1696 if (1 || rf_debugKernelAccess) {
1697 db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
1698 (int) raid_addr, (int) sum, (int) num_blocks,
1699 (int) pb, (int) bp->b_resid));
1700 }
1701 if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
1702 || (sum < num_blocks) || (sum < pb)) {
1703 bp->b_error = ENOSPC;
1704 bp->b_flags |= B_ERROR;
1705 bp->b_resid = bp->b_bcount;
1706 biodone(bp);
1707 RF_LOCK_MUTEX(raidPtr->mutex);
1708 continue;
1709 }
1710 /*
1711 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
1712 */
1713
1714 if (bp->b_bcount & raidPtr->sectorMask) {
1715 bp->b_error = EINVAL;
1716 bp->b_flags |= B_ERROR;
1717 bp->b_resid = bp->b_bcount;
1718 biodone(bp);
1719 RF_LOCK_MUTEX(raidPtr->mutex);
1720 continue;
1721
1722 }
1723 db1_printf(("Calling DoAccess..\n"));
1724
1725
1726 RF_LOCK_MUTEX(raidPtr->mutex);
1727 raidPtr->openings--;
1728 RF_UNLOCK_MUTEX(raidPtr->mutex);
1729
1730 /*
1731 * Everything is async.
1732 */
1733 do_async = 1;
1734
1735 disk_busy(&rs->sc_dkdev);
1736
1737 /* XXX we're still at splbio() here... do we *really*
1738 need to be? */
1739
1740 /* don't ever condition on bp->b_flags & B_WRITE.
1741 * always condition on B_READ instead */
1742
1743 retcode = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
1744 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
1745 do_async, raid_addr, num_blocks,
1746 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
1747
1748 RF_LOCK_MUTEX(raidPtr->mutex);
1749 }
1750 RF_UNLOCK_MUTEX(raidPtr->mutex);
1751 }
1752
1753
1754
1755
1756 /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
1757
1758 int
1759 rf_DispatchKernelIO(queue, req)
1760 RF_DiskQueue_t *queue;
1761 RF_DiskQueueData_t *req;
1762 {
1763 int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
1764 struct buf *bp;
1765 struct raidbuf *raidbp = NULL;
1766 struct raid_softc *rs;
1767 int unit;
1768 int s;
1769
1770 s=0;
1771 /* s = splbio();*/ /* want to test this */
1772 /* XXX along with the vnode, we also need the softc associated with
1773 * this device.. */
1774
1775 req->queue = queue;
1776
1777 unit = queue->raidPtr->raidid;
1778
1779 db1_printf(("DispatchKernelIO unit: %d\n", unit));
1780
1781 if (unit >= numraid) {
1782 printf("Invalid unit number: %d %d\n", unit, numraid);
1783 panic("Invalid Unit number in rf_DispatchKernelIO\n");
1784 }
1785 rs = &raid_softc[unit];
1786
1787 bp = req->bp;
1788 #if 1
1789 /* XXX when there is a physical disk failure, someone is passing us a
1790 * buffer that contains old stuff!! Attempt to deal with this problem
1791 * without taking a performance hit... (not sure where the real bug
1792 * is. It's buried in RAIDframe somewhere) :-( GO ) */
1793
1794 if (bp->b_flags & B_ERROR) {
1795 bp->b_flags &= ~B_ERROR;
1796 }
1797 if (bp->b_error != 0) {
1798 bp->b_error = 0;
1799 }
1800 #endif
1801 raidbp = RAIDGETBUF(rs);
1802
1803 raidbp->rf_flags = 0; /* XXX not really used anywhere... */
1804
1805 /*
1806 * context for raidiodone
1807 */
1808 raidbp->rf_obp = bp;
1809 raidbp->req = req;
1810
1811 LIST_INIT(&raidbp->rf_buf.b_dep);
1812
1813 switch (req->type) {
1814 case RF_IO_TYPE_NOP: /* used primarily to unlock a locked queue */
1815 /* XXX need to do something extra here.. */
1816 /* I'm leaving this in, as I've never actually seen it used,
1817 * and I'd like folks to report it... GO */
1818 printf(("WAKEUP CALLED\n"));
1819 queue->numOutstanding++;
1820
1821 /* XXX need to glue the original buffer into this?? */
1822
1823 KernelWakeupFunc(&raidbp->rf_buf);
1824 break;
1825
1826 case RF_IO_TYPE_READ:
1827 case RF_IO_TYPE_WRITE:
1828
1829 if (req->tracerec) {
1830 RF_ETIMER_START(req->tracerec->timer);
1831 }
1832 InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
1833 op | bp->b_flags, queue->rf_cinfo->ci_dev,
1834 req->sectorOffset, req->numSector,
1835 req->buf, KernelWakeupFunc, (void *) req,
1836 queue->raidPtr->logBytesPerSector, req->b_proc);
1837
1838 if (rf_debugKernelAccess) {
1839 db1_printf(("dispatch: bp->b_blkno = %ld\n",
1840 (long) bp->b_blkno));
1841 }
1842 queue->numOutstanding++;
1843 queue->last_deq_sector = req->sectorOffset;
1844 /* acc wouldn't have been let in if there were any pending
1845 * reqs at any other priority */
1846 queue->curPriority = req->priority;
1847
1848 db1_printf(("Going for %c to unit %d row %d col %d\n",
1849 req->type, unit, queue->row, queue->col));
1850 db1_printf(("sector %d count %d (%d bytes) %d\n",
1851 (int) req->sectorOffset, (int) req->numSector,
1852 (int) (req->numSector <<
1853 queue->raidPtr->logBytesPerSector),
1854 (int) queue->raidPtr->logBytesPerSector));
1855 if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
1856 raidbp->rf_buf.b_vp->v_numoutput++;
1857 }
1858 VOP_STRATEGY(&raidbp->rf_buf);
1859
1860 break;
1861
1862 default:
1863 panic("bad req->type in rf_DispatchKernelIO");
1864 }
1865 db1_printf(("Exiting from DispatchKernelIO\n"));
1866 /* splx(s); */ /* want to test this */
1867 return (0);
1868 }
1869 /* this is the callback function associated with a I/O invoked from
1870 kernel code.
1871 */
1872 static void
1873 KernelWakeupFunc(vbp)
1874 struct buf *vbp;
1875 {
1876 RF_DiskQueueData_t *req = NULL;
1877 RF_DiskQueue_t *queue;
1878 struct raidbuf *raidbp = (struct raidbuf *) vbp;
1879 struct buf *bp;
1880 struct raid_softc *rs;
1881 int unit;
1882 int s;
1883
1884 s = splbio();
1885 db1_printf(("recovering the request queue:\n"));
1886 req = raidbp->req;
1887
1888 bp = raidbp->rf_obp;
1889
1890 queue = (RF_DiskQueue_t *) req->queue;
1891
1892 if (raidbp->rf_buf.b_flags & B_ERROR) {
1893 bp->b_flags |= B_ERROR;
1894 bp->b_error = raidbp->rf_buf.b_error ?
1895 raidbp->rf_buf.b_error : EIO;
1896 }
1897
1898 /* XXX methinks this could be wrong... */
1899 #if 1
1900 bp->b_resid = raidbp->rf_buf.b_resid;
1901 #endif
1902
1903 if (req->tracerec) {
1904 RF_ETIMER_STOP(req->tracerec->timer);
1905 RF_ETIMER_EVAL(req->tracerec->timer);
1906 RF_LOCK_MUTEX(rf_tracing_mutex);
1907 req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1908 req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1909 req->tracerec->num_phys_ios++;
1910 RF_UNLOCK_MUTEX(rf_tracing_mutex);
1911 }
1912 bp->b_bcount = raidbp->rf_buf.b_bcount; /* XXXX ?? */
1913
1914 unit = queue->raidPtr->raidid; /* *Much* simpler :-> */
1915
1916
1917 /* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
1918 * ballistic, and mark the component as hosed... */
1919
1920 if (bp->b_flags & B_ERROR) {
1921 /* Mark the disk as dead */
1922 /* but only mark it once... */
1923 if (queue->raidPtr->Disks[queue->row][queue->col].status ==
1924 rf_ds_optimal) {
1925 printf("raid%d: IO Error. Marking %s as failed.\n",
1926 unit, queue->raidPtr->Disks[queue->row][queue->col].devname);
1927 queue->raidPtr->Disks[queue->row][queue->col].status =
1928 rf_ds_failed;
1929 queue->raidPtr->status[queue->row] = rf_rs_degraded;
1930 queue->raidPtr->numFailures++;
1931 queue->raidPtr->numNewFailures++;
1932 } else { /* Disk is already dead... */
1933 /* printf("Disk already marked as dead!\n"); */
1934 }
1935
1936 }
1937
1938 rs = &raid_softc[unit];
1939 RAIDPUTBUF(rs, raidbp);
1940
1941 rf_DiskIOComplete(queue, req, (bp->b_flags & B_ERROR) ? 1 : 0);
1942 (req->CompleteFunc) (req->argument, (bp->b_flags & B_ERROR) ? 1 : 0);
1943
1944 splx(s);
1945 }
1946
1947
1948
1949 /*
1950 * initialize a buf structure for doing an I/O in the kernel.
1951 */
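/*
 * B_CALL is set and b_iodone is pointed at cbFunc, so cbFunc
 * (KernelWakeupFunc) is invoked from biodone() when the I/O finishes.
 * b_blkno is taken directly from startSect (RAIDframe sector units).
 * Note that the cbArg argument is currently unused here; the callback
 * recovers its request from the containing struct raidbuf instead.
 */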
1952 static void
1953 InitBP(bp, b_vp, rw_flag, dev, startSect, numSect, buf, cbFunc, cbArg,
1954 logBytesPerSector, b_proc)
1955 struct buf *bp;
1956 struct vnode *b_vp;
1957 unsigned rw_flag;
1958 dev_t dev;
1959 RF_SectorNum_t startSect;
1960 RF_SectorCount_t numSect;
1961 caddr_t buf;
1962 void (*cbFunc) (struct buf *);
1963 void *cbArg;
1964 int logBytesPerSector;
1965 struct proc *b_proc;
1966 {
1967 /* bp->b_flags = B_PHYS | rw_flag; */
1968 bp->b_flags = B_CALL | rw_flag; /* XXX need B_PHYS here too??? */
1969 bp->b_bcount = numSect << logBytesPerSector;
1970 bp->b_bufsize = bp->b_bcount;
1971 bp->b_error = 0;
1972 bp->b_dev = dev;
1973 bp->b_data = buf;
1974 bp->b_blkno = startSect;
1975 bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
1976 if (bp->b_bcount == 0) {
1977 panic("bp->b_bcount is zero in InitBP!!\n");
1978 }
1979 bp->b_proc = b_proc;
1980 bp->b_iodone = cbFunc;
1981 bp->b_vp = b_vp;
1982
1983 }
1984
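/*
 * Fabricate a default disklabel spanning the whole RAID device.  The
 * geometry below (e.g. 4 tracks per column) is fictitious; the fields
 * that really describe the device are d_secperunit, d_secsize and the
 * raw partition.
 */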
1985 static void
1986 raidgetdefaultlabel(raidPtr, rs, lp)
1987 RF_Raid_t *raidPtr;
1988 struct raid_softc *rs;
1989 struct disklabel *lp;
1990 {
1991 db1_printf(("Building a default label...\n"));
1992 memset(lp, 0, sizeof(*lp));
1993
1994 /* fabricate a label... */
1995 lp->d_secperunit = raidPtr->totalSectors;
1996 lp->d_secsize = raidPtr->bytesPerSector;
1997 lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
1998 lp->d_ntracks = 4 * raidPtr->numCol;
1999 lp->d_ncylinders = raidPtr->totalSectors /
2000 (lp->d_nsectors * lp->d_ntracks);
2001 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
2002
2003 strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
2004 lp->d_type = DTYPE_RAID;
2005 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
2006 lp->d_rpm = 3600;
2007 lp->d_interleave = 1;
2008 lp->d_flags = 0;
2009
2010 lp->d_partitions[RAW_PART].p_offset = 0;
2011 lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
2012 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
2013 lp->d_npartitions = RAW_PART + 1;
2014
2015 lp->d_magic = DISKMAGIC;
2016 lp->d_magic2 = DISKMAGIC;
2017 lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
2018
2019 }
2020 /*
2021 * Read the disklabel from the raid device. If one is not present, fake one
2022 * up.
2023 */
2024 static void
2025 raidgetdisklabel(dev)
2026 dev_t dev;
2027 {
2028 int unit = raidunit(dev);
2029 struct raid_softc *rs = &raid_softc[unit];
2030 char *errstring;
2031 struct disklabel *lp = rs->sc_dkdev.dk_label;
2032 struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
2033 RF_Raid_t *raidPtr;
2034
2035 db1_printf(("Getting the disklabel...\n"));
2036
2037 memset(clp, 0, sizeof(*clp));
2038
2039 raidPtr = raidPtrs[unit];
2040
2041 raidgetdefaultlabel(raidPtr, rs, lp);
2042
2043 /*
2044 * Call the generic disklabel extraction routine.
2045 */
2046 errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
2047 rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
2048 if (errstring)
2049 raidmakedisklabel(rs);
2050 else {
2051 int i;
2052 struct partition *pp;
2053
2054 /*
2055 * Sanity check whether the found disklabel is valid.
2056 *
2057 		 * This is necessary since the total size of the raid device
2058 		 * may vary when the interleave is changed even though exactly
2059 		 * the same components are used, and an old disklabel may be
2060 		 * used if one is found.
2061 */
2062 if (lp->d_secperunit != rs->sc_size)
2063 printf("raid%d: WARNING: %s: "
2064 "total sector size in disklabel (%d) != "
2065 "the size of raid (%ld)\n", unit, rs->sc_xname,
2066 lp->d_secperunit, (long) rs->sc_size);
2067 for (i = 0; i < lp->d_npartitions; i++) {
2068 pp = &lp->d_partitions[i];
2069 if (pp->p_offset + pp->p_size > rs->sc_size)
2070 printf("raid%d: WARNING: %s: end of partition `%c' "
2071 "exceeds the size of raid (%ld)\n",
2072 unit, rs->sc_xname, 'a' + i, (long) rs->sc_size);
2073 }
2074 }
2075
2076 }
2077 /*
2078 * Take care of things one might want to take care of in the event
2079 * that a disklabel isn't present.
2080 */
2081 static void
2082 raidmakedisklabel(rs)
2083 struct raid_softc *rs;
2084 {
2085 struct disklabel *lp = rs->sc_dkdev.dk_label;
2086 db1_printf(("Making a label..\n"));
2087
2088 /*
2089 * For historical reasons, if there's no disklabel present
2090 * the raw partition must be marked FS_BSDFFS.
2091 */
2092
2093 lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
2094
2095 strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
2096
2097 lp->d_checksum = dkcksum(lp);
2098 }
2099 /*
2100 * Lookup the provided name in the filesystem. If the file exists,
2101 * is a valid block device, and isn't being used by anyone else,
2102 * set *vpp to the file's vnode.
2103 * You'll find the original of this in ccd.c
2104 */
2105 int
2106 raidlookup(path, p, vpp)
2107 char *path;
2108 struct proc *p;
2109 struct vnode **vpp; /* result */
2110 {
2111 struct nameidata nd;
2112 struct vnode *vp;
2113 struct vattr va;
2114 int error;
2115
2116 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
2117 if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
2118 #if 0
2119 printf("RAIDframe: vn_open returned %d\n", error);
2120 #endif
2121 return (error);
2122 }
2123 vp = nd.ni_vp;
2124 if (vp->v_usecount > 1) {
2125 VOP_UNLOCK(vp, 0);
2126 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2127 return (EBUSY);
2128 }
2129 if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
2130 VOP_UNLOCK(vp, 0);
2131 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2132 return (error);
2133 }
2134 /* XXX: eventually we should handle VREG, too. */
2135 if (va.va_type != VBLK) {
2136 VOP_UNLOCK(vp, 0);
2137 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2138 return (ENOTBLK);
2139 }
2140 VOP_UNLOCK(vp, 0);
2141 *vpp = vp;
2142 return (0);
2143 }
2144 /*
2145 * Wait interruptibly for an exclusive lock.
2146 *
2147 * XXX
2148 * Several drivers do this; it should be abstracted and made MP-safe.
2149 * (Hmm... where have we seen this warning before :-> GO )
2150 */
2151 static int
2152 raidlock(rs)
2153 struct raid_softc *rs;
2154 {
2155 int error;
2156
2157 while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2158 rs->sc_flags |= RAIDF_WANTED;
2159 if ((error =
2160 tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
2161 return (error);
2162 }
2163 rs->sc_flags |= RAIDF_LOCKED;
2164 return (0);
2165 }
2166 /*
2167 * Unlock and wake up any waiters.
2168 */
2169 static void
2170 raidunlock(rs)
2171 struct raid_softc *rs;
2172 {
2173
2174 rs->sc_flags &= ~RAIDF_LOCKED;
2175 if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2176 rs->sc_flags &= ~RAIDF_WANTED;
2177 wakeup(rs);
2178 }
2179 }
2180
2181
2182 #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2183 #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
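/*
 * The component label lives RF_COMPONENT_INFO_OFFSET bytes
 * (RF_COMPONENT_INFO_OFFSET / DEV_BSIZE sectors) into each component,
 * in the area RAIDframe keeps reserved at the front of every
 * component.  The routines below read and write it directly through
 * the block device's strategy routine using a throwaway buffer from
 * geteblk().
 */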
2184
2185 int
2186 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
2187 {
2188 RF_ComponentLabel_t clabel;
2189 raidread_component_label(dev, b_vp, &clabel);
2190 clabel.mod_counter = mod_counter;
2191 clabel.clean = RF_RAID_CLEAN;
2192 raidwrite_component_label(dev, b_vp, &clabel);
2193 return(0);
2194 }
2195
2196
2197 int
2198 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
2199 {
2200 RF_ComponentLabel_t clabel;
2201 raidread_component_label(dev, b_vp, &clabel);
2202 clabel.mod_counter = mod_counter;
2203 clabel.clean = RF_RAID_DIRTY;
2204 raidwrite_component_label(dev, b_vp, &clabel);
2205 return(0);
2206 }
2207
2208 /* ARGSUSED */
2209 int
2210 raidread_component_label(dev, b_vp, clabel)
2211 dev_t dev;
2212 struct vnode *b_vp;
2213 RF_ComponentLabel_t *clabel;
2214 {
2215 struct buf *bp;
2216 int error;
2217
2218 /* XXX should probably ensure that we don't try to do this if
2219 someone has changed rf_protected_sectors. */
2220
2221 if (b_vp == NULL) {
2222 /* For whatever reason, this component is not valid.
2223 Don't try to read a component label from it. */
2224 return(EINVAL);
2225 }
2226
2227 /* get a block of the appropriate size... */
2228 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2229 bp->b_dev = dev;
2230
2231 /* get our ducks in a row for the read */
2232 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2233 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2234 bp->b_flags |= B_READ;
2235 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2236
2237 (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
2238
2239 error = biowait(bp);
2240
2241 if (!error) {
2242 memcpy(clabel, bp->b_data,
2243 sizeof(RF_ComponentLabel_t));
2244 #if 0
2245 rf_print_component_label( clabel );
2246 #endif
2247 } else {
2248 #if 0
2249 printf("Failed to read RAID component label!\n");
2250 #endif
2251 }
2252
2253 brelse(bp);
2254 return(error);
2255 }
2256 /* ARGSUSED */
2257 int
2258 raidwrite_component_label(dev, b_vp, clabel)
2259 dev_t dev;
2260 struct vnode *b_vp;
2261 RF_ComponentLabel_t *clabel;
2262 {
2263 struct buf *bp;
2264 int error;
2265
2266 /* get a block of the appropriate size... */
2267 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2268 bp->b_dev = dev;
2269
2270 /* get our ducks in a row for the write */
2271 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2272 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2273 bp->b_flags |= B_WRITE;
2274 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2275
2276 memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
2277
2278 memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
2279
2280 (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
2281 error = biowait(bp);
2282 brelse(bp);
2283 if (error) {
2284 #if 1
2285 printf("Failed to write RAID component info!\n");
2286 #endif
2287 }
2288
2289 return(error);
2290 }
2291
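/*
 * Bump the modification counter and mark the component label on every
 * non-failed component as dirty.  Spared components are deliberately
 * left untouched.
 */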
2292 void
2293 rf_markalldirty(raidPtr)
2294 RF_Raid_t *raidPtr;
2295 {
2296 RF_ComponentLabel_t clabel;
2297 int r,c;
2298
2299 raidPtr->mod_counter++;
2300 for (r = 0; r < raidPtr->numRow; r++) {
2301 for (c = 0; c < raidPtr->numCol; c++) {
2302 /* we don't want to touch (at all) a disk that has
2303 failed */
2304 if (!RF_DEAD_DISK(raidPtr->Disks[r][c].status)) {
2305 raidread_component_label(
2306 raidPtr->Disks[r][c].dev,
2307 raidPtr->raid_cinfo[r][c].ci_vp,
2308 &clabel);
2309 if (clabel.status == rf_ds_spared) {
2310 /* XXX do something special...
2311 but whatever you do, don't
2312 try to access it!! */
2313 } else {
2314 #if 0
2315 clabel.status =
2316 raidPtr->Disks[r][c].status;
2317 raidwrite_component_label(
2318 raidPtr->Disks[r][c].dev,
2319 raidPtr->raid_cinfo[r][c].ci_vp,
2320 &clabel);
2321 #endif
2322 raidmarkdirty(
2323 raidPtr->Disks[r][c].dev,
2324 raidPtr->raid_cinfo[r][c].ci_vp,
2325 raidPtr->mod_counter);
2326 }
2327 }
2328 }
2329 }
2330 /* printf("Component labels marked dirty.\n"); */
2331 #if 0
2332 for( c = 0; c < raidPtr->numSpare ; c++) {
2333 sparecol = raidPtr->numCol + c;
2334 if (raidPtr->Disks[r][sparecol].status == rf_ds_used_spare) {
2335 /*
2336
2337 XXX this is where we get fancy and map this spare
2338 		   into its correct spot in the array.
2339
2340 */
2341 /*
2342
2343 we claim this disk is "optimal" if it's
2344 rf_ds_used_spare, as that means it should be
2345 directly substitutable for the disk it replaced.
2346 We note that too...
2347
2348 */
2349
2350 for(i=0;i<raidPtr->numRow;i++) {
2351 for(j=0;j<raidPtr->numCol;j++) {
2352 if ((raidPtr->Disks[i][j].spareRow ==
2353 r) &&
2354 (raidPtr->Disks[i][j].spareCol ==
2355 sparecol)) {
2356 srow = r;
2357 scol = sparecol;
2358 break;
2359 }
2360 }
2361 }
2362
2363 raidread_component_label(
2364 raidPtr->Disks[r][sparecol].dev,
2365 raidPtr->raid_cinfo[r][sparecol].ci_vp,
2366 &clabel);
2367 /* make sure status is noted */
2368 clabel.version = RF_COMPONENT_LABEL_VERSION;
2369 clabel.mod_counter = raidPtr->mod_counter;
2370 clabel.serial_number = raidPtr->serial_number;
2371 clabel.row = srow;
2372 clabel.column = scol;
2373 clabel.num_rows = raidPtr->numRow;
2374 clabel.num_columns = raidPtr->numCol;
2375 clabel.clean = RF_RAID_DIRTY; /* changed in a bit*/
2376 clabel.status = rf_ds_optimal;
2377 raidwrite_component_label(
2378 raidPtr->Disks[r][sparecol].dev,
2379 raidPtr->raid_cinfo[r][sparecol].ci_vp,
2380 &clabel);
2381 raidmarkclean( raidPtr->Disks[r][sparecol].dev,
2382 raidPtr->raid_cinfo[r][sparecol].ci_vp);
2383 }
2384 }
2385
2386 #endif
2387 }
2388
2389
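/*
 * Rewrite the component labels to reflect the current state of the
 * set: bump the mod counter, re-label every optimal component and
 * every in-use spare (recording the row/column the spare now stands in
 * for), and, if this is the final update and parity is known good,
 * mark the labels clean as well.
 */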
2390 void
2391 rf_update_component_labels(raidPtr, final)
2392 RF_Raid_t *raidPtr;
2393 int final;
2394 {
2395 RF_ComponentLabel_t clabel;
2396 int sparecol;
2397 int r,c;
2398 int i,j;
2399 int srow, scol;
2400
2401 srow = -1;
2402 scol = -1;
2403
2404 /* XXX should do extra checks to make sure things really are clean,
2405 rather than blindly setting the clean bit... */
2406
2407 raidPtr->mod_counter++;
2408
2409 for (r = 0; r < raidPtr->numRow; r++) {
2410 for (c = 0; c < raidPtr->numCol; c++) {
2411 if (raidPtr->Disks[r][c].status == rf_ds_optimal) {
2412 raidread_component_label(
2413 raidPtr->Disks[r][c].dev,
2414 raidPtr->raid_cinfo[r][c].ci_vp,
2415 &clabel);
2416 /* make sure status is noted */
2417 clabel.status = rf_ds_optimal;
2418 /* bump the counter */
2419 clabel.mod_counter = raidPtr->mod_counter;
2420
2421 raidwrite_component_label(
2422 raidPtr->Disks[r][c].dev,
2423 raidPtr->raid_cinfo[r][c].ci_vp,
2424 &clabel);
2425 if (final == RF_FINAL_COMPONENT_UPDATE) {
2426 if (raidPtr->parity_good == RF_RAID_CLEAN) {
2427 raidmarkclean(
2428 raidPtr->Disks[r][c].dev,
2429 raidPtr->raid_cinfo[r][c].ci_vp,
2430 raidPtr->mod_counter);
2431 }
2432 }
2433 }
2434 /* else we don't touch it.. */
2435 }
2436 }
2437
2438 for( c = 0; c < raidPtr->numSpare ; c++) {
2439 sparecol = raidPtr->numCol + c;
2440 /* Need to ensure that the reconstruct actually completed! */
2441 if (raidPtr->Disks[0][sparecol].status == rf_ds_used_spare) {
2442 /*
2443
2444 we claim this disk is "optimal" if it's
2445 rf_ds_used_spare, as that means it should be
2446 directly substitutable for the disk it replaced.
2447 We note that too...
2448
2449 */
2450
2451 for(i=0;i<raidPtr->numRow;i++) {
2452 for(j=0;j<raidPtr->numCol;j++) {
2453 if ((raidPtr->Disks[i][j].spareRow ==
2454 0) &&
2455 (raidPtr->Disks[i][j].spareCol ==
2456 sparecol)) {
2457 srow = i;
2458 scol = j;
2459 break;
2460 }
2461 }
2462 }
2463
2464 /* XXX shouldn't *really* need this... */
2465 raidread_component_label(
2466 raidPtr->Disks[0][sparecol].dev,
2467 raidPtr->raid_cinfo[0][sparecol].ci_vp,
2468 &clabel);
2469 /* make sure status is noted */
2470
2471 raid_init_component_label(raidPtr, &clabel);
2472
2473 clabel.mod_counter = raidPtr->mod_counter;
2474 clabel.row = srow;
2475 clabel.column = scol;
2476 clabel.status = rf_ds_optimal;
2477
2478 raidwrite_component_label(
2479 raidPtr->Disks[0][sparecol].dev,
2480 raidPtr->raid_cinfo[0][sparecol].ci_vp,
2481 &clabel);
2482 if (final == RF_FINAL_COMPONENT_UPDATE) {
2483 if (raidPtr->parity_good == RF_RAID_CLEAN) {
2484 raidmarkclean( raidPtr->Disks[0][sparecol].dev,
2485 raidPtr->raid_cinfo[0][sparecol].ci_vp,
2486 raidPtr->mod_counter);
2487 }
2488 }
2489 }
2490 }
2491 /* printf("Component labels updated\n"); */
2492 }
2493
2494 void
2495 rf_close_component(raidPtr, vp, auto_configured)
2496 RF_Raid_t *raidPtr;
2497 struct vnode *vp;
2498 int auto_configured;
2499 {
2500 struct proc *p;
2501
2502 p = raidPtr->engine_thread;
2503
2504 if (vp != NULL) {
2505 if (auto_configured == 1) {
2506 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2507 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2508 vput(vp);
2509
2510 } else {
2511 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2512 }
2513 } else {
2514 #if 0
2515 printf("vnode was NULL\n");
2516 #endif
2517 }
2518 }
2519
2520
2521 void
2522 rf_UnconfigureVnodes(raidPtr)
2523 RF_Raid_t *raidPtr;
2524 {
2525 int r,c;
2526 struct proc *p;
2527 struct vnode *vp;
2528 int acd;
2529
2530
2531 /* We take this opportunity to close the vnodes like we should.. */
2532
2533 p = raidPtr->engine_thread;
2534
2535 for (r = 0; r < raidPtr->numRow; r++) {
2536 for (c = 0; c < raidPtr->numCol; c++) {
2537 #if 0
2538 printf("raid%d: Closing vnode for row: %d col: %d\n",
2539 raidPtr->raidid, r, c);
2540 #endif
2541 vp = raidPtr->raid_cinfo[r][c].ci_vp;
2542 acd = raidPtr->Disks[r][c].auto_configured;
2543 rf_close_component(raidPtr, vp, acd);
2544 raidPtr->raid_cinfo[r][c].ci_vp = NULL;
2545 raidPtr->Disks[r][c].auto_configured = 0;
2546 }
2547 }
2548 for (r = 0; r < raidPtr->numSpare; r++) {
2549 #if 0
2550 printf("raid%d: Closing vnode for spare: %d\n",
2551 raidPtr->raidid, r);
2552 #endif
2553 vp = raidPtr->raid_cinfo[0][raidPtr->numCol + r].ci_vp;
2554 acd = raidPtr->Disks[0][raidPtr->numCol + r].auto_configured;
2555 rf_close_component(raidPtr, vp, acd);
2556 raidPtr->raid_cinfo[0][raidPtr->numCol + r].ci_vp = NULL;
2557 raidPtr->Disks[0][raidPtr->numCol + r].auto_configured = 0;
2558 }
2559 }
2560
2561
2562 void
2563 rf_ReconThread(req)
2564 struct rf_recon_req *req;
2565 {
2566 int s;
2567 RF_Raid_t *raidPtr;
2568
2569 s = splbio();
2570 raidPtr = (RF_Raid_t *) req->raidPtr;
2571 raidPtr->recon_in_progress = 1;
2572
2573 rf_FailDisk((RF_Raid_t *) req->raidPtr, req->row, req->col,
2574 ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2575
2576 /* XXX get rid of this! we don't need it at all.. */
2577 RF_Free(req, sizeof(*req));
2578
2579 raidPtr->recon_in_progress = 0;
2580 splx(s);
2581
2582 /* That's all... */
2583 kthread_exit(0); /* does not return */
2584 }
2585
2586 void
2587 rf_RewriteParityThread(raidPtr)
2588 RF_Raid_t *raidPtr;
2589 {
2590 int retcode;
2591 int s;
2592
2593 raidPtr->parity_rewrite_in_progress = 1;
2594 s = splbio();
2595 retcode = rf_RewriteParity(raidPtr);
2596 splx(s);
2597 if (retcode) {
2598 printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
2599 } else {
2600 /* set the clean bit! If we shutdown correctly,
2601 the clean bit on each component label will get
2602 set */
2603 raidPtr->parity_good = RF_RAID_CLEAN;
2604 }
2605 raidPtr->parity_rewrite_in_progress = 0;
2606
2607 /* Anyone waiting for us to stop? If so, inform them... */
2608 if (raidPtr->waitShutdown) {
2609 wakeup(&raidPtr->parity_rewrite_in_progress);
2610 }
2611
2612 /* That's all... */
2613 kthread_exit(0); /* does not return */
2614 }
2615
2616
2617 void
2618 rf_CopybackThread(raidPtr)
2619 RF_Raid_t *raidPtr;
2620 {
2621 int s;
2622
2623 raidPtr->copyback_in_progress = 1;
2624 s = splbio();
2625 rf_CopybackReconstructedData(raidPtr);
2626 splx(s);
2627 raidPtr->copyback_in_progress = 0;
2628
2629 /* That's all... */
2630 kthread_exit(0); /* does not return */
2631 }
2632
2633
2634 void
2635 rf_ReconstructInPlaceThread(req)
2636 struct rf_recon_req *req;
2637 {
2638 int retcode;
2639 int s;
2640 RF_Raid_t *raidPtr;
2641
2642 s = splbio();
2643 raidPtr = req->raidPtr;
2644 raidPtr->recon_in_progress = 1;
2645 retcode = rf_ReconstructInPlace(raidPtr, req->row, req->col);
2646 RF_Free(req, sizeof(*req));
2647 raidPtr->recon_in_progress = 0;
2648 splx(s);
2649
2650 /* That's all... */
2651 kthread_exit(0); /* does not return */
2652 }
2653
2654 void
2655 rf_mountroot_hook(dev)
2656 struct device *dev;
2657 {
2658
2659 }
2660
2661
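/*
 * Troll through every disk device in the system looking for RAID
 * components: for each disk (floppies excluded), read the disklabel
 * and, for every partition of type FS_RAID, attempt to read a
 * component label.  Components with reasonable-looking labels are
 * collected into the RF_AutoConfig_t list that is returned.
 */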
2662 RF_AutoConfig_t *
2663 rf_find_raid_components()
2664 {
2665 struct devnametobdevmaj *dtobdm;
2666 struct vnode *vp;
2667 struct disklabel label;
2668 struct device *dv;
2669 char *cd_name;
2670 dev_t dev;
2671 int error;
2672 int i;
2673 int good_one;
2674 RF_ComponentLabel_t *clabel;
2675 RF_AutoConfig_t *ac_list;
2676 RF_AutoConfig_t *ac;
2677
2678
2679 /* initialize the AutoConfig list */
2680 ac_list = NULL;
2681
2682 /* we begin by trolling through *all* the devices on the system */
2683
2684 for (dv = alldevs.tqh_first; dv != NULL;
2685 dv = dv->dv_list.tqe_next) {
2686
2687 /* we are only interested in disks... */
2688 if (dv->dv_class != DV_DISK)
2689 continue;
2690
2691 /* we don't care about floppies... */
2692 if (!strcmp(dv->dv_cfdata->cf_driver->cd_name,"fd")) {
2693 continue;
2694 }
2695 /* hdfd is the Atari/Hades floppy driver */
2696 if (!strcmp(dv->dv_cfdata->cf_driver->cd_name,"hdfd")) {
2697 continue;
2698 }
2699 /* fdisa is the Atari/Milan floppy driver */
2700 if (!strcmp(dv->dv_cfdata->cf_driver->cd_name,"fdisa")) {
2701 continue;
2702 }
2703
2704 /* need to find the device_name_to_block_device_major stuff */
2705 cd_name = dv->dv_cfdata->cf_driver->cd_name;
2706 dtobdm = dev_name2blk;
2707 while (dtobdm->d_name && strcmp(dtobdm->d_name, cd_name)) {
2708 dtobdm++;
2709 }
2710
2711 /* get a vnode for the raw partition of this disk */
2712
2713 dev = MAKEDISKDEV(dtobdm->d_maj, dv->dv_unit, RAW_PART);
2714 if (bdevvp(dev, &vp))
2715 panic("RAID can't alloc vnode");
2716
2717 error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2718
2719 if (error) {
2720 /* "Who cares." Continue looking
2721 for something that exists*/
2722 vput(vp);
2723 continue;
2724 }
2725
2726 /* Ok, the disk exists. Go get the disklabel. */
2727 error = VOP_IOCTL(vp, DIOCGDINFO, (caddr_t)&label,
2728 FREAD, NOCRED, 0);
2729 if (error) {
2730 /*
2731 * XXX can't happen - open() would
2732 * have errored out (or faked up one)
2733 */
2734 printf("can't get label for dev %s%c (%d)!?!?\n",
2735 dv->dv_xname, 'a' + RAW_PART, error);
2736 }
2737
2738 /* don't need this any more. We'll allocate it again
2739 a little later if we really do... */
2740 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2741 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2742 vput(vp);
2743
2744 for (i=0; i < label.d_npartitions; i++) {
2745 /* We only support partitions marked as RAID */
2746 if (label.d_partitions[i].p_fstype != FS_RAID)
2747 continue;
2748
2749 dev = MAKEDISKDEV(dtobdm->d_maj, dv->dv_unit, i);
2750 if (bdevvp(dev, &vp))
2751 panic("RAID can't alloc vnode");
2752
2753 error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2754 if (error) {
2755 /* Whatever... */
2756 vput(vp);
2757 continue;
2758 }
2759
2760 good_one = 0;
2761
2762 clabel = (RF_ComponentLabel_t *)
2763 malloc(sizeof(RF_ComponentLabel_t),
2764 M_RAIDFRAME, M_NOWAIT);
2765 if (clabel == NULL) {
2766 /* XXX CLEANUP HERE */
2767 printf("RAID auto config: out of memory!\n");
2768 return(NULL); /* XXX probably should panic? */
2769 }
2770
2771 if (!raidread_component_label(dev, vp, clabel)) {
2772 /* Got the label. Does it look reasonable? */
2773 if (rf_reasonable_label(clabel) &&
2774 (clabel->partitionSize <=
2775 label.d_partitions[i].p_size)) {
2776 #if DEBUG
2777 printf("Component on: %s%c: %d\n",
2778 dv->dv_xname, 'a'+i,
2779 label.d_partitions[i].p_size);
2780 rf_print_component_label(clabel);
2781 #endif
2782 /* if it's reasonable, add it,
2783 else ignore it. */
2784 ac = (RF_AutoConfig_t *)
2785 malloc(sizeof(RF_AutoConfig_t),
2786 M_RAIDFRAME,
2787 M_NOWAIT);
2788 if (ac == NULL) {
2789 /* XXX should panic?? */
2790 return(NULL);
2791 }
2792
2793 sprintf(ac->devname, "%s%c",
2794 dv->dv_xname, 'a'+i);
2795 ac->dev = dev;
2796 ac->vp = vp;
2797 ac->clabel = clabel;
2798 ac->next = ac_list;
2799 ac_list = ac;
2800 good_one = 1;
2801 }
2802 }
2803 if (!good_one) {
2804 /* cleanup */
2805 free(clabel, M_RAIDFRAME);
2806 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2807 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2808 vput(vp);
2809 }
2810 }
2811 }
2812 return(ac_list);
2813 }
2814
2815 static int
2816 rf_reasonable_label(clabel)
2817 RF_ComponentLabel_t *clabel;
2818 {
2819
2820 if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
2821 (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
2822 ((clabel->clean == RF_RAID_CLEAN) ||
2823 (clabel->clean == RF_RAID_DIRTY)) &&
2824 clabel->row >=0 &&
2825 clabel->column >= 0 &&
2826 clabel->num_rows > 0 &&
2827 clabel->num_columns > 0 &&
2828 clabel->row < clabel->num_rows &&
2829 clabel->column < clabel->num_columns &&
2830 clabel->blockSize > 0 &&
2831 clabel->numBlocks > 0) {
2832 /* label looks reasonable enough... */
2833 return(1);
2834 }
2835 return(0);
2836 }
2837
2838
2839 void
2840 rf_print_component_label(clabel)
2841 RF_ComponentLabel_t *clabel;
2842 {
2843 printf(" Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
2844 clabel->row, clabel->column,
2845 clabel->num_rows, clabel->num_columns);
2846 printf(" Version: %d Serial Number: %d Mod Counter: %d\n",
2847 clabel->version, clabel->serial_number,
2848 clabel->mod_counter);
2849 printf(" Clean: %s Status: %d\n",
2850 clabel->clean ? "Yes" : "No", clabel->status );
2851 printf(" sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
2852 clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
2853 printf(" RAID Level: %c blocksize: %d numBlocks: %d\n",
2854 (char) clabel->parityConfig, clabel->blockSize,
2855 clabel->numBlocks);
2856 printf(" Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
2857 printf(" Contains root partition: %s\n",
2858 clabel->root_partition ? "Yes" : "No" );
2859 printf(" Last configured as: raid%d\n", clabel->last_unit );
2860 #if 0
2861 printf(" Config order: %d\n", clabel->config_order);
2862 #endif
2863
2864 }
2865
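/*
 * Walk the RF_AutoConfig_t list and sort the components into
 * configuration sets: each component is added to the first existing
 * set it fits (see rf_does_it_fit() below), otherwise it starts a new
 * set of its own.
 */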
2866 RF_ConfigSet_t *
2867 rf_create_auto_sets(ac_list)
2868 RF_AutoConfig_t *ac_list;
2869 {
2870 RF_AutoConfig_t *ac;
2871 RF_ConfigSet_t *config_sets;
2872 RF_ConfigSet_t *cset;
2873 RF_AutoConfig_t *ac_next;
2874
2875
2876 config_sets = NULL;
2877
2878 /* Go through the AutoConfig list, and figure out which components
2879 belong to what sets. */
2880 ac = ac_list;
2881 while(ac!=NULL) {
2882 /* we're going to putz with ac->next, so save it here
2883 for use at the end of the loop */
2884 ac_next = ac->next;
2885
2886 if (config_sets == NULL) {
2887 /* will need at least this one... */
2888 config_sets = (RF_ConfigSet_t *)
2889 malloc(sizeof(RF_ConfigSet_t),
2890 M_RAIDFRAME, M_NOWAIT);
2891 if (config_sets == NULL) {
2892 panic("rf_create_auto_sets: No memory!\n");
2893 }
2894 /* this one is easy :) */
2895 config_sets->ac = ac;
2896 config_sets->next = NULL;
2897 config_sets->rootable = 0;
2898 ac->next = NULL;
2899 } else {
2900 /* which set does this component fit into? */
2901 cset = config_sets;
2902 while(cset!=NULL) {
2903 if (rf_does_it_fit(cset, ac)) {
2904 /* looks like it matches... */
2905 ac->next = cset->ac;
2906 cset->ac = ac;
2907 break;
2908 }
2909 cset = cset->next;
2910 }
2911 if (cset==NULL) {
2912 /* didn't find a match above... new set..*/
2913 cset = (RF_ConfigSet_t *)
2914 malloc(sizeof(RF_ConfigSet_t),
2915 M_RAIDFRAME, M_NOWAIT);
2916 if (cset == NULL) {
2917 panic("rf_create_auto_sets: No memory!\n");
2918 }
2919 cset->ac = ac;
2920 ac->next = NULL;
2921 cset->next = config_sets;
2922 cset->rootable = 0;
2923 config_sets = cset;
2924 }
2925 }
2926 ac = ac_next;
2927 }
2928
2929
2930 return(config_sets);
2931 }
2932
2933 static int
2934 rf_does_it_fit(cset, ac)
2935 RF_ConfigSet_t *cset;
2936 RF_AutoConfig_t *ac;
2937 {
2938 RF_ComponentLabel_t *clabel1, *clabel2;
2939
2940 /* If this one matches the *first* one in the set, that's good
2941 enough, since the other members of the set would have been
2942 through here too... */
2943 /* note that we are not checking partitionSize here..
2944
2945 Note that we are also not checking the mod_counters here.
2946 	   If everything else matches except the mod_counter, that's
2947 good enough for this test. We will deal with the mod_counters
2948 a little later in the autoconfiguration process.
2949
2950 (clabel1->mod_counter == clabel2->mod_counter) &&
2951
2952 The reason we don't check for this is that failed disks
2953 will have lower modification counts. If those disks are
2954 not added to the set they used to belong to, then they will
2955 form their own set, which may result in 2 different sets,
2956 for example, competing to be configured at raid0, and
2957 perhaps competing to be the root filesystem set. If the
2958 wrong ones get configured, or both attempt to become /,
2959 	   weird behaviour and/or serious lossage will occur. Thus we
2960 need to bring them into the fold here, and kick them out at
2961 a later point.
2962
2963 */
2964
2965 clabel1 = cset->ac->clabel;
2966 clabel2 = ac->clabel;
2967 if ((clabel1->version == clabel2->version) &&
2968 (clabel1->serial_number == clabel2->serial_number) &&
2969 (clabel1->num_rows == clabel2->num_rows) &&
2970 (clabel1->num_columns == clabel2->num_columns) &&
2971 (clabel1->sectPerSU == clabel2->sectPerSU) &&
2972 (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
2973 (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
2974 (clabel1->parityConfig == clabel2->parityConfig) &&
2975 (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
2976 (clabel1->blockSize == clabel2->blockSize) &&
2977 (clabel1->numBlocks == clabel2->numBlocks) &&
2978 (clabel1->autoconfigure == clabel2->autoconfigure) &&
2979 (clabel1->root_partition == clabel2->root_partition) &&
2980 (clabel1->last_unit == clabel2->last_unit) &&
2981 (clabel1->config_order == clabel2->config_order)) {
2982 		/* if it gets here, it almost *has* to be a match */
2983 } else {
2984 /* it's not consistent with somebody in the set..
2985 punt */
2986 return(0);
2987 }
2988 /* all was fine.. it must fit... */
2989 return(1);
2990 }
2991
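/*
 * Decide whether a configuration set has enough live components to be
 * configured.  The set's mod_counter is taken to be the highest value
 * found among its members, and a component only counts if its label
 * carries that counter.  RAID 1 gets special treatment: a missing
 * component is tolerated as long as its mirror partner is present.
 * Otherwise, RAID 0 tolerates no missing components, and RAID 4/5
 * tolerate at most one.
 */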
2992 int
2993 rf_have_enough_components(cset)
2994 RF_ConfigSet_t *cset;
2995 {
2996 RF_AutoConfig_t *ac;
2997 RF_AutoConfig_t *auto_config;
2998 RF_ComponentLabel_t *clabel;
2999 int r,c;
3000 int num_rows;
3001 int num_cols;
3002 int num_missing;
3003 int mod_counter;
3004 int mod_counter_found;
3005 int even_pair_failed;
3006 char parity_type;
3007
3008
3009 /* check to see that we have enough 'live' components
3010 of this set. If so, we can configure it if necessary */
3011
3012 num_rows = cset->ac->clabel->num_rows;
3013 num_cols = cset->ac->clabel->num_columns;
3014 parity_type = cset->ac->clabel->parityConfig;
3015
3016 /* XXX Check for duplicate components!?!?!? */
3017
3018 /* Determine what the mod_counter is supposed to be for this set. */
3019
3020 mod_counter_found = 0;
3021 mod_counter = 0;
3022 ac = cset->ac;
3023 while(ac!=NULL) {
3024 if (mod_counter_found==0) {
3025 mod_counter = ac->clabel->mod_counter;
3026 mod_counter_found = 1;
3027 } else {
3028 if (ac->clabel->mod_counter > mod_counter) {
3029 mod_counter = ac->clabel->mod_counter;
3030 }
3031 }
3032 ac = ac->next;
3033 }
3034
3035 num_missing = 0;
3036 auto_config = cset->ac;
3037
3038 for(r=0; r<num_rows; r++) {
3039 even_pair_failed = 0;
3040 for(c=0; c<num_cols; c++) {
3041 ac = auto_config;
3042 while(ac!=NULL) {
3043 if ((ac->clabel->row == r) &&
3044 (ac->clabel->column == c) &&
3045 (ac->clabel->mod_counter == mod_counter)) {
3046 /* it's this one... */
3047 #if DEBUG
3048 printf("Found: %s at %d,%d\n",
3049 ac->devname,r,c);
3050 #endif
3051 break;
3052 }
3053 ac=ac->next;
3054 }
3055 if (ac==NULL) {
3056 /* Didn't find one here! */
3057 /* special case for RAID 1, especially
3058 where there are more than 2
3059 components (where RAIDframe treats
3060 things a little differently :( ) */
3061 if (parity_type == '1') {
3062 if (c%2 == 0) { /* even component */
3063 even_pair_failed = 1;
3064 } else { /* odd component. If
3065 we're failed, and
3066 so is the even
3067 component, it's
3068 "Good Night, Charlie" */
3069 if (even_pair_failed == 1) {
3070 return(0);
3071 }
3072 }
3073 } else {
3074 /* normal accounting */
3075 num_missing++;
3076 }
3077 }
3078 if ((parity_type == '1') && (c%2 == 1)) {
3079 /* Just did an even component, and we didn't
3080 bail.. reset the even_pair_failed flag,
3081 and go on to the next component.... */
3082 even_pair_failed = 0;
3083 }
3084 }
3085 }
3086
3087 clabel = cset->ac->clabel;
3088
3089 if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
3090 ((clabel->parityConfig == '4') && (num_missing > 1)) ||
3091 ((clabel->parityConfig == '5') && (num_missing > 1))) {
3092 /* XXX this needs to be made *much* more general */
3093 /* Too many failures */
3094 return(0);
3095 }
3096 /* otherwise, all is well, and we've got enough to take a kick
3097 at autoconfiguring this set */
3098 return(1);
3099 }
3100
3101 void
3102 rf_create_configuration(ac,config,raidPtr)
3103 RF_AutoConfig_t *ac;
3104 RF_Config_t *config;
3105 RF_Raid_t *raidPtr;
3106 {
3107 RF_ComponentLabel_t *clabel;
3108 int i;
3109
3110 clabel = ac->clabel;
3111
3112 /* 1. Fill in the common stuff */
3113 config->numRow = clabel->num_rows;
3114 config->numCol = clabel->num_columns;
3115 config->numSpare = 0; /* XXX should this be set here? */
3116 config->sectPerSU = clabel->sectPerSU;
3117 config->SUsPerPU = clabel->SUsPerPU;
3118 config->SUsPerRU = clabel->SUsPerRU;
3119 config->parityConfig = clabel->parityConfig;
3120 /* XXX... */
3121 strcpy(config->diskQueueType,"fifo");
3122 config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3123 config->layoutSpecificSize = 0; /* XXX ?? */
3124
3125 while(ac!=NULL) {
3126 /* row/col values will be in range due to the checks
3127 		   in rf_reasonable_label() */
3128 strcpy(config->devnames[ac->clabel->row][ac->clabel->column],
3129 ac->devname);
3130 ac = ac->next;
3131 }
3132
3133 for(i=0;i<RF_MAXDBGV;i++) {
3134 		config->debugVars[i][0] = '\0';
3135 }
3136 }
3137
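/*
 * rf_set_autoconfig() and rf_set_rootpartition() update the in-core
 * flag and then push the new value into the component label of every
 * optimal component, so the setting persists across reboots.
 */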
3138 int
3139 rf_set_autoconfig(raidPtr, new_value)
3140 RF_Raid_t *raidPtr;
3141 int new_value;
3142 {
3143 RF_ComponentLabel_t clabel;
3144 struct vnode *vp;
3145 dev_t dev;
3146 int row, column;
3147
3148 raidPtr->autoconfigure = new_value;
3149 for(row=0; row<raidPtr->numRow; row++) {
3150 for(column=0; column<raidPtr->numCol; column++) {
3151 if (raidPtr->Disks[row][column].status ==
3152 rf_ds_optimal) {
3153 dev = raidPtr->Disks[row][column].dev;
3154 vp = raidPtr->raid_cinfo[row][column].ci_vp;
3155 raidread_component_label(dev, vp, &clabel);
3156 clabel.autoconfigure = new_value;
3157 raidwrite_component_label(dev, vp, &clabel);
3158 }
3159 }
3160 }
3161 return(new_value);
3162 }
3163
3164 int
3165 rf_set_rootpartition(raidPtr, new_value)
3166 RF_Raid_t *raidPtr;
3167 int new_value;
3168 {
3169 RF_ComponentLabel_t clabel;
3170 struct vnode *vp;
3171 dev_t dev;
3172 int row, column;
3173
3174 raidPtr->root_partition = new_value;
3175 for(row=0; row<raidPtr->numRow; row++) {
3176 for(column=0; column<raidPtr->numCol; column++) {
3177 if (raidPtr->Disks[row][column].status ==
3178 rf_ds_optimal) {
3179 dev = raidPtr->Disks[row][column].dev;
3180 vp = raidPtr->raid_cinfo[row][column].ci_vp;
3181 raidread_component_label(dev, vp, &clabel);
3182 clabel.root_partition = new_value;
3183 raidwrite_component_label(dev, vp, &clabel);
3184 }
3185 }
3186 }
3187 return(new_value);
3188 }
3189
3190 void
3191 rf_release_all_vps(cset)
3192 RF_ConfigSet_t *cset;
3193 {
3194 RF_AutoConfig_t *ac;
3195
3196 ac = cset->ac;
3197 while(ac!=NULL) {
3198 /* Close the vp, and give it back */
3199 if (ac->vp) {
3200 vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3201 VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
3202 vput(ac->vp);
3203 ac->vp = NULL;
3204 }
3205 ac = ac->next;
3206 }
3207 }
3208
3209
3210 void
3211 rf_cleanup_config_set(cset)
3212 RF_ConfigSet_t *cset;
3213 {
3214 RF_AutoConfig_t *ac;
3215 RF_AutoConfig_t *next_ac;
3216
3217 ac = cset->ac;
3218 while(ac!=NULL) {
3219 next_ac = ac->next;
3220 /* nuke the label */
3221 free(ac->clabel, M_RAIDFRAME);
3222 /* cleanup the config structure */
3223 free(ac, M_RAIDFRAME);
3224 /* "next.." */
3225 ac = next_ac;
3226 }
3227 /* and, finally, nuke the config set */
3228 free(cset, M_RAIDFRAME);
3229 }
3230
3231
3232 void
3233 raid_init_component_label(raidPtr, clabel)
3234 RF_Raid_t *raidPtr;
3235 RF_ComponentLabel_t *clabel;
3236 {
3237 /* current version number */
3238 clabel->version = RF_COMPONENT_LABEL_VERSION;
3239 clabel->serial_number = raidPtr->serial_number;
3240 clabel->mod_counter = raidPtr->mod_counter;
3241 clabel->num_rows = raidPtr->numRow;
3242 clabel->num_columns = raidPtr->numCol;
3243 clabel->clean = RF_RAID_DIRTY; /* not clean */
3244 clabel->status = rf_ds_optimal; /* "It's good!" */
3245
3246 clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
3247 clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
3248 clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
3249
3250 clabel->blockSize = raidPtr->bytesPerSector;
3251 clabel->numBlocks = raidPtr->sectorsPerDisk;
3252
3253 /* XXX not portable */
3254 clabel->parityConfig = raidPtr->Layout.map->parityConfig;
3255 clabel->maxOutstanding = raidPtr->maxOutstanding;
3256 clabel->autoconfigure = raidPtr->autoconfigure;
3257 clabel->root_partition = raidPtr->root_partition;
3258 clabel->last_unit = raidPtr->raidid;
3259 clabel->config_order = raidPtr->config_order;
3260 }
3261
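/*
 * Configure one auto-detected set: build an RF_Config_t from the
 * component labels, pick a raid unit (preferring the unit the set was
 * last configured as, otherwise the highest free one), and run it
 * through rf_Configure() and raidinit().  On success the chosen unit
 * is returned in *unit, and the set is flagged as rootable if its
 * labels request that.
 */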
3262 int
3263 rf_auto_config_set(cset,unit)
3264 RF_ConfigSet_t *cset;
3265 int *unit;
3266 {
3267 RF_Raid_t *raidPtr;
3268 RF_Config_t *config;
3269 int raidID;
3270 int retcode;
3271
3272 #if DEBUG
3273 printf("RAID autoconfigure\n");
3274 #endif
3275
3276 retcode = 0;
3277 *unit = -1;
3278
3279 /* 1. Create a config structure */
3280
3281 config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
3282 M_RAIDFRAME,
3283 M_NOWAIT);
3284 if (config==NULL) {
3285 printf("Out of mem!?!?\n");
3286 /* XXX do something more intelligent here. */
3287 return(1);
3288 }
3289
3290 memset(config, 0, sizeof(RF_Config_t));
3291
3292 /* XXX raidID needs to be set correctly.. */
3293
3294 /*
3295 2. Figure out what RAID ID this one is supposed to live at
3296 See if we can get the same RAID dev that it was configured
3297 on last time..
3298 */
3299
3300 raidID = cset->ac->clabel->last_unit;
3301 if ((raidID < 0) || (raidID >= numraid)) {
3302 /* let's not wander off into lala land. */
3303 raidID = numraid - 1;
3304 }
3305 if (raidPtrs[raidID]->valid != 0) {
3306
3307 /*
3308 Nope... Go looking for an alternative...
3309 Start high so we don't immediately use raid0 if that's
3310 not taken.
3311 */
3312
3313 for(raidID = numraid - 1; raidID >= 0; raidID--) {
3314 if (raidPtrs[raidID]->valid == 0) {
3315 /* can use this one! */
3316 break;
3317 }
3318 }
3319 }
3320
3321 if (raidID < 0) {
3322 /* punt... */
3323 printf("Unable to auto configure this set!\n");
3324 printf("(Out of RAID devs!)\n");
3325 return(1);
3326 }
3327
3328 #if DEBUG
3329 printf("Configuring raid%d:\n",raidID);
3330 #endif
3331
3332 raidPtr = raidPtrs[raidID];
3333
3334 /* XXX all this stuff should be done SOMEWHERE ELSE! */
3335 raidPtr->raidid = raidID;
3336 raidPtr->openings = RAIDOUTSTANDING;
3337
3338 /* 3. Build the configuration structure */
3339 rf_create_configuration(cset->ac, config, raidPtr);
3340
3341 /* 4. Do the configuration */
3342 retcode = rf_Configure(raidPtr, config, cset->ac);
3343
3344 if (retcode == 0) {
3345
3346 raidinit(raidPtrs[raidID]);
3347
3348 rf_markalldirty(raidPtrs[raidID]);
3349 raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
3350 if (cset->ac->clabel->root_partition==1) {
3351 /* everything configured just fine. Make a note
3352 that this set is eligible to be root. */
3353 cset->rootable = 1;
3354 /* XXX do this here? */
3355 raidPtrs[raidID]->root_partition = 1;
3356 }
3357 }
3358
3359 /* 5. Cleanup */
3360 free(config, M_RAIDFRAME);
3361
3362 *unit = raidID;
3363 return(retcode);
3364 }
3365
3366 void
3367 rf_disk_unbusy(desc)
3368 RF_RaidAccessDesc_t *desc;
3369 {
3370 struct buf *bp;
3371
3372 bp = (struct buf *)desc->bp;
3373 disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
3374 (bp->b_bcount - bp->b_resid));
3375 }
3376