rf_netbsdkintf.c revision 1.245.4.4 1 /* $NetBSD: rf_netbsdkintf.c,v 1.245.4.4 2009/06/20 07:20:28 yamt Exp $ */
2 /*-
3 * Copyright (c) 1996, 1997, 1998, 2008 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Greg Oster; Jason R. Thorpe.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 /*
32 * Copyright (c) 1990, 1993
33 * The Regents of the University of California. All rights reserved.
34 *
35 * This code is derived from software contributed to Berkeley by
36 * the Systems Programming Group of the University of Utah Computer
37 * Science Department.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 *
63 * from: Utah $Hdr: cd.c 1.6 90/11/28$
64 *
65 * @(#)cd.c 8.2 (Berkeley) 11/16/93
66 */
67
68 /*
69 * Copyright (c) 1988 University of Utah.
70 *
71 * This code is derived from software contributed to Berkeley by
72 * the Systems Programming Group of the University of Utah Computer
73 * Science Department.
74 *
75 * Redistribution and use in source and binary forms, with or without
76 * modification, are permitted provided that the following conditions
77 * are met:
78 * 1. Redistributions of source code must retain the above copyright
79 * notice, this list of conditions and the following disclaimer.
80 * 2. Redistributions in binary form must reproduce the above copyright
81 * notice, this list of conditions and the following disclaimer in the
82 * documentation and/or other materials provided with the distribution.
83 * 3. All advertising materials mentioning features or use of this software
84 * must display the following acknowledgement:
85 * This product includes software developed by the University of
86 * California, Berkeley and its contributors.
87 * 4. Neither the name of the University nor the names of its contributors
88 * may be used to endorse or promote products derived from this software
89 * without specific prior written permission.
90 *
91 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
92 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
93 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
94 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
95 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
96 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
97 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
98 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
99 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
100 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
101 * SUCH DAMAGE.
102 *
103 * from: Utah $Hdr: cd.c 1.6 90/11/28$
104 *
105 * @(#)cd.c 8.2 (Berkeley) 11/16/93
106 */
107
108 /*
109 * Copyright (c) 1995 Carnegie-Mellon University.
110 * All rights reserved.
111 *
112 * Authors: Mark Holland, Jim Zelenka
113 *
114 * Permission to use, copy, modify and distribute this software and
115 * its documentation is hereby granted, provided that both the copyright
116 * notice and this permission notice appear in all copies of the
117 * software, derivative works or modified versions, and any portions
118 * thereof, and that both notices appear in supporting documentation.
119 *
120 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
121 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
122 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
123 *
124 * Carnegie Mellon requests users of this software to return to
125 *
126 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
127 * School of Computer Science
128 * Carnegie Mellon University
129 * Pittsburgh PA 15213-3890
130 *
131 * any improvements or extensions that they make and grant Carnegie the
132 * rights to redistribute these changes.
133 */
134
135 /***********************************************************
136 *
137 * rf_kintf.c -- the kernel interface routines for RAIDframe
138 *
139 ***********************************************************/
140
141 #include <sys/cdefs.h>
142 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.245.4.4 2009/06/20 07:20:28 yamt Exp $");
143
144 #ifdef _KERNEL_OPT
145 #include "opt_compat_netbsd.h"
146 #include "opt_raid_autoconfig.h"
147 #include "raid.h"
148 #endif
149
150 #include <sys/param.h>
151 #include <sys/errno.h>
152 #include <sys/pool.h>
153 #include <sys/proc.h>
154 #include <sys/queue.h>
155 #include <sys/disk.h>
156 #include <sys/device.h>
157 #include <sys/stat.h>
158 #include <sys/ioctl.h>
159 #include <sys/fcntl.h>
160 #include <sys/systm.h>
161 #include <sys/vnode.h>
162 #include <sys/disklabel.h>
163 #include <sys/conf.h>
164 #include <sys/buf.h>
165 #include <sys/bufq.h>
166 #include <sys/user.h>
167 #include <sys/reboot.h>
168 #include <sys/kauth.h>
169
170 #include <prop/proplib.h>
171
172 #include <dev/raidframe/raidframevar.h>
173 #include <dev/raidframe/raidframeio.h>
174
175 #include "rf_raid.h"
176 #include "rf_copyback.h"
177 #include "rf_dag.h"
178 #include "rf_dagflags.h"
179 #include "rf_desc.h"
180 #include "rf_diskqueue.h"
181 #include "rf_etimer.h"
182 #include "rf_general.h"
183 #include "rf_kintf.h"
184 #include "rf_options.h"
185 #include "rf_driver.h"
186 #include "rf_parityscan.h"
187 #include "rf_threadstuff.h"
188
189 #ifdef COMPAT_50
190 #include "rf_compat50.h"
191 #endif
192
193 #ifdef DEBUG
194 int rf_kdebug_level = 0;
195 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
196 #else /* DEBUG */
197 #define db1_printf(a) { }
198 #endif /* DEBUG */
199
200 static RF_Raid_t **raidPtrs; /* global raid device descriptors */
201
202 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
203 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
204
205 static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a
206 * spare table */
207 static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from
208 * installation process */
209 #endif
210
211 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
212
213 /* prototypes */
214 static void KernelWakeupFunc(struct buf *);
215 static void InitBP(struct buf *, struct vnode *, unsigned,
216 dev_t, RF_SectorNum_t, RF_SectorCount_t, void *, void (*) (struct buf *),
217 void *, int, struct proc *);
218 static void raidinit(RF_Raid_t *);
219
220 void raidattach(int);
221 static int raid_match(device_t, cfdata_t, void *);
222 static void raid_attach(device_t, device_t, void *);
223 static int raid_detach(device_t, int);
224
225 dev_type_open(raidopen);
226 dev_type_close(raidclose);
227 dev_type_read(raidread);
228 dev_type_write(raidwrite);
229 dev_type_ioctl(raidioctl);
230 dev_type_strategy(raidstrategy);
231 dev_type_dump(raiddump);
232 dev_type_size(raidsize);
233
234 const struct bdevsw raid_bdevsw = {
235 raidopen, raidclose, raidstrategy, raidioctl,
236 raiddump, raidsize, D_DISK
237 };
238
239 const struct cdevsw raid_cdevsw = {
240 raidopen, raidclose, raidread, raidwrite, raidioctl,
241 nostop, notty, nopoll, nommap, nokqfilter, D_DISK
242 };
243
244 static struct dkdriver rf_dkdriver = { raidstrategy, minphys };
245
246 /* XXX Not sure if the following should be replacing the raidPtrs above,
247 or if it should be used in conjunction with that...
248 */
249
/*
 * Per-unit software state for a RAID pseudo-device, paralleling what
 * other NetBSD disk drivers (e.g. ccd) keep per unit.
 */
struct raid_softc {
	device_t sc_dev;	/* autoconf(9) device handle */
	int     sc_flags;	/* flags (RAIDF_* below) */
	int     sc_cflags;	/* configuration flags */
	uint64_t sc_size;	/* size of the raid device */
	char    sc_xname[20];	/* XXX external name */
	struct disk sc_dkdev;	/* generic disk device info */
	struct bufq_state *buf_queue;	/* used for the device queue */
};
/* sc_flags */
#define RAIDF_INITED	0x01	/* unit has been initialized */
#define RAIDF_WLABEL	0x02	/* label area is writable */
#define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
#define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
#define RAIDF_LOCKED	0x80	/* unit is locked */

/* Extract the unit number from a dev_t minor. */
#define	raidunit(x)	DISKUNIT(x)
int numraid = 0;		/* number of raid_softc/raidPtrs slots allocated */
268
269 extern struct cfdriver raid_cd;
270 CFATTACH_DECL_NEW(raid, sizeof(struct raid_softc),
271 raid_match, raid_attach, raid_detach, NULL);
272
273 /*
274 * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
275 * Be aware that large numbers can allow the driver to consume a lot of
276 * kernel memory, especially on writes, and in degraded mode reads.
277 *
278 * For example: with a stripe width of 64 blocks (32k) and 5 disks,
279 * a single 64K write will typically require 64K for the old data,
280 * 64K for the old parity, and 64K for the new parity, for a total
281 * of 192K (if the parity buffer is not re-used immediately).
282 * Even it if is used immediately, that's still 128K, which when multiplied
283 * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
284 *
285 * Now in degraded mode, for example, a 64K read on the above setup may
286 * require data reconstruction, which will require *all* of the 4 remaining
287 * disks to participate -- 4 * 32K/disk == 128K again.
288 */
289
290 #ifndef RAIDOUTSTANDING
291 #define RAIDOUTSTANDING 6
292 #endif
293
294 #define RAIDLABELDEV(dev) \
295 (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
296
297 /* declared here, and made public, for the benefit of KVM stuff.. */
298 struct raid_softc *raid_softc;
299
300 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
301 struct disklabel *);
302 static void raidgetdisklabel(dev_t);
303 static void raidmakedisklabel(struct raid_softc *);
304
305 static int raidlock(struct raid_softc *);
306 static void raidunlock(struct raid_softc *);
307
308 static void rf_markalldirty(RF_Raid_t *);
309 static void rf_set_properties(struct raid_softc *, RF_Raid_t *);
310
311 void rf_ReconThread(struct rf_recon_req *);
312 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
313 void rf_CopybackThread(RF_Raid_t *raidPtr);
314 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
315 int rf_autoconfig(device_t);
316 void rf_buildroothack(RF_ConfigSet_t *);
317
318 RF_AutoConfig_t *rf_find_raid_components(void);
319 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
320 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
321 static int rf_reasonable_label(RF_ComponentLabel_t *);
322 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
323 int rf_set_autoconfig(RF_Raid_t *, int);
324 int rf_set_rootpartition(RF_Raid_t *, int);
325 void rf_release_all_vps(RF_ConfigSet_t *);
326 void rf_cleanup_config_set(RF_ConfigSet_t *);
327 int rf_have_enough_components(RF_ConfigSet_t *);
328 int rf_auto_config_set(RF_ConfigSet_t *, int *);
329 static int rf_sync_component_caches(RF_Raid_t *raidPtr);
330
331 static int raidautoconfig = 0; /* Debugging, mostly. Set to 0 to not
332 allow autoconfig to take place.
333 Note that this is overridden by having
334 RAID_AUTOCONFIG as an option in the
335 kernel config file. */
336
337 struct RF_Pools_s rf_pools;
338
/*
 * raidattach -- pseudo-device attach routine, called once at boot with
 * the number of RAID units requested in the kernel configuration.
 *
 * Allocates the global raidPtrs[] and raid_softc[] arrays, boots the
 * RAIDframe core, hooks up the autoconf(9) attachment, and registers a
 * config finalizer so auto-configuration runs only after all real
 * hardware devices have been found.  On a mid-loop allocation failure,
 * numraid is trimmed so later code never indexes an unallocated slot.
 */
void
raidattach(int num)
{
	int raidID;
	int i, rc;

	aprint_debug("raidattach: Asked for %d units\n", num);

	if (num <= 0) {
#ifdef DIAGNOSTIC
		panic("raidattach: count <= 0");
#endif
		return;
	}
	/* This is where all the initialization stuff gets done. */

	numraid = num;

	/* Make some space for requested number of units... */

	RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
	if (raidPtrs == NULL) {
		panic("raidPtrs is NULL!!");
	}

#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
	rf_mutex_init(&rf_sparet_wait_mutex);

	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
#endif

	for (i = 0; i < num; i++)
		raidPtrs[i] = NULL;
	rc = rf_BootRaidframe();
	if (rc == 0)
		aprint_normal("Kernelized RAIDframe activated\n");
	else
		panic("Serious error booting RAID!!");

	/* put together some datastructures like the CCD device does.. This
	 * lets us lock the device and what-not when it gets opened. */

	raid_softc = (struct raid_softc *)
	    malloc(num * sizeof(struct raid_softc),
		   M_RAIDFRAME, M_NOWAIT);
	if (raid_softc == NULL) {
		aprint_error("WARNING: no memory for RAIDframe driver\n");
		return;
	}

	memset(raid_softc, 0, num * sizeof(struct raid_softc));

	for (raidID = 0; raidID < num; raidID++) {
		bufq_alloc(&raid_softc[raidID].buf_queue, "fcfs", 0);

		RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
			  (RF_Raid_t *));
		if (raidPtrs[raidID] == NULL) {
			aprint_error("WARNING: raidPtrs[%d] is NULL\n", raidID);
			/* Shrink numraid so the uninitialized tail of
			   the arrays is never referenced again. */
			numraid = raidID;
			return;
		}
	}

	if (config_cfattach_attach(raid_cd.cd_name, &raid_ca)) {
		aprint_error("raidattach: config_cfattach_attach failed?\n");
	}

#ifdef RAID_AUTOCONFIG
	raidautoconfig = 1;
#endif

	/*
	 * Register a finalizer which will be used to auto-config RAID
	 * sets once all real hardware devices have been found.
	 */
	if (config_finalize_register(NULL, rf_autoconfig) != 0)
		aprint_error("WARNING: unable to register RAIDframe finalizer\n");
}
418
419 int
420 rf_autoconfig(device_t self)
421 {
422 RF_AutoConfig_t *ac_list;
423 RF_ConfigSet_t *config_sets;
424
425 if (raidautoconfig == 0)
426 return (0);
427
428 /* XXX This code can only be run once. */
429 raidautoconfig = 0;
430
431 /* 1. locate all RAID components on the system */
432 aprint_debug("Searching for RAID components...\n");
433 ac_list = rf_find_raid_components();
434
435 /* 2. Sort them into their respective sets. */
436 config_sets = rf_create_auto_sets(ac_list);
437
438 /*
439 * 3. Evaluate each set andconfigure the valid ones.
440 * This gets done in rf_buildroothack().
441 */
442 rf_buildroothack(config_sets);
443
444 return 1;
445 }
446
/*
 * rf_buildroothack -- walk the list of candidate config sets, configure
 * every set that is complete and marked for autoconfiguration, and then
 * try to figure out whether one of the configured sets should become
 * the root device.
 *
 * Root selection: if exactly one configured set is rootable, it becomes
 * booted_device.  If several are, we ask the MD code (cpu_rootconf) for
 * the boot device and pick the single RAID set that contains it as a
 * component; failing that, the user is asked (RB_ASKNAME).  If the user
 * hardwired the root (rootspec != NULL) we touch nothing.
 *
 * Consumes config_sets: every set is released/cleaned up here.
 */
void
rf_buildroothack(RF_ConfigSet_t *config_sets)
{
	RF_ConfigSet_t *cset;
	RF_ConfigSet_t *next_cset;
	int retcode;
	int raidID;
	int rootID;
	int col;
	int num_root;
	char *devname;

	rootID = 0;
	num_root = 0;
	cset = config_sets;
	while(cset != NULL ) {
		/* Grab the link now; cset is torn down below. */
		next_cset = cset->next;
		if (rf_have_enough_components(cset) &&
		    cset->ac->clabel->autoconfigure==1) {
			retcode = rf_auto_config_set(cset,&raidID);
			if (!retcode) {
				aprint_debug("raid%d: configured ok\n", raidID);
				if (cset->rootable) {
					rootID = raidID;
					num_root++;
				}
			} else {
				/* The autoconfig didn't work :( */
				aprint_debug("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
				rf_release_all_vps(cset);
			}
		} else {
			/* we're not autoconfiguring this set...
			   release the associated resources */
			rf_release_all_vps(cset);
		}
		/* cleanup */
		rf_cleanup_config_set(cset);
		cset = next_cset;
	}

	/* if the user has specified what the root device should be
	   then we don't touch booted_device or boothowto... */

	if (rootspec != NULL)
		return;

	/* we found something bootable... */

	if (num_root == 1) {
		booted_device = raid_softc[rootID].sc_dev;
	} else if (num_root > 1) {

		/*
		 * Maybe the MD code can help. If it cannot, then
		 * setroot() will discover that we have no
		 * booted_device and will ask the user if nothing was
		 * hardwired in the kernel config file
		 */

		if (booted_device == NULL)
			cpu_rootconf();
		if (booted_device == NULL)
			return;

		/* Recount: of the root-capable sets, how many actually
		   contain the device we booted from as a component? */
		num_root = 0;
		for (raidID = 0; raidID < numraid; raidID++) {
			if (raidPtrs[raidID]->valid == 0)
				continue;

			if (raidPtrs[raidID]->root_partition == 0)
				continue;

			for (col = 0; col < raidPtrs[raidID]->numCol; col++) {
				devname = raidPtrs[raidID]->Disks[col].devname;
				/* skip the "/dev/" prefix before comparing
				   against the autoconf device name */
				devname += sizeof("/dev/") - 1;
				if (strncmp(devname, device_xname(booted_device),
					    strlen(device_xname(booted_device))) != 0)
					continue;
				aprint_debug("raid%d includes boot device %s\n",
					     raidID, devname);
				num_root++;
				rootID = raidID;
			}
		}

		if (num_root == 1) {
			booted_device = raid_softc[rootID].sc_dev;
		} else {
			/* we can't guess.. require the user to answer... */
			boothowto |= RB_ASKNAME;
		}
	}
}
541
542
543 int
544 raidsize(dev_t dev)
545 {
546 struct raid_softc *rs;
547 struct disklabel *lp;
548 int part, unit, omask, size;
549
550 unit = raidunit(dev);
551 if (unit >= numraid)
552 return (-1);
553 rs = &raid_softc[unit];
554
555 if ((rs->sc_flags & RAIDF_INITED) == 0)
556 return (-1);
557
558 part = DISKPART(dev);
559 omask = rs->sc_dkdev.dk_openmask & (1 << part);
560 lp = rs->sc_dkdev.dk_label;
561
562 if (omask == 0 && raidopen(dev, 0, S_IFBLK, curlwp))
563 return (-1);
564
565 if (lp->d_partitions[part].p_fstype != FS_SWAP)
566 size = -1;
567 else
568 size = lp->d_partitions[part].p_size *
569 (lp->d_secsize / DEV_BSIZE);
570
571 if (omask == 0 && raidclose(dev, 0, S_IFBLK, curlwp))
572 return (-1);
573
574 return (size);
575
576 }
577
/*
 * raiddump -- dump kernel memory to a RAID set.  Only RAID 1 sets
 * (exactly one data column, one parity column) are supported, since a
 * single surviving component then holds a complete copy of the data.
 *
 * dev    - the raid device/partition being dumped to
 * blkno  - block offset, relative to this partition
 * va     - kernel virtual address of the data
 * size   - number of bytes (must be a multiple of DEV_BSIZE)
 *
 * Returns 0 on success or an errno.  Holds the unit lock for the
 * duration so the set cannot be unconfigured mid-dump.
 */
int
raiddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	int unit = raidunit(dev);
	struct raid_softc *rs;
	const struct bdevsw *bdev;
	struct disklabel *lp;
	RF_Raid_t *raidPtr;
	daddr_t offset;
	int part, c, sparecol, j, scol, dumpto;
	int error = 0;

	if (unit >= numraid)
		return (ENXIO);

	rs = &raid_softc[unit];
	raidPtr = raidPtrs[unit];

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return ENXIO;

	/* we only support dumping to RAID 1 sets */
	if (raidPtr->Layout.numDataCol != 1 ||
	    raidPtr->Layout.numParityCol != 1)
		return EINVAL;


	if ((error = raidlock(rs)) != 0)
		return error;

	if (size % DEV_BSIZE != 0) {
		error = EINVAL;
		goto out;
	}

	/* refuse to write past the end of the unit */
	if (blkno + size / DEV_BSIZE > rs->sc_size) {
		printf("%s: blkno (%" PRIu64 ") + size / DEV_BSIZE (%zu) > "
		    "sc->sc_size (%" PRIu64 ")\n", __func__, blkno,
		    size / DEV_BSIZE, rs->sc_size);
		error = EINVAL;
		goto out;
	}

	part = DISKPART(dev);
	lp = rs->sc_dkdev.dk_label;
	offset = lp->d_partitions[part].p_offset + RF_PROTECTED_SECTORS;

	/* figure out what device is alive.. */

	/*
	   Look for a component to dump to.  The preference for the
	   component to dump to is as follows:
	   1) the master
	   2) a used_spare of the master
	   3) the slave
	   4) a used_spare of the slave
	 */

	dumpto = -1;
	for (c = 0; c < raidPtr->numCol; c++) {
		if (raidPtr->Disks[c].status == rf_ds_optimal) {
			/* this might be the one */
			dumpto = c;
			break;
		}
	}

	/*
	   At this point we have possibly selected a live master or a
	   live slave.  We now check to see if there is a spared
	   master (or a spared slave), if we didn't find a live master
	   or a live slave.
	 */

	for (c = 0; c < raidPtr->numSpare; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[sparecol].status ==  rf_ds_used_spare) {
			/* How about this one?  Find which column (if
			   any) this spare is standing in for. */
			scol = -1;
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}
			if (scol == 0) {
				/*
				   We must have found a spared master!
				   We'll take that over anything else
				   found so far.  (We couldn't have
				   found a real master before, since
				   this is a used spare, and it's
				   saying that it's replacing the
				   master.)  On reboot (with
				   autoconfiguration turned on)
				   sparecol will become the 1st
				   component (component0) of this set.
				 */
				dumpto = sparecol;
				break;
			} else if (scol != -1) {
				/*
				   Must be a spared slave.  We'll dump
				   to that if we havn't found anything
				   else so far.
				 */
				if (dumpto == -1)
					dumpto = sparecol;
			}
		}
	}

	if (dumpto == -1) {
		/* we couldn't find any live components to dump to!?!?
		 */
		error = EINVAL;
		goto out;
	}

	bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);

	/*
	   Note that blkno is relative to this particular partition.
	   By adding the offset of this partition in the RAID
	   set, and also adding RF_PROTECTED_SECTORS, we get a
	   value that is relative to the partition used for the
	   underlying component.
	 */

	error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
				blkno + offset, va, size);

out:
	raidunlock(rs);

	return error;
}
715 /* ARGSUSED */
/*
 * raidopen -- block/character device open entry point.
 *
 * Takes the per-unit lock, validates the requested partition against
 * the disklabel (re-reading the label on the first open of a
 * configured unit), and records the open in the appropriate openmask.
 * The very first open of a configured unit also marks all components
 * dirty via rf_markalldirty().  Fails with EBUSY if wedges exist and a
 * non-raw partition is opened.
 */
int
raidopen(dev_t dev, int flags, int fmt,
    struct lwp *l)
{
	int unit = raidunit(dev);
	struct raid_softc *rs;
	struct disklabel *lp;
	int part, pmask;
	int error = 0;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((error = raidlock(rs)) != 0)
		return (error);
	lp = rs->sc_dkdev.dk_label;

	part = DISKPART(dev);

	/*
	 * If there are wedges, and this is not RAW_PART, then we
	 * need to fail.
	 */
	if (rs->sc_dkdev.dk_nwedges != 0 && part != RAW_PART) {
		error = EBUSY;
		goto bad;
	}
	pmask = (1 << part);

	/* First open of a configured unit: (re)read the disklabel
	   before validating the partition against it. */
	if ((rs->sc_flags & RAIDF_INITED) &&
	    (rs->sc_dkdev.dk_openmask == 0))
		raidgetdisklabel(dev);

	/* make sure that this partition exists */

	if (part != RAW_PART) {
		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
		    ((part >= lp->d_npartitions) ||
			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
			error = ENXIO;
			goto bad;
		}
	}
	/* Prevent this unit from being unconfigured while open. */
	switch (fmt) {
	case S_IFCHR:
		rs->sc_dkdev.dk_copenmask |= pmask;
		break;

	case S_IFBLK:
		rs->sc_dkdev.dk_bopenmask |= pmask;
		break;
	}

	if ((rs->sc_dkdev.dk_openmask == 0) &&
	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
		/* First one... mark things as dirty... Note that we *MUST*
		   have done a configure before this.  I DO NOT WANT TO BE
		   SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
		   THAT THEY BELONG TOGETHER!!!!! */
		/* XXX should check to see if we're only open for reading
		   here... If so, we needn't do this, but then need some
		   other way of keeping track of what's happened.. */

		rf_markalldirty( raidPtrs[unit] );
	}


	rs->sc_dkdev.dk_openmask =
	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;

bad:
	raidunlock(rs);

	return (error);


}
795 /* ARGSUSED */
/*
 * raidclose -- block/character device close entry point.
 *
 * Clears the open bit for this partition.  On last close of a
 * configured unit the component labels are flushed; if the system is
 * shutting down the whole RAID set is shut down and the autoconf
 * device is detached as well.
 *
 * NOTE(review): errors from rf_Shutdown()/config_detach() are stored
 * in 'error' but the function unconditionally returns 0 -- looks
 * deliberate for the shutdown path, but confirm before relying on a
 * nonzero return ever being seen by callers.
 */
int
raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	int unit = raidunit(dev);
	cfdata_t cf;
	struct raid_softc *rs;
	int error = 0;
	int part;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((error = raidlock(rs)) != 0)
		return (error);

	part = DISKPART(dev);

	/* ...that much closer to allowing unconfiguration... */
	switch (fmt) {
	case S_IFCHR:
		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
		break;

	case S_IFBLK:
		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
		break;
	}
	rs->sc_dkdev.dk_openmask =
	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;

	if ((rs->sc_dkdev.dk_openmask == 0) &&
	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
		/* Last one... device is not unconfigured yet.
		   Device shutdown has taken care of setting the
		   clean bits if RAIDF_INITED is not set
		   mark things as clean... */

		rf_update_component_labels(raidPtrs[unit],
						 RF_FINAL_COMPONENT_UPDATE);
		if (doing_shutdown) {
			/* last one, and we're going down, so
			   lights out for this RAID set too. */
			error = rf_Shutdown(raidPtrs[unit]);

			/* It's no longer initialized... */
			rs->sc_flags &= ~RAIDF_INITED;

			/* detach the device */

			cf = device_cfdata(rs->sc_dev);
			error = config_detach(rs->sc_dev, DETACH_QUIET);
			free(cf, M_RAIDFRAME);

			/* Detach the disk. */
			disk_detach(&rs->sc_dkdev);
			disk_destroy(&rs->sc_dkdev);
		}
	}

	raidunlock(rs);
	return (0);

}
860
861 void
862 raidstrategy(struct buf *bp)
863 {
864 int s;
865
866 unsigned int raidID = raidunit(bp->b_dev);
867 RF_Raid_t *raidPtr;
868 struct raid_softc *rs = &raid_softc[raidID];
869 int wlabel;
870
871 if ((rs->sc_flags & RAIDF_INITED) ==0) {
872 bp->b_error = ENXIO;
873 goto done;
874 }
875 if (raidID >= numraid || !raidPtrs[raidID]) {
876 bp->b_error = ENODEV;
877 goto done;
878 }
879 raidPtr = raidPtrs[raidID];
880 if (!raidPtr->valid) {
881 bp->b_error = ENODEV;
882 goto done;
883 }
884 if (bp->b_bcount == 0) {
885 db1_printf(("b_bcount is zero..\n"));
886 goto done;
887 }
888
889 /*
890 * Do bounds checking and adjust transfer. If there's an
891 * error, the bounds check will flag that for us.
892 */
893
894 wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
895 if (DISKPART(bp->b_dev) == RAW_PART) {
896 uint64_t size; /* device size in DEV_BSIZE unit */
897
898 if (raidPtr->logBytesPerSector > DEV_BSHIFT) {
899 size = raidPtr->totalSectors <<
900 (raidPtr->logBytesPerSector - DEV_BSHIFT);
901 } else {
902 size = raidPtr->totalSectors >>
903 (DEV_BSHIFT - raidPtr->logBytesPerSector);
904 }
905 if (bounds_check_with_mediasize(bp, DEV_BSIZE, size) <= 0) {
906 goto done;
907 }
908 } else {
909 if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
910 db1_printf(("Bounds check failed!!:%d %d\n",
911 (int) bp->b_blkno, (int) wlabel));
912 goto done;
913 }
914 }
915 s = splbio();
916
917 bp->b_resid = 0;
918
919 /* stuff it onto our queue */
920 bufq_put(rs->buf_queue, bp);
921
922 /* scheduled the IO to happen at the next convenient time */
923 wakeup(&(raidPtrs[raidID]->iodone));
924
925 splx(s);
926 return;
927
928 done:
929 bp->b_resid = bp->b_bcount;
930 biodone(bp);
931 }
932 /* ARGSUSED */
933 int
934 raidread(dev_t dev, struct uio *uio, int flags)
935 {
936 int unit = raidunit(dev);
937 struct raid_softc *rs;
938
939 if (unit >= numraid)
940 return (ENXIO);
941 rs = &raid_softc[unit];
942
943 if ((rs->sc_flags & RAIDF_INITED) == 0)
944 return (ENXIO);
945
946 return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
947
948 }
949 /* ARGSUSED */
950 int
951 raidwrite(dev_t dev, struct uio *uio, int flags)
952 {
953 int unit = raidunit(dev);
954 struct raid_softc *rs;
955
956 if (unit >= numraid)
957 return (ENXIO);
958 rs = &raid_softc[unit];
959
960 if ((rs->sc_flags & RAIDF_INITED) == 0)
961 return (ENXIO);
962
963 return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
964
965 }
966
967 int
968 raidioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
969 {
970 int unit = raidunit(dev);
971 int error = 0;
972 int part, pmask;
973 cfdata_t cf;
974 struct raid_softc *rs;
975 RF_Config_t *k_cfg, *u_cfg;
976 RF_Raid_t *raidPtr;
977 RF_RaidDisk_t *diskPtr;
978 RF_AccTotals_t *totals;
979 RF_DeviceConfig_t *d_cfg, **ucfgp;
980 u_char *specific_buf;
981 int retcode = 0;
982 int column;
983 int raidid;
984 struct rf_recon_req *rrcopy, *rr;
985 RF_ComponentLabel_t *clabel;
986 RF_ComponentLabel_t *ci_label;
987 RF_ComponentLabel_t **clabel_ptr;
988 RF_SingleComponent_t *sparePtr,*componentPtr;
989 RF_SingleComponent_t component;
990 RF_ProgressInfo_t progressInfo, **progressInfoPtr;
991 int i, j, d;
992 #ifdef __HAVE_OLD_DISKLABEL
993 struct disklabel newlabel;
994 #endif
995 struct dkwedge_info *dkw;
996
997 if (unit >= numraid)
998 return (ENXIO);
999 rs = &raid_softc[unit];
1000 raidPtr = raidPtrs[unit];
1001
1002 db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
1003 (int) DISKPART(dev), (int) unit, (int) cmd));
1004
1005 /* Must be open for writes for these commands... */
1006 switch (cmd) {
1007 #ifdef DIOCGSECTORSIZE
1008 case DIOCGSECTORSIZE:
1009 *(u_int *)data = raidPtr->bytesPerSector;
1010 return 0;
1011 case DIOCGMEDIASIZE:
1012 *(off_t *)data =
1013 (off_t)raidPtr->totalSectors * raidPtr->bytesPerSector;
1014 return 0;
1015 #endif
1016 case DIOCSDINFO:
1017 case DIOCWDINFO:
1018 #ifdef __HAVE_OLD_DISKLABEL
1019 case ODIOCWDINFO:
1020 case ODIOCSDINFO:
1021 #endif
1022 case DIOCWLABEL:
1023 case DIOCAWEDGE:
1024 case DIOCDWEDGE:
1025 if ((flag & FWRITE) == 0)
1026 return (EBADF);
1027 }
1028
1029 /* Must be initialized for these... */
1030 switch (cmd) {
1031 case DIOCGDINFO:
1032 case DIOCSDINFO:
1033 case DIOCWDINFO:
1034 #ifdef __HAVE_OLD_DISKLABEL
1035 case ODIOCGDINFO:
1036 case ODIOCWDINFO:
1037 case ODIOCSDINFO:
1038 case ODIOCGDEFLABEL:
1039 #endif
1040 case DIOCGPART:
1041 case DIOCWLABEL:
1042 case DIOCGDEFLABEL:
1043 case DIOCAWEDGE:
1044 case DIOCDWEDGE:
1045 case DIOCLWEDGES:
1046 case DIOCCACHESYNC:
1047 case RAIDFRAME_SHUTDOWN:
1048 case RAIDFRAME_REWRITEPARITY:
1049 case RAIDFRAME_GET_INFO:
1050 case RAIDFRAME_RESET_ACCTOTALS:
1051 case RAIDFRAME_GET_ACCTOTALS:
1052 case RAIDFRAME_KEEP_ACCTOTALS:
1053 case RAIDFRAME_GET_SIZE:
1054 case RAIDFRAME_FAIL_DISK:
1055 case RAIDFRAME_COPYBACK:
1056 case RAIDFRAME_CHECK_RECON_STATUS:
1057 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1058 case RAIDFRAME_GET_COMPONENT_LABEL:
1059 case RAIDFRAME_SET_COMPONENT_LABEL:
1060 case RAIDFRAME_ADD_HOT_SPARE:
1061 case RAIDFRAME_REMOVE_HOT_SPARE:
1062 case RAIDFRAME_INIT_LABELS:
1063 case RAIDFRAME_REBUILD_IN_PLACE:
1064 case RAIDFRAME_CHECK_PARITY:
1065 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1066 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1067 case RAIDFRAME_CHECK_COPYBACK_STATUS:
1068 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1069 case RAIDFRAME_SET_AUTOCONFIG:
1070 case RAIDFRAME_SET_ROOT:
1071 case RAIDFRAME_DELETE_COMPONENT:
1072 case RAIDFRAME_INCORPORATE_HOT_SPARE:
1073 if ((rs->sc_flags & RAIDF_INITED) == 0)
1074 return (ENXIO);
1075 }
1076
1077 switch (cmd) {
1078 #ifdef COMPAT_50
1079 case RAIDFRAME_GET_INFO50:
1080 return rf_get_info50(raidPtr, data);
1081
1082 case RAIDFRAME_CONFIGURE50:
1083 if ((retcode = rf_config50(raidPtr, unit, data, &k_cfg)) != 0)
1084 return retcode;
1085 goto config;
1086 #endif
1087 /* configure the system */
1088 case RAIDFRAME_CONFIGURE:
1089
1090 if (raidPtr->valid) {
1091 /* There is a valid RAID set running on this unit! */
1092 printf("raid%d: Device already configured!\n",unit);
1093 return(EINVAL);
1094 }
1095
1096 /* copy-in the configuration information */
1097 /* data points to a pointer to the configuration structure */
1098
1099 u_cfg = *((RF_Config_t **) data);
1100 RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
1101 if (k_cfg == NULL) {
1102 return (ENOMEM);
1103 }
1104 retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
1105 if (retcode) {
1106 RF_Free(k_cfg, sizeof(RF_Config_t));
1107 db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
1108 retcode));
1109 return (retcode);
1110 }
1111 goto config;
1112 config:
1113 /* allocate a buffer for the layout-specific data, and copy it
1114 * in */
1115 if (k_cfg->layoutSpecificSize) {
1116 if (k_cfg->layoutSpecificSize > 10000) {
1117 /* sanity check */
1118 RF_Free(k_cfg, sizeof(RF_Config_t));
1119 return (EINVAL);
1120 }
1121 RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
1122 (u_char *));
1123 if (specific_buf == NULL) {
1124 RF_Free(k_cfg, sizeof(RF_Config_t));
1125 return (ENOMEM);
1126 }
1127 retcode = copyin(k_cfg->layoutSpecific, specific_buf,
1128 k_cfg->layoutSpecificSize);
1129 if (retcode) {
1130 RF_Free(k_cfg, sizeof(RF_Config_t));
1131 RF_Free(specific_buf,
1132 k_cfg->layoutSpecificSize);
1133 db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
1134 retcode));
1135 return (retcode);
1136 }
1137 } else
1138 specific_buf = NULL;
1139 k_cfg->layoutSpecific = specific_buf;
1140
1141 /* should do some kind of sanity check on the configuration.
1142 * Store the sum of all the bytes in the last byte? */
1143
1144 /* configure the system */
1145
1146 /*
1147 * Clear the entire RAID descriptor, just to make sure
1148 * there is no stale data left in the case of a
1149 * reconfiguration
1150 */
1151 memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
1152 raidPtr->raidid = unit;
1153
1154 retcode = rf_Configure(raidPtr, k_cfg, NULL);
1155
1156 if (retcode == 0) {
1157
1158 /* allow this many simultaneous IO's to
1159 this RAID device */
1160 raidPtr->openings = RAIDOUTSTANDING;
1161
1162 raidinit(raidPtr);
1163 rf_markalldirty(raidPtr);
1164 }
1165 /* free the buffers. No return code here. */
1166 if (k_cfg->layoutSpecificSize) {
1167 RF_Free(specific_buf, k_cfg->layoutSpecificSize);
1168 }
1169 RF_Free(k_cfg, sizeof(RF_Config_t));
1170
1171 return (retcode);
1172
1173 /* shutdown the system */
1174 case RAIDFRAME_SHUTDOWN:
1175
1176 if ((error = raidlock(rs)) != 0)
1177 return (error);
1178
1179 /*
1180 * If somebody has a partition mounted, we shouldn't
1181 * shutdown.
1182 */
1183
1184 part = DISKPART(dev);
1185 pmask = (1 << part);
1186 if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
1187 ((rs->sc_dkdev.dk_bopenmask & pmask) &&
1188 (rs->sc_dkdev.dk_copenmask & pmask))) {
1189 raidunlock(rs);
1190 return (EBUSY);
1191 }
1192
1193 retcode = rf_Shutdown(raidPtr);
1194
1195 /* It's no longer initialized... */
1196 rs->sc_flags &= ~RAIDF_INITED;
1197
1198 /* free the pseudo device attach bits */
1199
1200 cf = device_cfdata(rs->sc_dev);
1201 /* XXX this causes us to not return any errors
1202 from the above call to rf_Shutdown() */
1203 retcode = config_detach(rs->sc_dev, DETACH_QUIET);
1204 free(cf, M_RAIDFRAME);
1205
1206 /* Detach the disk. */
1207 disk_detach(&rs->sc_dkdev);
1208 disk_destroy(&rs->sc_dkdev);
1209
1210 raidunlock(rs);
1211
1212 return (retcode);
1213 case RAIDFRAME_GET_COMPONENT_LABEL:
1214 clabel_ptr = (RF_ComponentLabel_t **) data;
1215 /* need to read the component label for the disk indicated
1216 by row,column in clabel */
1217
		/* For practice, let's get it directly from disk, rather
		   than from the in-core copy */
1220 RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
1221 (RF_ComponentLabel_t *));
1222 if (clabel == NULL)
1223 return (ENOMEM);
1224
1225 retcode = copyin( *clabel_ptr, clabel,
1226 sizeof(RF_ComponentLabel_t));
1227
1228 if (retcode) {
1229 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1230 return(retcode);
1231 }
1232
1233 clabel->row = 0; /* Don't allow looking at anything else.*/
1234
1235 column = clabel->column;
1236
1237 if ((column < 0) || (column >= raidPtr->numCol +
1238 raidPtr->numSpare)) {
1239 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1240 return(EINVAL);
1241 }
1242
1243 retcode = raidread_component_label(raidPtr->Disks[column].dev,
1244 raidPtr->raid_cinfo[column].ci_vp,
1245 clabel );
1246
1247 if (retcode == 0) {
1248 retcode = copyout(clabel, *clabel_ptr,
1249 sizeof(RF_ComponentLabel_t));
1250 }
1251 RF_Free(clabel, sizeof(RF_ComponentLabel_t));
1252 return (retcode);
1253
1254 case RAIDFRAME_SET_COMPONENT_LABEL:
1255 clabel = (RF_ComponentLabel_t *) data;
1256
1257 /* XXX check the label for valid stuff... */
1258 /* Note that some things *should not* get modified --
1259 the user should be re-initing the labels instead of
1260 trying to patch things.
1261 */
1262
1263 raidid = raidPtr->raidid;
1264 #ifdef DEBUG
1265 printf("raid%d: Got component label:\n", raidid);
1266 printf("raid%d: Version: %d\n", raidid, clabel->version);
1267 printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1268 printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1269 printf("raid%d: Column: %d\n", raidid, clabel->column);
1270 printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1271 printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1272 printf("raid%d: Status: %d\n", raidid, clabel->status);
1273 #endif
1274 clabel->row = 0;
1275 column = clabel->column;
1276
1277 if ((column < 0) || (column >= raidPtr->numCol)) {
1278 return(EINVAL);
1279 }
1280
1281 /* XXX this isn't allowed to do anything for now :-) */
1282
1283 /* XXX and before it is, we need to fill in the rest
1284 of the fields!?!?!?! */
1285 #if 0
1286 raidwrite_component_label(
1287 raidPtr->Disks[column].dev,
1288 raidPtr->raid_cinfo[column].ci_vp,
1289 clabel );
1290 #endif
1291 return (0);
1292
1293 case RAIDFRAME_INIT_LABELS:
1294 clabel = (RF_ComponentLabel_t *) data;
1295 /*
1296 we only want the serial number from
1297 the above. We get all the rest of the information
1298 from the config that was used to create this RAID
1299 set.
1300 */
1301
1302 raidPtr->serial_number = clabel->serial_number;
1303
1304 RF_Malloc(ci_label, sizeof(RF_ComponentLabel_t),
1305 (RF_ComponentLabel_t *));
1306 if (ci_label == NULL)
1307 return (ENOMEM);
1308
1309 raid_init_component_label(raidPtr, ci_label);
1310 ci_label->serial_number = clabel->serial_number;
1311 ci_label->row = 0; /* we dont' pretend to support more */
1312
1313 for(column=0;column<raidPtr->numCol;column++) {
1314 diskPtr = &raidPtr->Disks[column];
1315 if (!RF_DEAD_DISK(diskPtr->status)) {
1316 ci_label->partitionSize = diskPtr->partitionSize;
1317 ci_label->column = column;
1318 raidwrite_component_label(
1319 raidPtr->Disks[column].dev,
1320 raidPtr->raid_cinfo[column].ci_vp,
1321 ci_label );
1322 }
1323 }
1324 RF_Free(ci_label, sizeof(RF_ComponentLabel_t));
1325
1326 return (retcode);
1327 case RAIDFRAME_SET_AUTOCONFIG:
1328 d = rf_set_autoconfig(raidPtr, *(int *) data);
1329 printf("raid%d: New autoconfig value is: %d\n",
1330 raidPtr->raidid, d);
1331 *(int *) data = d;
1332 return (retcode);
1333
1334 case RAIDFRAME_SET_ROOT:
1335 d = rf_set_rootpartition(raidPtr, *(int *) data);
1336 printf("raid%d: New rootpartition value is: %d\n",
1337 raidPtr->raidid, d);
1338 *(int *) data = d;
1339 return (retcode);
1340
1341 /* initialize all parity */
1342 case RAIDFRAME_REWRITEPARITY:
1343
1344 if (raidPtr->Layout.map->faultsTolerated == 0) {
1345 /* Parity for RAID 0 is trivially correct */
1346 raidPtr->parity_good = RF_RAID_CLEAN;
1347 return(0);
1348 }
1349
1350 if (raidPtr->parity_rewrite_in_progress == 1) {
1351 /* Re-write is already in progress! */
1352 return(EINVAL);
1353 }
1354
1355 retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1356 rf_RewriteParityThread,
1357 raidPtr,"raid_parity");
1358 return (retcode);
1359
1360
1361 case RAIDFRAME_ADD_HOT_SPARE:
1362 sparePtr = (RF_SingleComponent_t *) data;
1363 memcpy( &component, sparePtr, sizeof(RF_SingleComponent_t));
1364 retcode = rf_add_hot_spare(raidPtr, &component);
1365 return(retcode);
1366
1367 case RAIDFRAME_REMOVE_HOT_SPARE:
1368 return(retcode);
1369
1370 case RAIDFRAME_DELETE_COMPONENT:
1371 componentPtr = (RF_SingleComponent_t *)data;
1372 memcpy( &component, componentPtr,
1373 sizeof(RF_SingleComponent_t));
1374 retcode = rf_delete_component(raidPtr, &component);
1375 return(retcode);
1376
1377 case RAIDFRAME_INCORPORATE_HOT_SPARE:
1378 componentPtr = (RF_SingleComponent_t *)data;
1379 memcpy( &component, componentPtr,
1380 sizeof(RF_SingleComponent_t));
1381 retcode = rf_incorporate_hot_spare(raidPtr, &component);
1382 return(retcode);
1383
1384 case RAIDFRAME_REBUILD_IN_PLACE:
1385
1386 if (raidPtr->Layout.map->faultsTolerated == 0) {
1387 /* Can't do this on a RAID 0!! */
1388 return(EINVAL);
1389 }
1390
1391 if (raidPtr->recon_in_progress == 1) {
1392 /* a reconstruct is already in progress! */
1393 return(EINVAL);
1394 }
1395
1396 componentPtr = (RF_SingleComponent_t *) data;
1397 memcpy( &component, componentPtr,
1398 sizeof(RF_SingleComponent_t));
1399 component.row = 0; /* we don't support any more */
1400 column = component.column;
1401
1402 if ((column < 0) || (column >= raidPtr->numCol)) {
1403 return(EINVAL);
1404 }
1405
1406 RF_LOCK_MUTEX(raidPtr->mutex);
1407 if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
1408 (raidPtr->numFailures > 0)) {
1409 /* XXX 0 above shouldn't be constant!!! */
1410 /* some component other than this has failed.
1411 Let's not make things worse than they already
1412 are... */
1413 printf("raid%d: Unable to reconstruct to disk at:\n",
1414 raidPtr->raidid);
1415 printf("raid%d: Col: %d Too many failures.\n",
1416 raidPtr->raidid, column);
1417 RF_UNLOCK_MUTEX(raidPtr->mutex);
1418 return (EINVAL);
1419 }
1420 if (raidPtr->Disks[column].status ==
1421 rf_ds_reconstructing) {
1422 printf("raid%d: Unable to reconstruct to disk at:\n",
1423 raidPtr->raidid);
1424 printf("raid%d: Col: %d Reconstruction already occuring!\n", raidPtr->raidid, column);
1425
1426 RF_UNLOCK_MUTEX(raidPtr->mutex);
1427 return (EINVAL);
1428 }
1429 if (raidPtr->Disks[column].status == rf_ds_spared) {
1430 RF_UNLOCK_MUTEX(raidPtr->mutex);
1431 return (EINVAL);
1432 }
1433 RF_UNLOCK_MUTEX(raidPtr->mutex);
1434
1435 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1436 if (rrcopy == NULL)
1437 return(ENOMEM);
1438
1439 rrcopy->raidPtr = (void *) raidPtr;
1440 rrcopy->col = column;
1441
1442 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1443 rf_ReconstructInPlaceThread,
1444 rrcopy,"raid_reconip");
1445 return(retcode);
1446
1447 case RAIDFRAME_GET_INFO:
1448 if (!raidPtr->valid)
1449 return (ENODEV);
1450 ucfgp = (RF_DeviceConfig_t **) data;
1451 RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1452 (RF_DeviceConfig_t *));
1453 if (d_cfg == NULL)
1454 return (ENOMEM);
1455 d_cfg->rows = 1; /* there is only 1 row now */
1456 d_cfg->cols = raidPtr->numCol;
1457 d_cfg->ndevs = raidPtr->numCol;
1458 if (d_cfg->ndevs >= RF_MAX_DISKS) {
1459 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1460 return (ENOMEM);
1461 }
1462 d_cfg->nspares = raidPtr->numSpare;
1463 if (d_cfg->nspares >= RF_MAX_DISKS) {
1464 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1465 return (ENOMEM);
1466 }
1467 d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1468 d = 0;
1469 for (j = 0; j < d_cfg->cols; j++) {
1470 d_cfg->devs[d] = raidPtr->Disks[j];
1471 d++;
1472 }
1473 for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1474 d_cfg->spares[i] = raidPtr->Disks[j];
1475 }
1476 retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
1477 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1478
1479 return (retcode);
1480
1481 case RAIDFRAME_CHECK_PARITY:
1482 *(int *) data = raidPtr->parity_good;
1483 return (0);
1484
1485 case RAIDFRAME_RESET_ACCTOTALS:
1486 memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1487 return (0);
1488
1489 case RAIDFRAME_GET_ACCTOTALS:
1490 totals = (RF_AccTotals_t *) data;
1491 *totals = raidPtr->acc_totals;
1492 return (0);
1493
1494 case RAIDFRAME_KEEP_ACCTOTALS:
1495 raidPtr->keep_acc_totals = *(int *)data;
1496 return (0);
1497
1498 case RAIDFRAME_GET_SIZE:
1499 *(int *) data = raidPtr->totalSectors;
1500 return (0);
1501
1502 /* fail a disk & optionally start reconstruction */
1503 case RAIDFRAME_FAIL_DISK:
1504
1505 if (raidPtr->Layout.map->faultsTolerated == 0) {
1506 /* Can't do this on a RAID 0!! */
1507 return(EINVAL);
1508 }
1509
1510 rr = (struct rf_recon_req *) data;
1511 rr->row = 0;
1512 if (rr->col < 0 || rr->col >= raidPtr->numCol)
1513 return (EINVAL);
1514
1515
1516 RF_LOCK_MUTEX(raidPtr->mutex);
1517 if (raidPtr->status == rf_rs_reconstructing) {
1518 /* you can't fail a disk while we're reconstructing! */
1519 /* XXX wrong for RAID6 */
1520 RF_UNLOCK_MUTEX(raidPtr->mutex);
1521 return (EINVAL);
1522 }
1523 if ((raidPtr->Disks[rr->col].status ==
1524 rf_ds_optimal) && (raidPtr->numFailures > 0)) {
1525 /* some other component has failed. Let's not make
1526 things worse. XXX wrong for RAID6 */
1527 RF_UNLOCK_MUTEX(raidPtr->mutex);
1528 return (EINVAL);
1529 }
1530 if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
1531 /* Can't fail a spared disk! */
1532 RF_UNLOCK_MUTEX(raidPtr->mutex);
1533 return (EINVAL);
1534 }
1535 RF_UNLOCK_MUTEX(raidPtr->mutex);
1536
1537 /* make a copy of the recon request so that we don't rely on
1538 * the user's buffer */
1539 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1540 if (rrcopy == NULL)
1541 return(ENOMEM);
1542 memcpy(rrcopy, rr, sizeof(*rr));
1543 rrcopy->raidPtr = (void *) raidPtr;
1544
1545 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1546 rf_ReconThread,
1547 rrcopy,"raid_recon");
1548 return (0);
1549
1550 /* invoke a copyback operation after recon on whatever disk
1551 * needs it, if any */
1552 case RAIDFRAME_COPYBACK:
1553
1554 if (raidPtr->Layout.map->faultsTolerated == 0) {
1555 /* This makes no sense on a RAID 0!! */
1556 return(EINVAL);
1557 }
1558
1559 if (raidPtr->copyback_in_progress == 1) {
1560 /* Copyback is already in progress! */
1561 return(EINVAL);
1562 }
1563
1564 retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1565 rf_CopybackThread,
1566 raidPtr,"raid_copyback");
1567 return (retcode);
1568
1569 /* return the percentage completion of reconstruction */
1570 case RAIDFRAME_CHECK_RECON_STATUS:
1571 if (raidPtr->Layout.map->faultsTolerated == 0) {
1572 /* This makes no sense on a RAID 0, so tell the
1573 user it's done. */
1574 *(int *) data = 100;
1575 return(0);
1576 }
1577 if (raidPtr->status != rf_rs_reconstructing)
1578 *(int *) data = 100;
1579 else {
1580 if (raidPtr->reconControl->numRUsTotal > 0) {
1581 *(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
1582 } else {
1583 *(int *) data = 0;
1584 }
1585 }
1586 return (0);
1587 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1588 progressInfoPtr = (RF_ProgressInfo_t **) data;
1589 if (raidPtr->status != rf_rs_reconstructing) {
1590 progressInfo.remaining = 0;
1591 progressInfo.completed = 100;
1592 progressInfo.total = 100;
1593 } else {
1594 progressInfo.total =
1595 raidPtr->reconControl->numRUsTotal;
1596 progressInfo.completed =
1597 raidPtr->reconControl->numRUsComplete;
1598 progressInfo.remaining = progressInfo.total -
1599 progressInfo.completed;
1600 }
1601 retcode = copyout(&progressInfo, *progressInfoPtr,
1602 sizeof(RF_ProgressInfo_t));
1603 return (retcode);
1604
1605 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1606 if (raidPtr->Layout.map->faultsTolerated == 0) {
1607 /* This makes no sense on a RAID 0, so tell the
1608 user it's done. */
1609 *(int *) data = 100;
1610 return(0);
1611 }
1612 if (raidPtr->parity_rewrite_in_progress == 1) {
1613 *(int *) data = 100 *
1614 raidPtr->parity_rewrite_stripes_done /
1615 raidPtr->Layout.numStripe;
1616 } else {
1617 *(int *) data = 100;
1618 }
1619 return (0);
1620
1621 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1622 progressInfoPtr = (RF_ProgressInfo_t **) data;
1623 if (raidPtr->parity_rewrite_in_progress == 1) {
1624 progressInfo.total = raidPtr->Layout.numStripe;
1625 progressInfo.completed =
1626 raidPtr->parity_rewrite_stripes_done;
1627 progressInfo.remaining = progressInfo.total -
1628 progressInfo.completed;
1629 } else {
1630 progressInfo.remaining = 0;
1631 progressInfo.completed = 100;
1632 progressInfo.total = 100;
1633 }
1634 retcode = copyout(&progressInfo, *progressInfoPtr,
1635 sizeof(RF_ProgressInfo_t));
1636 return (retcode);
1637
1638 case RAIDFRAME_CHECK_COPYBACK_STATUS:
1639 if (raidPtr->Layout.map->faultsTolerated == 0) {
1640 /* This makes no sense on a RAID 0 */
1641 *(int *) data = 100;
1642 return(0);
1643 }
1644 if (raidPtr->copyback_in_progress == 1) {
1645 *(int *) data = 100 * raidPtr->copyback_stripes_done /
1646 raidPtr->Layout.numStripe;
1647 } else {
1648 *(int *) data = 100;
1649 }
1650 return (0);
1651
1652 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1653 progressInfoPtr = (RF_ProgressInfo_t **) data;
1654 if (raidPtr->copyback_in_progress == 1) {
1655 progressInfo.total = raidPtr->Layout.numStripe;
1656 progressInfo.completed =
1657 raidPtr->copyback_stripes_done;
1658 progressInfo.remaining = progressInfo.total -
1659 progressInfo.completed;
1660 } else {
1661 progressInfo.remaining = 0;
1662 progressInfo.completed = 100;
1663 progressInfo.total = 100;
1664 }
1665 retcode = copyout(&progressInfo, *progressInfoPtr,
1666 sizeof(RF_ProgressInfo_t));
1667 return (retcode);
1668
1669 /* the sparetable daemon calls this to wait for the kernel to
1670 * need a spare table. this ioctl does not return until a
1671 * spare table is needed. XXX -- calling mpsleep here in the
1672 * ioctl code is almost certainly wrong and evil. -- XXX XXX
1673 * -- I should either compute the spare table in the kernel,
1674 * or have a different -- XXX XXX -- interface (a different
1675 * character device) for delivering the table -- XXX */
1676 #if 0
1677 case RAIDFRAME_SPARET_WAIT:
1678 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1679 while (!rf_sparet_wait_queue)
1680 mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1681 waitreq = rf_sparet_wait_queue;
1682 rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1683 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1684
1685 /* structure assignment */
1686 *((RF_SparetWait_t *) data) = *waitreq;
1687
1688 RF_Free(waitreq, sizeof(*waitreq));
1689 return (0);
1690
	/* wakes up a process waiting on SPARET_WAIT and puts an error
	 * code in it that will cause the daemon to exit */
1693 case RAIDFRAME_ABORT_SPARET_WAIT:
1694 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1695 waitreq->fcol = -1;
1696 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1697 waitreq->next = rf_sparet_wait_queue;
1698 rf_sparet_wait_queue = waitreq;
1699 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1700 wakeup(&rf_sparet_wait_queue);
1701 return (0);
1702
1703 /* used by the spare table daemon to deliver a spare table
1704 * into the kernel */
1705 case RAIDFRAME_SEND_SPARET:
1706
1707 /* install the spare table */
1708 retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1709
1710 /* respond to the requestor. the return status of the spare
1711 * table installation is passed in the "fcol" field */
1712 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1713 waitreq->fcol = retcode;
1714 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1715 waitreq->next = rf_sparet_resp_queue;
1716 rf_sparet_resp_queue = waitreq;
1717 wakeup(&rf_sparet_resp_queue);
1718 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1719
1720 return (retcode);
1721 #endif
1722
1723 default:
1724 break; /* fall through to the os-specific code below */
1725
1726 }
1727
1728 if (!raidPtr->valid)
1729 return (EINVAL);
1730
1731 /*
1732 * Add support for "regular" device ioctls here.
1733 */
1734
1735 error = disk_ioctl(&rs->sc_dkdev, cmd, data, flag, l);
1736 if (error != EPASSTHROUGH)
1737 return (error);
1738
1739 switch (cmd) {
1740 case DIOCGDINFO:
1741 *(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1742 break;
1743 #ifdef __HAVE_OLD_DISKLABEL
1744 case ODIOCGDINFO:
1745 newlabel = *(rs->sc_dkdev.dk_label);
1746 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1747 return ENOTTY;
1748 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1749 break;
1750 #endif
1751
1752 case DIOCGPART:
1753 ((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1754 ((struct partinfo *) data)->part =
1755 &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1756 break;
1757
1758 case DIOCWDINFO:
1759 case DIOCSDINFO:
1760 #ifdef __HAVE_OLD_DISKLABEL
1761 case ODIOCWDINFO:
1762 case ODIOCSDINFO:
1763 #endif
1764 {
1765 struct disklabel *lp;
1766 #ifdef __HAVE_OLD_DISKLABEL
1767 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1768 memset(&newlabel, 0, sizeof newlabel);
1769 memcpy(&newlabel, data, sizeof (struct olddisklabel));
1770 lp = &newlabel;
1771 } else
1772 #endif
1773 lp = (struct disklabel *)data;
1774
1775 if ((error = raidlock(rs)) != 0)
1776 return (error);
1777
1778 rs->sc_flags |= RAIDF_LABELLING;
1779
1780 error = setdisklabel(rs->sc_dkdev.dk_label,
1781 lp, 0, rs->sc_dkdev.dk_cpulabel);
1782 if (error == 0) {
1783 if (cmd == DIOCWDINFO
1784 #ifdef __HAVE_OLD_DISKLABEL
1785 || cmd == ODIOCWDINFO
1786 #endif
1787 )
1788 error = writedisklabel(RAIDLABELDEV(dev),
1789 raidstrategy, rs->sc_dkdev.dk_label,
1790 rs->sc_dkdev.dk_cpulabel);
1791 }
1792 rs->sc_flags &= ~RAIDF_LABELLING;
1793
1794 raidunlock(rs);
1795
1796 if (error)
1797 return (error);
1798 break;
1799 }
1800
1801 case DIOCWLABEL:
1802 if (*(int *) data != 0)
1803 rs->sc_flags |= RAIDF_WLABEL;
1804 else
1805 rs->sc_flags &= ~RAIDF_WLABEL;
1806 break;
1807
1808 case DIOCGDEFLABEL:
1809 raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
1810 break;
1811
1812 #ifdef __HAVE_OLD_DISKLABEL
1813 case ODIOCGDEFLABEL:
1814 raidgetdefaultlabel(raidPtr, rs, &newlabel);
1815 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1816 return ENOTTY;
1817 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1818 break;
1819 #endif
1820
1821 case DIOCAWEDGE:
1822 case DIOCDWEDGE:
1823 dkw = (void *)data;
1824
1825 /* If the ioctl happens here, the parent is us. */
1826 (void)strcpy(dkw->dkw_parent, rs->sc_xname);
1827 return cmd == DIOCAWEDGE ? dkwedge_add(dkw) : dkwedge_del(dkw);
1828
1829 case DIOCLWEDGES:
1830 return dkwedge_list(&rs->sc_dkdev,
1831 (struct dkwedge_list *)data, l);
1832 case DIOCCACHESYNC:
1833 return rf_sync_component_caches(raidPtr);
1834 default:
1835 retcode = ENOTTY;
1836 }
1837 return (retcode);
1838
1839 }
1840
1841
1842 /* raidinit -- complete the rest of the initialization for the
1843 RAIDframe device. */
1844
1845
1846 static void
1847 raidinit(RF_Raid_t *raidPtr)
1848 {
1849 cfdata_t cf;
1850 struct raid_softc *rs;
1851 int unit;
1852
1853 unit = raidPtr->raidid;
1854
1855 rs = &raid_softc[unit];
1856
1857 /* XXX should check return code first... */
1858 rs->sc_flags |= RAIDF_INITED;
1859
1860 /* XXX doesn't check bounds. */
1861 snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
1862
1863 /* attach the pseudo device */
1864 cf = malloc(sizeof(*cf), M_RAIDFRAME, M_WAITOK);
1865 cf->cf_name = raid_cd.cd_name;
1866 cf->cf_atname = raid_cd.cd_name;
1867 cf->cf_unit = unit;
1868 cf->cf_fstate = FSTATE_STAR;
1869
1870 rs->sc_dev = config_attach_pseudo(cf);
1871
1872 if (rs->sc_dev==NULL) {
1873 printf("raid%d: config_attach_pseudo failed\n",
1874 raidPtr->raidid);
1875 rs->sc_flags &= ~RAIDF_INITED;
1876 free(cf, M_RAIDFRAME);
1877 return;
1878 }
1879
1880 /* disk_attach actually creates space for the CPU disklabel, among
1881 * other things, so it's critical to call this *BEFORE* we try putzing
1882 * with disklabels. */
1883
1884 disk_init(&rs->sc_dkdev, rs->sc_xname, &rf_dkdriver);
1885 disk_attach(&rs->sc_dkdev);
1886
1887 /* XXX There may be a weird interaction here between this, and
1888 * protectedSectors, as used in RAIDframe. */
1889
1890 rs->sc_size = raidPtr->totalSectors;
1891
1892 dkwedge_discover(&rs->sc_dkdev);
1893
1894 rf_set_properties(rs, raidPtr);
1895
1896 }
1897 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
1898 /* wake up the daemon & tell it to get us a spare table
1899 * XXX
1900 * the entries in the queues should be tagged with the raidPtr
1901 * so that in the extremely rare case that two recons happen at once,
 * we know for which device we're requesting a spare table
1903 * XXX
1904 *
1905 * XXX This code is not currently used. GO
1906 */
int
rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
{
	int     retcode;

	/*
	 * Hand the request to the user-level sparetable daemon: link it
	 * onto the global wait queue and wake whoever is sleeping on it.
	 */
	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
	req->next = rf_sparet_wait_queue;
	rf_sparet_wait_queue = req;
	wakeup(&rf_sparet_wait_queue);

	/* mpsleep unlocks the mutex */
	/*
	 * NOTE(review): the comment above looks stale -- this now uses
	 * tsleep(), which does not release rf_sparet_wait_mutex, so the
	 * mutex appears to be held across the sleep.  Verify the locking
	 * if this (currently unused, per the comment above the function)
	 * path is ever re-enabled.
	 */
	while (!rf_sparet_resp_queue) {
		tsleep(&rf_sparet_resp_queue, PRIBIO,
		       "raidframe getsparetable", 0);
	}
	/* Dequeue the daemon's response. */
	req = rf_sparet_resp_queue;
	rf_sparet_resp_queue = req->next;
	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);

	/* The spare-table installation status is passed back in fcol. */
	retcode = req->fcol;
	RF_Free(req, sizeof(*req));	/* this is not the same req as we
					 * alloc'd */
	return (retcode);
}
1931 #endif
1932
1933 /* a wrapper around rf_DoAccess that extracts appropriate info from the
1934 * bp & passes it down.
1935 * any calls originating in the kernel must use non-blocking I/O
1936 * do some extra sanity checking to return "appropriate" error values for
1937 * certain conditions (to make some standard utilities work)
1938 *
1939 * Formerly known as: rf_DoAccessKernel
1940 */
void
raidstart(RF_Raid_t *raidPtr)
{
	RF_SectorCount_t num_blocks, pb, sum;
	RF_RaidAddr_t raid_addr;
	struct partition *pp;
	daddr_t blocknum;
	int     unit;
	struct raid_softc *rs;
	int     do_async;
	struct buf *bp;
	int     rc;

	unit = raidPtr->raidid;
	rs = &raid_softc[unit];

	/* quick check to see if anything has died recently */
	RF_LOCK_MUTEX(raidPtr->mutex);
	if (raidPtr->numNewFailures > 0) {
		/* Drop the mutex while the component labels are updated;
		 * it is retaken before numNewFailures is decremented. */
		RF_UNLOCK_MUTEX(raidPtr->mutex);
		rf_update_component_labels(raidPtr,
					   RF_NORMAL_COMPONENT_UPDATE);
		RF_LOCK_MUTEX(raidPtr->mutex);
		raidPtr->numNewFailures--;
	}

	/* Check to see if we're at the limit... */
	/* Issue queued buffers for as long as openings remain.  The mutex
	 * is held at each loop-top test and released while a buffer is
	 * being processed. */
	while (raidPtr->openings > 0) {
		RF_UNLOCK_MUTEX(raidPtr->mutex);

		/* get the next item, if any, from the queue */
		if ((bp = bufq_get(rs->buf_queue)) == NULL) {
			/* nothing more to do */
			/* (mutex was released above, so just return) */
			return;
		}

		/* Ok, for the bp we have here, bp->b_blkno is relative to the
		 * partition.. Need to make it absolute to the underlying
		 * device.. */

		blocknum = bp->b_blkno;
		if (DISKPART(bp->b_dev) != RAW_PART) {
			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
			blocknum += pp->p_offset;
		}

		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
			    (int) blocknum));

		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));

		/* *THIS* is where we adjust what block we're going to...
		 * but DO NOT TOUCH bp->b_blkno!!! */
		raid_addr = blocknum;

		/* pb is 1 if the request ends in a partial sector. */
		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
		sum = raid_addr + num_blocks + pb;
		/* NOTE(review): the "1 ||" forces this branch regardless of
		 * rf_debugKernelAccess -- looks like a leftover debug
		 * override; confirm before removing. */
		if (1 || rf_debugKernelAccess) {
			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
				    (int) raid_addr, (int) sum, (int) num_blocks,
				    (int) pb, (int) bp->b_resid));
		}
		/* Reject requests that run past the end of the set; the
		 * (sum < ...) comparisons also catch arithmetic
		 * wrap-around. */
		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
		    || (sum < num_blocks) || (sum < pb)) {
			bp->b_error = ENOSPC;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			RF_LOCK_MUTEX(raidPtr->mutex);
			continue;
		}
		/*
		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
		 */

		/* Reject transfers that are not a whole number of
		 * sectors. */
		if (bp->b_bcount & raidPtr->sectorMask) {
			bp->b_error = EINVAL;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			RF_LOCK_MUTEX(raidPtr->mutex);
			continue;

		}
		db1_printf(("Calling DoAccess..\n"));


		/* Consume one opening for this I/O. */
		RF_LOCK_MUTEX(raidPtr->mutex);
		raidPtr->openings--;
		RF_UNLOCK_MUTEX(raidPtr->mutex);

		/*
		 * Everything is async.
		 */
		do_async = 1;

		disk_busy(&rs->sc_dkdev);

		/* XXX we're still at splbio() here... do we *really*
		   need to be? */

		/* don't ever condition on bp->b_flags & B_WRITE.
		 * always condition on B_READ instead */

		rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
				 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
				 do_async, raid_addr, num_blocks,
				 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);

		if (rc) {
			/* rf_DoAccess() failed outright: complete the buffer
			 * with the error.  Note the opening consumed above is
			 * not restored within this function. */
			bp->b_error = rc;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			/* continue loop */
		}

		RF_LOCK_MUTEX(raidPtr->mutex);
	}
	RF_UNLOCK_MUTEX(raidPtr->mutex);
}
2061
2062
2063
2064
2065 /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
2066
int
rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
{
	int     op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
	struct buf *bp;

	/* Remember which queue this request came from so the completion
	 * callback (KernelWakeupFunc) can find it again. */
	req->queue = queue;
	bp = req->bp;

	switch (req->type) {
	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
		/* XXX need to do something extra here.. */
		/* I'm leaving this in, as I've never actually seen it used,
		 * and I'd like folks to report it... GO */
		/* NOTE(review): the doubled parens look like a leftover from
		 * a db1_printf() conversion; harmless but odd. */
		printf(("WAKEUP CALLED\n"));
		queue->numOutstanding++;

		bp->b_flags = 0;
		bp->b_private = req;

		/* Complete the NOP immediately through the normal I/O
		 * completion path. */
		KernelWakeupFunc(bp);
		break;

	case RF_IO_TYPE_READ:
	case RF_IO_TYPE_WRITE:
#if RF_ACC_TRACE > 0
		if (req->tracerec) {
			RF_ETIMER_START(req->tracerec->timer);
		}
#endif
		/* Set bp up to describe this request to the underlying
		 * component device; KernelWakeupFunc is the iodone hook. */
		InitBP(bp, queue->rf_cinfo->ci_vp,
		       op, queue->rf_cinfo->ci_dev,
		       req->sectorOffset, req->numSector,
		       req->buf, KernelWakeupFunc, (void *) req,
		       queue->raidPtr->logBytesPerSector, req->b_proc);

		if (rf_debugKernelAccess) {
			db1_printf(("dispatch: bp->b_blkno = %ld\n",
				    (long) bp->b_blkno));
		}
		queue->numOutstanding++;
		queue->last_deq_sector = req->sectorOffset;
		/* acc wouldn't have been let in if there were any pending
		 * reqs at any other priority */
		queue->curPriority = req->priority;

		db1_printf(("Going for %c to unit %d col %d\n",
			    req->type, queue->raidPtr->raidid,
			    queue->col));
		db1_printf(("sector %d count %d (%d bytes) %d\n",
			    (int) req->sectorOffset, (int) req->numSector,
			    (int) (req->numSector <<
				   queue->raidPtr->logBytesPerSector),
			    (int) queue->raidPtr->logBytesPerSector));

		/*
		 * XXX: drop lock here since this can block at
		 * least with backing SCSI devices.  Retake it
		 * to minimize fuss with calling interfaces.
		 */

		RF_UNLOCK_QUEUE_MUTEX(queue, "unusedparam");
		bdev_strategy(bp);
		RF_LOCK_QUEUE_MUTEX(queue, "unusedparam");
		break;

	default:
		panic("bad req->type in rf_DispatchKernelIO");
	}
	db1_printf(("Exiting from DispatchKernelIO\n"));

	return (0);
}
/*
 * Completion callback for component I/O issued from
 * rf_DispatchKernelIO() (installed as bp->b_iodone by InitBP()).
 * Records any I/O error in the request (possibly marking the component
 * as failed), then places the request on the raid set's iodone queue
 * and wakes the raidio thread to finish processing it.
 */
static void
KernelWakeupFunc(struct buf *bp)
{
	RF_DiskQueueData_t *req = NULL;
	RF_DiskQueue_t *queue;
	int s;

	s = splbio();
	db1_printf(("recovering the request queue:\n"));

	/* InitBP() stashed the originating request in b_private. */
	req = bp->b_private;

	queue = (RF_DiskQueue_t *) req->queue;

#if RF_ACC_TRACE > 0
	/* Close out the disk-wait timer started in rf_DispatchKernelIO(). */
	if (req->tracerec) {
		RF_ETIMER_STOP(req->tracerec->timer);
		RF_ETIMER_EVAL(req->tracerec->timer);
		RF_LOCK_MUTEX(rf_tracing_mutex);
		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->num_phys_ios++;
		RF_UNLOCK_MUTEX(rf_tracing_mutex);
	}
#endif

	/* XXX Ok, let's get aggressive... If b_error is set, let's go
	 * ballistic, and mark the component as hosed... */

	if (bp->b_error != 0) {
		/* Mark the disk as dead */
		/* but only mark it once... */
		/* and only if it wouldn't leave this RAID set
		   completely broken */
		if (((queue->raidPtr->Disks[queue->col].status ==
		    rf_ds_optimal) ||
		    (queue->raidPtr->Disks[queue->col].status ==
		    rf_ds_used_spare)) &&
		    (queue->raidPtr->numFailures <
		    queue->raidPtr->Layout.map->faultsTolerated)) {
			printf("raid%d: IO Error. Marking %s as failed.\n",
			    queue->raidPtr->raidid,
			    queue->raidPtr->Disks[queue->col].devname);
			queue->raidPtr->Disks[queue->col].status =
			    rf_ds_failed;
			queue->raidPtr->status = rf_rs_degraded;
			queue->raidPtr->numFailures++;
			queue->raidPtr->numNewFailures++;
		} else {	/* Disk is already dead... */
			/* printf("Disk already marked as dead!\n"); */
		}

	}

	/* Fill in the error value */

	req->error = bp->b_error;

	simple_lock(&queue->raidPtr->iodone_lock);

	/* Drop this one on the "finished" queue... */
	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);

	/* Let the raidio thread know there is work to be done. */
	wakeup(&(queue->raidPtr->iodone));

	simple_unlock(&queue->raidPtr->iodone_lock);

	splx(s);
}
2212
2213
2214
2215 /*
2216 * initialize a buf structure for doing an I/O in the kernel.
2217 */
2218 static void
2219 InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
2220 RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
2221 void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
2222 struct proc *b_proc)
2223 {
2224 /* bp->b_flags = B_PHYS | rw_flag; */
2225 bp->b_flags = rw_flag; /* XXX need B_PHYS here too??? */
2226 bp->b_oflags = 0;
2227 bp->b_cflags = 0;
2228 bp->b_bcount = numSect << logBytesPerSector;
2229 bp->b_bufsize = bp->b_bcount;
2230 bp->b_error = 0;
2231 bp->b_dev = dev;
2232 bp->b_data = bf;
2233 bp->b_blkno = startSect;
2234 bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
2235 if (bp->b_bcount == 0) {
2236 panic("bp->b_bcount is zero in InitBP!!");
2237 }
2238 bp->b_proc = b_proc;
2239 bp->b_iodone = cbFunc;
2240 bp->b_private = cbArg;
2241 }
2242
2243 static void
2244 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
2245 struct disklabel *lp)
2246 {
2247 memset(lp, 0, sizeof(*lp));
2248
2249 /* fabricate a label... */
2250 lp->d_secperunit = raidPtr->totalSectors;
2251 lp->d_secsize = raidPtr->bytesPerSector;
2252 lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
2253 lp->d_ntracks = 4 * raidPtr->numCol;
2254 lp->d_ncylinders = raidPtr->totalSectors /
2255 (lp->d_nsectors * lp->d_ntracks);
2256 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
2257
2258 strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
2259 lp->d_type = DTYPE_RAID;
2260 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
2261 lp->d_rpm = 3600;
2262 lp->d_interleave = 1;
2263 lp->d_flags = 0;
2264
2265 lp->d_partitions[RAW_PART].p_offset = 0;
2266 lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
2267 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
2268 lp->d_npartitions = RAW_PART + 1;
2269
2270 lp->d_magic = DISKMAGIC;
2271 lp->d_magic2 = DISKMAGIC;
2272 lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
2273
2274 }
2275 /*
2276 * Read the disklabel from the raid device. If one is not present, fake one
2277 * up.
2278 */
2279 static void
2280 raidgetdisklabel(dev_t dev)
2281 {
2282 int unit = raidunit(dev);
2283 struct raid_softc *rs = &raid_softc[unit];
2284 const char *errstring;
2285 struct disklabel *lp = rs->sc_dkdev.dk_label;
2286 struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
2287 RF_Raid_t *raidPtr;
2288
2289 db1_printf(("Getting the disklabel...\n"));
2290
2291 memset(clp, 0, sizeof(*clp));
2292
2293 raidPtr = raidPtrs[unit];
2294
2295 raidgetdefaultlabel(raidPtr, rs, lp);
2296
2297 /*
2298 * Call the generic disklabel extraction routine.
2299 */
2300 errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
2301 rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
2302 if (errstring)
2303 raidmakedisklabel(rs);
2304 else {
2305 int i;
2306 struct partition *pp;
2307
2308 /*
2309 * Sanity check whether the found disklabel is valid.
2310 *
2311 * This is necessary since total size of the raid device
2312 * may vary when an interleave is changed even though exactly
2313 * same components are used, and old disklabel may used
2314 * if that is found.
2315 */
2316 if (lp->d_secperunit != rs->sc_size)
2317 printf("raid%d: WARNING: %s: "
2318 "total sector size in disklabel (%" PRIu32 ") != "
2319 "the size of raid (%" PRIu64 ")\n", unit, rs->sc_xname,
2320 lp->d_secperunit, rs->sc_size);
2321 for (i = 0; i < lp->d_npartitions; i++) {
2322 pp = &lp->d_partitions[i];
2323 if (pp->p_offset + pp->p_size > rs->sc_size)
2324 printf("raid%d: WARNING: %s: end of partition `%c' "
2325 "exceeds the size of raid (%" PRIu64 ")\n",
2326 unit, rs->sc_xname, 'a' + i, rs->sc_size);
2327 }
2328 }
2329
2330 }
2331 /*
2332 * Take care of things one might want to take care of in the event
2333 * that a disklabel isn't present.
2334 */
2335 static void
2336 raidmakedisklabel(struct raid_softc *rs)
2337 {
2338 struct disklabel *lp = rs->sc_dkdev.dk_label;
2339 db1_printf(("Making a label..\n"));
2340
2341 /*
2342 * For historical reasons, if there's no disklabel present
2343 * the raw partition must be marked FS_BSDFFS.
2344 */
2345
2346 lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
2347
2348 strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
2349
2350 lp->d_checksum = dkcksum(lp);
2351 }
2352 /*
2353 * Wait interruptibly for an exclusive lock.
2354 *
2355 * XXX
2356 * Several drivers do this; it should be abstracted and made MP-safe.
2357 * (Hmm... where have we seen this warning before :-> GO )
2358 */
2359 static int
2360 raidlock(struct raid_softc *rs)
2361 {
2362 int error;
2363
2364 while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2365 rs->sc_flags |= RAIDF_WANTED;
2366 if ((error =
2367 tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
2368 return (error);
2369 }
2370 rs->sc_flags |= RAIDF_LOCKED;
2371 return (0);
2372 }
2373 /*
2374 * Unlock and wake up any waiters.
2375 */
2376 static void
2377 raidunlock(struct raid_softc *rs)
2378 {
2379
2380 rs->sc_flags &= ~RAIDF_LOCKED;
2381 if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2382 rs->sc_flags &= ~RAIDF_WANTED;
2383 wakeup(rs);
2384 }
2385 }
2386
2387
2388 #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2389 #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
2390
2391 int
2392 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
2393 {
2394 RF_ComponentLabel_t clabel;
2395 raidread_component_label(dev, b_vp, &clabel);
2396 clabel.mod_counter = mod_counter;
2397 clabel.clean = RF_RAID_CLEAN;
2398 raidwrite_component_label(dev, b_vp, &clabel);
2399 return(0);
2400 }
2401
2402
2403 int
2404 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
2405 {
2406 RF_ComponentLabel_t clabel;
2407 raidread_component_label(dev, b_vp, &clabel);
2408 clabel.mod_counter = mod_counter;
2409 clabel.clean = RF_RAID_DIRTY;
2410 raidwrite_component_label(dev, b_vp, &clabel);
2411 return(0);
2412 }
2413
2414 /* ARGSUSED */
2415 int
2416 raidread_component_label(dev_t dev, struct vnode *b_vp,
2417 RF_ComponentLabel_t *clabel)
2418 {
2419 struct buf *bp;
2420 const struct bdevsw *bdev;
2421 int error;
2422
2423 /* XXX should probably ensure that we don't try to do this if
2424 someone has changed rf_protected_sectors. */
2425
2426 if (b_vp == NULL) {
2427 /* For whatever reason, this component is not valid.
2428 Don't try to read a component label from it. */
2429 return(EINVAL);
2430 }
2431
2432 /* get a block of the appropriate size... */
2433 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2434 bp->b_dev = dev;
2435
2436 /* get our ducks in a row for the read */
2437 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2438 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2439 bp->b_flags |= B_READ;
2440 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2441
2442 bdev = bdevsw_lookup(bp->b_dev);
2443 if (bdev == NULL)
2444 return (ENXIO);
2445 (*bdev->d_strategy)(bp);
2446
2447 error = biowait(bp);
2448
2449 if (!error) {
2450 memcpy(clabel, bp->b_data,
2451 sizeof(RF_ComponentLabel_t));
2452 }
2453
2454 brelse(bp, 0);
2455 return(error);
2456 }
2457 /* ARGSUSED */
2458 int
2459 raidwrite_component_label(dev_t dev, struct vnode *b_vp,
2460 RF_ComponentLabel_t *clabel)
2461 {
2462 struct buf *bp;
2463 const struct bdevsw *bdev;
2464 int error;
2465
2466 /* get a block of the appropriate size... */
2467 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2468 bp->b_dev = dev;
2469
2470 /* get our ducks in a row for the write */
2471 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2472 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2473 bp->b_flags |= B_WRITE;
2474 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2475
2476 memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
2477
2478 memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
2479
2480 bdev = bdevsw_lookup(bp->b_dev);
2481 if (bdev == NULL)
2482 return (ENXIO);
2483 (*bdev->d_strategy)(bp);
2484 error = biowait(bp);
2485 brelse(bp, 0);
2486 if (error) {
2487 #if 1
2488 printf("Failed to write RAID component info!\n");
2489 #endif
2490 }
2491
2492 return(error);
2493 }
2494
/*
 * Bump the set's mod_counter and mark every live component (and every
 * in-use spare) dirty on disk, so a later inspection of the labels can
 * detect an unclean shutdown.
 */
void
rf_markalldirty(RF_Raid_t *raidPtr)
{
	RF_ComponentLabel_t clabel;
	int sparecol;
	int c;
	int j;
	int scol = -1;

	raidPtr->mod_counter++;
	for (c = 0; c < raidPtr->numCol; c++) {
		/* we don't want to touch (at all) a disk that has
		   failed */
		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
			/* NOTE(review): the read result is unchecked; if
			   the read fails, the status test below examines
			   a stale/uninitialized clabel -- confirm. */
			raidread_component_label(
			    raidPtr->Disks[c].dev,
			    raidPtr->raid_cinfo[c].ci_vp,
			    &clabel);
			if (clabel.status == rf_ds_spared) {
				/* XXX do something special...
				   but whatever you do, don't
				   try to access it!! */
			} else {
				raidmarkdirty(
				    raidPtr->Disks[c].dev,
				    raidPtr->raid_cinfo[c].ci_vp,
				    raidPtr->mod_counter);
			}
		}
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			/* Find which column this spare stands in for. */
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}

			raidread_component_label(
			    raidPtr->Disks[sparecol].dev,
			    raidPtr->raid_cinfo[sparecol].ci_vp,
			    &clabel);
			/* make sure status is noted */

			raid_init_component_label(raidPtr, &clabel);

			clabel.row = 0;
			/* NOTE(review): scol keeps its previous value
			   (initially -1) if no column claims this spare;
			   presumably impossible for rf_ds_used_spare --
			   confirm. */
			clabel.column = scol;
			/* Note: we *don't* change status from rf_ds_used_spare
			   to rf_ds_optimal */
			/* clabel.status = rf_ds_optimal; */

			raidmarkdirty(raidPtr->Disks[sparecol].dev,
			    raidPtr->raid_cinfo[sparecol].ci_vp,
			    raidPtr->mod_counter);
		}
	}
}
2565
2566
/*
 * Rewrite the component labels of every healthy component (and every
 * in-use spare) with the current mod_counter and status, and -- when
 * this is the final update (RF_FINAL_COMPONENT_UPDATE) and parity is
 * known good -- mark each component clean as well.
 */
void
rf_update_component_labels(RF_Raid_t *raidPtr, int final)
{
	RF_ComponentLabel_t clabel;
	int sparecol;
	int c;
	int j;
	int scol;

	scol = -1;

	/* XXX should do extra checks to make sure things really are clean,
	   rather than blindly setting the clean bit... */

	raidPtr->mod_counter++;

	for (c = 0; c < raidPtr->numCol; c++) {
		if (raidPtr->Disks[c].status == rf_ds_optimal) {
			/* NOTE(review): read result unchecked; a failed
			   read means the fields below are written over a
			   stale/uninitialized label -- confirm. */
			raidread_component_label(
			    raidPtr->Disks[c].dev,
			    raidPtr->raid_cinfo[c].ci_vp,
			    &clabel);
			/* make sure status is noted */
			clabel.status = rf_ds_optimal;

			/* bump the counter */
			clabel.mod_counter = raidPtr->mod_counter;

			/* note what unit we are configured as */
			clabel.last_unit = raidPtr->raidid;

			raidwrite_component_label(
			    raidPtr->Disks[c].dev,
			    raidPtr->raid_cinfo[c].ci_vp,
			    &clabel);
			if (final == RF_FINAL_COMPONENT_UPDATE) {
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean(
					    raidPtr->Disks[c].dev,
					    raidPtr->raid_cinfo[c].ci_vp,
					    raidPtr->mod_counter);
				}
			}
		}
		/* else we don't touch it.. */
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		/* Need to ensure that the reconstruct actually completed! */
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			/* Find which column this spare stands in for. */
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}

			/* XXX shouldn't *really* need this... */
			raidread_component_label(
			    raidPtr->Disks[sparecol].dev,
			    raidPtr->raid_cinfo[sparecol].ci_vp,
			    &clabel);
			/* make sure status is noted */

			raid_init_component_label(raidPtr, &clabel);

			clabel.mod_counter = raidPtr->mod_counter;
			/* NOTE(review): scol stays -1 if no column claims
			   this spare -- presumably impossible for
			   rf_ds_used_spare; confirm. */
			clabel.column = scol;
			clabel.status = rf_ds_optimal;
			clabel.last_unit = raidPtr->raidid;

			raidwrite_component_label(
			    raidPtr->Disks[sparecol].dev,
			    raidPtr->raid_cinfo[sparecol].ci_vp,
			    &clabel);
			if (final == RF_FINAL_COMPONENT_UPDATE) {
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean( raidPtr->Disks[sparecol].dev,
					    raidPtr->raid_cinfo[sparecol].ci_vp,
					    raidPtr->mod_counter);
				}
			}
		}
	}
}
2662
2663 void
2664 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
2665 {
2666
2667 if (vp != NULL) {
2668 if (auto_configured == 1) {
2669 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2670 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2671 vput(vp);
2672
2673 } else {
2674 (void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred);
2675 }
2676 }
2677 }
2678
2679
2680 void
2681 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
2682 {
2683 int r,c;
2684 struct vnode *vp;
2685 int acd;
2686
2687
2688 /* We take this opportunity to close the vnodes like we should.. */
2689
2690 for (c = 0; c < raidPtr->numCol; c++) {
2691 vp = raidPtr->raid_cinfo[c].ci_vp;
2692 acd = raidPtr->Disks[c].auto_configured;
2693 rf_close_component(raidPtr, vp, acd);
2694 raidPtr->raid_cinfo[c].ci_vp = NULL;
2695 raidPtr->Disks[c].auto_configured = 0;
2696 }
2697
2698 for (r = 0; r < raidPtr->numSpare; r++) {
2699 vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
2700 acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
2701 rf_close_component(raidPtr, vp, acd);
2702 raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
2703 raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
2704 }
2705 }
2706
2707
2708 void
2709 rf_ReconThread(struct rf_recon_req *req)
2710 {
2711 int s;
2712 RF_Raid_t *raidPtr;
2713
2714 s = splbio();
2715 raidPtr = (RF_Raid_t *) req->raidPtr;
2716 raidPtr->recon_in_progress = 1;
2717
2718 rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
2719 ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2720
2721 RF_Free(req, sizeof(*req));
2722
2723 raidPtr->recon_in_progress = 0;
2724 splx(s);
2725
2726 /* That's all... */
2727 kthread_exit(0); /* does not return */
2728 }
2729
2730 void
2731 rf_RewriteParityThread(RF_Raid_t *raidPtr)
2732 {
2733 int retcode;
2734 int s;
2735
2736 raidPtr->parity_rewrite_stripes_done = 0;
2737 raidPtr->parity_rewrite_in_progress = 1;
2738 s = splbio();
2739 retcode = rf_RewriteParity(raidPtr);
2740 splx(s);
2741 if (retcode) {
2742 printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
2743 } else {
2744 /* set the clean bit! If we shutdown correctly,
2745 the clean bit on each component label will get
2746 set */
2747 raidPtr->parity_good = RF_RAID_CLEAN;
2748 }
2749 raidPtr->parity_rewrite_in_progress = 0;
2750
2751 /* Anyone waiting for us to stop? If so, inform them... */
2752 if (raidPtr->waitShutdown) {
2753 wakeup(&raidPtr->parity_rewrite_in_progress);
2754 }
2755
2756 /* That's all... */
2757 kthread_exit(0); /* does not return */
2758 }
2759
2760
2761 void
2762 rf_CopybackThread(RF_Raid_t *raidPtr)
2763 {
2764 int s;
2765
2766 raidPtr->copyback_in_progress = 1;
2767 s = splbio();
2768 rf_CopybackReconstructedData(raidPtr);
2769 splx(s);
2770 raidPtr->copyback_in_progress = 0;
2771
2772 /* That's all... */
2773 kthread_exit(0); /* does not return */
2774 }
2775
2776
2777 void
2778 rf_ReconstructInPlaceThread(struct rf_recon_req *req)
2779 {
2780 int s;
2781 RF_Raid_t *raidPtr;
2782
2783 s = splbio();
2784 raidPtr = req->raidPtr;
2785 raidPtr->recon_in_progress = 1;
2786 rf_ReconstructInPlace(raidPtr, req->col);
2787 RF_Free(req, sizeof(*req));
2788 raidPtr->recon_in_progress = 0;
2789 splx(s);
2790
2791 /* That's all... */
2792 kthread_exit(0); /* does not return */
2793 }
2794
/*
 * Probe one candidate component: read its component label and, if the
 * label is plausible and fits within the partition, prepend a new
 * RF_AutoConfig_t for it to ac_list and return the new list head.
 * Otherwise the vnode is closed and released.  On out-of-memory the
 * entire list is freed and NULL is returned.
 */
static RF_AutoConfig_t *
rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
    const char *cname, RF_SectorCount_t size)
{
	int good_one = 0;
	RF_ComponentLabel_t *clabel;
	RF_AutoConfig_t *ac;

	clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_NOWAIT);
	if (clabel == NULL) {
oomem:
		/* Free the whole accumulated list; autoconfig cannot
		   proceed with a partial list. */
		while(ac_list) {
			ac = ac_list;
			if (ac->clabel)
				free(ac->clabel, M_RAIDFRAME);
			ac_list = ac_list->next;
			free(ac, M_RAIDFRAME);
		}
		printf("RAID auto config: out of memory!\n");
		return NULL; /* XXX probably should panic? */
	}

	if (!raidread_component_label(dev, vp, clabel)) {
		/* Got the label.  Does it look reasonable? */
		if (rf_reasonable_label(clabel) &&
		    (clabel->partitionSize <= size)) {
#ifdef DEBUG
			printf("Component on: %s: %llu\n",
			    cname, (unsigned long long)size);
			rf_print_component_label(clabel);
#endif
			/* if it's reasonable, add it, else ignore it. */
			ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
			    M_NOWAIT);
			if (ac == NULL) {
				free(clabel, M_RAIDFRAME);
				goto oomem;
			}
			strlcpy(ac->devname, cname, sizeof(ac->devname));
			ac->dev = dev;
			/* The vnode stays open; ownership moves to the
			   RF_AutoConfig_t on this success path. */
			ac->vp = vp;
			ac->clabel = clabel;
			ac->next = ac_list;
			ac_list = ac;
			good_one = 1;
		}
	}
	if (!good_one) {
		/* cleanup */
		free(clabel, M_RAIDFRAME);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
		vput(vp);
	}
	return ac_list;
}
2851
/*
 * Scan every disk-class device in the system for RAIDframe components
 * and return a list of RF_AutoConfig_t entries for those found.
 * Wedges (dk) are accepted when their partition type is RAIDframe;
 * other disks are accepted per-partition when marked FS_RAID in the
 * disklabel.
 */
RF_AutoConfig_t *
rf_find_raid_components(void)
{
	struct vnode *vp;
	struct disklabel label;
	device_t dv;
	dev_t dev;
	int bmajor, bminor, wedge;
	int error;
	int i;
	RF_AutoConfig_t *ac_list;


	/* initialize the AutoConfig list */
	ac_list = NULL;

	/* we begin by trolling through *all* the devices on the system */

	for (dv = alldevs.tqh_first; dv != NULL;
	    dv = dv->dv_list.tqe_next) {

		/* we are only interested in disks... */
		if (device_class(dv) != DV_DISK)
			continue;

		/* we don't care about floppies... */
		if (device_is_a(dv, "fd")) {
			continue;
		}

		/* we don't care about CD's... */
		if (device_is_a(dv, "cd")) {
			continue;
		}

		/* we don't care about md's... */
		if (device_is_a(dv, "md")) {
			continue;
		}

		/* hdfd is the Atari/Hades floppy driver */
		if (device_is_a(dv, "hdfd")) {
			continue;
		}

		/* fdisa is the Atari/Milan floppy driver */
		if (device_is_a(dv, "fdisa")) {
			continue;
		}

		/* need to find the device_name_to_block_device_major stuff */
		bmajor = devsw_name2blk(device_xname(dv), NULL, 0);

		/* get a vnode for the raw partition of this disk */

		/* Wedges are addressed by unit alone; regular disks by
		   unit + RAW_PART. */
		wedge = device_is_a(dv, "dk");
		bminor = minor(device_unit(dv));
		dev = wedge ? makedev(bmajor, bminor) :
		    MAKEDISKDEV(bmajor, bminor, RAW_PART);
		if (bdevvp(dev, &vp))
			panic("RAID can't alloc vnode");

		error = VOP_OPEN(vp, FREAD, NOCRED);

		if (error) {
			/* "Who cares."  Continue looking
			   for something that exists*/
			vput(vp);
			continue;
		}

		if (wedge) {
			/* Wedge path: accept the whole wedge if its
			   partition type is RAIDframe. */
			struct dkwedge_info dkw;
			error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
			    NOCRED);
			if (error) {
				printf("RAIDframe: can't get wedge info for "
				    "dev %s (%d)\n", device_xname(dv), error);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
				vput(vp);
				continue;
			}

			if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0) {
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
				vput(vp);
				continue;
			}

			/* rf_get_component takes ownership of vp. */
			ac_list = rf_get_component(ac_list, dev, vp,
			    device_xname(dv), dkw.dkw_size);
			continue;
		}

		/* Ok, the disk exists.  Go get the disklabel. */
		error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED);
		if (error) {
			/*
			 * XXX can't happen - open() would
			 * have errored out (or faked up one)
			 */
			if (error != ENOTTY)
				printf("RAIDframe: can't get label for dev "
				    "%s (%d)\n", device_xname(dv), error);
		}

		/* don't need this any more.  We'll allocate it again
		   a little later if we really do... */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
		vput(vp);

		if (error)
			continue;

		for (i = 0; i < label.d_npartitions; i++) {
			char cname[sizeof(ac_list->devname)];

			/* We only support partitions marked as RAID */
			if (label.d_partitions[i].p_fstype != FS_RAID)
				continue;

			/* Re-open just this partition for probing; on
			   success rf_get_component owns the vnode. */
			dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
			if (bdevvp(dev, &vp))
				panic("RAID can't alloc vnode");

			error = VOP_OPEN(vp, FREAD, NOCRED);
			if (error) {
				/* Whatever... */
				vput(vp);
				continue;
			}
			snprintf(cname, sizeof(cname), "%s%c",
			    device_xname(dv), 'a' + i);
			ac_list = rf_get_component(ac_list, dev, vp, cname,
			    label.d_partitions[i].p_size);
		}
	}
	return ac_list;
}
2994
2995
2996 static int
2997 rf_reasonable_label(RF_ComponentLabel_t *clabel)
2998 {
2999
3000 if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
3001 (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
3002 ((clabel->clean == RF_RAID_CLEAN) ||
3003 (clabel->clean == RF_RAID_DIRTY)) &&
3004 clabel->row >=0 &&
3005 clabel->column >= 0 &&
3006 clabel->num_rows > 0 &&
3007 clabel->num_columns > 0 &&
3008 clabel->row < clabel->num_rows &&
3009 clabel->column < clabel->num_columns &&
3010 clabel->blockSize > 0 &&
3011 clabel->numBlocks > 0) {
3012 /* label looks reasonable enough... */
3013 return(1);
3014 }
3015 return(0);
3016 }
3017
3018
3019 #ifdef DEBUG
/* Dump the contents of a component label to the console (DEBUG only). */
void
rf_print_component_label(RF_ComponentLabel_t *clabel)
{
	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
	    clabel->row, clabel->column,
	    clabel->num_rows, clabel->num_columns);
	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
	    clabel->version, clabel->serial_number,
	    clabel->mod_counter);
	printf("   Clean: %s Status: %d\n",
	    clabel->clean ? "Yes" : "No", clabel->status );
	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
	    clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
	    (char) clabel->parityConfig, clabel->blockSize,
	    clabel->numBlocks);
	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
	printf("   Contains root partition: %s\n",
	    clabel->root_partition ? "Yes" : "No" );
	printf("   Last configured as: raid%d\n", clabel->last_unit );
#if 0
	printf("   Config order: %d\n", clabel->config_order);
#endif

}
3045 #endif
3046
3047 RF_ConfigSet_t *
3048 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
3049 {
3050 RF_AutoConfig_t *ac;
3051 RF_ConfigSet_t *config_sets;
3052 RF_ConfigSet_t *cset;
3053 RF_AutoConfig_t *ac_next;
3054
3055
3056 config_sets = NULL;
3057
3058 /* Go through the AutoConfig list, and figure out which components
3059 belong to what sets. */
3060 ac = ac_list;
3061 while(ac!=NULL) {
3062 /* we're going to putz with ac->next, so save it here
3063 for use at the end of the loop */
3064 ac_next = ac->next;
3065
3066 if (config_sets == NULL) {
3067 /* will need at least this one... */
3068 config_sets = (RF_ConfigSet_t *)
3069 malloc(sizeof(RF_ConfigSet_t),
3070 M_RAIDFRAME, M_NOWAIT);
3071 if (config_sets == NULL) {
3072 panic("rf_create_auto_sets: No memory!");
3073 }
3074 /* this one is easy :) */
3075 config_sets->ac = ac;
3076 config_sets->next = NULL;
3077 config_sets->rootable = 0;
3078 ac->next = NULL;
3079 } else {
3080 /* which set does this component fit into? */
3081 cset = config_sets;
3082 while(cset!=NULL) {
3083 if (rf_does_it_fit(cset, ac)) {
3084 /* looks like it matches... */
3085 ac->next = cset->ac;
3086 cset->ac = ac;
3087 break;
3088 }
3089 cset = cset->next;
3090 }
3091 if (cset==NULL) {
3092 /* didn't find a match above... new set..*/
3093 cset = (RF_ConfigSet_t *)
3094 malloc(sizeof(RF_ConfigSet_t),
3095 M_RAIDFRAME, M_NOWAIT);
3096 if (cset == NULL) {
3097 panic("rf_create_auto_sets: No memory!");
3098 }
3099 cset->ac = ac;
3100 ac->next = NULL;
3101 cset->next = config_sets;
3102 cset->rootable = 0;
3103 config_sets = cset;
3104 }
3105 }
3106 ac = ac_next;
3107 }
3108
3109
3110 return(config_sets);
3111 }
3112
3113 static int
3114 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
3115 {
3116 RF_ComponentLabel_t *clabel1, *clabel2;
3117
3118 /* If this one matches the *first* one in the set, that's good
3119 enough, since the other members of the set would have been
3120 through here too... */
3121 /* note that we are not checking partitionSize here..
3122
3123 Note that we are also not checking the mod_counters here.
3124 If everything else matches execpt the mod_counter, that's
3125 good enough for this test. We will deal with the mod_counters
3126 a little later in the autoconfiguration process.
3127
3128 (clabel1->mod_counter == clabel2->mod_counter) &&
3129
3130 The reason we don't check for this is that failed disks
3131 will have lower modification counts. If those disks are
3132 not added to the set they used to belong to, then they will
3133 form their own set, which may result in 2 different sets,
3134 for example, competing to be configured at raid0, and
3135 perhaps competing to be the root filesystem set. If the
3136 wrong ones get configured, or both attempt to become /,
3137 weird behaviour and or serious lossage will occur. Thus we
3138 need to bring them into the fold here, and kick them out at
3139 a later point.
3140
3141 */
3142
3143 clabel1 = cset->ac->clabel;
3144 clabel2 = ac->clabel;
3145 if ((clabel1->version == clabel2->version) &&
3146 (clabel1->serial_number == clabel2->serial_number) &&
3147 (clabel1->num_rows == clabel2->num_rows) &&
3148 (clabel1->num_columns == clabel2->num_columns) &&
3149 (clabel1->sectPerSU == clabel2->sectPerSU) &&
3150 (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
3151 (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
3152 (clabel1->parityConfig == clabel2->parityConfig) &&
3153 (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
3154 (clabel1->blockSize == clabel2->blockSize) &&
3155 (clabel1->numBlocks == clabel2->numBlocks) &&
3156 (clabel1->autoconfigure == clabel2->autoconfigure) &&
3157 (clabel1->root_partition == clabel2->root_partition) &&
3158 (clabel1->last_unit == clabel2->last_unit) &&
3159 (clabel1->config_order == clabel2->config_order)) {
3160 /* if it get's here, it almost *has* to be a match */
3161 } else {
3162 /* it's not consistent with somebody in the set..
3163 punt */
3164 return(0);
3165 }
3166 /* all was fine.. it must fit... */
3167 return(1);
3168 }
3169
/*
 * Decide whether a configuration set has enough live components to be
 * configured.  A component only counts when its mod_counter matches
 * the highest mod_counter in the set.  Returns 1 if configurable,
 * 0 otherwise.
 */
int
rf_have_enough_components(RF_ConfigSet_t *cset)
{
	RF_AutoConfig_t *ac;
	RF_AutoConfig_t *auto_config;
	RF_ComponentLabel_t *clabel;
	int c;
	int num_cols;
	int num_missing;
	int mod_counter;
	int mod_counter_found;
	int even_pair_failed;
	char parity_type;


	/* check to see that we have enough 'live' components
	   of this set.  If so, we can configure it if necessary */

	num_cols = cset->ac->clabel->num_columns;
	parity_type = cset->ac->clabel->parityConfig;

	/* XXX Check for duplicate components!?!?!? */

	/* Determine what the mod_counter is supposed to be for this set. */

	/* The "right" mod_counter is the maximum over the set; members
	   with a lower value are stale (failed) components. */
	mod_counter_found = 0;
	mod_counter = 0;
	ac = cset->ac;
	while(ac!=NULL) {
		if (mod_counter_found==0) {
			mod_counter = ac->clabel->mod_counter;
			mod_counter_found = 1;
		} else {
			if (ac->clabel->mod_counter > mod_counter) {
				mod_counter = ac->clabel->mod_counter;
			}
		}
		ac = ac->next;
	}

	num_missing = 0;
	auto_config = cset->ac;

	/* For each column, look for a component with a current
	   mod_counter.  RAID 1 gets special pair-wise accounting:
	   the set survives unless both members of an even/odd pair
	   are missing. */
	even_pair_failed = 0;
	for(c=0; c<num_cols; c++) {
		ac = auto_config;
		while(ac!=NULL) {
			if ((ac->clabel->column == c) &&
			    (ac->clabel->mod_counter == mod_counter)) {
				/* it's this one... */
#ifdef DEBUG
				printf("Found: %s at %d\n",
				    ac->devname,c);
#endif
				break;
			}
			ac=ac->next;
		}
		if (ac==NULL) {
			/* Didn't find one here! */
			/* special case for RAID 1, especially
			   where there are more than 2
			   components (where RAIDframe treats
			   things a little differently :( ) */
			if (parity_type == '1') {
				if (c%2 == 0) {	/* even component */
					even_pair_failed = 1;
				} else {	/* odd component.  If
						   we're failed, and
						   so is the even
						   component, it's
						   "Good Night, Charlie" */
					if (even_pair_failed == 1) {
						return(0);
					}
				}
			} else {
				/* normal accounting */
				num_missing++;
			}
		}
		if ((parity_type == '1') && (c%2 == 1)) {
			/* Just did an even component, and we didn't
			   bail.. reset the even_pair_failed flag,
			   and go on to the next component.... */
			even_pair_failed = 0;
		}
	}

	clabel = cset->ac->clabel;

	/* RAID 0 tolerates no missing components; RAID 4/5 tolerate
	   exactly one. */
	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
		/* XXX this needs to be made *much* more general */
		/* Too many failures */
		return(0);
	}
	/* otherwise, all is well, and we've got enough to take a kick
	   at autoconfiguring this set */
	return(1);
}
3272
3273 void
3274 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
3275 RF_Raid_t *raidPtr)
3276 {
3277 RF_ComponentLabel_t *clabel;
3278 int i;
3279
3280 clabel = ac->clabel;
3281
3282 /* 1. Fill in the common stuff */
3283 config->numRow = clabel->num_rows = 1;
3284 config->numCol = clabel->num_columns;
3285 config->numSpare = 0; /* XXX should this be set here? */
3286 config->sectPerSU = clabel->sectPerSU;
3287 config->SUsPerPU = clabel->SUsPerPU;
3288 config->SUsPerRU = clabel->SUsPerRU;
3289 config->parityConfig = clabel->parityConfig;
3290 /* XXX... */
3291 strcpy(config->diskQueueType,"fifo");
3292 config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3293 config->layoutSpecificSize = 0; /* XXX ?? */
3294
3295 while(ac!=NULL) {
3296 /* row/col values will be in range due to the checks
3297 in reasonable_label() */
3298 strcpy(config->devnames[0][ac->clabel->column],
3299 ac->devname);
3300 ac = ac->next;
3301 }
3302
3303 for(i=0;i<RF_MAXDBGV;i++) {
3304 config->debugVars[i][0] = 0;
3305 }
3306 }
3307
3308 int
3309 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
3310 {
3311 RF_ComponentLabel_t clabel;
3312 struct vnode *vp;
3313 dev_t dev;
3314 int column;
3315 int sparecol;
3316
3317 raidPtr->autoconfigure = new_value;
3318
3319 for(column=0; column<raidPtr->numCol; column++) {
3320 if (raidPtr->Disks[column].status == rf_ds_optimal) {
3321 dev = raidPtr->Disks[column].dev;
3322 vp = raidPtr->raid_cinfo[column].ci_vp;
3323 raidread_component_label(dev, vp, &clabel);
3324 clabel.autoconfigure = new_value;
3325 raidwrite_component_label(dev, vp, &clabel);
3326 }
3327 }
3328 for(column = 0; column < raidPtr->numSpare ; column++) {
3329 sparecol = raidPtr->numCol + column;
3330 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3331 dev = raidPtr->Disks[sparecol].dev;
3332 vp = raidPtr->raid_cinfo[sparecol].ci_vp;
3333 raidread_component_label(dev, vp, &clabel);
3334 clabel.autoconfigure = new_value;
3335 raidwrite_component_label(dev, vp, &clabel);
3336 }
3337 }
3338 return(new_value);
3339 }
3340
3341 int
3342 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
3343 {
3344 RF_ComponentLabel_t clabel;
3345 struct vnode *vp;
3346 dev_t dev;
3347 int column;
3348 int sparecol;
3349
3350 raidPtr->root_partition = new_value;
3351 for(column=0; column<raidPtr->numCol; column++) {
3352 if (raidPtr->Disks[column].status == rf_ds_optimal) {
3353 dev = raidPtr->Disks[column].dev;
3354 vp = raidPtr->raid_cinfo[column].ci_vp;
3355 raidread_component_label(dev, vp, &clabel);
3356 clabel.root_partition = new_value;
3357 raidwrite_component_label(dev, vp, &clabel);
3358 }
3359 }
3360 for(column = 0; column < raidPtr->numSpare ; column++) {
3361 sparecol = raidPtr->numCol + column;
3362 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3363 dev = raidPtr->Disks[sparecol].dev;
3364 vp = raidPtr->raid_cinfo[sparecol].ci_vp;
3365 raidread_component_label(dev, vp, &clabel);
3366 clabel.root_partition = new_value;
3367 raidwrite_component_label(dev, vp, &clabel);
3368 }
3369 }
3370 return(new_value);
3371 }
3372
3373 void
3374 rf_release_all_vps(RF_ConfigSet_t *cset)
3375 {
3376 RF_AutoConfig_t *ac;
3377
3378 ac = cset->ac;
3379 while(ac!=NULL) {
3380 /* Close the vp, and give it back */
3381 if (ac->vp) {
3382 vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3383 VOP_CLOSE(ac->vp, FREAD, NOCRED);
3384 vput(ac->vp);
3385 ac->vp = NULL;
3386 }
3387 ac = ac->next;
3388 }
3389 }
3390
3391
3392 void
3393 rf_cleanup_config_set(RF_ConfigSet_t *cset)
3394 {
3395 RF_AutoConfig_t *ac;
3396 RF_AutoConfig_t *next_ac;
3397
3398 ac = cset->ac;
3399 while(ac!=NULL) {
3400 next_ac = ac->next;
3401 /* nuke the label */
3402 free(ac->clabel, M_RAIDFRAME);
3403 /* cleanup the config structure */
3404 free(ac, M_RAIDFRAME);
3405 /* "next.." */
3406 ac = next_ac;
3407 }
3408 /* and, finally, nuke the config set */
3409 free(cset, M_RAIDFRAME);
3410 }
3411
3412
/*
 * raid_init_component_label: populate *clabel from the current in-core
 * state of raidPtr so the label can subsequently be written out to
 * each component of the set.
 */
void
raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
{
	/* current version number */
	clabel->version = RF_COMPONENT_LABEL_VERSION;
	clabel->serial_number = raidPtr->serial_number;
	clabel->mod_counter = raidPtr->mod_counter;
	clabel->num_rows = 1;
	clabel->num_columns = raidPtr->numCol;
	clabel->clean = RF_RAID_DIRTY; /* not clean */
	clabel->status = rf_ds_optimal; /* "It's good!" */

	/* stripe layout geometry */
	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;

	/* per-component size information */
	clabel->blockSize = raidPtr->bytesPerSector;
	clabel->numBlocks = raidPtr->sectorsPerDisk;

	/* XXX not portable */
	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
	clabel->maxOutstanding = raidPtr->maxOutstanding;
	clabel->autoconfigure = raidPtr->autoconfigure;
	clabel->root_partition = raidPtr->root_partition;
	clabel->last_unit = raidPtr->raidid;
	clabel->config_order = raidPtr->config_order;
}
3440
3441 int
3442 rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
3443 {
3444 RF_Raid_t *raidPtr;
3445 RF_Config_t *config;
3446 int raidID;
3447 int retcode;
3448
3449 #ifdef DEBUG
3450 printf("RAID autoconfigure\n");
3451 #endif
3452
3453 retcode = 0;
3454 *unit = -1;
3455
3456 /* 1. Create a config structure */
3457
3458 config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
3459 M_RAIDFRAME,
3460 M_NOWAIT);
3461 if (config==NULL) {
3462 printf("Out of mem!?!?\n");
3463 /* XXX do something more intelligent here. */
3464 return(1);
3465 }
3466
3467 memset(config, 0, sizeof(RF_Config_t));
3468
3469 /*
3470 2. Figure out what RAID ID this one is supposed to live at
3471 See if we can get the same RAID dev that it was configured
3472 on last time..
3473 */
3474
3475 raidID = cset->ac->clabel->last_unit;
3476 if ((raidID < 0) || (raidID >= numraid)) {
3477 /* let's not wander off into lala land. */
3478 raidID = numraid - 1;
3479 }
3480 if (raidPtrs[raidID]->valid != 0) {
3481
3482 /*
3483 Nope... Go looking for an alternative...
3484 Start high so we don't immediately use raid0 if that's
3485 not taken.
3486 */
3487
3488 for(raidID = numraid - 1; raidID >= 0; raidID--) {
3489 if (raidPtrs[raidID]->valid == 0) {
3490 /* can use this one! */
3491 break;
3492 }
3493 }
3494 }
3495
3496 if (raidID < 0) {
3497 /* punt... */
3498 printf("Unable to auto configure this set!\n");
3499 printf("(Out of RAID devs!)\n");
3500 free(config, M_RAIDFRAME);
3501 return(1);
3502 }
3503
3504 #ifdef DEBUG
3505 printf("Configuring raid%d:\n",raidID);
3506 #endif
3507
3508 raidPtr = raidPtrs[raidID];
3509
3510 /* XXX all this stuff should be done SOMEWHERE ELSE! */
3511 raidPtr->raidid = raidID;
3512 raidPtr->openings = RAIDOUTSTANDING;
3513
3514 /* 3. Build the configuration structure */
3515 rf_create_configuration(cset->ac, config, raidPtr);
3516
3517 /* 4. Do the configuration */
3518 retcode = rf_Configure(raidPtr, config, cset->ac);
3519
3520 if (retcode == 0) {
3521
3522 raidinit(raidPtrs[raidID]);
3523
3524 rf_markalldirty(raidPtrs[raidID]);
3525 raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
3526 if (cset->ac->clabel->root_partition==1) {
3527 /* everything configured just fine. Make a note
3528 that this set is eligible to be root. */
3529 cset->rootable = 1;
3530 /* XXX do this here? */
3531 raidPtrs[raidID]->root_partition = 1;
3532 }
3533 }
3534
3535 /* 5. Cleanup */
3536 free(config, M_RAIDFRAME);
3537
3538 *unit = raidID;
3539 return(retcode);
3540 }
3541
3542 void
3543 rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
3544 {
3545 struct buf *bp;
3546
3547 bp = (struct buf *)desc->bp;
3548 disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
3549 (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
3550 }
3551
/*
 * rf_pool_init: initialize a pool of 'size'-byte items using wait
 * channel w_chan, pre-allocating xmin items and keeping at least
 * xmin available, with the pool capped at xmax items.
 */
void
rf_pool_init(struct pool *p, size_t size, const char *w_chan,
    size_t xmin, size_t xmax)
{
	pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
	pool_sethiwat(p, xmax);
	pool_prime(p, xmin);
	pool_setlowat(p, xmin);
}
3561
3562 /*
3563 * rf_buf_queue_check(int raidid) -- looks into the buf_queue to see
3564 * if there is IO pending and if that IO could possibly be done for a
3565 * given RAID set. Returns 0 if IO is waiting and can be done, 1
3566 * otherwise.
3567 *
3568 */
3569
3570 int
3571 rf_buf_queue_check(int raidid)
3572 {
3573 if ((bufq_peek(raid_softc[raidid].buf_queue) != NULL) &&
3574 raidPtrs[raidid]->openings > 0) {
3575 /* there is work to do */
3576 return 0;
3577 }
3578 /* default is nothing to do */
3579 return 1;
3580 }
3581
3582 int
3583 rf_getdisksize(struct vnode *vp, struct lwp *l, RF_RaidDisk_t *diskPtr)
3584 {
3585 struct partinfo dpart;
3586 struct dkwedge_info dkw;
3587 int error;
3588
3589 error = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred);
3590 if (error == 0) {
3591 diskPtr->blockSize = dpart.disklab->d_secsize;
3592 diskPtr->numBlocks = dpart.part->p_size - rf_protectedSectors;
3593 diskPtr->partitionSize = dpart.part->p_size;
3594 return 0;
3595 }
3596
3597 error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD, l->l_cred);
3598 if (error == 0) {
3599 diskPtr->blockSize = 512; /* XXX */
3600 diskPtr->numBlocks = dkw.dkw_size - rf_protectedSectors;
3601 diskPtr->partitionSize = dkw.dkw_size;
3602 return 0;
3603 }
3604 return error;
3605 }
3606
/*
 * Autoconfiguration match function: raid(4) pseudo-devices always
 * match.
 */
static int
raid_match(device_t self, cfdata_t cfdata, void *aux)
{
	return 1;
}
3612
/*
 * Autoconfiguration attach function: nothing to do at attach time;
 * real initialization happens when a set is configured.
 */
static void
raid_attach(device_t parent, device_t self, void *aux)
{

}
3618
3619
3620 static int
3621 raid_detach(device_t self, int flags)
3622 {
3623 struct raid_softc *rs = device_private(self);
3624
3625 if (rs->sc_flags & RAIDF_INITED)
3626 return EBUSY;
3627
3628 return 0;
3629 }
3630
3631 static void
3632 rf_set_properties(struct raid_softc *rs, RF_Raid_t *raidPtr)
3633 {
3634 prop_dictionary_t disk_info, odisk_info, geom;
3635 disk_info = prop_dictionary_create();
3636 geom = prop_dictionary_create();
3637 prop_dictionary_set_uint64(geom, "sectors-per-unit",
3638 raidPtr->totalSectors);
3639 prop_dictionary_set_uint32(geom, "sector-size",
3640 raidPtr->bytesPerSector);
3641
3642 prop_dictionary_set_uint16(geom, "sectors-per-track",
3643 raidPtr->Layout.dataSectorsPerStripe);
3644 prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
3645 4 * raidPtr->numCol);
3646
3647 prop_dictionary_set_uint64(geom, "cylinders-per-unit",
3648 raidPtr->totalSectors / (raidPtr->Layout.dataSectorsPerStripe *
3649 (4 * raidPtr->numCol)));
3650
3651 prop_dictionary_set(disk_info, "geometry", geom);
3652 prop_object_release(geom);
3653 prop_dictionary_set(device_properties(rs->sc_dev),
3654 "disk-info", disk_info);
3655 odisk_info = rs->sc_dkdev.dk_info;
3656 rs->sc_dkdev.dk_info = disk_info;
3657 if (odisk_info)
3658 prop_object_release(odisk_info);
3659 }
3660
3661 /*
3662 * Implement forwarding of the DIOCCACHESYNC ioctl to each of the components.
3663 * We end up returning whatever error was returned by the first cache flush
3664 * that fails.
3665 */
3666
3667 static int
3668 rf_sync_component_caches(RF_Raid_t *raidPtr)
3669 {
3670 int c, sparecol;
3671 int e,error;
3672 int force = 1;
3673
3674 error = 0;
3675 for (c = 0; c < raidPtr->numCol; c++) {
3676 if (raidPtr->Disks[c].status == rf_ds_optimal) {
3677 e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp, DIOCCACHESYNC,
3678 &force, FWRITE, NOCRED);
3679 if (e) {
3680 if (e != ENODEV)
3681 printf("raid%d: cache flush to component %s failed.\n",
3682 raidPtr->raidid, raidPtr->Disks[c].devname);
3683 if (error == 0) {
3684 error = e;
3685 }
3686 }
3687 }
3688 }
3689
3690 for( c = 0; c < raidPtr->numSpare ; c++) {
3691 sparecol = raidPtr->numCol + c;
3692 /* Need to ensure that the reconstruct actually completed! */
3693 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3694 e = VOP_IOCTL(raidPtr->raid_cinfo[sparecol].ci_vp,
3695 DIOCCACHESYNC, &force, FWRITE, NOCRED);
3696 if (e) {
3697 if (e != ENODEV)
3698 printf("raid%d: cache flush to component %s failed.\n",
3699 raidPtr->raidid, raidPtr->Disks[sparecol].devname);
3700 if (error == 0) {
3701 error = e;
3702 }
3703 }
3704 }
3705 }
3706 return error;
3707 }
3708