rf_netbsdkintf.c revision 1.276 1 /* $NetBSD: rf_netbsdkintf.c,v 1.276 2010/12/04 10:01:16 mrg Exp $ */
2 /*-
3 * Copyright (c) 1996, 1997, 1998, 2008 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Greg Oster; Jason R. Thorpe.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 /*
32 * Copyright (c) 1990, 1993
33 * The Regents of the University of California. All rights reserved.
34 *
35 * This code is derived from software contributed to Berkeley by
36 * the Systems Programming Group of the University of Utah Computer
37 * Science Department.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 *
63 * from: Utah $Hdr: cd.c 1.6 90/11/28$
64 *
65 * @(#)cd.c 8.2 (Berkeley) 11/16/93
66 */
67
68 /*
69 * Copyright (c) 1988 University of Utah.
70 *
71 * This code is derived from software contributed to Berkeley by
72 * the Systems Programming Group of the University of Utah Computer
73 * Science Department.
74 *
75 * Redistribution and use in source and binary forms, with or without
76 * modification, are permitted provided that the following conditions
77 * are met:
78 * 1. Redistributions of source code must retain the above copyright
79 * notice, this list of conditions and the following disclaimer.
80 * 2. Redistributions in binary form must reproduce the above copyright
81 * notice, this list of conditions and the following disclaimer in the
82 * documentation and/or other materials provided with the distribution.
83 * 3. All advertising materials mentioning features or use of this software
84 * must display the following acknowledgement:
85 * This product includes software developed by the University of
86 * California, Berkeley and its contributors.
87 * 4. Neither the name of the University nor the names of its contributors
88 * may be used to endorse or promote products derived from this software
89 * without specific prior written permission.
90 *
91 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
92 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
93 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
94 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
95 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
96 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
97 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
98 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
99 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
100 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
101 * SUCH DAMAGE.
102 *
103 * from: Utah $Hdr: cd.c 1.6 90/11/28$
104 *
105 * @(#)cd.c 8.2 (Berkeley) 11/16/93
106 */
107
108 /*
109 * Copyright (c) 1995 Carnegie-Mellon University.
110 * All rights reserved.
111 *
112 * Authors: Mark Holland, Jim Zelenka
113 *
114 * Permission to use, copy, modify and distribute this software and
115 * its documentation is hereby granted, provided that both the copyright
116 * notice and this permission notice appear in all copies of the
117 * software, derivative works or modified versions, and any portions
118 * thereof, and that both notices appear in supporting documentation.
119 *
120 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
121 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
122 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
123 *
124 * Carnegie Mellon requests users of this software to return to
125 *
126 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
127 * School of Computer Science
128 * Carnegie Mellon University
129 * Pittsburgh PA 15213-3890
130 *
131 * any improvements or extensions that they make and grant Carnegie the
132 * rights to redistribute these changes.
133 */
134
135 /***********************************************************
136 *
137 * rf_kintf.c -- the kernel interface routines for RAIDframe
138 *
139 ***********************************************************/
140
141 #include <sys/cdefs.h>
142 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.276 2010/12/04 10:01:16 mrg Exp $");
143
144 #ifdef _KERNEL_OPT
145 #include "opt_compat_netbsd.h"
146 #include "opt_raid_autoconfig.h"
147 #include "raid.h"
148 #endif
149
150 #include <sys/param.h>
151 #include <sys/errno.h>
152 #include <sys/pool.h>
153 #include <sys/proc.h>
154 #include <sys/queue.h>
155 #include <sys/disk.h>
156 #include <sys/device.h>
157 #include <sys/stat.h>
158 #include <sys/ioctl.h>
159 #include <sys/fcntl.h>
160 #include <sys/systm.h>
161 #include <sys/vnode.h>
162 #include <sys/disklabel.h>
163 #include <sys/conf.h>
164 #include <sys/buf.h>
165 #include <sys/bufq.h>
166 #include <sys/reboot.h>
167 #include <sys/kauth.h>
168
169 #include <prop/proplib.h>
170
171 #include <dev/raidframe/raidframevar.h>
172 #include <dev/raidframe/raidframeio.h>
173 #include <dev/raidframe/rf_paritymap.h>
174
175 #include "rf_raid.h"
176 #include "rf_copyback.h"
177 #include "rf_dag.h"
178 #include "rf_dagflags.h"
179 #include "rf_desc.h"
180 #include "rf_diskqueue.h"
181 #include "rf_etimer.h"
182 #include "rf_general.h"
183 #include "rf_kintf.h"
184 #include "rf_options.h"
185 #include "rf_driver.h"
186 #include "rf_parityscan.h"
187 #include "rf_threadstuff.h"
188
189 #ifdef COMPAT_50
190 #include "rf_compat50.h"
191 #endif
192
193 #ifdef DEBUG
194 int rf_kdebug_level = 0;
195 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
196 #else /* DEBUG */
197 #define db1_printf(a) { }
198 #endif /* DEBUG */
199
200 static RF_Raid_t **raidPtrs; /* global raid device descriptors */
201
202 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
203 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
204
205 static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a
206 * spare table */
207 static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from
208 * installation process */
209 #endif
210
211 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
212
213 /* prototypes */
214 static void KernelWakeupFunc(struct buf *);
215 static void InitBP(struct buf *, struct vnode *, unsigned,
216 dev_t, RF_SectorNum_t, RF_SectorCount_t, void *, void (*) (struct buf *),
217 void *, int, struct proc *);
218 static void raidinit(RF_Raid_t *);
219
220 void raidattach(int);
221 static int raid_match(device_t, cfdata_t, void *);
222 static void raid_attach(device_t, device_t, void *);
223 static int raid_detach(device_t, int);
224
225 static int raidread_component_area(dev_t, struct vnode *, void *, size_t,
226 daddr_t, daddr_t);
227 static int raidwrite_component_area(dev_t, struct vnode *, void *, size_t,
228 daddr_t, daddr_t, int);
229
230 static int raidwrite_component_label(unsigned,
231 dev_t, struct vnode *, RF_ComponentLabel_t *);
232 static int raidread_component_label(unsigned,
233 dev_t, struct vnode *, RF_ComponentLabel_t *);
234
235
236 dev_type_open(raidopen);
237 dev_type_close(raidclose);
238 dev_type_read(raidread);
239 dev_type_write(raidwrite);
240 dev_type_ioctl(raidioctl);
241 dev_type_strategy(raidstrategy);
242 dev_type_dump(raiddump);
243 dev_type_size(raidsize);
244
245 const struct bdevsw raid_bdevsw = {
246 raidopen, raidclose, raidstrategy, raidioctl,
247 raiddump, raidsize, D_DISK
248 };
249
250 const struct cdevsw raid_cdevsw = {
251 raidopen, raidclose, raidread, raidwrite, raidioctl,
252 nostop, notty, nopoll, nommap, nokqfilter, D_DISK
253 };
254
255 static struct dkdriver rf_dkdriver = { raidstrategy, minphys };
256
257 /* XXX Not sure if the following should be replacing the raidPtrs above,
258 or if it should be used in conjunction with that...
259 */
260
/*
 * Per-unit software state for a RAIDframe pseudo-disk ("raid%d").
 * One of these exists per unit in the raid_softc[] array allocated
 * by raidattach().
 */
struct raid_softc {
	device_t sc_dev;	/* autoconf device handle */
	int     sc_flags;	/* flags */
	int     sc_cflags;	/* configuration flags */
	uint64_t sc_size;	/* size of the raid device */
	char    sc_xname[20];	/* XXX external name */
	struct disk sc_dkdev;	/* generic disk device info */
	struct bufq_state *buf_queue;	/* used for the device queue */
};
/* sc_flags */
#define RAIDF_INITED	0x01	/* unit has been initialized */
#define RAIDF_WLABEL	0x02	/* label area is writable */
#define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
#define RAIDF_SHUTDOWN	0x08	/* unit is being shutdown */
#define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
#define RAIDF_LOCKED	0x80	/* unit is locked */
277
278 #define raidunit(x) DISKUNIT(x)
279 int numraid = 0;
280
281 extern struct cfdriver raid_cd;
282 CFATTACH_DECL3_NEW(raid, sizeof(struct raid_softc),
283 raid_match, raid_attach, raid_detach, NULL, NULL, NULL,
284 DVF_DETACH_SHUTDOWN);
285
286 /*
287 * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
288 * Be aware that large numbers can allow the driver to consume a lot of
289 * kernel memory, especially on writes, and in degraded mode reads.
290 *
291 * For example: with a stripe width of 64 blocks (32k) and 5 disks,
292 * a single 64K write will typically require 64K for the old data,
293 * 64K for the old parity, and 64K for the new parity, for a total
294 * of 192K (if the parity buffer is not re-used immediately).
 295  * Even if it is used immediately, that's still 128K, which when multiplied
296 * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
297 *
298 * Now in degraded mode, for example, a 64K read on the above setup may
299 * require data reconstruction, which will require *all* of the 4 remaining
300 * disks to participate -- 4 * 32K/disk == 128K again.
301 */
302
303 #ifndef RAIDOUTSTANDING
304 #define RAIDOUTSTANDING 6
305 #endif
306
307 #define RAIDLABELDEV(dev) \
308 (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
309
310 /* declared here, and made public, for the benefit of KVM stuff.. */
311 struct raid_softc *raid_softc;
312
313 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
314 struct disklabel *);
315 static void raidgetdisklabel(dev_t);
316 static void raidmakedisklabel(struct raid_softc *);
317
318 static int raidlock(struct raid_softc *);
319 static void raidunlock(struct raid_softc *);
320
321 static int raid_detach_unlocked(struct raid_softc *);
322
323 static void rf_markalldirty(RF_Raid_t *);
324 static void rf_set_properties(struct raid_softc *, RF_Raid_t *);
325
326 void rf_ReconThread(struct rf_recon_req *);
327 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
328 void rf_CopybackThread(RF_Raid_t *raidPtr);
329 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
330 int rf_autoconfig(device_t);
331 void rf_buildroothack(RF_ConfigSet_t *);
332
333 RF_AutoConfig_t *rf_find_raid_components(void);
334 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
335 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
336 static int rf_reasonable_label(RF_ComponentLabel_t *);
337 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
338 int rf_set_autoconfig(RF_Raid_t *, int);
339 int rf_set_rootpartition(RF_Raid_t *, int);
340 void rf_release_all_vps(RF_ConfigSet_t *);
341 void rf_cleanup_config_set(RF_ConfigSet_t *);
342 int rf_have_enough_components(RF_ConfigSet_t *);
343 int rf_auto_config_set(RF_ConfigSet_t *, int *);
344
345 static int raidautoconfig = 0; /* Debugging, mostly. Set to 0 to not
346 allow autoconfig to take place.
347 Note that this is overridden by having
348 RAID_AUTOCONFIG as an option in the
349 kernel config file. */
350
351 struct RF_Pools_s rf_pools;
352
/*
 * raidattach: pseudo-device attach routine, invoked at boot with the
 * number of RAID units to support.  Allocates the global raidPtrs[]
 * descriptor array and the per-unit raid_softc[] array, boots the
 * RAIDframe core, attaches the autoconf glue, and registers a config
 * finalizer so that auto-configurable sets are assembled only after
 * all real hardware has been found.
 */
void
raidattach(int num)
{
	int raidID;
	int i, rc;

	aprint_debug("raidattach: Asked for %d units\n", num);

	if (num <= 0) {
#ifdef DIAGNOSTIC
		panic("raidattach: count <= 0");
#endif
		return;
	}
	/* This is where all the initialization stuff gets done. */

	numraid = num;

	/* Make some space for requested number of units... */

	RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
	if (raidPtrs == NULL) {
		panic("raidPtrs is NULL!!");
	}

#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
	rf_mutex_init(&rf_sparet_wait_mutex);

	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
#endif

	for (i = 0; i < num; i++)
		raidPtrs[i] = NULL;
	rc = rf_BootRaidframe();
	if (rc == 0)
		aprint_verbose("Kernelized RAIDframe activated\n");
	else
		panic("Serious error booting RAID!!");

	/* put together some datastructures like the CCD device does.. This
	 * lets us lock the device and what-not when it gets opened. */

	raid_softc = (struct raid_softc *)
	    malloc(num * sizeof(struct raid_softc),
		   M_RAIDFRAME, M_NOWAIT);
	if (raid_softc == NULL) {
		aprint_error("WARNING: no memory for RAIDframe driver\n");
		return;
	}

	memset(raid_softc, 0, num * sizeof(struct raid_softc));

	for (raidID = 0; raidID < num; raidID++) {
		bufq_alloc(&raid_softc[raidID].buf_queue, "fcfs", 0);

		RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
			  (RF_Raid_t *));
		if (raidPtrs[raidID] == NULL) {
			aprint_error("WARNING: raidPtrs[%d] is NULL\n", raidID);
			/* Shrink the visible unit count so nothing ever
			   touches the unallocated tail of raidPtrs[]. */
			numraid = raidID;
			return;
		}
	}

	if (config_cfattach_attach(raid_cd.cd_name, &raid_ca)) {
		aprint_error("raidattach: config_cfattach_attach failed?\n");
	}

#ifdef RAID_AUTOCONFIG
	raidautoconfig = 1;
#endif

	/*
	 * Register a finalizer which will be used to auto-config RAID
	 * sets once all real hardware devices have been found.
	 */
	if (config_finalize_register(NULL, rf_autoconfig) != 0)
		aprint_error("WARNING: unable to register RAIDframe finalizer\n");
}
432
433 int
434 rf_autoconfig(device_t self)
435 {
436 RF_AutoConfig_t *ac_list;
437 RF_ConfigSet_t *config_sets;
438
439 if (raidautoconfig == 0)
440 return (0);
441
442 /* XXX This code can only be run once. */
443 raidautoconfig = 0;
444
445 /* 1. locate all RAID components on the system */
446 aprint_debug("Searching for RAID components...\n");
447 ac_list = rf_find_raid_components();
448
449 /* 2. Sort them into their respective sets. */
450 config_sets = rf_create_auto_sets(ac_list);
451
452 /*
453 * 3. Evaluate each set andconfigure the valid ones.
454 * This gets done in rf_buildroothack().
455 */
456 rf_buildroothack(config_sets);
457
458 return 1;
459 }
460
/*
 * rf_buildroothack: walk the list of candidate configuration sets,
 * auto-configure the eligible ones, and try to determine whether one
 * of the resulting RAID sets should become the root device.  May set
 * the global booted_device, or set RB_ASKNAME in boothowto if the
 * root cannot be determined unambiguously.  Consumes config_sets:
 * every set is cleaned up before return.
 */
void
rf_buildroothack(RF_ConfigSet_t *config_sets)
{
	RF_ConfigSet_t *cset;
	RF_ConfigSet_t *next_cset;
	int retcode;
	int raidID;
	int rootID;
	int col;
	int num_root;
	char *devname;

	rootID = 0;
	num_root = 0;
	cset = config_sets;
	while (cset != NULL) {
		/* rf_cleanup_config_set() frees cset, so grab the link now. */
		next_cset = cset->next;
		if (rf_have_enough_components(cset) &&
		    cset->ac->clabel->autoconfigure==1) {
			retcode = rf_auto_config_set(cset,&raidID);
			if (!retcode) {
				aprint_debug("raid%d: configured ok\n", raidID);
				if (cset->rootable) {
					/* Remember the (last) rootable set. */
					rootID = raidID;
					num_root++;
				}
			} else {
				/* The autoconfig didn't work :( */
				aprint_debug("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
				rf_release_all_vps(cset);
			}
		} else {
			/* we're not autoconfiguring this set...
			   release the associated resources */
			rf_release_all_vps(cset);
		}
		/* cleanup */
		rf_cleanup_config_set(cset);
		cset = next_cset;
	}

	/* if the user has specified what the root device should be
	   then we don't touch booted_device or boothowto... */

	if (rootspec != NULL)
		return;

	/* we found something bootable... */

	if (num_root == 1) {
		/* Exactly one rootable set: it becomes the boot device. */
		booted_device = raid_softc[rootID].sc_dev;
	} else if (num_root > 1) {

		/*
		 * Maybe the MD code can help. If it cannot, then
		 * setroot() will discover that we have no
		 * booted_device and will ask the user if nothing was
		 * hardwired in the kernel config file
		 */

		if (booted_device == NULL)
			cpu_rootconf();
		if (booted_device == NULL)
			return;

		/*
		 * Disambiguate: count only the rootable sets that
		 * actually contain the device we booted from.
		 */
		num_root = 0;
		for (raidID = 0; raidID < numraid; raidID++) {
			if (raidPtrs[raidID]->valid == 0)
				continue;

			if (raidPtrs[raidID]->root_partition == 0)
				continue;

			for (col = 0; col < raidPtrs[raidID]->numCol; col++) {
				devname = raidPtrs[raidID]->Disks[col].devname;
				/* Strip the "/dev/" prefix before comparing
				   against the autoconf device name. */
				devname += sizeof("/dev/") - 1;
				if (strncmp(devname, device_xname(booted_device),
					    strlen(device_xname(booted_device))) != 0)
					continue;
				aprint_debug("raid%d includes boot device %s\n",
				       raidID, devname);
				num_root++;
				rootID = raidID;
			}
		}

		if (num_root == 1) {
			booted_device = raid_softc[rootID].sc_dev;
		} else {
			/* we can't guess.. require the user to answer... */
			boothowto |= RB_ASKNAME;
		}
	}
}
555
556
557 int
558 raidsize(dev_t dev)
559 {
560 struct raid_softc *rs;
561 struct disklabel *lp;
562 int part, unit, omask, size;
563
564 unit = raidunit(dev);
565 if (unit >= numraid)
566 return (-1);
567 rs = &raid_softc[unit];
568
569 if ((rs->sc_flags & RAIDF_INITED) == 0)
570 return (-1);
571
572 part = DISKPART(dev);
573 omask = rs->sc_dkdev.dk_openmask & (1 << part);
574 lp = rs->sc_dkdev.dk_label;
575
576 if (omask == 0 && raidopen(dev, 0, S_IFBLK, curlwp))
577 return (-1);
578
579 if (lp->d_partitions[part].p_fstype != FS_SWAP)
580 size = -1;
581 else
582 size = lp->d_partitions[part].p_size *
583 (lp->d_secsize / DEV_BSIZE);
584
585 if (omask == 0 && raidclose(dev, 0, S_IFBLK, curlwp))
586 return (-1);
587
588 return (size);
589
590 }
591
/*
 * raiddump: kernel crash-dump entry point.  Dumps are only supported
 * on RAID 1 sets (1 data + 1 parity column); pick the best surviving
 * component -- master, spared master, slave, spared slave, in that
 * order -- and pass the dump straight through to that component's
 * block driver.
 */
int
raiddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	int unit = raidunit(dev);
	struct raid_softc *rs;
	const struct bdevsw *bdev;
	struct disklabel *lp;
	RF_Raid_t *raidPtr;
	daddr_t offset;
	int part, c, sparecol, j, scol, dumpto;
	int error = 0;

	if (unit >= numraid)
		return (ENXIO);

	rs = &raid_softc[unit];
	raidPtr = raidPtrs[unit];

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return ENXIO;

	/* we only support dumping to RAID 1 sets */
	if (raidPtr->Layout.numDataCol != 1 ||
	    raidPtr->Layout.numParityCol != 1)
		return EINVAL;


	if ((error = raidlock(rs)) != 0)
		return error;

	/* The dump must be a whole number of DEV_BSIZE blocks. */
	if (size % DEV_BSIZE != 0) {
		error = EINVAL;
		goto out;
	}

	/* Refuse to write past the end of the RAID device. */
	if (blkno + size / DEV_BSIZE > rs->sc_size) {
		printf("%s: blkno (%" PRIu64 ") + size / DEV_BSIZE (%zu) > "
		    "sc->sc_size (%" PRIu64 ")\n", __func__, blkno,
		    size / DEV_BSIZE, rs->sc_size);
		error = EINVAL;
		goto out;
	}

	part = DISKPART(dev);
	lp = rs->sc_dkdev.dk_label;
	offset = lp->d_partitions[part].p_offset + RF_PROTECTED_SECTORS;

	/* figure out what device is alive.. */

	/*
	   Look for a component to dump to.  The preference for the
	   component to dump to is as follows:
	   1) the master
	   2) a used_spare of the master
	   3) the slave
	   4) a used_spare of the slave
	 */

	dumpto = -1;
	for (c = 0; c < raidPtr->numCol; c++) {
		if (raidPtr->Disks[c].status == rf_ds_optimal) {
			/* this might be the one */
			dumpto = c;
			break;
		}
	}

	/*
	   At this point we have possibly selected a live master or a
	   live slave.  We now check to see if there is a spared
	   master (or a spared slave), if we didn't find a live master
	   or a live slave.
	 */

	for (c = 0; c < raidPtr->numSpare; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/* How about this one? */
			scol = -1;
			/* Find which column this spare is standing in for. */
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}
			if (scol == 0) {
				/*
				   We must have found a spared master!
				   We'll take that over anything else
				   found so far.  (We couldn't have
				   found a real master before, since
				   this is a used spare, and it's
				   saying that it's replacing the
				   master.)  On reboot (with
				   autoconfiguration turned on)
				   sparecol will become the 1st
				   component (component0) of this set.
				 */
				dumpto = sparecol;
				break;
			} else if (scol != -1) {
				/*
				   Must be a spared slave.  We'll dump
				   to that if we haven't found anything
				   else so far.
				 */
				if (dumpto == -1)
					dumpto = sparecol;
			}
		}
	}

	if (dumpto == -1) {
		/* we couldn't find any live components to dump to!?!?
		 */
		error = EINVAL;
		goto out;
	}

	bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);

	/*
	   Note that blkno is relative to this particular partition.
	   By adding the offset of this partition in the RAID
	   set, and also adding RF_PROTECTED_SECTORS, we get a
	   value that is relative to the partition used for the
	   underlying component.
	 */

	error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
				blkno + offset, va, size);

out:
	raidunlock(rs);

	return error;
}
729 /* ARGSUSED */
/*
 * raidopen: open a partition of a RAID pseudo-disk.  Reads the
 * disklabel on the first open of a configured unit, validates that
 * the requested partition exists, records the open in the per-unit
 * open masks, and marks the components dirty on the very first open
 * so an unclean shutdown can be detected later.
 */
/* ARGSUSED */
int
raidopen(dev_t dev, int flags, int fmt,
    struct lwp *l)
{
	int unit = raidunit(dev);
	struct raid_softc *rs;
	struct disklabel *lp;
	int part, pmask;
	int error = 0;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((error = raidlock(rs)) != 0)
		return (error);

	/* Unit is on its way down; refuse new opens. */
	if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0) {
		error = EBUSY;
		goto bad;
	}

	lp = rs->sc_dkdev.dk_label;

	part = DISKPART(dev);

	/*
	 * If there are wedges, and this is not RAW_PART, then we
	 * need to fail.
	 */
	if (rs->sc_dkdev.dk_nwedges != 0 && part != RAW_PART) {
		error = EBUSY;
		goto bad;
	}
	pmask = (1 << part);

	/* First open of a configured unit: (re)read the disklabel. */
	if ((rs->sc_flags & RAIDF_INITED) &&
	    (rs->sc_dkdev.dk_openmask == 0))
		raidgetdisklabel(dev);

	/* make sure that this partition exists */

	if (part != RAW_PART) {
		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
		    ((part >= lp->d_npartitions) ||
			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
			error = ENXIO;
			goto bad;
		}
	}
	/* Prevent this unit from being unconfigured while open. */
	switch (fmt) {
	case S_IFCHR:
		rs->sc_dkdev.dk_copenmask |= pmask;
		break;

	case S_IFBLK:
		rs->sc_dkdev.dk_bopenmask |= pmask;
		break;
	}

	if ((rs->sc_dkdev.dk_openmask == 0) &&
	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
		/* First one... mark things as dirty... Note that we *MUST*
		   have done a configure before this.  I DO NOT WANT TO BE
		   SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
		   THAT THEY BELONG TOGETHER!!!!! */
		/* XXX should check to see if we're only open for reading
		   here... If so, we needn't do this, but then need some
		   other way of keeping track of what's happened.. */

		rf_markalldirty(raidPtrs[unit]);
	}


	rs->sc_dkdev.dk_openmask =
	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;

bad:
	/* NB: reached on success too; error is 0 in that case. */
	raidunlock(rs);

	return (error);


}
815 /* ARGSUSED */
816 int
817 raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
818 {
819 int unit = raidunit(dev);
820 struct raid_softc *rs;
821 int error = 0;
822 int part;
823
824 if (unit >= numraid)
825 return (ENXIO);
826 rs = &raid_softc[unit];
827
828 if ((error = raidlock(rs)) != 0)
829 return (error);
830
831 part = DISKPART(dev);
832
833 /* ...that much closer to allowing unconfiguration... */
834 switch (fmt) {
835 case S_IFCHR:
836 rs->sc_dkdev.dk_copenmask &= ~(1 << part);
837 break;
838
839 case S_IFBLK:
840 rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
841 break;
842 }
843 rs->sc_dkdev.dk_openmask =
844 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
845
846 if ((rs->sc_dkdev.dk_openmask == 0) &&
847 ((rs->sc_flags & RAIDF_INITED) != 0)) {
848 /* Last one... device is not unconfigured yet.
849 Device shutdown has taken care of setting the
850 clean bits if RAIDF_INITED is not set
851 mark things as clean... */
852
853 rf_update_component_labels(raidPtrs[unit],
854 RF_FINAL_COMPONENT_UPDATE);
855
856 /* If the kernel is shutting down, it will detach
857 * this RAID set soon enough.
858 */
859 }
860
861 raidunlock(rs);
862 return (0);
863
864 }
865
866 void
867 raidstrategy(struct buf *bp)
868 {
869 int s;
870
871 unsigned int raidID = raidunit(bp->b_dev);
872 RF_Raid_t *raidPtr;
873 struct raid_softc *rs = &raid_softc[raidID];
874 int wlabel;
875
876 if ((rs->sc_flags & RAIDF_INITED) ==0) {
877 bp->b_error = ENXIO;
878 goto done;
879 }
880 if (raidID >= numraid || !raidPtrs[raidID]) {
881 bp->b_error = ENODEV;
882 goto done;
883 }
884 raidPtr = raidPtrs[raidID];
885 if (!raidPtr->valid) {
886 bp->b_error = ENODEV;
887 goto done;
888 }
889 if (bp->b_bcount == 0) {
890 db1_printf(("b_bcount is zero..\n"));
891 goto done;
892 }
893
894 /*
895 * Do bounds checking and adjust transfer. If there's an
896 * error, the bounds check will flag that for us.
897 */
898
899 wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
900 if (DISKPART(bp->b_dev) == RAW_PART) {
901 uint64_t size; /* device size in DEV_BSIZE unit */
902
903 if (raidPtr->logBytesPerSector > DEV_BSHIFT) {
904 size = raidPtr->totalSectors <<
905 (raidPtr->logBytesPerSector - DEV_BSHIFT);
906 } else {
907 size = raidPtr->totalSectors >>
908 (DEV_BSHIFT - raidPtr->logBytesPerSector);
909 }
910 if (bounds_check_with_mediasize(bp, DEV_BSIZE, size) <= 0) {
911 goto done;
912 }
913 } else {
914 if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
915 db1_printf(("Bounds check failed!!:%d %d\n",
916 (int) bp->b_blkno, (int) wlabel));
917 goto done;
918 }
919 }
920 s = splbio();
921
922 bp->b_resid = 0;
923
924 /* stuff it onto our queue */
925 bufq_put(rs->buf_queue, bp);
926
927 /* scheduled the IO to happen at the next convenient time */
928 wakeup(&(raidPtrs[raidID]->iodone));
929
930 splx(s);
931 return;
932
933 done:
934 bp->b_resid = bp->b_bcount;
935 biodone(bp);
936 }
937 /* ARGSUSED */
938 int
939 raidread(dev_t dev, struct uio *uio, int flags)
940 {
941 int unit = raidunit(dev);
942 struct raid_softc *rs;
943
944 if (unit >= numraid)
945 return (ENXIO);
946 rs = &raid_softc[unit];
947
948 if ((rs->sc_flags & RAIDF_INITED) == 0)
949 return (ENXIO);
950
951 return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
952
953 }
954 /* ARGSUSED */
955 int
956 raidwrite(dev_t dev, struct uio *uio, int flags)
957 {
958 int unit = raidunit(dev);
959 struct raid_softc *rs;
960
961 if (unit >= numraid)
962 return (ENXIO);
963 rs = &raid_softc[unit];
964
965 if ((rs->sc_flags & RAIDF_INITED) == 0)
966 return (ENXIO);
967
968 return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
969
970 }
971
972 static int
973 raid_detach_unlocked(struct raid_softc *rs)
974 {
975 int error;
976 RF_Raid_t *raidPtr;
977
978 raidPtr = raidPtrs[device_unit(rs->sc_dev)];
979
980 /*
981 * If somebody has a partition mounted, we shouldn't
982 * shutdown.
983 */
984 if (rs->sc_dkdev.dk_openmask != 0)
985 return EBUSY;
986
987 if ((rs->sc_flags & RAIDF_INITED) == 0)
988 ; /* not initialized: nothing to do */
989 else if ((error = rf_Shutdown(raidPtr)) != 0)
990 return error;
991 else
992 rs->sc_flags &= ~(RAIDF_INITED|RAIDF_SHUTDOWN);
993
994 /* Detach the disk. */
995 disk_detach(&rs->sc_dkdev);
996 disk_destroy(&rs->sc_dkdev);
997
998 return 0;
999 }
1000
1001 int
1002 raidioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1003 {
1004 int unit = raidunit(dev);
1005 int error = 0;
1006 int part, pmask;
1007 cfdata_t cf;
1008 struct raid_softc *rs;
1009 RF_Config_t *k_cfg, *u_cfg;
1010 RF_Raid_t *raidPtr;
1011 RF_RaidDisk_t *diskPtr;
1012 RF_AccTotals_t *totals;
1013 RF_DeviceConfig_t *d_cfg, **ucfgp;
1014 u_char *specific_buf;
1015 int retcode = 0;
1016 int column;
1017 /* int raidid; */
1018 struct rf_recon_req *rrcopy, *rr;
1019 RF_ComponentLabel_t *clabel;
1020 RF_ComponentLabel_t *ci_label;
1021 RF_ComponentLabel_t **clabel_ptr;
1022 RF_SingleComponent_t *sparePtr,*componentPtr;
1023 RF_SingleComponent_t component;
1024 RF_ProgressInfo_t progressInfo, **progressInfoPtr;
1025 int i, j, d;
1026 #ifdef __HAVE_OLD_DISKLABEL
1027 struct disklabel newlabel;
1028 #endif
1029 struct dkwedge_info *dkw;
1030
1031 if (unit >= numraid)
1032 return (ENXIO);
1033 rs = &raid_softc[unit];
1034 raidPtr = raidPtrs[unit];
1035
1036 db1_printf(("raidioctl: %d %d %d %lu\n", (int) dev,
1037 (int) DISKPART(dev), (int) unit, cmd));
1038
1039 /* Must be open for writes for these commands... */
1040 switch (cmd) {
1041 #ifdef DIOCGSECTORSIZE
1042 case DIOCGSECTORSIZE:
1043 *(u_int *)data = raidPtr->bytesPerSector;
1044 return 0;
1045 case DIOCGMEDIASIZE:
1046 *(off_t *)data =
1047 (off_t)raidPtr->totalSectors * raidPtr->bytesPerSector;
1048 return 0;
1049 #endif
1050 case DIOCSDINFO:
1051 case DIOCWDINFO:
1052 #ifdef __HAVE_OLD_DISKLABEL
1053 case ODIOCWDINFO:
1054 case ODIOCSDINFO:
1055 #endif
1056 case DIOCWLABEL:
1057 case DIOCAWEDGE:
1058 case DIOCDWEDGE:
1059 if ((flag & FWRITE) == 0)
1060 return (EBADF);
1061 }
1062
1063 /* Must be initialized for these... */
1064 switch (cmd) {
1065 case DIOCGDINFO:
1066 case DIOCSDINFO:
1067 case DIOCWDINFO:
1068 #ifdef __HAVE_OLD_DISKLABEL
1069 case ODIOCGDINFO:
1070 case ODIOCWDINFO:
1071 case ODIOCSDINFO:
1072 case ODIOCGDEFLABEL:
1073 #endif
1074 case DIOCGPART:
1075 case DIOCWLABEL:
1076 case DIOCGDEFLABEL:
1077 case DIOCAWEDGE:
1078 case DIOCDWEDGE:
1079 case DIOCLWEDGES:
1080 case DIOCCACHESYNC:
1081 case RAIDFRAME_SHUTDOWN:
1082 case RAIDFRAME_REWRITEPARITY:
1083 case RAIDFRAME_GET_INFO:
1084 case RAIDFRAME_RESET_ACCTOTALS:
1085 case RAIDFRAME_GET_ACCTOTALS:
1086 case RAIDFRAME_KEEP_ACCTOTALS:
1087 case RAIDFRAME_GET_SIZE:
1088 case RAIDFRAME_FAIL_DISK:
1089 case RAIDFRAME_COPYBACK:
1090 case RAIDFRAME_CHECK_RECON_STATUS:
1091 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1092 case RAIDFRAME_GET_COMPONENT_LABEL:
1093 case RAIDFRAME_SET_COMPONENT_LABEL:
1094 case RAIDFRAME_ADD_HOT_SPARE:
1095 case RAIDFRAME_REMOVE_HOT_SPARE:
1096 case RAIDFRAME_INIT_LABELS:
1097 case RAIDFRAME_REBUILD_IN_PLACE:
1098 case RAIDFRAME_CHECK_PARITY:
1099 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1100 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1101 case RAIDFRAME_CHECK_COPYBACK_STATUS:
1102 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1103 case RAIDFRAME_SET_AUTOCONFIG:
1104 case RAIDFRAME_SET_ROOT:
1105 case RAIDFRAME_DELETE_COMPONENT:
1106 case RAIDFRAME_INCORPORATE_HOT_SPARE:
1107 case RAIDFRAME_PARITYMAP_STATUS:
1108 case RAIDFRAME_PARITYMAP_GET_DISABLE:
1109 case RAIDFRAME_PARITYMAP_SET_DISABLE:
1110 case RAIDFRAME_PARITYMAP_SET_PARAMS:
1111 if ((rs->sc_flags & RAIDF_INITED) == 0)
1112 return (ENXIO);
1113 }
1114
1115 switch (cmd) {
1116 #ifdef COMPAT_50
1117 case RAIDFRAME_GET_INFO50:
1118 return rf_get_info50(raidPtr, data);
1119
1120 case RAIDFRAME_CONFIGURE50:
1121 if ((retcode = rf_config50(raidPtr, unit, data, &k_cfg)) != 0)
1122 return retcode;
1123 goto config;
1124 #endif
1125 /* configure the system */
1126 case RAIDFRAME_CONFIGURE:
1127
1128 if (raidPtr->valid) {
1129 /* There is a valid RAID set running on this unit! */
1130 printf("raid%d: Device already configured!\n",unit);
1131 return(EINVAL);
1132 }
1133
1134 /* copy-in the configuration information */
1135 /* data points to a pointer to the configuration structure */
1136
1137 u_cfg = *((RF_Config_t **) data);
1138 RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
1139 if (k_cfg == NULL) {
1140 return (ENOMEM);
1141 }
1142 retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
1143 if (retcode) {
1144 RF_Free(k_cfg, sizeof(RF_Config_t));
1145 db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
1146 retcode));
1147 return (retcode);
1148 }
1149 goto config;
1150 config:
1151 /* allocate a buffer for the layout-specific data, and copy it
1152 * in */
1153 if (k_cfg->layoutSpecificSize) {
1154 if (k_cfg->layoutSpecificSize > 10000) {
1155 /* sanity check */
1156 RF_Free(k_cfg, sizeof(RF_Config_t));
1157 return (EINVAL);
1158 }
1159 RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
1160 (u_char *));
1161 if (specific_buf == NULL) {
1162 RF_Free(k_cfg, sizeof(RF_Config_t));
1163 return (ENOMEM);
1164 }
1165 retcode = copyin(k_cfg->layoutSpecific, specific_buf,
1166 k_cfg->layoutSpecificSize);
1167 if (retcode) {
1168 RF_Free(k_cfg, sizeof(RF_Config_t));
1169 RF_Free(specific_buf,
1170 k_cfg->layoutSpecificSize);
1171 db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
1172 retcode));
1173 return (retcode);
1174 }
1175 } else
1176 specific_buf = NULL;
1177 k_cfg->layoutSpecific = specific_buf;
1178
1179 /* should do some kind of sanity check on the configuration.
1180 * Store the sum of all the bytes in the last byte? */
1181
1182 /* configure the system */
1183
1184 /*
1185 * Clear the entire RAID descriptor, just to make sure
1186 * there is no stale data left in the case of a
1187 * reconfiguration
1188 */
1189 memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
1190 raidPtr->raidid = unit;
1191
1192 retcode = rf_Configure(raidPtr, k_cfg, NULL);
1193
1194 if (retcode == 0) {
1195
1196 /* allow this many simultaneous IO's to
1197 this RAID device */
1198 raidPtr->openings = RAIDOUTSTANDING;
1199
1200 raidinit(raidPtr);
1201 rf_markalldirty(raidPtr);
1202 }
1203 /* free the buffers. No return code here. */
1204 if (k_cfg->layoutSpecificSize) {
1205 RF_Free(specific_buf, k_cfg->layoutSpecificSize);
1206 }
1207 RF_Free(k_cfg, sizeof(RF_Config_t));
1208
1209 return (retcode);
1210
1211 /* shutdown the system */
1212 case RAIDFRAME_SHUTDOWN:
1213
1214 part = DISKPART(dev);
1215 pmask = (1 << part);
1216
1217 if ((error = raidlock(rs)) != 0)
1218 return (error);
1219
1220 if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
1221 ((rs->sc_dkdev.dk_bopenmask & pmask) &&
1222 (rs->sc_dkdev.dk_copenmask & pmask)))
1223 retcode = EBUSY;
1224 else {
1225 rs->sc_flags |= RAIDF_SHUTDOWN;
1226 rs->sc_dkdev.dk_copenmask &= ~pmask;
1227 rs->sc_dkdev.dk_bopenmask &= ~pmask;
1228 rs->sc_dkdev.dk_openmask &= ~pmask;
1229 retcode = 0;
1230 }
1231
1232 raidunlock(rs);
1233
1234 if (retcode != 0)
1235 return retcode;
1236
1237 /* free the pseudo device attach bits */
1238
1239 cf = device_cfdata(rs->sc_dev);
1240 if ((retcode = config_detach(rs->sc_dev, DETACH_QUIET)) == 0)
1241 free(cf, M_RAIDFRAME);
1242
1243 return (retcode);
1244 case RAIDFRAME_GET_COMPONENT_LABEL:
1245 clabel_ptr = (RF_ComponentLabel_t **) data;
1246 /* need to read the component label for the disk indicated
1247 by row,column in clabel */
1248
1249 /*
1250 * Perhaps there should be an option to skip the in-core
1251 * copy and hit the disk, as with disklabel(8).
1252 */
1253 RF_Malloc(clabel, sizeof(*clabel), (RF_ComponentLabel_t *));
1254
1255 retcode = copyin( *clabel_ptr, clabel,
1256 sizeof(RF_ComponentLabel_t));
1257
1258 if (retcode) {
1259 return(retcode);
1260 }
1261
1262 clabel->row = 0; /* Don't allow looking at anything else.*/
1263
1264 column = clabel->column;
1265
1266 if ((column < 0) || (column >= raidPtr->numCol +
1267 raidPtr->numSpare)) {
1268 return(EINVAL);
1269 }
1270
1271 RF_Free(clabel, sizeof(*clabel));
1272
1273 clabel = raidget_component_label(raidPtr, column);
1274
1275 if (retcode == 0) {
1276 retcode = copyout(clabel, *clabel_ptr,
1277 sizeof(RF_ComponentLabel_t));
1278 }
1279 return (retcode);
1280
1281 #if 0
1282 case RAIDFRAME_SET_COMPONENT_LABEL:
1283 clabel = (RF_ComponentLabel_t *) data;
1284
1285 /* XXX check the label for valid stuff... */
1286 /* Note that some things *should not* get modified --
1287 the user should be re-initing the labels instead of
1288 trying to patch things.
1289 */
1290
1291 raidid = raidPtr->raidid;
1292 #ifdef DEBUG
1293 printf("raid%d: Got component label:\n", raidid);
1294 printf("raid%d: Version: %d\n", raidid, clabel->version);
1295 printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1296 printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1297 printf("raid%d: Column: %d\n", raidid, clabel->column);
1298 printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1299 printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1300 printf("raid%d: Status: %d\n", raidid, clabel->status);
1301 #endif
1302 clabel->row = 0;
1303 column = clabel->column;
1304
1305 if ((column < 0) || (column >= raidPtr->numCol)) {
1306 return(EINVAL);
1307 }
1308
1309 /* XXX this isn't allowed to do anything for now :-) */
1310
1311 /* XXX and before it is, we need to fill in the rest
1312 of the fields!?!?!?! */
1313 memcpy(raidget_component_label(raidPtr, column),
1314 clabel, sizeof(*clabel));
1315 raidflush_component_label(raidPtr, column);
1316 return (0);
1317 #endif
1318
1319 case RAIDFRAME_INIT_LABELS:
1320 clabel = (RF_ComponentLabel_t *) data;
1321 /*
1322 we only want the serial number from
1323 the above. We get all the rest of the information
1324 from the config that was used to create this RAID
1325 set.
1326 */
1327
1328 raidPtr->serial_number = clabel->serial_number;
1329
1330 for(column=0;column<raidPtr->numCol;column++) {
1331 diskPtr = &raidPtr->Disks[column];
1332 if (!RF_DEAD_DISK(diskPtr->status)) {
1333 ci_label = raidget_component_label(raidPtr,
1334 column);
1335 /* Zeroing this is important. */
1336 memset(ci_label, 0, sizeof(*ci_label));
1337 raid_init_component_label(raidPtr, ci_label);
1338 ci_label->serial_number =
1339 raidPtr->serial_number;
1340 ci_label->row = 0; /* we dont' pretend to support more */
1341 ci_label->partitionSize =
1342 diskPtr->partitionSize;
1343 ci_label->column = column;
1344 raidflush_component_label(raidPtr, column);
1345 }
1346 /* XXXjld what about the spares? */
1347 }
1348
1349 return (retcode);
1350 case RAIDFRAME_SET_AUTOCONFIG:
1351 d = rf_set_autoconfig(raidPtr, *(int *) data);
1352 printf("raid%d: New autoconfig value is: %d\n",
1353 raidPtr->raidid, d);
1354 *(int *) data = d;
1355 return (retcode);
1356
1357 case RAIDFRAME_SET_ROOT:
1358 d = rf_set_rootpartition(raidPtr, *(int *) data);
1359 printf("raid%d: New rootpartition value is: %d\n",
1360 raidPtr->raidid, d);
1361 *(int *) data = d;
1362 return (retcode);
1363
1364 /* initialize all parity */
1365 case RAIDFRAME_REWRITEPARITY:
1366
1367 if (raidPtr->Layout.map->faultsTolerated == 0) {
1368 /* Parity for RAID 0 is trivially correct */
1369 raidPtr->parity_good = RF_RAID_CLEAN;
1370 return(0);
1371 }
1372
1373 if (raidPtr->parity_rewrite_in_progress == 1) {
1374 /* Re-write is already in progress! */
1375 return(EINVAL);
1376 }
1377
1378 retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1379 rf_RewriteParityThread,
1380 raidPtr,"raid_parity");
1381 return (retcode);
1382
1383
1384 case RAIDFRAME_ADD_HOT_SPARE:
1385 sparePtr = (RF_SingleComponent_t *) data;
1386 memcpy( &component, sparePtr, sizeof(RF_SingleComponent_t));
1387 retcode = rf_add_hot_spare(raidPtr, &component);
1388 return(retcode);
1389
1390 case RAIDFRAME_REMOVE_HOT_SPARE:
1391 return(retcode);
1392
1393 case RAIDFRAME_DELETE_COMPONENT:
1394 componentPtr = (RF_SingleComponent_t *)data;
1395 memcpy( &component, componentPtr,
1396 sizeof(RF_SingleComponent_t));
1397 retcode = rf_delete_component(raidPtr, &component);
1398 return(retcode);
1399
1400 case RAIDFRAME_INCORPORATE_HOT_SPARE:
1401 componentPtr = (RF_SingleComponent_t *)data;
1402 memcpy( &component, componentPtr,
1403 sizeof(RF_SingleComponent_t));
1404 retcode = rf_incorporate_hot_spare(raidPtr, &component);
1405 return(retcode);
1406
1407 case RAIDFRAME_REBUILD_IN_PLACE:
1408
1409 if (raidPtr->Layout.map->faultsTolerated == 0) {
1410 /* Can't do this on a RAID 0!! */
1411 return(EINVAL);
1412 }
1413
1414 if (raidPtr->recon_in_progress == 1) {
1415 /* a reconstruct is already in progress! */
1416 return(EINVAL);
1417 }
1418
1419 componentPtr = (RF_SingleComponent_t *) data;
1420 memcpy( &component, componentPtr,
1421 sizeof(RF_SingleComponent_t));
1422 component.row = 0; /* we don't support any more */
1423 column = component.column;
1424
1425 if ((column < 0) || (column >= raidPtr->numCol)) {
1426 return(EINVAL);
1427 }
1428
1429 RF_LOCK_MUTEX(raidPtr->mutex);
1430 if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
1431 (raidPtr->numFailures > 0)) {
1432 /* XXX 0 above shouldn't be constant!!! */
1433 /* some component other than this has failed.
1434 Let's not make things worse than they already
1435 are... */
1436 printf("raid%d: Unable to reconstruct to disk at:\n",
1437 raidPtr->raidid);
1438 printf("raid%d: Col: %d Too many failures.\n",
1439 raidPtr->raidid, column);
1440 RF_UNLOCK_MUTEX(raidPtr->mutex);
1441 return (EINVAL);
1442 }
1443 if (raidPtr->Disks[column].status ==
1444 rf_ds_reconstructing) {
1445 printf("raid%d: Unable to reconstruct to disk at:\n",
1446 raidPtr->raidid);
1447 printf("raid%d: Col: %d Reconstruction already occuring!\n", raidPtr->raidid, column);
1448
1449 RF_UNLOCK_MUTEX(raidPtr->mutex);
1450 return (EINVAL);
1451 }
1452 if (raidPtr->Disks[column].status == rf_ds_spared) {
1453 RF_UNLOCK_MUTEX(raidPtr->mutex);
1454 return (EINVAL);
1455 }
1456 RF_UNLOCK_MUTEX(raidPtr->mutex);
1457
1458 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1459 if (rrcopy == NULL)
1460 return(ENOMEM);
1461
1462 rrcopy->raidPtr = (void *) raidPtr;
1463 rrcopy->col = column;
1464
1465 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1466 rf_ReconstructInPlaceThread,
1467 rrcopy,"raid_reconip");
1468 return(retcode);
1469
1470 case RAIDFRAME_GET_INFO:
1471 if (!raidPtr->valid)
1472 return (ENODEV);
1473 ucfgp = (RF_DeviceConfig_t **) data;
1474 RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1475 (RF_DeviceConfig_t *));
1476 if (d_cfg == NULL)
1477 return (ENOMEM);
1478 d_cfg->rows = 1; /* there is only 1 row now */
1479 d_cfg->cols = raidPtr->numCol;
1480 d_cfg->ndevs = raidPtr->numCol;
1481 if (d_cfg->ndevs >= RF_MAX_DISKS) {
1482 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1483 return (ENOMEM);
1484 }
1485 d_cfg->nspares = raidPtr->numSpare;
1486 if (d_cfg->nspares >= RF_MAX_DISKS) {
1487 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1488 return (ENOMEM);
1489 }
1490 d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1491 d = 0;
1492 for (j = 0; j < d_cfg->cols; j++) {
1493 d_cfg->devs[d] = raidPtr->Disks[j];
1494 d++;
1495 }
1496 for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1497 d_cfg->spares[i] = raidPtr->Disks[j];
1498 }
1499 retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
1500 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1501
1502 return (retcode);
1503
1504 case RAIDFRAME_CHECK_PARITY:
1505 *(int *) data = raidPtr->parity_good;
1506 return (0);
1507
1508 case RAIDFRAME_PARITYMAP_STATUS:
1509 if (rf_paritymap_ineligible(raidPtr))
1510 return EINVAL;
1511 rf_paritymap_status(raidPtr->parity_map,
1512 (struct rf_pmstat *)data);
1513 return 0;
1514
1515 case RAIDFRAME_PARITYMAP_SET_PARAMS:
1516 if (rf_paritymap_ineligible(raidPtr))
1517 return EINVAL;
1518 if (raidPtr->parity_map == NULL)
1519 return ENOENT; /* ??? */
1520 if (0 != rf_paritymap_set_params(raidPtr->parity_map,
1521 (struct rf_pmparams *)data, 1))
1522 return EINVAL;
1523 return 0;
1524
1525 case RAIDFRAME_PARITYMAP_GET_DISABLE:
1526 if (rf_paritymap_ineligible(raidPtr))
1527 return EINVAL;
1528 *(int *) data = rf_paritymap_get_disable(raidPtr);
1529 return 0;
1530
1531 case RAIDFRAME_PARITYMAP_SET_DISABLE:
1532 if (rf_paritymap_ineligible(raidPtr))
1533 return EINVAL;
1534 rf_paritymap_set_disable(raidPtr, *(int *)data);
1535 /* XXX should errors be passed up? */
1536 return 0;
1537
1538 case RAIDFRAME_RESET_ACCTOTALS:
1539 memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1540 return (0);
1541
1542 case RAIDFRAME_GET_ACCTOTALS:
1543 totals = (RF_AccTotals_t *) data;
1544 *totals = raidPtr->acc_totals;
1545 return (0);
1546
1547 case RAIDFRAME_KEEP_ACCTOTALS:
1548 raidPtr->keep_acc_totals = *(int *)data;
1549 return (0);
1550
1551 case RAIDFRAME_GET_SIZE:
1552 *(int *) data = raidPtr->totalSectors;
1553 return (0);
1554
1555 /* fail a disk & optionally start reconstruction */
1556 case RAIDFRAME_FAIL_DISK:
1557
1558 if (raidPtr->Layout.map->faultsTolerated == 0) {
1559 /* Can't do this on a RAID 0!! */
1560 return(EINVAL);
1561 }
1562
1563 rr = (struct rf_recon_req *) data;
1564 rr->row = 0;
1565 if (rr->col < 0 || rr->col >= raidPtr->numCol)
1566 return (EINVAL);
1567
1568
1569 RF_LOCK_MUTEX(raidPtr->mutex);
1570 if (raidPtr->status == rf_rs_reconstructing) {
1571 /* you can't fail a disk while we're reconstructing! */
1572 /* XXX wrong for RAID6 */
1573 RF_UNLOCK_MUTEX(raidPtr->mutex);
1574 return (EINVAL);
1575 }
1576 if ((raidPtr->Disks[rr->col].status ==
1577 rf_ds_optimal) && (raidPtr->numFailures > 0)) {
1578 /* some other component has failed. Let's not make
1579 things worse. XXX wrong for RAID6 */
1580 RF_UNLOCK_MUTEX(raidPtr->mutex);
1581 return (EINVAL);
1582 }
1583 if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
1584 /* Can't fail a spared disk! */
1585 RF_UNLOCK_MUTEX(raidPtr->mutex);
1586 return (EINVAL);
1587 }
1588 RF_UNLOCK_MUTEX(raidPtr->mutex);
1589
1590 /* make a copy of the recon request so that we don't rely on
1591 * the user's buffer */
1592 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1593 if (rrcopy == NULL)
1594 return(ENOMEM);
1595 memcpy(rrcopy, rr, sizeof(*rr));
1596 rrcopy->raidPtr = (void *) raidPtr;
1597
1598 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1599 rf_ReconThread,
1600 rrcopy,"raid_recon");
1601 return (0);
1602
1603 /* invoke a copyback operation after recon on whatever disk
1604 * needs it, if any */
1605 case RAIDFRAME_COPYBACK:
1606
1607 if (raidPtr->Layout.map->faultsTolerated == 0) {
1608 /* This makes no sense on a RAID 0!! */
1609 return(EINVAL);
1610 }
1611
1612 if (raidPtr->copyback_in_progress == 1) {
1613 /* Copyback is already in progress! */
1614 return(EINVAL);
1615 }
1616
1617 retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1618 rf_CopybackThread,
1619 raidPtr,"raid_copyback");
1620 return (retcode);
1621
1622 /* return the percentage completion of reconstruction */
1623 case RAIDFRAME_CHECK_RECON_STATUS:
1624 if (raidPtr->Layout.map->faultsTolerated == 0) {
1625 /* This makes no sense on a RAID 0, so tell the
1626 user it's done. */
1627 *(int *) data = 100;
1628 return(0);
1629 }
1630 if (raidPtr->status != rf_rs_reconstructing)
1631 *(int *) data = 100;
1632 else {
1633 if (raidPtr->reconControl->numRUsTotal > 0) {
1634 *(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
1635 } else {
1636 *(int *) data = 0;
1637 }
1638 }
1639 return (0);
1640 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1641 progressInfoPtr = (RF_ProgressInfo_t **) data;
1642 if (raidPtr->status != rf_rs_reconstructing) {
1643 progressInfo.remaining = 0;
1644 progressInfo.completed = 100;
1645 progressInfo.total = 100;
1646 } else {
1647 progressInfo.total =
1648 raidPtr->reconControl->numRUsTotal;
1649 progressInfo.completed =
1650 raidPtr->reconControl->numRUsComplete;
1651 progressInfo.remaining = progressInfo.total -
1652 progressInfo.completed;
1653 }
1654 retcode = copyout(&progressInfo, *progressInfoPtr,
1655 sizeof(RF_ProgressInfo_t));
1656 return (retcode);
1657
1658 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1659 if (raidPtr->Layout.map->faultsTolerated == 0) {
1660 /* This makes no sense on a RAID 0, so tell the
1661 user it's done. */
1662 *(int *) data = 100;
1663 return(0);
1664 }
1665 if (raidPtr->parity_rewrite_in_progress == 1) {
1666 *(int *) data = 100 *
1667 raidPtr->parity_rewrite_stripes_done /
1668 raidPtr->Layout.numStripe;
1669 } else {
1670 *(int *) data = 100;
1671 }
1672 return (0);
1673
1674 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1675 progressInfoPtr = (RF_ProgressInfo_t **) data;
1676 if (raidPtr->parity_rewrite_in_progress == 1) {
1677 progressInfo.total = raidPtr->Layout.numStripe;
1678 progressInfo.completed =
1679 raidPtr->parity_rewrite_stripes_done;
1680 progressInfo.remaining = progressInfo.total -
1681 progressInfo.completed;
1682 } else {
1683 progressInfo.remaining = 0;
1684 progressInfo.completed = 100;
1685 progressInfo.total = 100;
1686 }
1687 retcode = copyout(&progressInfo, *progressInfoPtr,
1688 sizeof(RF_ProgressInfo_t));
1689 return (retcode);
1690
1691 case RAIDFRAME_CHECK_COPYBACK_STATUS:
1692 if (raidPtr->Layout.map->faultsTolerated == 0) {
1693 /* This makes no sense on a RAID 0 */
1694 *(int *) data = 100;
1695 return(0);
1696 }
1697 if (raidPtr->copyback_in_progress == 1) {
1698 *(int *) data = 100 * raidPtr->copyback_stripes_done /
1699 raidPtr->Layout.numStripe;
1700 } else {
1701 *(int *) data = 100;
1702 }
1703 return (0);
1704
1705 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1706 progressInfoPtr = (RF_ProgressInfo_t **) data;
1707 if (raidPtr->copyback_in_progress == 1) {
1708 progressInfo.total = raidPtr->Layout.numStripe;
1709 progressInfo.completed =
1710 raidPtr->copyback_stripes_done;
1711 progressInfo.remaining = progressInfo.total -
1712 progressInfo.completed;
1713 } else {
1714 progressInfo.remaining = 0;
1715 progressInfo.completed = 100;
1716 progressInfo.total = 100;
1717 }
1718 retcode = copyout(&progressInfo, *progressInfoPtr,
1719 sizeof(RF_ProgressInfo_t));
1720 return (retcode);
1721
1722 /* the sparetable daemon calls this to wait for the kernel to
1723 * need a spare table. this ioctl does not return until a
1724 * spare table is needed. XXX -- calling mpsleep here in the
1725 * ioctl code is almost certainly wrong and evil. -- XXX XXX
1726 * -- I should either compute the spare table in the kernel,
1727 * or have a different -- XXX XXX -- interface (a different
1728 * character device) for delivering the table -- XXX */
1729 #if 0
1730 case RAIDFRAME_SPARET_WAIT:
1731 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1732 while (!rf_sparet_wait_queue)
1733 mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1734 waitreq = rf_sparet_wait_queue;
1735 rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1736 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1737
1738 /* structure assignment */
1739 *((RF_SparetWait_t *) data) = *waitreq;
1740
1741 RF_Free(waitreq, sizeof(*waitreq));
1742 return (0);
1743
1744 /* wakes up a process waiting on SPARET_WAIT and puts an error
1745 * code in it that will cause the dameon to exit */
1746 case RAIDFRAME_ABORT_SPARET_WAIT:
1747 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1748 waitreq->fcol = -1;
1749 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1750 waitreq->next = rf_sparet_wait_queue;
1751 rf_sparet_wait_queue = waitreq;
1752 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1753 wakeup(&rf_sparet_wait_queue);
1754 return (0);
1755
1756 /* used by the spare table daemon to deliver a spare table
1757 * into the kernel */
1758 case RAIDFRAME_SEND_SPARET:
1759
1760 /* install the spare table */
1761 retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1762
1763 /* respond to the requestor. the return status of the spare
1764 * table installation is passed in the "fcol" field */
1765 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1766 waitreq->fcol = retcode;
1767 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1768 waitreq->next = rf_sparet_resp_queue;
1769 rf_sparet_resp_queue = waitreq;
1770 wakeup(&rf_sparet_resp_queue);
1771 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1772
1773 return (retcode);
1774 #endif
1775
1776 default:
1777 break; /* fall through to the os-specific code below */
1778
1779 }
1780
1781 if (!raidPtr->valid)
1782 return (EINVAL);
1783
1784 /*
1785 * Add support for "regular" device ioctls here.
1786 */
1787
1788 error = disk_ioctl(&rs->sc_dkdev, cmd, data, flag, l);
1789 if (error != EPASSTHROUGH)
1790 return (error);
1791
1792 switch (cmd) {
1793 case DIOCGDINFO:
1794 *(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1795 break;
1796 #ifdef __HAVE_OLD_DISKLABEL
1797 case ODIOCGDINFO:
1798 newlabel = *(rs->sc_dkdev.dk_label);
1799 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1800 return ENOTTY;
1801 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1802 break;
1803 #endif
1804
1805 case DIOCGPART:
1806 ((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1807 ((struct partinfo *) data)->part =
1808 &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1809 break;
1810
1811 case DIOCWDINFO:
1812 case DIOCSDINFO:
1813 #ifdef __HAVE_OLD_DISKLABEL
1814 case ODIOCWDINFO:
1815 case ODIOCSDINFO:
1816 #endif
1817 {
1818 struct disklabel *lp;
1819 #ifdef __HAVE_OLD_DISKLABEL
1820 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1821 memset(&newlabel, 0, sizeof newlabel);
1822 memcpy(&newlabel, data, sizeof (struct olddisklabel));
1823 lp = &newlabel;
1824 } else
1825 #endif
1826 lp = (struct disklabel *)data;
1827
1828 if ((error = raidlock(rs)) != 0)
1829 return (error);
1830
1831 rs->sc_flags |= RAIDF_LABELLING;
1832
1833 error = setdisklabel(rs->sc_dkdev.dk_label,
1834 lp, 0, rs->sc_dkdev.dk_cpulabel);
1835 if (error == 0) {
1836 if (cmd == DIOCWDINFO
1837 #ifdef __HAVE_OLD_DISKLABEL
1838 || cmd == ODIOCWDINFO
1839 #endif
1840 )
1841 error = writedisklabel(RAIDLABELDEV(dev),
1842 raidstrategy, rs->sc_dkdev.dk_label,
1843 rs->sc_dkdev.dk_cpulabel);
1844 }
1845 rs->sc_flags &= ~RAIDF_LABELLING;
1846
1847 raidunlock(rs);
1848
1849 if (error)
1850 return (error);
1851 break;
1852 }
1853
1854 case DIOCWLABEL:
1855 if (*(int *) data != 0)
1856 rs->sc_flags |= RAIDF_WLABEL;
1857 else
1858 rs->sc_flags &= ~RAIDF_WLABEL;
1859 break;
1860
1861 case DIOCGDEFLABEL:
1862 raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
1863 break;
1864
1865 #ifdef __HAVE_OLD_DISKLABEL
1866 case ODIOCGDEFLABEL:
1867 raidgetdefaultlabel(raidPtr, rs, &newlabel);
1868 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1869 return ENOTTY;
1870 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1871 break;
1872 #endif
1873
1874 case DIOCAWEDGE:
1875 case DIOCDWEDGE:
1876 dkw = (void *)data;
1877
1878 /* If the ioctl happens here, the parent is us. */
1879 (void)strcpy(dkw->dkw_parent, rs->sc_xname);
1880 return cmd == DIOCAWEDGE ? dkwedge_add(dkw) : dkwedge_del(dkw);
1881
1882 case DIOCLWEDGES:
1883 return dkwedge_list(&rs->sc_dkdev,
1884 (struct dkwedge_list *)data, l);
1885 case DIOCCACHESYNC:
1886 return rf_sync_component_caches(raidPtr);
1887 default:
1888 retcode = ENOTTY;
1889 }
1890 return (retcode);
1891
1892 }
1893
1894
1895 /* raidinit -- complete the rest of the initialization for the
1896 RAIDframe device. */
1897
1898
/*
 * raidinit: complete the kernel-visible initialization of a newly
 * configured RAID set -- mark the softc initialized, attach the
 * pseudo-device, and set up the disk(9) structures so disklabels
 * and wedges can be used on the new device.
 */
static void
raidinit(RF_Raid_t *raidPtr)
{
	cfdata_t cf;
	struct raid_softc *rs;
	int unit;

	unit = raidPtr->raidid;

	rs = &raid_softc[unit];

	/* XXX should check return code first... */
	rs->sc_flags |= RAIDF_INITED;

	/* XXX doesn't check bounds. */
	snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);

	/* attach the pseudo device */
	cf = malloc(sizeof(*cf), M_RAIDFRAME, M_WAITOK);
	cf->cf_name = raid_cd.cd_name;
	cf->cf_atname = raid_cd.cd_name;
	cf->cf_unit = unit;
	cf->cf_fstate = FSTATE_STAR;

	rs->sc_dev = config_attach_pseudo(cf);

	if (rs->sc_dev == NULL) {
		/* Attach failed: undo the INITED flag and release the
		 * cfdata allocated above. */
		printf("raid%d: config_attach_pseudo failed\n",
		       raidPtr->raidid);
		rs->sc_flags &= ~RAIDF_INITED;
		free(cf, M_RAIDFRAME);
		return;
	}

	/* disk_attach actually creates space for the CPU disklabel, among
	 * other things, so it's critical to call this *BEFORE* we try putzing
	 * with disklabels. */

	disk_init(&rs->sc_dkdev, rs->sc_xname, &rf_dkdriver);
	disk_attach(&rs->sc_dkdev);
	disk_blocksize(&rs->sc_dkdev, raidPtr->bytesPerSector);

	/* XXX There may be a weird interaction here between this, and
	 * protectedSectors, as used in RAIDframe.  */

	rs->sc_size = raidPtr->totalSectors;

	/* Discover wedges on this disk (now that the disk is attached). */
	dkwedge_discover(&rs->sc_dkdev);

	rf_set_properties(rs, raidPtr);

}
1951 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
1952 /* wake up the daemon & tell it to get us a spare table
1953 * XXX
1954 * the entries in the queues should be tagged with the raidPtr
1955 * so that in the extremely rare case that two recons happen at once,
1956 * we know for which device were requesting a spare table
1957 * XXX
1958 *
1959 * XXX This code is not currently used. GO
1960 */
/*
 * rf_GetSpareTableFromDaemon: enqueue a spare-table request for the
 * user-land daemon, then block until a response appears on
 * rf_sparet_resp_queue.  Returns the "fcol" status from the daemon's
 * response structure (which is freed here).
 */
int
rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
{
	int retcode;

	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
	req->next = rf_sparet_wait_queue;
	rf_sparet_wait_queue = req;
	/* wake the daemon blocked in RAIDFRAME_SPARET_WAIT */
	wakeup(&rf_sparet_wait_queue);

	/* NOTE(review): an older comment here said "mpsleep unlocks the
	 * mutex", but this now calls tsleep(), which does not release
	 * rf_sparet_wait_mutex -- confirm the intended lock semantics.
	 * (This code path is currently unused; see XXX above.) */
	while (!rf_sparet_resp_queue) {
		tsleep(&rf_sparet_resp_queue, PRIBIO,
		       "raidframe getsparetable", 0);
	}
	req = rf_sparet_resp_queue;
	rf_sparet_resp_queue = req->next;
	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);

	retcode = req->fcol;
	RF_Free(req, sizeof(*req));	/* this is not the same req as we
					 * alloc'd */
	return (retcode);
}
1985 #endif
1986
1987 /* a wrapper around rf_DoAccess that extracts appropriate info from the
1988 * bp & passes it down.
1989 * any calls originating in the kernel must use non-blocking I/O
1990 * do some extra sanity checking to return "appropriate" error values for
1991 * certain conditions (to make some standard utilities work)
1992 *
1993 * Formerly known as: rf_DoAccessKernel
1994 */
/*
 * raidstart: drain the buffer queue for this RAID set, dispatching up
 * to raidPtr->openings requests at a time via rf_DoAccess().  Buffer
 * block numbers are translated from partition-relative to absolute;
 * out-of-range or non-sector-multiple requests are completed with an
 * error without being dispatched.  Called with no RAIDframe locks held;
 * takes and releases raidPtr->mutex internally.
 */
void
raidstart(RF_Raid_t *raidPtr)
{
	RF_SectorCount_t num_blocks, pb, sum;
	RF_RaidAddr_t raid_addr;
	struct partition *pp;
	daddr_t blocknum;
	int unit;
	struct raid_softc *rs;
	int do_async;
	struct buf *bp;
	int rc;

	unit = raidPtr->raidid;
	rs = &raid_softc[unit];

	/* quick check to see if anything has died recently */
	RF_LOCK_MUTEX(raidPtr->mutex);
	if (raidPtr->numNewFailures > 0) {
		/* drop the mutex while updating labels -- it may sleep */
		RF_UNLOCK_MUTEX(raidPtr->mutex);
		rf_update_component_labels(raidPtr,
					   RF_NORMAL_COMPONENT_UPDATE);
		RF_LOCK_MUTEX(raidPtr->mutex);
		raidPtr->numNewFailures--;
	}

	/* Check to see if we're at the limit... */
	/* invariant: raidPtr->mutex is held at the top of each iteration */
	while (raidPtr->openings > 0) {
		RF_UNLOCK_MUTEX(raidPtr->mutex);

		/* get the next item, if any, from the queue */
		if ((bp = bufq_get(rs->buf_queue)) == NULL) {
			/* nothing more to do */
			return;
		}

		/* Ok, for the bp we have here, bp->b_blkno is relative to the
		 * partition.. Need to make it absolute to the underlying
		 * device.. */

		blocknum = bp->b_blkno << DEV_BSHIFT >> raidPtr->logBytesPerSector;
		if (DISKPART(bp->b_dev) != RAW_PART) {
			/* non-raw partition: add the partition offset */
			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
			blocknum += pp->p_offset;
		}

		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
			    (int) blocknum));

		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));

		/* *THIS* is where we adjust what block we're going to...
		 * but DO NOT TOUCH bp->b_blkno!!! */
		raid_addr = blocknum;

		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
		/* pb: one extra block if the byte count isn't
		 * sector-aligned */
		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
		sum = raid_addr + num_blocks + pb;
		if (1 || rf_debugKernelAccess) {
			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
				    (int) raid_addr, (int) sum, (int) num_blocks,
				    (int) pb, (int) bp->b_resid));
		}
		/* reject requests past the end of the set; the (sum < x)
		 * comparisons also catch arithmetic wrap-around */
		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
		    || (sum < num_blocks) || (sum < pb)) {
			bp->b_error = ENOSPC;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			RF_LOCK_MUTEX(raidPtr->mutex);
			continue;
		}
		/*
		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
		 */

		/* reject non-sector-multiple transfers */
		if (bp->b_bcount & raidPtr->sectorMask) {
			bp->b_error = EINVAL;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			RF_LOCK_MUTEX(raidPtr->mutex);
			continue;

		}
		db1_printf(("Calling DoAccess..\n"));


		/* consume one opening for this in-flight request */
		RF_LOCK_MUTEX(raidPtr->mutex);
		raidPtr->openings--;
		RF_UNLOCK_MUTEX(raidPtr->mutex);

		/*
		 * Everything is async.
		 */
		do_async = 1;

		disk_busy(&rs->sc_dkdev);

		/* XXX we're still at splbio() here... do we *really*
		   need to be? */

		/* don't ever condition on bp->b_flags & B_WRITE.
		 * always condition on B_READ instead */

		rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
				 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
				 do_async, raid_addr, num_blocks,
				 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);

		if (rc) {
			/* dispatch failed -- complete the buffer with the
			 * error and keep draining the queue */
			bp->b_error = rc;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			/* continue loop */
		}

		RF_LOCK_MUTEX(raidPtr->mutex);
	}
	RF_UNLOCK_MUTEX(raidPtr->mutex);
}
2115
2116
2117
2118
2119 /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
2120
/*
 * rf_DispatchKernelIO: issue one RAIDframe disk-queue request to the
 * underlying component device via bdev_strategy().  The disk queue
 * mutex is held on entry and on return; it is dropped around the
 * bdev_strategy() call (see comment below).  Completion is delivered
 * asynchronously through KernelWakeupFunc().  Always returns 0.
 */
int
rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
{
	int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
	struct buf *bp;

	req->queue = queue;
	bp = req->bp;

	switch (req->type) {
	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
		/* XXX need to do something extra here.. */
		/* I'm leaving this in, as I've never actually seen it used,
		 * and I'd like folks to report it... GO */
		printf(("WAKEUP CALLED\n"));
		queue->numOutstanding++;

		bp->b_flags = 0;
		bp->b_private = req;

		/* fake an immediate completion for the NOP */
		KernelWakeupFunc(bp);
		break;

	case RF_IO_TYPE_READ:
	case RF_IO_TYPE_WRITE:
#if RF_ACC_TRACE > 0
		if (req->tracerec) {
			RF_ETIMER_START(req->tracerec->timer);
		}
#endif
		/* set up bp to target the component's vnode/device at the
		 * requested sector range, with KernelWakeupFunc as the
		 * completion callback */
		InitBP(bp, queue->rf_cinfo->ci_vp,
		    op, queue->rf_cinfo->ci_dev,
		    req->sectorOffset, req->numSector,
		    req->buf, KernelWakeupFunc, (void *) req,
		    queue->raidPtr->logBytesPerSector, req->b_proc);

		if (rf_debugKernelAccess) {
			db1_printf(("dispatch: bp->b_blkno = %ld\n",
				(long) bp->b_blkno));
		}
		queue->numOutstanding++;
		queue->last_deq_sector = req->sectorOffset;
		/* acc wouldn't have been let in if there were any pending
		 * reqs at any other priority */
		queue->curPriority = req->priority;

		db1_printf(("Going for %c to unit %d col %d\n",
			    req->type, queue->raidPtr->raidid,
			    queue->col));
		db1_printf(("sector %d count %d (%d bytes) %d\n",
			(int) req->sectorOffset, (int) req->numSector,
			(int) (req->numSector <<
			    queue->raidPtr->logBytesPerSector),
			(int) queue->raidPtr->logBytesPerSector));

		/*
		 * XXX: drop lock here since this can block at
		 * least with backing SCSI devices.  Retake it
		 * to minimize fuss with calling interfaces.
		 */

		RF_UNLOCK_QUEUE_MUTEX(queue, "unusedparam");
		bdev_strategy(bp);
		RF_LOCK_QUEUE_MUTEX(queue, "unusedparam");
		break;

	default:
		panic("bad req->type in rf_DispatchKernelIO");
	}
	db1_printf(("Exiting from DispatchKernelIO\n"));

	return (0);
}
2194 /* this is the callback function associated with a I/O invoked from
2195 kernel code.
2196 */
static void
KernelWakeupFunc(struct buf *bp)
{
	RF_DiskQueueData_t *req = NULL;
	RF_DiskQueue_t *queue;
	int s;

	/* Block other bio activity while we update queue/array state. */
	s = splbio();
	db1_printf(("recovering the request queue:\n"));
	req = bp->b_private;

	queue = (RF_DiskQueue_t *) req->queue;

#if RF_ACC_TRACE > 0
	/* Account the elapsed device time against this access' trace
	 * record. */
	if (req->tracerec) {
		RF_ETIMER_STOP(req->tracerec->timer);
		RF_ETIMER_EVAL(req->tracerec->timer);
		RF_LOCK_MUTEX(rf_tracing_mutex);
		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->num_phys_ios++;
		RF_UNLOCK_MUTEX(rf_tracing_mutex);
	}
#endif

	/* XXX Ok, let's get aggressive... If b_error is set, let's go
	 * ballistic, and mark the component as hosed... */

	if (bp->b_error != 0) {
		/* Mark the disk as dead */
		/* but only mark it once... */
		/* and only if it wouldn't leave this RAID set
		   completely broken */
		if (((queue->raidPtr->Disks[queue->col].status ==
		      rf_ds_optimal) ||
		     (queue->raidPtr->Disks[queue->col].status ==
		      rf_ds_used_spare)) &&
		     (queue->raidPtr->numFailures <
		      queue->raidPtr->Layout.map->faultsTolerated)) {
			printf("raid%d: IO Error.  Marking %s as failed.\n",
			       queue->raidPtr->raidid,
			       queue->raidPtr->Disks[queue->col].devname);
			queue->raidPtr->Disks[queue->col].status =
			    rf_ds_failed;
			queue->raidPtr->status = rf_rs_degraded;
			queue->raidPtr->numFailures++;
			queue->raidPtr->numNewFailures++;
		} else {	/* Disk is already dead... */
			/* printf("Disk already marked as dead!\n"); */
		}

	}

	/* Fill in the error value */

	req->error = bp->b_error;

	simple_lock(&queue->raidPtr->iodone_lock);

	/* Drop this one on the "finished" queue... */
	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);

	/* Let the raidio thread know there is work to be done. */
	wakeup(&(queue->raidPtr->iodone));

	simple_unlock(&queue->raidPtr->iodone_lock);

	splx(s);
}
2266
2267
2268
2269 /*
2270 * initialize a buf structure for doing an I/O in the kernel.
2271 */
static void
InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
       RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
       void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
       struct proc *b_proc)
{
	/* bp->b_flags = B_PHYS | rw_flag; */
	bp->b_flags = rw_flag;	/* XXX need B_PHYS here too??? */
	bp->b_oflags = 0;
	bp->b_cflags = 0;
	/* Transfer length in bytes: sector count scaled by the
	 * component's sector-size shift. */
	bp->b_bcount = numSect << logBytesPerSector;
	bp->b_bufsize = bp->b_bcount;
	bp->b_error = 0;
	bp->b_dev = dev;
	bp->b_data = bf;
	/* Convert the sector address to DEV_BSIZE blocks, which is the
	 * unit b_blkno is expressed in. */
	bp->b_blkno = startSect << logBytesPerSector >> DEV_BSHIFT;
	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
	if (bp->b_bcount == 0) {
		panic("bp->b_bcount is zero in InitBP!!");
	}
	bp->b_proc = b_proc;
	/* cbFunc runs from biodone() when the I/O completes; cbArg is
	 * the RF_DiskQueueData_t it recovers via b_private. */
	bp->b_iodone = cbFunc;
	bp->b_private = cbArg;
}
2296
2297 static void
2298 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
2299 struct disklabel *lp)
2300 {
2301 memset(lp, 0, sizeof(*lp));
2302
2303 /* fabricate a label... */
2304 lp->d_secperunit = raidPtr->totalSectors;
2305 lp->d_secsize = raidPtr->bytesPerSector;
2306 lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
2307 lp->d_ntracks = 4 * raidPtr->numCol;
2308 lp->d_ncylinders = raidPtr->totalSectors /
2309 (lp->d_nsectors * lp->d_ntracks);
2310 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
2311
2312 strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
2313 lp->d_type = DTYPE_RAID;
2314 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
2315 lp->d_rpm = 3600;
2316 lp->d_interleave = 1;
2317 lp->d_flags = 0;
2318
2319 lp->d_partitions[RAW_PART].p_offset = 0;
2320 lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
2321 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
2322 lp->d_npartitions = RAW_PART + 1;
2323
2324 lp->d_magic = DISKMAGIC;
2325 lp->d_magic2 = DISKMAGIC;
2326 lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
2327
2328 }
2329 /*
2330 * Read the disklabel from the raid device. If one is not present, fake one
2331 * up.
2332 */
static void
raidgetdisklabel(dev_t dev)
{
	int unit = raidunit(dev);
	struct raid_softc *rs = &raid_softc[unit];
	const char *errstring;
	struct disklabel *lp = rs->sc_dkdev.dk_label;
	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
	RF_Raid_t *raidPtr;

	db1_printf(("Getting the disklabel...\n"));

	memset(clp, 0, sizeof(*clp));

	raidPtr = raidPtrs[unit];

	/* Start from a fabricated default label... */
	raidgetdefaultlabel(raidPtr, rs, lp);

	/*
	 * Call the generic disklabel extraction routine.
	 */
	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
	if (errstring)
		/* ...no on-disk label found: install a default one. */
		raidmakedisklabel(rs);
	else {
		int i;
		struct partition *pp;

		/*
		 * Sanity check whether the found disklabel is valid.
		 *
		 * This is necessary since total size of the raid device
		 * may vary when an interleave is changed even though exactly
		 * same components are used, and old disklabel may used
		 * if that is found.
		 */
		if (lp->d_secperunit != rs->sc_size)
			printf("raid%d: WARNING: %s: "
			    "total sector size in disklabel (%" PRIu32 ") != "
			    "the size of raid (%" PRIu64 ")\n", unit, rs->sc_xname,
			    lp->d_secperunit, rs->sc_size);
		/* Warn about (but do not reject) partitions that run
		 * past the end of the array. */
		for (i = 0; i < lp->d_npartitions; i++) {
			pp = &lp->d_partitions[i];
			if (pp->p_offset + pp->p_size > rs->sc_size)
				printf("raid%d: WARNING: %s: end of partition `%c' "
				    "exceeds the size of raid (%" PRIu64 ")\n",
				    unit, rs->sc_xname, 'a' + i, rs->sc_size);
		}
	}

}
2385 /*
2386 * Take care of things one might want to take care of in the event
2387 * that a disklabel isn't present.
2388 */
2389 static void
2390 raidmakedisklabel(struct raid_softc *rs)
2391 {
2392 struct disklabel *lp = rs->sc_dkdev.dk_label;
2393 db1_printf(("Making a label..\n"));
2394
2395 /*
2396 * For historical reasons, if there's no disklabel present
2397 * the raw partition must be marked FS_BSDFFS.
2398 */
2399
2400 lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
2401
2402 strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
2403
2404 lp->d_checksum = dkcksum(lp);
2405 }
2406 /*
2407 * Wait interruptibly for an exclusive lock.
2408 *
2409 * XXX
2410 * Several drivers do this; it should be abstracted and made MP-safe.
2411 * (Hmm... where have we seen this warning before :-> GO )
2412 */
2413 static int
2414 raidlock(struct raid_softc *rs)
2415 {
2416 int error;
2417
2418 while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2419 rs->sc_flags |= RAIDF_WANTED;
2420 if ((error =
2421 tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
2422 return (error);
2423 }
2424 rs->sc_flags |= RAIDF_LOCKED;
2425 return (0);
2426 }
2427 /*
2428 * Unlock and wake up any waiters.
2429 */
2430 static void
2431 raidunlock(struct raid_softc *rs)
2432 {
2433
2434 rs->sc_flags &= ~RAIDF_LOCKED;
2435 if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2436 rs->sc_flags &= ~RAIDF_WANTED;
2437 wakeup(rs);
2438 }
2439 }
2440
2441
2442 #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2443 #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
2444 #define RF_PARITY_MAP_SIZE RF_PARITYMAP_NBYTE
2445
2446 static daddr_t
2447 rf_component_info_offset(void)
2448 {
2449
2450 return RF_COMPONENT_INFO_OFFSET;
2451 }
2452
2453 static daddr_t
2454 rf_component_info_size(unsigned secsize)
2455 {
2456 daddr_t info_size;
2457
2458 KASSERT(secsize);
2459 if (secsize > RF_COMPONENT_INFO_SIZE)
2460 info_size = secsize;
2461 else
2462 info_size = RF_COMPONENT_INFO_SIZE;
2463
2464 return info_size;
2465 }
2466
2467 static daddr_t
2468 rf_parity_map_offset(RF_Raid_t *raidPtr)
2469 {
2470 daddr_t map_offset;
2471
2472 KASSERT(raidPtr->bytesPerSector);
2473 if (raidPtr->bytesPerSector > RF_COMPONENT_INFO_SIZE)
2474 map_offset = raidPtr->bytesPerSector;
2475 else
2476 map_offset = RF_COMPONENT_INFO_SIZE;
2477 map_offset += rf_component_info_offset();
2478
2479 return map_offset;
2480 }
2481
2482 static daddr_t
2483 rf_parity_map_size(RF_Raid_t *raidPtr)
2484 {
2485 daddr_t map_size;
2486
2487 if (raidPtr->bytesPerSector > RF_PARITY_MAP_SIZE)
2488 map_size = raidPtr->bytesPerSector;
2489 else
2490 map_size = RF_PARITY_MAP_SIZE;
2491
2492 return map_size;
2493 }
2494
2495 int
2496 raidmarkclean(RF_Raid_t *raidPtr, RF_RowCol_t col)
2497 {
2498 RF_ComponentLabel_t *clabel;
2499
2500 clabel = raidget_component_label(raidPtr, col);
2501 clabel->clean = RF_RAID_CLEAN;
2502 raidflush_component_label(raidPtr, col);
2503 return(0);
2504 }
2505
2506
2507 int
2508 raidmarkdirty(RF_Raid_t *raidPtr, RF_RowCol_t col)
2509 {
2510 RF_ComponentLabel_t *clabel;
2511
2512 clabel = raidget_component_label(raidPtr, col);
2513 clabel->clean = RF_RAID_DIRTY;
2514 raidflush_component_label(raidPtr, col);
2515 return(0);
2516 }
2517
/*
 * Re-read the on-disk component label for column `col' into the
 * in-core copy; returns the error from the underlying read.
 */
int
raidfetch_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	KASSERT(raidPtr->bytesPerSector);
	return raidread_component_label(raidPtr->bytesPerSector,
	    raidPtr->Disks[col].dev,
	    raidPtr->raid_cinfo[col].ci_vp,
	    &raidPtr->raid_cinfo[col].ci_label);
}
2527
/* Return a pointer to the in-core component label for column `col'. */
RF_ComponentLabel_t *
raidget_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	return &raidPtr->raid_cinfo[col].ci_label;
}
2533
2534 int
2535 raidflush_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2536 {
2537 RF_ComponentLabel_t *label;
2538
2539 label = &raidPtr->raid_cinfo[col].ci_label;
2540 label->mod_counter = raidPtr->mod_counter;
2541 #ifndef RF_NO_PARITY_MAP
2542 label->parity_map_modcount = label->mod_counter;
2543 #endif
2544 return raidwrite_component_label(raidPtr->bytesPerSector,
2545 raidPtr->Disks[col].dev,
2546 raidPtr->raid_cinfo[col].ci_vp, label);
2547 }
2548
2549
/*
 * Read a component label from the component info area of (dev, b_vp)
 * into *clabel.  The on-disk area may be larger than the label; only
 * sizeof(RF_ComponentLabel_t) bytes are copied out.
 */
static int
raidread_component_label(unsigned secsize, dev_t dev, struct vnode *b_vp,
    RF_ComponentLabel_t *clabel)
{
	return raidread_component_area(dev, b_vp, clabel,
	    sizeof(RF_ComponentLabel_t),
	    rf_component_info_offset(),
	    rf_component_info_size(secsize));
}
2559
2560 /* ARGSUSED */
2561 static int
2562 raidread_component_area(dev_t dev, struct vnode *b_vp, void *data,
2563 size_t msize, daddr_t offset, daddr_t dsize)
2564 {
2565 struct buf *bp;
2566 const struct bdevsw *bdev;
2567 int error;
2568
2569 /* XXX should probably ensure that we don't try to do this if
2570 someone has changed rf_protected_sectors. */
2571
2572 if (b_vp == NULL) {
2573 /* For whatever reason, this component is not valid.
2574 Don't try to read a component label from it. */
2575 return(EINVAL);
2576 }
2577
2578 /* get a block of the appropriate size... */
2579 bp = geteblk((int)dsize);
2580 bp->b_dev = dev;
2581
2582 /* get our ducks in a row for the read */
2583 bp->b_blkno = offset / DEV_BSIZE;
2584 bp->b_bcount = dsize;
2585 bp->b_flags |= B_READ;
2586 bp->b_resid = dsize;
2587
2588 bdev = bdevsw_lookup(bp->b_dev);
2589 if (bdev == NULL)
2590 return (ENXIO);
2591 (*bdev->d_strategy)(bp);
2592
2593 error = biowait(bp);
2594
2595 if (!error) {
2596 memcpy(data, bp->b_data, msize);
2597 }
2598
2599 brelse(bp, 0);
2600 return(error);
2601 }
2602
2603
/*
 * Write *clabel to the component info area of (dev, b_vp).  The write
 * is synchronous (final argument 0 = not async); unused bytes of the
 * on-disk area are zero-filled by raidwrite_component_area().
 */
static int
raidwrite_component_label(unsigned secsize, dev_t dev, struct vnode *b_vp,
    RF_ComponentLabel_t *clabel)
{
	return raidwrite_component_area(dev, b_vp, clabel,
	    sizeof(RF_ComponentLabel_t),
	    rf_component_info_offset(),
	    rf_component_info_size(secsize), 0);
}
2613
2614 /* ARGSUSED */
2615 static int
2616 raidwrite_component_area(dev_t dev, struct vnode *b_vp, void *data,
2617 size_t msize, daddr_t offset, daddr_t dsize, int asyncp)
2618 {
2619 struct buf *bp;
2620 const struct bdevsw *bdev;
2621 int error;
2622
2623 /* get a block of the appropriate size... */
2624 bp = geteblk((int)dsize);
2625 bp->b_dev = dev;
2626
2627 /* get our ducks in a row for the write */
2628 bp->b_blkno = offset / DEV_BSIZE;
2629 bp->b_bcount = dsize;
2630 bp->b_flags |= B_WRITE | (asyncp ? B_ASYNC : 0);
2631 bp->b_resid = dsize;
2632
2633 memset(bp->b_data, 0, dsize);
2634 memcpy(bp->b_data, data, msize);
2635
2636 bdev = bdevsw_lookup(bp->b_dev);
2637 if (bdev == NULL)
2638 return (ENXIO);
2639 (*bdev->d_strategy)(bp);
2640 if (asyncp)
2641 return 0;
2642 error = biowait(bp);
2643 brelse(bp, 0);
2644 if (error) {
2645 #if 1
2646 printf("Failed to write RAID component info!\n");
2647 #endif
2648 }
2649
2650 return(error);
2651 }
2652
/*
 * Write the in-core parity map *map to the parity-map area of every
 * live component.  Write errors are currently ignored (see XXXjld).
 */
void
rf_paritymap_kern_write(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
{
	int c;

	for (c = 0; c < raidPtr->numCol; c++) {
		/* Skip dead disks. */
		if (RF_DEAD_DISK(raidPtr->Disks[c].status))
			continue;
		/* XXXjld: what if an error occurs here? */
		raidwrite_component_area(raidPtr->Disks[c].dev,
		    raidPtr->raid_cinfo[c].ci_vp, map,
		    RF_PARITYMAP_NBYTE,
		    rf_parity_map_offset(raidPtr),
		    rf_parity_map_size(raidPtr), 0);
	}
}
2670
/*
 * Read the parity map from every live component and merge the copies
 * into *map: the first copy read is taken as-is, subsequent copies
 * are OR-merged via rf_paritymap_merge() so that a region dirty on
 * any component is dirty in the result.
 */
void
rf_paritymap_kern_read(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
{
	struct rf_paritymap_ondisk tmp;
	int c,first;

	first=1;
	for (c = 0; c < raidPtr->numCol; c++) {
		/* Skip dead disks. */
		if (RF_DEAD_DISK(raidPtr->Disks[c].status))
			continue;
		/* NOTE(review): read errors are not checked here; tmp
		 * would then hold stale data -- confirm acceptable. */
		raidread_component_area(raidPtr->Disks[c].dev,
		    raidPtr->raid_cinfo[c].ci_vp, &tmp,
		    RF_PARITYMAP_NBYTE,
		    rf_parity_map_offset(raidPtr),
		    rf_parity_map_size(raidPtr));
		if (first) {
			memcpy(map, &tmp, sizeof(*map));
			first = 0;
		} else {
			rf_paritymap_merge(map, &tmp);
		}
	}
}
2695
/*
 * Bump the array's modification counter and mark every live component
 * (and in-use spare) dirty in its component label.  Called when the
 * array starts taking writes, so an unclean shutdown is detectable.
 */
void
rf_markalldirty(RF_Raid_t *raidPtr)
{
	RF_ComponentLabel_t *clabel;
	int sparecol;
	int c;
	int j;
	int scol = -1;

	raidPtr->mod_counter++;
	for (c = 0; c < raidPtr->numCol; c++) {
		/* we don't want to touch (at all) a disk that has
		   failed */
		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
			clabel = raidget_component_label(raidPtr, c);
			if (clabel->status == rf_ds_spared) {
				/* XXX do something special...
				   but whatever you do, don't
				   try to access it!! */
			} else {
				raidmarkdirty(raidPtr, c);
			}
		}
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			/* Find the data column this spare stands in
			 * for. */
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}

			clabel = raidget_component_label(raidPtr, sparecol);
			/* make sure status is noted */

			raid_init_component_label(raidPtr, clabel);

			clabel->row = 0;
			/* NOTE(review): if no data column claims this
			 * spare, scol remains -1 (or a stale value from
			 * a previous iteration) -- confirm callers
			 * guarantee a match exists. */
			clabel->column = scol;
			/* Note: we *don't* change status from rf_ds_used_spare
			   to rf_ds_optimal */
			/* clabel.status = rf_ds_optimal; */

			raidmarkdirty(raidPtr, sparecol);
		}
	}
}
2755
2756
/*
 * Push current array status out to the component labels of all
 * optimal components and in-use spares.  When `final' is
 * RF_FINAL_COMPONENT_UPDATE (shutdown/unconfigure) and parity is
 * known clean, also mark each component clean.
 */
void
rf_update_component_labels(RF_Raid_t *raidPtr, int final)
{
	RF_ComponentLabel_t *clabel;
	int sparecol;
	int c;
	int j;
	int scol;

	scol = -1;

	/* XXX should do extra checks to make sure things really are clean,
	   rather than blindly setting the clean bit... */

	raidPtr->mod_counter++;

	for (c = 0; c < raidPtr->numCol; c++) {
		if (raidPtr->Disks[c].status == rf_ds_optimal) {
			clabel = raidget_component_label(raidPtr, c);
			/* make sure status is noted */
			clabel->status = rf_ds_optimal;

			/* note what unit we are configured as */
			clabel->last_unit = raidPtr->raidid;

			raidflush_component_label(raidPtr, c);
			if (final == RF_FINAL_COMPONENT_UPDATE) {
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean(raidPtr, c);
				}
			}
		}
		/* else we don't touch it.. */
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		/* Need to ensure that the reconstruct actually completed! */
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			/* Find the data column this spare replaced. */
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}

			/* XXX shouldn't *really* need this... */
			clabel = raidget_component_label(raidPtr, sparecol);
			/* make sure status is noted */

			raid_init_component_label(raidPtr, clabel);

			clabel->column = scol;
			clabel->status = rf_ds_optimal;
			clabel->last_unit = raidPtr->raidid;

			raidflush_component_label(raidPtr, sparecol);
			if (final == RF_FINAL_COMPONENT_UPDATE) {
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean(raidPtr, sparecol);
				}
			}
		}
	}
}
2831
2832 void
2833 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
2834 {
2835
2836 if (vp != NULL) {
2837 if (auto_configured == 1) {
2838 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2839 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2840 vput(vp);
2841
2842 } else {
2843 (void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred);
2844 }
2845 }
2846 }
2847
2848
2849 void
2850 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
2851 {
2852 int r,c;
2853 struct vnode *vp;
2854 int acd;
2855
2856
2857 /* We take this opportunity to close the vnodes like we should.. */
2858
2859 for (c = 0; c < raidPtr->numCol; c++) {
2860 vp = raidPtr->raid_cinfo[c].ci_vp;
2861 acd = raidPtr->Disks[c].auto_configured;
2862 rf_close_component(raidPtr, vp, acd);
2863 raidPtr->raid_cinfo[c].ci_vp = NULL;
2864 raidPtr->Disks[c].auto_configured = 0;
2865 }
2866
2867 for (r = 0; r < raidPtr->numSpare; r++) {
2868 vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
2869 acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
2870 rf_close_component(raidPtr, vp, acd);
2871 raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
2872 raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
2873 }
2874 }
2875
2876
/*
 * Kernel thread body: fail the component named in *req and, if
 * RF_FDFLAGS_RECON is set, reconstruct its contents onto a spare.
 * Frees req and exits the thread; never returns.
 */
void
rf_ReconThread(struct rf_recon_req *req)
{
	int s;
	RF_Raid_t *raidPtr;

	s = splbio();
	raidPtr = (RF_Raid_t *) req->raidPtr;
	raidPtr->recon_in_progress = 1;

	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));

	RF_Free(req, sizeof(*req));

	raidPtr->recon_in_progress = 0;
	splx(s);

	/* That's all... */
	kthread_exit(0);	/* does not return */
}
2898
/*
 * Kernel thread body: re-write (check/repair) all parity in the
 * array.  On success, marks parity good so component labels can be
 * flagged clean at shutdown.  Exits the thread; never returns.
 */
void
rf_RewriteParityThread(RF_Raid_t *raidPtr)
{
	int retcode;
	int s;

	raidPtr->parity_rewrite_stripes_done = 0;
	raidPtr->parity_rewrite_in_progress = 1;
	s = splbio();
	retcode = rf_RewriteParity(raidPtr);
	splx(s);
	if (retcode) {
		printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
	} else {
		/* set the clean bit!  If we shutdown correctly,
		   the clean bit on each component label will get
		   set */
		raidPtr->parity_good = RF_RAID_CLEAN;
	}
	raidPtr->parity_rewrite_in_progress = 0;

	/* Anyone waiting for us to stop?  If so, inform them... */
	if (raidPtr->waitShutdown) {
		wakeup(&raidPtr->parity_rewrite_in_progress);
	}

	/* That's all... */
	kthread_exit(0);	/* does not return */
}
2928
2929
/*
 * Kernel thread body: copy reconstructed data back from spares to
 * their original (replaced) components.  Exits the thread; never
 * returns.
 */
void
rf_CopybackThread(RF_Raid_t *raidPtr)
{
	int s;

	raidPtr->copyback_in_progress = 1;
	s = splbio();
	rf_CopybackReconstructedData(raidPtr);
	splx(s);
	raidPtr->copyback_in_progress = 0;

	/* That's all... */
	kthread_exit(0);	/* does not return */
}
2944
2945
/*
 * Kernel thread body: reconstruct the component in req->col in place
 * (onto the same disk, e.g. after a disk replacement).  Frees req and
 * exits the thread; never returns.
 */
void
rf_ReconstructInPlaceThread(struct rf_recon_req *req)
{
	int s;
	RF_Raid_t *raidPtr;

	s = splbio();
	raidPtr = req->raidPtr;
	raidPtr->recon_in_progress = 1;
	rf_ReconstructInPlace(raidPtr, req->col);
	RF_Free(req, sizeof(*req));
	raidPtr->recon_in_progress = 0;
	splx(s);

	/* That's all... */
	kthread_exit(0);	/* does not return */
}
2963
/*
 * Probe (dev, vp) for a RAIDframe component label.  If one is found
 * and looks reasonable, prepend a new RF_AutoConfig_t (which takes
 * ownership of vp and the label) to ac_list and return the new list
 * head.  Otherwise the label is freed and vp is closed/released.  On
 * out-of-memory the entire ac_list is freed and NULL is returned.
 */
static RF_AutoConfig_t *
rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
    const char *cname, RF_SectorCount_t size, uint64_t numsecs,
    unsigned secsize)
{
	int good_one = 0;
	RF_ComponentLabel_t *clabel;
	RF_AutoConfig_t *ac;

	clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_NOWAIT);
	if (clabel == NULL) {
oomem:
		    /* Out of memory: tear down everything collected so
		     * far (labels and list nodes, but not the vnodes). */
		    while(ac_list) {
			    ac = ac_list;
			    if (ac->clabel)
				    free(ac->clabel, M_RAIDFRAME);
			    ac_list = ac_list->next;
			    free(ac, M_RAIDFRAME);
		    }
		    printf("RAID auto config: out of memory!\n");
		    return NULL; /* XXX probably should panic? */
	}

	if (!raidread_component_label(secsize, dev, vp, clabel)) {
		/* Got the label.  Does it look reasonable? */
		if (rf_reasonable_label(clabel) &&
		    (clabel->partitionSize <= size)) {
#ifdef DEBUG
			printf("Component on: %s: %llu\n",
				cname, (unsigned long long)size);
			rf_print_component_label(clabel);
#endif
			/* if it's reasonable, add it, else ignore it. */
			ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
				M_NOWAIT);
			if (ac == NULL) {
				free(clabel, M_RAIDFRAME);
				goto oomem;
			}
			strlcpy(ac->devname, cname, sizeof(ac->devname));
			ac->dev = dev;
			ac->vp = vp;
			ac->clabel = clabel;
			ac->next = ac_list;
			ac_list = ac;
			good_one = 1;
		}
	}
	if (!good_one) {
		/* cleanup */
		free(clabel, M_RAIDFRAME);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
		vput(vp);
	}
	return ac_list;
}
3021
3022 RF_AutoConfig_t *
3023 rf_find_raid_components(void)
3024 {
3025 struct vnode *vp;
3026 struct disklabel label;
3027 device_t dv;
3028 deviter_t di;
3029 dev_t dev;
3030 int bmajor, bminor, wedge;
3031 int error;
3032 int i;
3033 RF_AutoConfig_t *ac_list;
3034 uint64_t numsecs;
3035 unsigned secsize;
3036
3037 RF_ASSERT(raidPtr->bytesPerSector < rf_component_info_offset());
3038
3039 /* initialize the AutoConfig list */
3040 ac_list = NULL;
3041
3042 /* we begin by trolling through *all* the devices on the system */
3043
3044 for (dv = deviter_first(&di, DEVITER_F_ROOT_FIRST); dv != NULL;
3045 dv = deviter_next(&di)) {
3046
3047 /* we are only interested in disks... */
3048 if (device_class(dv) != DV_DISK)
3049 continue;
3050
3051 /* we don't care about floppies... */
3052 if (device_is_a(dv, "fd")) {
3053 continue;
3054 }
3055
3056 /* we don't care about CD's... */
3057 if (device_is_a(dv, "cd")) {
3058 continue;
3059 }
3060
3061 /* we don't care about md's... */
3062 if (device_is_a(dv, "md")) {
3063 continue;
3064 }
3065
3066 /* hdfd is the Atari/Hades floppy driver */
3067 if (device_is_a(dv, "hdfd")) {
3068 continue;
3069 }
3070
3071 /* fdisa is the Atari/Milan floppy driver */
3072 if (device_is_a(dv, "fdisa")) {
3073 continue;
3074 }
3075
3076 /* need to find the device_name_to_block_device_major stuff */
3077 bmajor = devsw_name2blk(device_xname(dv), NULL, 0);
3078
3079 /* get a vnode for the raw partition of this disk */
3080
3081 wedge = device_is_a(dv, "dk");
3082 bminor = minor(device_unit(dv));
3083 dev = wedge ? makedev(bmajor, bminor) :
3084 MAKEDISKDEV(bmajor, bminor, RAW_PART);
3085 if (bdevvp(dev, &vp))
3086 panic("RAID can't alloc vnode");
3087
3088 error = VOP_OPEN(vp, FREAD, NOCRED);
3089
3090 if (error) {
3091 /* "Who cares." Continue looking
3092 for something that exists*/
3093 vput(vp);
3094 continue;
3095 }
3096
3097 error = getdisksize(vp, &numsecs, &secsize);
3098 if (error) {
3099 vput(vp);
3100 continue;
3101 }
3102 if (wedge) {
3103 struct dkwedge_info dkw;
3104 error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
3105 NOCRED);
3106 if (error) {
3107 printf("RAIDframe: can't get wedge info for "
3108 "dev %s (%d)\n", device_xname(dv), error);
3109 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3110 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
3111 vput(vp);
3112 continue;
3113 }
3114
3115 if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0) {
3116 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3117 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
3118 vput(vp);
3119 continue;
3120 }
3121
3122 ac_list = rf_get_component(ac_list, dev, vp,
3123 device_xname(dv), dkw.dkw_size, numsecs, secsize);
3124 continue;
3125 }
3126
3127 /* Ok, the disk exists. Go get the disklabel. */
3128 error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED);
3129 if (error) {
3130 /*
3131 * XXX can't happen - open() would
3132 * have errored out (or faked up one)
3133 */
3134 if (error != ENOTTY)
3135 printf("RAIDframe: can't get label for dev "
3136 "%s (%d)\n", device_xname(dv), error);
3137 }
3138
3139 /* don't need this any more. We'll allocate it again
3140 a little later if we really do... */
3141 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3142 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
3143 vput(vp);
3144
3145 if (error)
3146 continue;
3147
3148 for (i = 0; i < label.d_npartitions; i++) {
3149 char cname[sizeof(ac_list->devname)];
3150
3151 /* We only support partitions marked as RAID */
3152 if (label.d_partitions[i].p_fstype != FS_RAID)
3153 continue;
3154
3155 dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
3156 if (bdevvp(dev, &vp))
3157 panic("RAID can't alloc vnode");
3158
3159 error = VOP_OPEN(vp, FREAD, NOCRED);
3160 if (error) {
3161 /* Whatever... */
3162 vput(vp);
3163 continue;
3164 }
3165 snprintf(cname, sizeof(cname), "%s%c",
3166 device_xname(dv), 'a' + i);
3167 ac_list = rf_get_component(ac_list, dev, vp, cname,
3168 label.d_partitions[i].p_size, numsecs, secsize);
3169 }
3170 }
3171 deviter_release(&di);
3172 return ac_list;
3173 }
3174
3175
3176 static int
3177 rf_reasonable_label(RF_ComponentLabel_t *clabel)
3178 {
3179
3180 if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
3181 (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
3182 ((clabel->clean == RF_RAID_CLEAN) ||
3183 (clabel->clean == RF_RAID_DIRTY)) &&
3184 clabel->row >=0 &&
3185 clabel->column >= 0 &&
3186 clabel->num_rows > 0 &&
3187 clabel->num_columns > 0 &&
3188 clabel->row < clabel->num_rows &&
3189 clabel->column < clabel->num_columns &&
3190 clabel->blockSize > 0 &&
3191 clabel->numBlocks > 0) {
3192 /* label looks reasonable enough... */
3193 return(1);
3194 }
3195 return(0);
3196 }
3197
3198
#ifdef DEBUG
/*
 * Dump a component label to the console (debug kernels only).  The
 * 64-bit block count is split across numBlocks/numBlocksHi on disk
 * and reassembled here for printing.
 */
void
rf_print_component_label(RF_ComponentLabel_t *clabel)
{
	uint64_t numBlocks = clabel->numBlocks;

	numBlocks |= (uint64_t)clabel->numBlocksHi << 32;

	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
	       clabel->row, clabel->column,
	       clabel->num_rows, clabel->num_columns);
	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
	       clabel->version, clabel->serial_number,
	       clabel->mod_counter);
	printf("   Clean: %s Status: %d\n",
	       clabel->clean ? "Yes" : "No", clabel->status);
	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
	printf("   RAID Level: %c  blocksize: %d numBlocks: %"PRIu64"\n",
	       (char) clabel->parityConfig, clabel->blockSize, numBlocks);
	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No");
	printf("   Contains root partition: %s\n",
	       clabel->root_partition ? "Yes" : "No");
	printf("   Last configured as: raid%d\n", clabel->last_unit);
#if 0
	   printf("   Config order: %d\n", clabel->config_order);
#endif

}
#endif
3229
3230 RF_ConfigSet_t *
3231 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
3232 {
3233 RF_AutoConfig_t *ac;
3234 RF_ConfigSet_t *config_sets;
3235 RF_ConfigSet_t *cset;
3236 RF_AutoConfig_t *ac_next;
3237
3238
3239 config_sets = NULL;
3240
3241 /* Go through the AutoConfig list, and figure out which components
3242 belong to what sets. */
3243 ac = ac_list;
3244 while(ac!=NULL) {
3245 /* we're going to putz with ac->next, so save it here
3246 for use at the end of the loop */
3247 ac_next = ac->next;
3248
3249 if (config_sets == NULL) {
3250 /* will need at least this one... */
3251 config_sets = (RF_ConfigSet_t *)
3252 malloc(sizeof(RF_ConfigSet_t),
3253 M_RAIDFRAME, M_NOWAIT);
3254 if (config_sets == NULL) {
3255 panic("rf_create_auto_sets: No memory!");
3256 }
3257 /* this one is easy :) */
3258 config_sets->ac = ac;
3259 config_sets->next = NULL;
3260 config_sets->rootable = 0;
3261 ac->next = NULL;
3262 } else {
3263 /* which set does this component fit into? */
3264 cset = config_sets;
3265 while(cset!=NULL) {
3266 if (rf_does_it_fit(cset, ac)) {
3267 /* looks like it matches... */
3268 ac->next = cset->ac;
3269 cset->ac = ac;
3270 break;
3271 }
3272 cset = cset->next;
3273 }
3274 if (cset==NULL) {
3275 /* didn't find a match above... new set..*/
3276 cset = (RF_ConfigSet_t *)
3277 malloc(sizeof(RF_ConfigSet_t),
3278 M_RAIDFRAME, M_NOWAIT);
3279 if (cset == NULL) {
3280 panic("rf_create_auto_sets: No memory!");
3281 }
3282 cset->ac = ac;
3283 ac->next = NULL;
3284 cset->next = config_sets;
3285 cset->rootable = 0;
3286 config_sets = cset;
3287 }
3288 }
3289 ac = ac_next;
3290 }
3291
3292
3293 return(config_sets);
3294 }
3295
3296 static int
3297 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
3298 {
3299 RF_ComponentLabel_t *clabel1, *clabel2;
3300
3301 /* If this one matches the *first* one in the set, that's good
3302 enough, since the other members of the set would have been
3303 through here too... */
3304 /* note that we are not checking partitionSize here..
3305
3306 Note that we are also not checking the mod_counters here.
3307 If everything else matches execpt the mod_counter, that's
3308 good enough for this test. We will deal with the mod_counters
3309 a little later in the autoconfiguration process.
3310
3311 (clabel1->mod_counter == clabel2->mod_counter) &&
3312
3313 The reason we don't check for this is that failed disks
3314 will have lower modification counts. If those disks are
3315 not added to the set they used to belong to, then they will
3316 form their own set, which may result in 2 different sets,
3317 for example, competing to be configured at raid0, and
3318 perhaps competing to be the root filesystem set. If the
3319 wrong ones get configured, or both attempt to become /,
3320 weird behaviour and or serious lossage will occur. Thus we
3321 need to bring them into the fold here, and kick them out at
3322 a later point.
3323
3324 */
3325
3326 clabel1 = cset->ac->clabel;
3327 clabel2 = ac->clabel;
3328 if ((clabel1->version == clabel2->version) &&
3329 (clabel1->serial_number == clabel2->serial_number) &&
3330 (clabel1->num_rows == clabel2->num_rows) &&
3331 (clabel1->num_columns == clabel2->num_columns) &&
3332 (clabel1->sectPerSU == clabel2->sectPerSU) &&
3333 (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
3334 (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
3335 (clabel1->parityConfig == clabel2->parityConfig) &&
3336 (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
3337 (clabel1->blockSize == clabel2->blockSize) &&
3338 (clabel1->numBlocks == clabel2->numBlocks) &&
3339 (clabel1->numBlocksHi == clabel2->numBlocksHi) &&
3340 (clabel1->autoconfigure == clabel2->autoconfigure) &&
3341 (clabel1->root_partition == clabel2->root_partition) &&
3342 (clabel1->last_unit == clabel2->last_unit) &&
3343 (clabel1->config_order == clabel2->config_order)) {
3344 /* if it get's here, it almost *has* to be a match */
3345 } else {
3346 /* it's not consistent with somebody in the set..
3347 punt */
3348 return(0);
3349 }
3350 /* all was fine.. it must fit... */
3351 return(1);
3352 }
3353
/*
 * Check whether configuration set 'cset' has enough live components to
 * be configured.  Returns 1 if the set is viable, 0 if too many
 * components are missing for its parity type.
 */
int
rf_have_enough_components(RF_ConfigSet_t *cset)
{
	RF_AutoConfig_t *ac;
	RF_AutoConfig_t *auto_config;
	RF_ComponentLabel_t *clabel;
	int c;
	int num_cols;		/* columns expected, per the first label */
	int num_missing;	/* columns with no current component found */
	int mod_counter;	/* highest mod_counter seen in the set */
	int mod_counter_found;
	int even_pair_failed;	/* RAID 1: even member of current pair failed */
	char parity_type;


	/* check to see that we have enough 'live' components
	   of this set. If so, we can configure it if necessary */

	num_cols = cset->ac->clabel->num_columns;
	parity_type = cset->ac->clabel->parityConfig;

	/* XXX Check for duplicate components!?!?!? */

	/* Determine what the mod_counter is supposed to be for this set.
	   Components with stale (lower) mod_counters are treated as
	   missing below. */

	mod_counter_found = 0;
	mod_counter = 0;
	ac = cset->ac;
	while(ac!=NULL) {
		if (mod_counter_found==0) {
			/* first component seeds the value */
			mod_counter = ac->clabel->mod_counter;
			mod_counter_found = 1;
		} else {
			if (ac->clabel->mod_counter > mod_counter) {
				mod_counter = ac->clabel->mod_counter;
			}
		}
		ac = ac->next;
	}

	num_missing = 0;
	auto_config = cset->ac;

	even_pair_failed = 0;
	/* for each expected column, look for a component with a
	   current mod_counter */
	for(c=0; c<num_cols; c++) {
		ac = auto_config;
		while(ac!=NULL) {
			if ((ac->clabel->column == c) &&
			    (ac->clabel->mod_counter == mod_counter)) {
				/* it's this one... */
#ifdef DEBUG
				printf("Found: %s at %d\n",
				       ac->devname,c);
#endif
				break;
			}
			ac=ac->next;
		}
		if (ac==NULL) {
				/* Didn't find one here! */
				/* special case for RAID 1, especially
				   where there are more than 2
				   components (where RAIDframe treats
				   things a little differently :( ) */
			if (parity_type == '1') {
				if (c%2 == 0) { /* even component */
					even_pair_failed = 1;
				} else { /* odd component. If
					    we're failed, and
					    so is the even
					    component, it's
					    "Good Night, Charlie" */
					if (even_pair_failed == 1) {
						return(0);
					}
				}
			} else {
				/* normal accounting */
				num_missing++;
			}
		}
		if ((parity_type == '1') && (c%2 == 1)) {
			/* Just finished the odd component of a mirror
			   pair without bailing, so that pair has at
			   least one live member: reset the
			   even_pair_failed flag and go on to the next
			   pair.... */
			even_pair_failed = 0;
		}
	}

	clabel = cset->ac->clabel;

	/* RAID 0 tolerates no missing components; RAID 4/5 tolerate one.
	   (RAID 1 was fully handled by the pair logic above.) */
	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
		/* XXX this needs to be made *much* more general */
		/* Too many failures */
		return(0);
	}
	/* otherwise, all is well, and we've got enough to take a kick
	   at autoconfiguring this set */
	return(1);
}
3456
3457 void
3458 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
3459 RF_Raid_t *raidPtr)
3460 {
3461 RF_ComponentLabel_t *clabel;
3462 int i;
3463
3464 clabel = ac->clabel;
3465
3466 /* 1. Fill in the common stuff */
3467 config->numRow = clabel->num_rows = 1;
3468 config->numCol = clabel->num_columns;
3469 config->numSpare = 0; /* XXX should this be set here? */
3470 config->sectPerSU = clabel->sectPerSU;
3471 config->SUsPerPU = clabel->SUsPerPU;
3472 config->SUsPerRU = clabel->SUsPerRU;
3473 config->parityConfig = clabel->parityConfig;
3474 /* XXX... */
3475 strcpy(config->diskQueueType,"fifo");
3476 config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3477 config->layoutSpecificSize = 0; /* XXX ?? */
3478
3479 while(ac!=NULL) {
3480 /* row/col values will be in range due to the checks
3481 in reasonable_label() */
3482 strcpy(config->devnames[0][ac->clabel->column],
3483 ac->devname);
3484 ac = ac->next;
3485 }
3486
3487 for(i=0;i<RF_MAXDBGV;i++) {
3488 config->debugVars[i][0] = 0;
3489 }
3490 }
3491
3492 int
3493 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
3494 {
3495 RF_ComponentLabel_t *clabel;
3496 int column;
3497 int sparecol;
3498
3499 raidPtr->autoconfigure = new_value;
3500
3501 for(column=0; column<raidPtr->numCol; column++) {
3502 if (raidPtr->Disks[column].status == rf_ds_optimal) {
3503 clabel = raidget_component_label(raidPtr, column);
3504 clabel->autoconfigure = new_value;
3505 raidflush_component_label(raidPtr, column);
3506 }
3507 }
3508 for(column = 0; column < raidPtr->numSpare ; column++) {
3509 sparecol = raidPtr->numCol + column;
3510 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3511 clabel = raidget_component_label(raidPtr, sparecol);
3512 clabel->autoconfigure = new_value;
3513 raidflush_component_label(raidPtr, sparecol);
3514 }
3515 }
3516 return(new_value);
3517 }
3518
3519 int
3520 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
3521 {
3522 RF_ComponentLabel_t *clabel;
3523 int column;
3524 int sparecol;
3525
3526 raidPtr->root_partition = new_value;
3527 for(column=0; column<raidPtr->numCol; column++) {
3528 if (raidPtr->Disks[column].status == rf_ds_optimal) {
3529 clabel = raidget_component_label(raidPtr, column);
3530 clabel->root_partition = new_value;
3531 raidflush_component_label(raidPtr, column);
3532 }
3533 }
3534 for(column = 0; column < raidPtr->numSpare ; column++) {
3535 sparecol = raidPtr->numCol + column;
3536 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3537 clabel = raidget_component_label(raidPtr, sparecol);
3538 clabel->root_partition = new_value;
3539 raidflush_component_label(raidPtr, sparecol);
3540 }
3541 }
3542 return(new_value);
3543 }
3544
3545 void
3546 rf_release_all_vps(RF_ConfigSet_t *cset)
3547 {
3548 RF_AutoConfig_t *ac;
3549
3550 ac = cset->ac;
3551 while(ac!=NULL) {
3552 /* Close the vp, and give it back */
3553 if (ac->vp) {
3554 vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3555 VOP_CLOSE(ac->vp, FREAD, NOCRED);
3556 vput(ac->vp);
3557 ac->vp = NULL;
3558 }
3559 ac = ac->next;
3560 }
3561 }
3562
3563
3564 void
3565 rf_cleanup_config_set(RF_ConfigSet_t *cset)
3566 {
3567 RF_AutoConfig_t *ac;
3568 RF_AutoConfig_t *next_ac;
3569
3570 ac = cset->ac;
3571 while(ac!=NULL) {
3572 next_ac = ac->next;
3573 /* nuke the label */
3574 free(ac->clabel, M_RAIDFRAME);
3575 /* cleanup the config structure */
3576 free(ac, M_RAIDFRAME);
3577 /* "next.." */
3578 ac = next_ac;
3579 }
3580 /* and, finally, nuke the config set */
3581 free(cset, M_RAIDFRAME);
3582 }
3583
3584
3585 void
3586 raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
3587 {
3588 /* current version number */
3589 clabel->version = RF_COMPONENT_LABEL_VERSION;
3590 clabel->serial_number = raidPtr->serial_number;
3591 clabel->mod_counter = raidPtr->mod_counter;
3592
3593 clabel->num_rows = 1;
3594 clabel->num_columns = raidPtr->numCol;
3595 clabel->clean = RF_RAID_DIRTY; /* not clean */
3596 clabel->status = rf_ds_optimal; /* "It's good!" */
3597
3598 clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
3599 clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
3600 clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
3601
3602 clabel->blockSize = raidPtr->bytesPerSector;
3603 clabel->numBlocks = raidPtr->sectorsPerDisk;
3604 clabel->numBlocksHi = raidPtr->sectorsPerDisk >> 32;
3605
3606 /* XXX not portable */
3607 clabel->parityConfig = raidPtr->Layout.map->parityConfig;
3608 clabel->maxOutstanding = raidPtr->maxOutstanding;
3609 clabel->autoconfigure = raidPtr->autoconfigure;
3610 clabel->root_partition = raidPtr->root_partition;
3611 clabel->last_unit = raidPtr->raidid;
3612 clabel->config_order = raidPtr->config_order;
3613
3614 #ifndef RF_NO_PARITY_MAP
3615 rf_paritymap_init_label(raidPtr->parity_map, clabel);
3616 #endif
3617 }
3618
3619 int
3620 rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
3621 {
3622 RF_Raid_t *raidPtr;
3623 RF_Config_t *config;
3624 int raidID;
3625 int retcode;
3626
3627 #ifdef DEBUG
3628 printf("RAID autoconfigure\n");
3629 #endif
3630
3631 retcode = 0;
3632 *unit = -1;
3633
3634 /* 1. Create a config structure */
3635
3636 config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
3637 M_RAIDFRAME,
3638 M_NOWAIT);
3639 if (config==NULL) {
3640 printf("Out of mem!?!?\n");
3641 /* XXX do something more intelligent here. */
3642 return(1);
3643 }
3644
3645 memset(config, 0, sizeof(RF_Config_t));
3646
3647 /*
3648 2. Figure out what RAID ID this one is supposed to live at
3649 See if we can get the same RAID dev that it was configured
3650 on last time..
3651 */
3652
3653 raidID = cset->ac->clabel->last_unit;
3654 if ((raidID < 0) || (raidID >= numraid)) {
3655 /* let's not wander off into lala land. */
3656 raidID = numraid - 1;
3657 }
3658 if (raidPtrs[raidID]->valid != 0) {
3659
3660 /*
3661 Nope... Go looking for an alternative...
3662 Start high so we don't immediately use raid0 if that's
3663 not taken.
3664 */
3665
3666 for(raidID = numraid - 1; raidID >= 0; raidID--) {
3667 if (raidPtrs[raidID]->valid == 0) {
3668 /* can use this one! */
3669 break;
3670 }
3671 }
3672 }
3673
3674 if (raidID < 0) {
3675 /* punt... */
3676 printf("Unable to auto configure this set!\n");
3677 printf("(Out of RAID devs!)\n");
3678 free(config, M_RAIDFRAME);
3679 return(1);
3680 }
3681
3682 #ifdef DEBUG
3683 printf("Configuring raid%d:\n",raidID);
3684 #endif
3685
3686 raidPtr = raidPtrs[raidID];
3687
3688 /* XXX all this stuff should be done SOMEWHERE ELSE! */
3689 raidPtr->raidid = raidID;
3690 raidPtr->openings = RAIDOUTSTANDING;
3691
3692 /* 3. Build the configuration structure */
3693 rf_create_configuration(cset->ac, config, raidPtr);
3694
3695 /* 4. Do the configuration */
3696 retcode = rf_Configure(raidPtr, config, cset->ac);
3697
3698 if (retcode == 0) {
3699
3700 raidinit(raidPtrs[raidID]);
3701
3702 rf_markalldirty(raidPtrs[raidID]);
3703 raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
3704 if (cset->ac->clabel->root_partition==1) {
3705 /* everything configured just fine. Make a note
3706 that this set is eligible to be root. */
3707 cset->rootable = 1;
3708 /* XXX do this here? */
3709 raidPtrs[raidID]->root_partition = 1;
3710 }
3711 }
3712
3713 /* 5. Cleanup */
3714 free(config, M_RAIDFRAME);
3715
3716 *unit = raidID;
3717 return(retcode);
3718 }
3719
3720 void
3721 rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
3722 {
3723 struct buf *bp;
3724
3725 bp = (struct buf *)desc->bp;
3726 disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
3727 (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
3728 }
3729
/*
 * Initialize one of the driver's fixed-size item pools at IPL_BIO,
 * pre-allocate xmin items, and set the low/high watermarks.
 *
 * Note: pool_prime() runs before pool_setlowat() on purpose, so the
 * pool is already stocked when the low watermark takes effect.
 */
void
rf_pool_init(struct pool *p, size_t size, const char *w_chan,
	     size_t xmin, size_t xmax)
{
	pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
	pool_sethiwat(p, xmax);
	pool_prime(p, xmin);
	pool_setlowat(p, xmin);
}
3739
3740 /*
3741 * rf_buf_queue_check(int raidid) -- looks into the buf_queue to see
3742 * if there is IO pending and if that IO could possibly be done for a
3743 * given RAID set. Returns 0 if IO is waiting and can be done, 1
3744 * otherwise.
3745 *
3746 */
3747
3748 int
3749 rf_buf_queue_check(int raidid)
3750 {
3751 if ((bufq_peek(raid_softc[raidid].buf_queue) != NULL) &&
3752 raidPtrs[raidid]->openings > 0) {
3753 /* there is work to do */
3754 return 0;
3755 }
3756 /* default is nothing to do */
3757 return 1;
3758 }
3759
3760 int
3761 rf_getdisksize(struct vnode *vp, struct lwp *l, RF_RaidDisk_t *diskPtr)
3762 {
3763 uint64_t numsecs;
3764 unsigned secsize;
3765 int error;
3766
3767 error = getdisksize(vp, &numsecs, &secsize);
3768 if (error == 0) {
3769 diskPtr->blockSize = secsize;
3770 diskPtr->numBlocks = numsecs - rf_protectedSectors;
3771 diskPtr->partitionSize = numsecs;
3772 return 0;
3773 }
3774 return error;
3775 }
3776
/*
 * Autoconfiguration match function: raid pseudo-devices are created
 * programmatically, so every match attempt succeeds.
 */
static int
raid_match(device_t self, cfdata_t cfdata, void *aux)
{
	return 1;
}
3782
/*
 * Autoconfiguration attach function: nothing to do here — real setup
 * happens when a set is configured (see raidinit()/rf_auto_config_set).
 */
static void
raid_attach(device_t parent, device_t self, void *aux)
{

}
3788
3789
3790 static int
3791 raid_detach(device_t self, int flags)
3792 {
3793 int error;
3794 struct raid_softc *rs = &raid_softc[device_unit(self)];
3795
3796 if ((error = raidlock(rs)) != 0)
3797 return (error);
3798
3799 error = raid_detach_unlocked(rs);
3800
3801 raidunlock(rs);
3802
3803 return error;
3804 }
3805
/*
 * Publish a synthesized disk geometry for the RAID unit via proplib,
 * both as a device property ("disk-info") and as the disk(9) dk_info.
 *
 * The geometry is fabricated: sectors-per-track is the data sectors
 * per stripe and tracks-per-cylinder is 4 * numCol, chosen to give
 * plausible cylinder counts — it does not reflect real hardware.
 */
static void
rf_set_properties(struct raid_softc *rs, RF_Raid_t *raidPtr)
{
	prop_dictionary_t disk_info, odisk_info, geom;
	disk_info = prop_dictionary_create();
	geom = prop_dictionary_create();
	prop_dictionary_set_uint64(geom, "sectors-per-unit",
				   raidPtr->totalSectors);
	prop_dictionary_set_uint32(geom, "sector-size",
				   raidPtr->bytesPerSector);

	prop_dictionary_set_uint16(geom, "sectors-per-track",
				   raidPtr->Layout.dataSectorsPerStripe);
	prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
				   4 * raidPtr->numCol);

	prop_dictionary_set_uint64(geom, "cylinders-per-unit",
	    raidPtr->totalSectors / (raidPtr->Layout.dataSectorsPerStripe *
	    (4 * raidPtr->numCol)));

	/* disk_info takes its own reference on geom, so release ours */
	prop_dictionary_set(disk_info, "geometry", geom);
	prop_object_release(geom);
	prop_dictionary_set(device_properties(rs->sc_dev),
			    "disk-info", disk_info);
	/* swap in the new dk_info, then drop the old one (if any) */
	odisk_info = rs->sc_dkdev.dk_info;
	rs->sc_dkdev.dk_info = disk_info;
	if (odisk_info)
		prop_object_release(odisk_info);
}
3835
3836 /*
3837 * Implement forwarding of the DIOCCACHESYNC ioctl to each of the components.
3838 * We end up returning whatever error was returned by the first cache flush
3839 * that fails.
3840 */
3841
3842 int
3843 rf_sync_component_caches(RF_Raid_t *raidPtr)
3844 {
3845 int c, sparecol;
3846 int e,error;
3847 int force = 1;
3848
3849 error = 0;
3850 for (c = 0; c < raidPtr->numCol; c++) {
3851 if (raidPtr->Disks[c].status == rf_ds_optimal) {
3852 e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp, DIOCCACHESYNC,
3853 &force, FWRITE, NOCRED);
3854 if (e) {
3855 if (e != ENODEV)
3856 printf("raid%d: cache flush to component %s failed.\n",
3857 raidPtr->raidid, raidPtr->Disks[c].devname);
3858 if (error == 0) {
3859 error = e;
3860 }
3861 }
3862 }
3863 }
3864
3865 for( c = 0; c < raidPtr->numSpare ; c++) {
3866 sparecol = raidPtr->numCol + c;
3867 /* Need to ensure that the reconstruct actually completed! */
3868 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3869 e = VOP_IOCTL(raidPtr->raid_cinfo[sparecol].ci_vp,
3870 DIOCCACHESYNC, &force, FWRITE, NOCRED);
3871 if (e) {
3872 if (e != ENODEV)
3873 printf("raid%d: cache flush to component %s failed.\n",
3874 raidPtr->raidid, raidPtr->Disks[sparecol].devname);
3875 if (error == 0) {
3876 error = e;
3877 }
3878 }
3879 }
3880 }
3881 return error;
3882 }
3883