1 1.410.4.3 martin /* $NetBSD: rf_netbsdkintf.c,v 1.410.4.3 2023/10/18 11:44:22 martin Exp $ */
2 1.281 rmind
3 1.1 oster /*-
4 1.295 erh * Copyright (c) 1996, 1997, 1998, 2008-2011 The NetBSD Foundation, Inc.
5 1.1 oster * All rights reserved.
6 1.1 oster *
7 1.1 oster * This code is derived from software contributed to The NetBSD Foundation
8 1.1 oster * by Greg Oster; Jason R. Thorpe.
9 1.1 oster *
10 1.1 oster * Redistribution and use in source and binary forms, with or without
11 1.1 oster * modification, are permitted provided that the following conditions
12 1.1 oster * are met:
13 1.1 oster * 1. Redistributions of source code must retain the above copyright
14 1.1 oster * notice, this list of conditions and the following disclaimer.
15 1.1 oster * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 oster * notice, this list of conditions and the following disclaimer in the
17 1.1 oster * documentation and/or other materials provided with the distribution.
18 1.1 oster *
19 1.1 oster * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 oster * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 oster * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 oster * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 oster * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 oster * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 oster * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 oster * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 oster * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 oster * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 oster * POSSIBILITY OF SUCH DAMAGE.
30 1.1 oster */
31 1.1 oster
32 1.1 oster /*
33 1.281 rmind * Copyright (c) 1988 University of Utah.
34 1.1 oster * Copyright (c) 1990, 1993
35 1.1 oster * The Regents of the University of California. All rights reserved.
36 1.1 oster *
37 1.1 oster * This code is derived from software contributed to Berkeley by
38 1.1 oster * the Systems Programming Group of the University of Utah Computer
39 1.1 oster * Science Department.
40 1.1 oster *
41 1.1 oster * Redistribution and use in source and binary forms, with or without
42 1.1 oster * modification, are permitted provided that the following conditions
43 1.1 oster * are met:
44 1.1 oster * 1. Redistributions of source code must retain the above copyright
45 1.1 oster * notice, this list of conditions and the following disclaimer.
46 1.1 oster * 2. Redistributions in binary form must reproduce the above copyright
47 1.1 oster * notice, this list of conditions and the following disclaimer in the
48 1.1 oster * documentation and/or other materials provided with the distribution.
49 1.162 agc * 3. Neither the name of the University nor the names of its contributors
50 1.162 agc * may be used to endorse or promote products derived from this software
51 1.162 agc * without specific prior written permission.
52 1.162 agc *
53 1.162 agc * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 1.162 agc * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 1.162 agc * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 1.162 agc * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 1.162 agc * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 1.162 agc * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 1.162 agc * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 1.162 agc * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 1.162 agc * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 1.162 agc * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 1.162 agc * SUCH DAMAGE.
64 1.162 agc *
65 1.405 wiz * from: Utah $Hdr: cd.c 1.6 90/11/28$
66 1.162 agc *
67 1.162 agc * @(#)cd.c 8.2 (Berkeley) 11/16/93
68 1.162 agc */
69 1.162 agc
70 1.162 agc /*
71 1.1 oster * Copyright (c) 1995 Carnegie-Mellon University.
72 1.1 oster * All rights reserved.
73 1.1 oster *
74 1.1 oster * Authors: Mark Holland, Jim Zelenka
75 1.1 oster *
76 1.1 oster * Permission to use, copy, modify and distribute this software and
77 1.1 oster * its documentation is hereby granted, provided that both the copyright
78 1.1 oster * notice and this permission notice appear in all copies of the
79 1.1 oster * software, derivative works or modified versions, and any portions
80 1.1 oster * thereof, and that both notices appear in supporting documentation.
81 1.1 oster *
82 1.1 oster * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
83 1.1 oster * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
84 1.1 oster * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
85 1.1 oster *
86 1.1 oster * Carnegie Mellon requests users of this software to return to
87 1.1 oster *
88 1.1 oster * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
89 1.1 oster * School of Computer Science
90 1.1 oster * Carnegie Mellon University
91 1.1 oster * Pittsburgh PA 15213-3890
92 1.1 oster *
93 1.1 oster * any improvements or extensions that they make and grant Carnegie the
94 1.1 oster * rights to redistribute these changes.
95 1.1 oster */
96 1.1 oster
97 1.1 oster /***********************************************************
98 1.1 oster *
99 1.1 oster * rf_kintf.c -- the kernel interface routines for RAIDframe
100 1.1 oster *
101 1.1 oster ***********************************************************/
102 1.112 lukem
103 1.112 lukem #include <sys/cdefs.h>
104 1.410.4.3 martin __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.410.4.3 2023/10/18 11:44:22 martin Exp $");
105 1.251 ad
106 1.251 ad #ifdef _KERNEL_OPT
107 1.251 ad #include "opt_raid_autoconfig.h"
108 1.363 mrg #include "opt_compat_netbsd32.h"
109 1.251 ad #endif
110 1.1 oster
111 1.113 lukem #include <sys/param.h>
112 1.1 oster #include <sys/errno.h>
113 1.1 oster #include <sys/pool.h>
114 1.152 thorpej #include <sys/proc.h>
115 1.1 oster #include <sys/queue.h>
116 1.1 oster #include <sys/disk.h>
117 1.1 oster #include <sys/device.h>
118 1.1 oster #include <sys/stat.h>
119 1.1 oster #include <sys/ioctl.h>
120 1.1 oster #include <sys/fcntl.h>
121 1.1 oster #include <sys/systm.h>
122 1.1 oster #include <sys/vnode.h>
123 1.1 oster #include <sys/disklabel.h>
124 1.1 oster #include <sys/conf.h>
125 1.1 oster #include <sys/buf.h>
126 1.182 yamt #include <sys/bufq.h>
127 1.65 oster #include <sys/reboot.h>
128 1.208 elad #include <sys/kauth.h>
129 1.327 pgoyette #include <sys/module.h>
130 1.358 pgoyette #include <sys/compat_stub.h>
131 1.8 oster
132 1.234 oster #include <prop/proplib.h>
133 1.234 oster
134 1.110 oster #include <dev/raidframe/raidframevar.h>
135 1.110 oster #include <dev/raidframe/raidframeio.h>
136 1.269 jld #include <dev/raidframe/rf_paritymap.h>
137 1.251 ad
138 1.1 oster #include "rf_raid.h"
139 1.44 oster #include "rf_copyback.h"
140 1.1 oster #include "rf_dag.h"
141 1.1 oster #include "rf_dagflags.h"
142 1.99 oster #include "rf_desc.h"
143 1.1 oster #include "rf_diskqueue.h"
144 1.1 oster #include "rf_etimer.h"
145 1.1 oster #include "rf_general.h"
146 1.1 oster #include "rf_kintf.h"
147 1.1 oster #include "rf_options.h"
148 1.1 oster #include "rf_driver.h"
149 1.1 oster #include "rf_parityscan.h"
150 1.1 oster #include "rf_threadstuff.h"
151 1.1 oster
152 1.325 christos #include "ioconf.h"
153 1.325 christos
154 1.133 oster #ifdef DEBUG
155 1.9 oster int rf_kdebug_level = 0;
156 1.1 oster #define db1_printf(a) if (rf_kdebug_level > 0) printf a
157 1.9 oster #else /* DEBUG */
158 1.1 oster #define db1_printf(a) { }
159 1.9 oster #endif /* DEBUG */
160 1.1 oster
161 1.410.4.3 martin #define DEVICE_XNAME(dev) ((dev) ? device_xname(dev) : "null")
162 1.410.4.3 martin
163 1.249 oster #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
164 1.289 mrg static rf_declare_mutex2(rf_sparet_wait_mutex);
165 1.287 mrg static rf_declare_cond2(rf_sparet_wait_cv);
166 1.287 mrg static rf_declare_cond2(rf_sparet_resp_cv);
167 1.1 oster
168 1.10 oster static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a
169 1.10 oster * spare table */
170 1.10 oster static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from
171 1.10 oster * installation process */
172 1.249 oster #endif
173 1.153 thorpej
174 1.384 jdolecek const int rf_b_pass = (B_PHYS|B_RAW|B_MEDIA_FLAGS);
175 1.384 jdolecek
176 1.153 thorpej MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
177 1.10 oster
178 1.1 oster /* prototypes */
179 1.187 christos static void KernelWakeupFunc(struct buf *);
180 1.187 christos static void InitBP(struct buf *, struct vnode *, unsigned,
181 1.225 christos dev_t, RF_SectorNum_t, RF_SectorCount_t, void *, void (*) (struct buf *),
182 1.384 jdolecek void *, int);
183 1.300 christos static void raidinit(struct raid_softc *);
184 1.335 mlelstv static int raiddoaccess(RF_Raid_t *raidPtr, struct buf *bp);
185 1.348 jdolecek static int rf_get_component_caches(RF_Raid_t *raidPtr, int *);
186 1.1 oster
187 1.261 dyoung static int raid_match(device_t, cfdata_t, void *);
188 1.261 dyoung static void raid_attach(device_t, device_t, void *);
189 1.261 dyoung static int raid_detach(device_t, int);
190 1.130 gehenna
191 1.385 riastrad static int raidread_component_area(dev_t, struct vnode *, void *, size_t,
192 1.269 jld daddr_t, daddr_t);
193 1.269 jld static int raidwrite_component_area(dev_t, struct vnode *, void *, size_t,
194 1.269 jld daddr_t, daddr_t, int);
195 1.269 jld
196 1.276 mrg static int raidwrite_component_label(unsigned,
197 1.276 mrg dev_t, struct vnode *, RF_ComponentLabel_t *);
198 1.276 mrg static int raidread_component_label(unsigned,
199 1.276 mrg dev_t, struct vnode *, RF_ComponentLabel_t *);
200 1.269 jld
201 1.335 mlelstv static int raid_diskstart(device_t, struct buf *bp);
202 1.335 mlelstv static int raid_dumpblocks(device_t, void *, daddr_t, int);
203 1.335 mlelstv static int raid_lastclose(device_t);
204 1.269 jld
205 1.324 mrg static dev_type_open(raidopen);
206 1.324 mrg static dev_type_close(raidclose);
207 1.324 mrg static dev_type_read(raidread);
208 1.324 mrg static dev_type_write(raidwrite);
209 1.324 mrg static dev_type_ioctl(raidioctl);
210 1.324 mrg static dev_type_strategy(raidstrategy);
211 1.324 mrg static dev_type_dump(raiddump);
212 1.324 mrg static dev_type_size(raidsize);
213 1.130 gehenna
214 1.130 gehenna const struct bdevsw raid_bdevsw = {
215 1.305 dholland .d_open = raidopen,
216 1.305 dholland .d_close = raidclose,
217 1.305 dholland .d_strategy = raidstrategy,
218 1.305 dholland .d_ioctl = raidioctl,
219 1.305 dholland .d_dump = raiddump,
220 1.305 dholland .d_psize = raidsize,
221 1.311 dholland .d_discard = nodiscard,
222 1.305 dholland .d_flag = D_DISK
223 1.130 gehenna };
224 1.130 gehenna
225 1.130 gehenna const struct cdevsw raid_cdevsw = {
226 1.305 dholland .d_open = raidopen,
227 1.305 dholland .d_close = raidclose,
228 1.305 dholland .d_read = raidread,
229 1.305 dholland .d_write = raidwrite,
230 1.305 dholland .d_ioctl = raidioctl,
231 1.305 dholland .d_stop = nostop,
232 1.305 dholland .d_tty = notty,
233 1.305 dholland .d_poll = nopoll,
234 1.305 dholland .d_mmap = nommap,
235 1.305 dholland .d_kqfilter = nokqfilter,
236 1.312 dholland .d_discard = nodiscard,
237 1.305 dholland .d_flag = D_DISK
238 1.130 gehenna };
239 1.1 oster
240 1.323 mlelstv static struct dkdriver rf_dkdriver = {
241 1.335 mlelstv .d_open = raidopen,
242 1.335 mlelstv .d_close = raidclose,
243 1.323 mlelstv .d_strategy = raidstrategy,
244 1.335 mlelstv .d_diskstart = raid_diskstart,
245 1.335 mlelstv .d_dumpblocks = raid_dumpblocks,
246 1.335 mlelstv .d_lastclose = raid_lastclose,
247 1.323 mlelstv .d_minphys = minphys
248 1.323 mlelstv };
249 1.235 oster
250 1.1 oster #define raidunit(x) DISKUNIT(x)
251 1.335 mlelstv #define raidsoftc(dev) (((struct raid_softc *)device_private(dev))->sc_r.softc)
252 1.1 oster
253 1.202 oster extern struct cfdriver raid_cd;
254 1.266 dyoung CFATTACH_DECL3_NEW(raid, sizeof(struct raid_softc),
255 1.266 dyoung raid_match, raid_attach, raid_detach, NULL, NULL, NULL,
256 1.266 dyoung DVF_DETACH_SHUTDOWN);
257 1.202 oster
258 1.353 mrg /* Internal representation of a rf_recon_req */
259 1.353 mrg struct rf_recon_req_internal {
260 1.353 mrg RF_RowCol_t col;
261 1.353 mrg RF_ReconReqFlags_t flags;
262 1.353 mrg void *raidPtr;
263 1.353 mrg };
264 1.353 mrg
265 1.186 perry /*
266 1.186 perry * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
267 1.186 perry * Be aware that large numbers can allow the driver to consume a lot of
268 1.28 oster * kernel memory, especially on writes, and in degraded mode reads.
269 1.186 perry *
270 1.186 perry * For example: with a stripe width of 64 blocks (32k) and 5 disks,
271 1.186 perry * a single 64K write will typically require 64K for the old data,
272 1.186 perry * 64K for the old parity, and 64K for the new parity, for a total
273 1.28 oster * of 192K (if the parity buffer is not re-used immediately).
274 1.110 oster * Even if it is used immediately, that's still 128K, which when multiplied
275 1.28 oster * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
276 1.186 perry *
277 1.28 oster * Now in degraded mode, for example, a 64K read on the above setup may
278 1.186 perry * require data reconstruction, which will require *all* of the 4 remaining
279 1.28 oster * disks to participate -- 4 * 32K/disk == 128K again.
280 1.20 oster */
281 1.20 oster
282 1.20 oster #ifndef RAIDOUTSTANDING
283 1.28 oster #define RAIDOUTSTANDING 6
284 1.20 oster #endif
285 1.20 oster
286 1.1 oster #define RAIDLABELDEV(dev) \
287 1.1 oster (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
288 1.1 oster
289 1.1 oster /* declared here, and made public, for the benefit of KVM stuff.. */
290 1.9 oster
291 1.104 oster static int raidlock(struct raid_softc *);
292 1.104 oster static void raidunlock(struct raid_softc *);
293 1.1 oster
294 1.266 dyoung static int raid_detach_unlocked(struct raid_softc *);
295 1.266 dyoung
296 1.104 oster static void rf_markalldirty(RF_Raid_t *);
297 1.304 christos static void rf_set_geometry(struct raid_softc *, RF_Raid_t *);
298 1.48 oster
299 1.393 mrg static void rf_ReconThread(struct rf_recon_req_internal *);
300 1.393 mrg static void rf_RewriteParityThread(RF_Raid_t *raidPtr);
301 1.393 mrg static void rf_CopybackThread(RF_Raid_t *raidPtr);
302 1.393 mrg static void rf_ReconstructInPlaceThread(struct rf_recon_req_internal *);
303 1.393 mrg static int rf_autoconfig(device_t);
304 1.398 oster static int rf_rescan(void);
305 1.393 mrg static void rf_buildroothack(RF_ConfigSet_t *);
306 1.104 oster
307 1.393 mrg static RF_AutoConfig_t *rf_find_raid_components(void);
308 1.393 mrg static RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
309 1.104 oster static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
310 1.393 mrg static void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
311 1.393 mrg static int rf_set_autoconfig(RF_Raid_t *, int);
312 1.393 mrg static int rf_set_rootpartition(RF_Raid_t *, int);
313 1.393 mrg static void rf_release_all_vps(RF_ConfigSet_t *);
314 1.393 mrg static void rf_cleanup_config_set(RF_ConfigSet_t *);
315 1.393 mrg static int rf_have_enough_components(RF_ConfigSet_t *);
316 1.393 mrg static struct raid_softc *rf_auto_config_set(RF_ConfigSet_t *);
317 1.278 mrg static void rf_fix_old_label_size(RF_ComponentLabel_t *, uint64_t);
318 1.48 oster
319 1.295 erh /*
320 1.295 erh * Debugging, mostly. Set to 0 to not allow autoconfig to take place.
321 1.295 erh * Note that this is overridden by having RAID_AUTOCONFIG as an option
322 1.295 erh * in the kernel config file.
323 1.295 erh */
324 1.295 erh #ifdef RAID_AUTOCONFIG
325 1.295 erh int raidautoconfig = 1;
326 1.295 erh #else
327 1.295 erh int raidautoconfig = 0;
328 1.295 erh #endif
329 1.295 erh static bool raidautoconfigdone = false;
330 1.37 oster
331 1.395 oster struct pool rf_alloclist_pool; /* AllocList */
332 1.177 oster
333 1.300 christos static LIST_HEAD(, raid_softc) raids = LIST_HEAD_INITIALIZER(raids);
334 1.300 christos static kmutex_t raid_lock;
335 1.1 oster
336 1.300 christos static struct raid_softc *
337 1.300 christos raidcreate(int unit) {
338 1.300 christos struct raid_softc *sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
339 1.300 christos sc->sc_unit = unit;
340 1.327 pgoyette cv_init(&sc->sc_cv, "raidunit");
341 1.327 pgoyette mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_NONE);
342 1.300 christos return sc;
343 1.300 christos }
344 1.1 oster
345 1.300 christos static void
346 1.300 christos raiddestroy(struct raid_softc *sc) {
347 1.327 pgoyette cv_destroy(&sc->sc_cv);
348 1.327 pgoyette mutex_destroy(&sc->sc_mutex);
349 1.300 christos kmem_free(sc, sizeof(*sc));
350 1.300 christos }
351 1.50 oster
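/*
 * raidget: look up the raid_softc for the given unit, optionally creating
 * and enqueueing a new one if none exists yet.  Returns NULL for negative
 * units, or when create is false and no matching softc is found.
 */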
352 1.300 christos static struct raid_softc *
353 1.327 pgoyette raidget(int unit, bool create) {
354 1.300 christos struct raid_softc *sc;
355 1.300 christos if (unit < 0) {
356 1.300 christos #ifdef DIAGNOSTIC
357 1.300 christos panic("%s: unit %d!", __func__, unit);
358 1.300 christos #endif
359 1.300 christos return NULL;
360 1.300 christos }
361 1.300 christos mutex_enter(&raid_lock);
362 1.300 christos LIST_FOREACH(sc, &raids, sc_link) {
363 1.300 christos if (sc->sc_unit == unit) {
364 1.300 christos mutex_exit(&raid_lock);
365 1.300 christos return sc;
366 1.300 christos }
367 1.300 christos }
368 1.300 christos mutex_exit(&raid_lock);
369 1.327 pgoyette if (!create)
370 1.327 pgoyette return NULL;
371 1.379 chs sc = raidcreate(unit);
372 1.300 christos mutex_enter(&raid_lock);
373 1.300 christos LIST_INSERT_HEAD(&raids, sc, sc_link);
374 1.300 christos mutex_exit(&raid_lock);
375 1.300 christos return sc;
376 1.300 christos }
377 1.300 christos
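/*
 * raidput: remove the softc from the global raids list and free it.
 */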
378 1.385 riastrad static void
379 1.300 christos raidput(struct raid_softc *sc) {
380 1.300 christos mutex_enter(&raid_lock);
381 1.300 christos LIST_REMOVE(sc, sc_link);
382 1.300 christos mutex_exit(&raid_lock);
383 1.300 christos raiddestroy(sc);
384 1.300 christos }
385 1.1 oster
386 1.300 christos void
387 1.300 christos raidattach(int num)
388 1.300 christos {
389 1.62 oster
390 1.142 thorpej /*
391 1.327 pgoyette * Device attachment and associated initialization now occur
392 1.327 pgoyette * as part of the module initialization.
393 1.142 thorpej */
394 1.142 thorpej }
395 1.142 thorpej
396 1.393 mrg static int
397 1.261 dyoung rf_autoconfig(device_t self)
398 1.142 thorpej {
399 1.142 thorpej RF_AutoConfig_t *ac_list;
400 1.142 thorpej RF_ConfigSet_t *config_sets;
401 1.142 thorpej
402 1.295 erh if (!raidautoconfig || raidautoconfigdone == true)
403 1.389 skrll return 0;
404 1.142 thorpej
405 1.142 thorpej /* XXX This code can only be run once. */
406 1.295 erh raidautoconfigdone = true;
407 1.142 thorpej
408 1.307 christos #ifdef __HAVE_CPU_BOOTCONF
409 1.307 christos /*
410 1.307 christos * 0. Find the boot device first, if needed, so we can use it later.
411 1.307 christos * This needs to be done before we autoconfigure any RAID sets,
412 1.307 christos * because if we use wedges we are not going to be able to open
413 1.307 christos * the boot device later.
414 1.307 christos */
415 1.307 christos if (booted_device == NULL)
416 1.307 christos cpu_bootconf();
417 1.307 christos #endif
418 1.48 oster /* 1. locate all RAID components on the system */
419 1.258 ad aprint_debug("Searching for RAID components...\n");
420 1.48 oster ac_list = rf_find_raid_components();
421 1.48 oster
422 1.142 thorpej /* 2. Sort them into their respective sets. */
423 1.48 oster config_sets = rf_create_auto_sets(ac_list);
424 1.48 oster
425 1.142 thorpej /*
426 1.299 oster * 3. Evaluate each set and configure the valid ones.
427 1.142 thorpej * This gets done in rf_buildroothack().
428 1.142 thorpej */
429 1.142 thorpej rf_buildroothack(config_sets);
430 1.48 oster
431 1.213 christos return 1;
432 1.48 oster }
433 1.48 oster
434 1.367 christos int
435 1.367 christos rf_inited(const struct raid_softc *rs) {
436 1.367 christos return (rs->sc_flags & RAIDF_INITED) != 0;
437 1.367 christos }
438 1.367 christos
439 1.368 oster RF_Raid_t *
440 1.368 oster rf_get_raid(struct raid_softc *rs) {
441 1.368 oster return &rs->sc_r;
442 1.368 oster }
443 1.368 oster
444 1.367 christos int
445 1.367 christos rf_get_unit(const struct raid_softc *rs) {
446 1.367 christos return rs->sc_unit;
447 1.367 christos }
448 1.367 christos
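/*
 * rf_containsboot: return non-zero if any component of the RAID set r
 * lives on the boot device bdv.  Wedge components ("dkN") are matched
 * via the name of their parent disk.
 */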
449 1.306 christos static int
450 1.307 christos rf_containsboot(RF_Raid_t *r, device_t bdv) {
451 1.359 bad const char *bootname;
452 1.359 bad size_t len;
453 1.359 bad
454 1.359 bad /* if bdv is NULL, the set can't contain it. exit early. */
455 1.359 bad if (bdv == NULL)
456 1.359 bad return 0;
457 1.359 bad
458 1.359 bad bootname = device_xname(bdv);
459 1.359 bad len = strlen(bootname);
460 1.306 christos
461 1.306 christos for (int col = 0; col < r->numCol; col++) {
462 1.307 christos const char *devname = r->Disks[col].devname;
463 1.306 christos devname += sizeof("/dev/") - 1;
464 1.307 christos if (strncmp(devname, "dk", 2) == 0) {
465 1.307 christos const char *parent =
466 1.307 christos dkwedge_get_parent_name(r->Disks[col].dev);
467 1.307 christos if (parent != NULL)
468 1.307 christos devname = parent;
469 1.307 christos }
470 1.306 christos if (strncmp(devname, bootname, len) == 0) {
471 1.306 christos struct raid_softc *sc = r->softc;
472 1.306 christos aprint_debug("raid%d includes boot device %s\n",
473 1.306 christos sc->sc_unit, devname);
474 1.306 christos return 1;
475 1.306 christos }
476 1.306 christos }
477 1.306 christos return 0;
478 1.306 christos }
479 1.306 christos
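/*
 * rf_rescan: search the system for RAID components and autoconfigure any
 * complete sets that are marked for autoconfiguration, repeating the scan
 * while new sets appear so that stacked (RAID-on-RAID) sets are found too.
 */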
480 1.398 oster static int
481 1.398 oster rf_rescan(void)
482 1.398 oster {
483 1.398 oster RF_AutoConfig_t *ac_list;
484 1.398 oster RF_ConfigSet_t *config_sets, *cset, *next_cset;
485 1.398 oster struct raid_softc *sc;
486 1.398 oster int raid_added;
487 1.398 oster
488 1.398 oster ac_list = rf_find_raid_components();
489 1.398 oster config_sets = rf_create_auto_sets(ac_list);
490 1.398 oster
491 1.398 oster raid_added = 1;
492 1.398 oster while (raid_added > 0) {
493 1.398 oster raid_added = 0;
494 1.398 oster cset = config_sets;
495 1.398 oster while (cset != NULL) {
496 1.398 oster next_cset = cset->next;
497 1.398 oster if (rf_have_enough_components(cset) &&
498 1.398 oster cset->ac->clabel->autoconfigure == 1) {
499 1.398 oster sc = rf_auto_config_set(cset);
500 1.398 oster if (sc != NULL) {
501 1.398 oster aprint_debug("raid%d: configured ok, rootable %d\n",
502 1.398 oster sc->sc_unit, cset->rootable);
503 1.398 oster /* We added one RAID set */
504 1.398 oster raid_added++;
505 1.398 oster } else {
506 1.398 oster /* The autoconfig didn't work :( */
507 1.398 oster aprint_debug("Autoconfig failed\n");
508 1.398 oster rf_release_all_vps(cset);
509 1.398 oster }
510 1.398 oster } else {
511 1.398 oster /* we're not autoconfiguring this set...
512 1.398 oster release the associated resources */
513 1.398 oster rf_release_all_vps(cset);
514 1.398 oster }
515 1.398 oster /* cleanup */
516 1.398 oster rf_cleanup_config_set(cset);
517 1.398 oster cset = next_cset;
518 1.398 oster }
519 1.398 oster if (raid_added > 0) {
520 1.398 oster /* We added at least one RAID set, so re-scan for recursive RAID */
521 1.398 oster ac_list = rf_find_raid_components();
522 1.398 oster config_sets = rf_create_auto_sets(ac_list);
523 1.398 oster }
524 1.398 oster }
525 1.398 oster
526 1.398 oster return 0;
527 1.398 oster }
528 1.398 oster
529 1.410.4.3 martin /*
530 1.410.4.3 martin * Example setup:
531 1.410.4.3 martin * dk1 at wd0: "raid@wd0", 171965 blocks at 32802, type: raidframe
532 1.410.4.3 martin * dk3 at wd1: "raid@wd1", 171965 blocks at 32802, type: raidframe
533 1.410.4.3 martin * raid1: Components: /dev/dk1 /dev/dk3
534 1.410.4.3 martin * dk4 at raid1: "empty@raid1", 8192 blocks at 34, type: msdos
535 1.410.4.3 martin * dk5 at raid1: "root@raid1", 163517 blocks at 8226, type: ffs
536 1.410.4.3 martin *
537 1.410.4.3 martin * If booted from wd0, booted_device will be
538 1.410.4.3 martin * disk wd0, startblk = 41092, nblks = 163517
539 1.410.4.3 martin *
540 1.410.4.3 martin * That is, dk5 with startblk computed from the beginning of wd0
541 1.410.4.3 martin * instead of beginning of raid1:
542 1.410.4.3 martin * 32802 + 64 (RF_PROTECTED_SECTORS) + 8226 = 41092
543 1.410.4.3 martin *
544 1.410.4.3 martin * In order to find the boot wedge, we must iterate over each component,
545 1.410.4.3 martin * find its offset from the disk beginning, and look for the boot wedge with
546 1.410.4.3 martin * startblk adjusted.
547 1.410.4.3 martin */
548 1.410.4.3 martin static device_t
549 1.410.4.3 martin rf_find_bootwedge(struct raid_softc *rsc)
550 1.410.4.3 martin {
551 1.410.4.3 martin RF_Raid_t *r = &rsc->sc_r;
552 1.410.4.3 martin const char *bootname;
553 1.410.4.3 martin size_t len;
554 1.410.4.3 martin device_t rdev = NULL;
555 1.410.4.3 martin
556 1.410.4.3 martin if (booted_device == NULL)
557 1.410.4.3 martin goto out;
558 1.410.4.3 martin
559 1.410.4.3 martin bootname = device_xname(booted_device);
560 1.410.4.3 martin len = strlen(bootname);
561 1.410.4.3 martin
562 1.410.4.3 martin aprint_debug("%s: booted_device %s, startblk = %"PRId64", "
563 1.410.4.3 martin "nblks = %"PRId64"\n", __func__,
564 1.410.4.3 martin bootname, booted_startblk, booted_nblks);
565 1.410.4.3 martin
566 1.410.4.3 martin for (int col = 0; col < r->numCol; col++) {
567 1.410.4.3 martin const char *devname = r->Disks[col].devname;
568 1.410.4.3 martin const char *parent;
569 1.410.4.3 martin struct disk *dk;
570 1.410.4.3 martin u_int nwedges;
571 1.410.4.3 martin struct dkwedge_info *dkwi;
572 1.410.4.3 martin struct dkwedge_list dkwl;
573 1.410.4.3 martin size_t dkwi_len;
574 1.410.4.3 martin int i;
575 1.410.4.3 martin
576 1.410.4.3 martin devname += sizeof("/dev/") - 1;
577 1.410.4.3 martin if (strncmp(devname, "dk", 2) != 0)
578 1.410.4.3 martin continue;
579 1.410.4.3 martin
580 1.410.4.3 martin parent = dkwedge_get_parent_name(r->Disks[col].dev);
581 1.410.4.3 martin if (parent == NULL) {
582 1.410.4.3 martin aprint_debug("%s: cannot find parent for "
583 1.410.4.3 martin "component /dev/%s\n", __func__, devname);
584 1.410.4.3 martin continue;
585 1.410.4.3 martin }
586 1.410.4.3 martin
587 1.410.4.3 martin if (strncmp(parent, bootname, len) != 0)
588 1.410.4.3 martin continue;
589 1.410.4.3 martin
590 1.410.4.3 martin aprint_debug("%s: looking up wedge %s in device %s\n",
591 1.410.4.3 martin __func__, devname, parent);
592 1.410.4.3 martin
593 1.410.4.3 martin dk = disk_find(parent);
		/* parent disk not found? skip this component */
		if (dk == NULL)
			continue;
594 1.410.4.3 martin nwedges = dk->dk_nwedges;
595 1.410.4.3 martin dkwi_len = sizeof(*dkwi) * nwedges;
596 1.410.4.3 martin dkwi = RF_Malloc(dkwi_len);
597 1.410.4.3 martin
598 1.410.4.3 martin dkwl.dkwl_buf = dkwi;
599 1.410.4.3 martin dkwl.dkwl_bufsize = dkwi_len;
600 1.410.4.3 martin dkwl.dkwl_nwedges = 0;
601 1.410.4.3 martin dkwl.dkwl_ncopied = 0;
602 1.410.4.3 martin
603 1.410.4.3 martin if (dkwedge_list(dk, &dkwl, curlwp) == 0) {
604 1.410.4.3 martin daddr_t startblk;
605 1.410.4.3 martin
606 1.410.4.3 martin for (i = 0; i < dkwl.dkwl_ncopied; i++) {
607 1.410.4.3 martin if (strcmp(dkwi[i].dkw_devname, devname) == 0)
608 1.410.4.3 martin break;
609 1.410.4.3 martin }
610 1.410.4.3 martin
611 1.410.4.3 martin KASSERT(i < dkwl.dkwl_ncopied);
612 1.410.4.3 martin
613 1.410.4.3 martin aprint_debug("%s: wedge %s, "
614 1.410.4.3 martin "startblk = %"PRId64", "
615 1.410.4.3 martin "nblks = %"PRId64"\n",
616 1.410.4.3 martin __func__,
617 1.410.4.3 martin dkwi[i].dkw_devname,
618 1.410.4.3 martin dkwi[i].dkw_offset,
619 1.410.4.3 martin dkwi[i].dkw_size);
620 1.410.4.3 martin
621 1.410.4.3 martin startblk = booted_startblk
622 1.410.4.3 martin - dkwi[i].dkw_offset
623 1.410.4.3 martin - RF_PROTECTED_SECTORS;
624 1.410.4.3 martin
625 1.410.4.3 martin aprint_debug("%s: looking for wedge in %s, "
626 1.410.4.3 martin "startblk = %"PRId64", "
627 1.410.4.3 martin "nblks = %"PRId64"\n",
628 1.410.4.3 martin __func__,
629 1.410.4.3 martin DEVICE_XNAME(rsc->sc_dksc.sc_dev),
630 1.410.4.3 martin startblk, booted_nblks);
631 1.410.4.3 martin
632 1.410.4.3 martin rdev = dkwedge_find_partition(rsc->sc_dksc.sc_dev,
633 1.410.4.3 martin startblk,
634 1.410.4.3 martin booted_nblks);
635 1.410.4.3 martin if (rdev) {
636 1.410.4.3 martin aprint_debug("%s: root candidate wedge %s "
637 1.410.4.3 martin "shifted from %s\n", __func__,
638 1.410.4.3 martin device_xname(rdev),
639 1.410.4.3 martin dkwi[i].dkw_devname);
640 1.410.4.3 martin goto done;
641 1.410.4.3 martin } else {
642 1.410.4.3 martin aprint_debug("%s: not found\n", __func__);
643 1.410.4.3 martin }
644 1.410.4.3 martin }
645 1.410.4.3 martin
646 1.410.4.3 martin aprint_debug("%s: nothing found for col %d\n", __func__, col);
647 1.410.4.3 martin done:
648 1.410.4.3 martin RF_Free(dkwi, dkwi_len);
649 1.410.4.3 martin }
650 1.410.4.3 martin
651 1.410.4.3 martin out:
652 1.410.4.3 martin if (!rdev)
653 1.410.4.3 martin aprint_debug("%s: nothing found\n", __func__);
654 1.410.4.3 martin
655 1.410.4.3 martin return rdev;
656 1.410.4.3 martin }
657 1.398 oster
658 1.393 mrg static void
659 1.142 thorpej rf_buildroothack(RF_ConfigSet_t *config_sets)
660 1.48 oster {
661 1.397 oster RF_AutoConfig_t *ac_list;
662 1.48 oster RF_ConfigSet_t *cset;
663 1.48 oster RF_ConfigSet_t *next_cset;
664 1.51 oster int num_root;
665 1.397 oster int raid_added;
666 1.300 christos struct raid_softc *sc, *rsc;
667 1.378 martin struct dk_softc *dksc = NULL; /* XXX gcc -Os: may be used uninit. */
668 1.48 oster
669 1.300 christos sc = rsc = NULL;
670 1.51 oster num_root = 0;
671 1.397 oster
672 1.397 oster raid_added = 1;
673 1.397 oster while (raid_added > 0) {
674 1.397 oster raid_added = 0;
675 1.397 oster cset = config_sets;
676 1.397 oster while (cset != NULL) {
677 1.397 oster next_cset = cset->next;
678 1.397 oster if (rf_have_enough_components(cset) &&
679 1.397 oster cset->ac->clabel->autoconfigure == 1) {
680 1.397 oster sc = rf_auto_config_set(cset);
681 1.397 oster if (sc != NULL) {
682 1.397 oster aprint_debug("raid%d: configured ok, rootable %d\n",
683 1.397 oster sc->sc_unit, cset->rootable);
684 1.397 oster /* We added one RAID set */
685 1.397 oster raid_added++;
686 1.397 oster if (cset->rootable) {
687 1.397 oster rsc = sc;
688 1.397 oster num_root++;
689 1.397 oster }
690 1.397 oster } else {
691 1.397 oster /* The autoconfig didn't work :( */
692 1.397 oster aprint_debug("Autoconfig failed\n");
693 1.397 oster rf_release_all_vps(cset);
694 1.51 oster }
695 1.51 oster } else {
696 1.397 oster /* we're not autoconfiguring this set...
697 1.397 oster release the associated resources */
698 1.51 oster rf_release_all_vps(cset);
699 1.48 oster }
700 1.397 oster /* cleanup */
701 1.397 oster rf_cleanup_config_set(cset);
702 1.397 oster cset = next_cset;
703 1.397 oster }
704 1.397 oster if (raid_added > 0) {
705 1.397 oster /* We added at least one RAID set, so re-scan for recursive RAID */
706 1.397 oster ac_list = rf_find_raid_components();
707 1.397 oster config_sets = rf_create_auto_sets(ac_list);
708 1.48 oster }
709 1.48 oster }
710 1.397 oster
711 1.223 oster /* if the user has specified what the root device should be
712 1.223 oster then we don't touch booted_device or boothowto... */
713 1.223 oster
714 1.359 bad if (rootspec != NULL) {
715 1.403 mrg aprint_debug("%s: rootspec %s\n", __func__, rootspec);
716 1.223 oster return;
717 1.359 bad }
718 1.223 oster
719 1.122 oster /* we found something bootable... */
720 1.122 oster if (num_root == 1) {
721 1.410.4.3 martin device_t candidate_root = NULL;
722 1.377 maxv dksc = &rsc->sc_dksc;
723 1.410.4.3 martin
724 1.335 mlelstv if (dksc->sc_dkdev.dk_nwedges != 0) {
725 1.410.4.3 martin
726 1.410.4.3 martin /* Find the wedge we booted from */
727 1.410.4.3 martin candidate_root = rf_find_bootwedge(rsc);
728 1.410.4.3 martin
729 1.410.4.3 martin /* Try first partition */
730 1.344 christos if (candidate_root == NULL) {
731 1.344 christos size_t i = 0;
732 1.344 christos candidate_root = dkwedge_find_by_parent(
733 1.344 christos device_xname(dksc->sc_dev), &i);
734 1.344 christos }
735 1.410.4.3 martin aprint_debug("%s: candidate wedge root %s\n",
736 1.410.4.3 martin __func__, DEVICE_XNAME(candidate_root));
737 1.410.4.3 martin } else {
738 1.335 mlelstv candidate_root = dksc->sc_dev;
739 1.410.4.3 martin }
740 1.410.4.3 martin
741 1.410.4.3 martin aprint_debug("%s: candidate root = %s, booted_device = %s, "
742 1.410.4.3 martin "root_partition = %d, contains_boot=%d\n",
743 1.410.4.3 martin __func__, DEVICE_XNAME(candidate_root),
744 1.410.4.3 martin DEVICE_XNAME(booted_device), rsc->sc_r.root_partition,
745 1.402 mrg rf_containsboot(&rsc->sc_r, booted_device));
746 1.410.4.3 martin
747 1.359 bad /* XXX the check for booted_device == NULL can probably be
748 1.359 bad * dropped, now that rf_containsboot handles that case.
749 1.359 bad */
750 1.308 christos if (booted_device == NULL ||
751 1.308 christos rsc->sc_r.root_partition == 1 ||
752 1.310 christos rf_containsboot(&rsc->sc_r, booted_device)) {
753 1.308 christos booted_device = candidate_root;
754 1.351 christos booted_method = "raidframe/single";
755 1.310 christos booted_partition = 0; /* XXX assume 'a' */
756 1.410.4.3 martin aprint_debug("%s: set booted_device = %s\n", __func__,
757 1.410.4.3 martin DEVICE_XNAME(booted_device));
758 1.310 christos }
759 1.122 oster } else if (num_root > 1) {
760 1.410.4.3 martin aprint_debug("%s: many roots=%d, %s\n", __func__, num_root,
761 1.410.4.3 martin DEVICE_XNAME(booted_device));
762 1.226 oster
763 1.385 riastrad /*
764 1.226 oster * Maybe the MD code can help. If it cannot, then
765 1.226 oster * setroot() will discover that we have no
766 1.226 oster * booted_device and will ask the user if nothing was
767 1.385 riastrad * hardwired in the kernel config file
768 1.226 oster */
769 1.385 riastrad if (booted_device == NULL)
770 1.226 oster return;
771 1.226 oster
772 1.226 oster num_root = 0;
773 1.300 christos mutex_enter(&raid_lock);
774 1.300 christos LIST_FOREACH(sc, &raids, sc_link) {
775 1.300 christos RF_Raid_t *r = &sc->sc_r;
776 1.300 christos if (r->valid == 0)
777 1.226 oster continue;
778 1.226 oster
779 1.300 christos if (r->root_partition == 0)
780 1.226 oster continue;
781 1.226 oster
782 1.306 christos if (rf_containsboot(r, booted_device)) {
783 1.226 oster num_root++;
784 1.300 christos rsc = sc;
785 1.335 mlelstv dksc = &rsc->sc_dksc;
786 1.226 oster }
787 1.226 oster }
788 1.300 christos mutex_exit(&raid_lock);
789 1.295 erh
790 1.226 oster if (num_root == 1) {
791 1.335 mlelstv booted_device = dksc->sc_dev;
792 1.351 christos booted_method = "raidframe/multi";
793 1.310 christos booted_partition = 0; /* XXX assume 'a' */
794 1.226 oster } else {
795 1.226 oster /* we can't guess.. require the user to answer... */
796 1.226 oster boothowto |= RB_ASKNAME;
797 1.226 oster }
798 1.51 oster }
799 1.1 oster }
800 1.1 oster
801 1.324 mrg static int
802 1.169 oster raidsize(dev_t dev)
803 1.1 oster {
804 1.1 oster struct raid_softc *rs;
805 1.335 mlelstv struct dk_softc *dksc;
806 1.335 mlelstv unsigned int unit;
807 1.1 oster
808 1.1 oster unit = raidunit(dev);
809 1.327 pgoyette if ((rs = raidget(unit, false)) == NULL)
810 1.336 mlelstv return -1;
811 1.335 mlelstv dksc = &rs->sc_dksc;
812 1.335 mlelstv
813 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
814 1.336 mlelstv return -1;
815 1.1 oster
816 1.335 mlelstv return dk_size(dksc, dev);
817 1.335 mlelstv }
818 1.1 oster
819 1.335 mlelstv static int
820 1.335 mlelstv raiddump(dev_t dev, daddr_t blkno, void *va, size_t size)
821 1.335 mlelstv {
822 1.335 mlelstv unsigned int unit;
823 1.335 mlelstv struct raid_softc *rs;
824 1.335 mlelstv struct dk_softc *dksc;
825 1.1 oster
826 1.335 mlelstv unit = raidunit(dev);
827 1.335 mlelstv if ((rs = raidget(unit, false)) == NULL)
828 1.335 mlelstv return ENXIO;
829 1.335 mlelstv dksc = &rs->sc_dksc;
830 1.1 oster
831 1.335 mlelstv if ((rs->sc_flags & RAIDF_INITED) == 0)
832 1.335 mlelstv return ENODEV;
833 1.1 oster
834 1.336 mlelstv /*
835 1.336 mlelstv Note that blkno is relative to this particular partition.
836 1.336 mlelstv By adding RF_PROTECTED_SECTORS, we get a value that
837 1.336 mlelstv is relative to the partition used for the underlying component.
838 1.336 mlelstv */
839 1.336 mlelstv blkno += RF_PROTECTED_SECTORS;
840 1.336 mlelstv
841 1.380 riastrad return dk_dump(dksc, dev, blkno, va, size, DK_DUMP_RECURSIVE);
842 1.1 oster }
843 1.1 oster
844 1.324 mrg static int
845 1.335 mlelstv raid_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
846 1.1 oster {
847 1.335 mlelstv struct raid_softc *rs = raidsoftc(dev);
848 1.231 oster const struct bdevsw *bdev;
849 1.231 oster RF_Raid_t *raidPtr;
850 1.335 mlelstv int c, sparecol, j, scol, dumpto;
851 1.231 oster int error = 0;
852 1.231 oster
853 1.300 christos raidPtr = &rs->sc_r;
854 1.231 oster
855 1.231 oster /* we only support dumping to RAID 1 sets */
856 1.385 riastrad if (raidPtr->Layout.numDataCol != 1 ||
857 1.231 oster raidPtr->Layout.numParityCol != 1)
858 1.231 oster return EINVAL;
859 1.231 oster
860 1.231 oster if ((error = raidlock(rs)) != 0)
861 1.231 oster return error;
862 1.231 oster
863 1.231 oster /* figure out what device is alive.. */
864 1.231 oster
865 1.385 riastrad /*
866 1.231 oster Look for a component to dump to. The preference for the
867 1.231 oster component to dump to is as follows:
868 1.383 oster 1) the first component
869 1.383 oster 2) a used_spare of the first component
870 1.383 oster 3) the second component
871 1.383 oster 4) a used_spare of the second component
872 1.231 oster */
873 1.231 oster
874 1.231 oster dumpto = -1;
875 1.231 oster for (c = 0; c < raidPtr->numCol; c++) {
876 1.231 oster if (raidPtr->Disks[c].status == rf_ds_optimal) {
877 1.231 oster /* this might be the one */
878 1.231 oster dumpto = c;
879 1.231 oster break;
880 1.231 oster }
881 1.231 oster }
882 1.385 riastrad
883 1.385 riastrad /*
884 1.383 oster At this point we have possibly selected a live component.
885 1.383 oster If we didn't find a live component, we now check to see
886 1.383 oster if there is a relevant spared component.
887 1.231 oster */
888 1.231 oster
889 1.231 oster for (c = 0; c < raidPtr->numSpare; c++) {
890 1.231 oster sparecol = raidPtr->numCol + c;
891 1.231 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
892 1.231 oster /* How about this one? */
893 1.231 oster scol = -1;
894 1.231 oster for(j=0;j<raidPtr->numCol;j++) {
895 1.231 oster if (raidPtr->Disks[j].spareCol == sparecol) {
896 1.231 oster scol = j;
897 1.231 oster break;
898 1.231 oster }
899 1.231 oster }
900 1.231 oster if (scol == 0) {
901 1.385 riastrad /*
902 1.383 oster We must have found a spared first
903 1.383 oster component! We'll take that over
904 1.383 oster anything else found so far. (We
905 1.383 oster couldn't have found a real first
906 1.383 oster component before, since this is a
907 1.383 oster used spare, and it's saying that
908 1.383 oster it's replacing the first
909 1.383 oster component.) On reboot (with
910 1.231 oster autoconfiguration turned on)
911 1.383 oster sparecol will become the first
912 1.383 oster component (component0) of this set.
913 1.231 oster */
914 1.231 oster dumpto = sparecol;
915 1.231 oster break;
916 1.231 oster } else if (scol != -1) {
917 1.385 riastrad /*
918 1.385 riastrad Must be a spared second component.
919 1.385 riastrad We'll dump to that if we haven't found
920 1.385 riastrad anything else so far.
921 1.231 oster */
922 1.231 oster if (dumpto == -1)
923 1.231 oster dumpto = sparecol;
924 1.231 oster }
925 1.231 oster }
926 1.231 oster }
927 1.385 riastrad
928 1.231 oster if (dumpto == -1) {
929 1.231 oster /* we couldn't find any live components to dump to!?!?
930 1.231 oster */
931 1.231 oster error = EINVAL;
932 1.231 oster goto out;
933 1.231 oster }
934 1.231 oster
935 1.231 oster bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);
936 1.342 mlelstv if (bdev == NULL) {
937 1.342 mlelstv error = ENXIO;
938 1.342 mlelstv goto out;
939 1.342 mlelstv }
940 1.231 oster
941 1.385 riastrad error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
942 1.336 mlelstv blkno, va, nblk * raidPtr->bytesPerSector);
943 1.385 riastrad
944 1.231 oster out:
945 1.231 oster raidunlock(rs);
946 1.385 riastrad
947 1.231 oster return error;
948 1.1 oster }
949 1.324 mrg
950 1.1 oster /* ARGSUSED */
951 1.324 mrg static int
952 1.222 christos raidopen(dev_t dev, int flags, int fmt,
953 1.222 christos struct lwp *l)
954 1.1 oster {
955 1.9 oster int unit = raidunit(dev);
956 1.1 oster struct raid_softc *rs;
957 1.335 mlelstv struct dk_softc *dksc;
958 1.335 mlelstv int error = 0;
959 1.9 oster int part, pmask;
960 1.9 oster
961 1.327 pgoyette if ((rs = raidget(unit, true)) == NULL)
962 1.300 christos return ENXIO;
963 1.1 oster if ((error = raidlock(rs)) != 0)
964 1.389 skrll return error;
965 1.266 dyoung
966 1.266 dyoung if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0) {
967 1.266 dyoung error = EBUSY;
968 1.266 dyoung goto bad;
969 1.266 dyoung }
970 1.266 dyoung
971 1.335 mlelstv dksc = &rs->sc_dksc;
972 1.1 oster
973 1.1 oster part = DISKPART(dev);
974 1.1 oster pmask = (1 << part);
975 1.1 oster
976 1.335 mlelstv if (!DK_BUSY(dksc, pmask) &&
977 1.13 oster ((rs->sc_flags & RAIDF_INITED) != 0)) {
978 1.13 oster /* First one... mark things as dirty... Note that we *MUST*
979 1.13 oster have done a configure before this. I DO NOT WANT TO BE
980 1.13 oster SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
981 1.13 oster THAT THEY BELONG TOGETHER!!!!! */
982 1.13 oster /* XXX should check to see if we're only open for reading
983 1.13 oster here... If so, we needn't do this, but then need some
984 1.13 oster other way of keeping track of what's happened.. */
985 1.13 oster
986 1.300 christos rf_markalldirty(&rs->sc_r);
987 1.13 oster }
988 1.13 oster
989 1.335 mlelstv if ((rs->sc_flags & RAIDF_INITED) != 0)
990 1.335 mlelstv error = dk_open(dksc, dev, flags, fmt, l);
991 1.1 oster
992 1.213 christos bad:
993 1.1 oster raidunlock(rs);
994 1.1 oster
995 1.389 skrll return error;
996 1.1 oster
997 1.1 oster
998 1.1 oster }
999 1.324 mrg
1000 1.335 mlelstv static int
1001 1.335 mlelstv raid_lastclose(device_t self)
1002 1.335 mlelstv {
1003 1.335 mlelstv struct raid_softc *rs = raidsoftc(self);
1004 1.335 mlelstv
1005 1.335 mlelstv /* Last one... device is not unconfigured yet.
1006 1.335 mlelstv Device shutdown has taken care of setting the
1007 1.335 mlelstv clean bits if RAIDF_INITED is not set;
1008 1.335 mlelstv mark things as clean... */
1009 1.335 mlelstv
1010 1.335 mlelstv rf_update_component_labels(&rs->sc_r,
1011 1.335 mlelstv RF_FINAL_COMPONENT_UPDATE);
1012 1.335 mlelstv
1013 1.335 mlelstv /* pass to unlocked code */
1014 1.335 mlelstv if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0)
1015 1.335 mlelstv rs->sc_flags |= RAIDF_DETACH;
1016 1.335 mlelstv
1017 1.335 mlelstv return 0;
1018 1.335 mlelstv }
1019 1.335 mlelstv
1020 1.1 oster /* ARGSUSED */
1021 1.324 mrg static int
1022 1.222 christos raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
1023 1.1 oster {
1024 1.9 oster int unit = raidunit(dev);
1025 1.1 oster struct raid_softc *rs;
1026 1.335 mlelstv struct dk_softc *dksc;
1027 1.335 mlelstv cfdata_t cf;
1028 1.335 mlelstv int error = 0, do_detach = 0, do_put = 0;
1029 1.1 oster
1030 1.327 pgoyette if ((rs = raidget(unit, false)) == NULL)
1031 1.300 christos return ENXIO;
1032 1.335 mlelstv dksc = &rs->sc_dksc;
1033 1.1 oster
1034 1.1 oster if ((error = raidlock(rs)) != 0)
1035 1.389 skrll return error;
1036 1.1 oster
1037 1.335 mlelstv if ((rs->sc_flags & RAIDF_INITED) != 0) {
1038 1.335 mlelstv error = dk_close(dksc, dev, flags, fmt, l);
1039 1.335 mlelstv if ((rs->sc_flags & RAIDF_DETACH) != 0)
1040 1.335 mlelstv do_detach = 1;
1041 1.335 mlelstv } else if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0)
1042 1.335 mlelstv do_put = 1;
1043 1.1 oster
1044 1.335 mlelstv raidunlock(rs);
1045 1.1 oster
1046 1.335 mlelstv if (do_detach) {
1047 1.335 mlelstv /* free the pseudo device attach bits */
1048 1.335 mlelstv cf = device_cfdata(dksc->sc_dev);
1049 1.335 mlelstv error = config_detach(dksc->sc_dev, 0);
1050 1.385 riastrad if (error == 0)
1051 1.335 mlelstv free(cf, M_RAIDFRAME);
1052 1.335 mlelstv } else if (do_put) {
1053 1.335 mlelstv raidput(rs);
1054 1.1 oster }
1055 1.186 perry
1056 1.389 skrll return error;
1057 1.147 oster
1058 1.335 mlelstv }
1059 1.327 pgoyette
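/*
 * raid_wakeup: signal iodone_cv so the thread sleeping on it (presumably
 * the RAIDframe I/O thread) re-examines the queue and schedules more work.
 */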
1060 1.335 mlelstv static void
1061 1.335 mlelstv raid_wakeup(RF_Raid_t *raidPtr)
1062 1.335 mlelstv {
1063 1.335 mlelstv rf_lock_mutex2(raidPtr->iodone_lock);
1064 1.335 mlelstv rf_signal_cond2(raidPtr->iodone_cv);
1065 1.335 mlelstv rf_unlock_mutex2(raidPtr->iodone_lock);
1066 1.1 oster }
1067 1.1 oster
1068 1.324 mrg static void
1069 1.169 oster raidstrategy(struct buf *bp)
1070 1.1 oster {
1071 1.335 mlelstv unsigned int unit;
1072 1.335 mlelstv struct raid_softc *rs;
1073 1.335 mlelstv struct dk_softc *dksc;
1074 1.1 oster RF_Raid_t *raidPtr;
1075 1.1 oster
1076 1.335 mlelstv unit = raidunit(bp->b_dev);
1077 1.327 pgoyette if ((rs = raidget(unit, false)) == NULL) {
1078 1.30 oster bp->b_error = ENXIO;
1079 1.335 mlelstv goto fail;
1080 1.30 oster }
1081 1.300 christos if ((rs->sc_flags & RAIDF_INITED) == 0) {
1082 1.300 christos bp->b_error = ENXIO;
1083 1.335 mlelstv goto fail;
1084 1.1 oster }
1085 1.335 mlelstv dksc = &rs->sc_dksc;
1086 1.300 christos raidPtr = &rs->sc_r;
1087 1.335 mlelstv
1088 1.335 mlelstv /* Queue IO only */
1089 1.335 mlelstv if (dk_strategy_defer(dksc, bp))
1090 1.196 yamt goto done;
1091 1.1 oster
1092 1.335 mlelstv /* schedule the IO to happen at the next convenient time */
1093 1.335 mlelstv raid_wakeup(raidPtr);
1094 1.335 mlelstv
1095 1.335 mlelstv done:
1096 1.335 mlelstv return;
1097 1.335 mlelstv
1098 1.335 mlelstv fail:
1099 1.335 mlelstv bp->b_resid = bp->b_bcount;
1100 1.335 mlelstv biodone(bp);
1101 1.335 mlelstv }
1102 1.335 mlelstv
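/*
 * raid_diskstart: dk(9) d_diskstart hook -- hand one buffer to RAIDframe
 * for processing.
 */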
1103 1.335 mlelstv static int
1104 1.335 mlelstv raid_diskstart(device_t dev, struct buf *bp)
1105 1.335 mlelstv {
1106 1.335 mlelstv struct raid_softc *rs = raidsoftc(dev);
1107 1.335 mlelstv RF_Raid_t *raidPtr;
1108 1.1 oster
1109 1.335 mlelstv raidPtr = &rs->sc_r;
1110 1.335 mlelstv if (!raidPtr->valid) {
1111 1.335 mlelstv db1_printf(("raid is not valid..\n"));
1112 1.335 mlelstv return ENODEV;
1113 1.196 yamt }
1114 1.285 mrg
1115 1.335 mlelstv /* XXX */
1116 1.335 mlelstv bp->b_resid = 0;
1117 1.335 mlelstv
1118 1.335 mlelstv return raiddoaccess(raidPtr, bp);
1119 1.335 mlelstv }
1120 1.1 oster
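/*
 * raiddone: called when RAIDframe has finished a buffer; complete it via
 * dk(9) and release one of the outstanding openings.
 */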
1121 1.335 mlelstv void
1122 1.335 mlelstv raiddone(RF_Raid_t *raidPtr, struct buf *bp)
1123 1.335 mlelstv {
1124 1.335 mlelstv struct raid_softc *rs;
1125 1.335 mlelstv struct dk_softc *dksc;
1126 1.34 oster
1127 1.335 mlelstv rs = raidPtr->softc;
1128 1.335 mlelstv dksc = &rs->sc_dksc;
1129 1.34 oster
1130 1.335 mlelstv dk_done(dksc, bp);
1131 1.34 oster
1132 1.335 mlelstv rf_lock_mutex2(raidPtr->mutex);
1133 1.335 mlelstv raidPtr->openings++;
1134 1.335 mlelstv rf_unlock_mutex2(raidPtr->mutex);
1135 1.196 yamt
1136 1.335 mlelstv /* schedule more IO */
1137 1.335 mlelstv raid_wakeup(raidPtr);
1138 1.1 oster }
1139 1.324 mrg
1140 1.1 oster /* ARGSUSED */
1141 1.324 mrg static int
1142 1.222 christos raidread(dev_t dev, struct uio *uio, int flags)
1143 1.1 oster {
1144 1.9 oster int unit = raidunit(dev);
1145 1.1 oster struct raid_softc *rs;
1146 1.1 oster
1147 1.327 pgoyette if ((rs = raidget(unit, false)) == NULL)
1148 1.300 christos return ENXIO;
1149 1.1 oster
1150 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
1151 1.389 skrll return ENXIO;
1152 1.1 oster
1153 1.389 skrll return physio(raidstrategy, NULL, dev, B_READ, minphys, uio);
1154 1.1 oster
1155 1.1 oster }
1156 1.324 mrg
1157 1.1 oster /* ARGSUSED */
1158 1.324 mrg static int
1159 1.222 christos raidwrite(dev_t dev, struct uio *uio, int flags)
1160 1.1 oster {
1161 1.9 oster int unit = raidunit(dev);
1162 1.1 oster struct raid_softc *rs;
1163 1.1 oster
1164 1.327 pgoyette if ((rs = raidget(unit, false)) == NULL)
1165 1.300 christos return ENXIO;
1166 1.1 oster
1167 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
1168 1.389 skrll return ENXIO;
1169 1.147 oster
1170 1.389 skrll return physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio);
1171 1.1 oster
1172 1.1 oster }
1173 1.1 oster
1174 1.266 dyoung static int
1175 1.266 dyoung raid_detach_unlocked(struct raid_softc *rs)
1176 1.266 dyoung {
1177 1.335 mlelstv struct dk_softc *dksc = &rs->sc_dksc;
1178 1.335 mlelstv RF_Raid_t *raidPtr;
1179 1.266 dyoung int error;
1180 1.266 dyoung
1181 1.300 christos raidPtr = &rs->sc_r;
1182 1.266 dyoung
1183 1.337 mlelstv if (DK_BUSY(dksc, 0) ||
1184 1.337 mlelstv raidPtr->recon_in_progress != 0 ||
1185 1.337 mlelstv raidPtr->parity_rewrite_in_progress != 0 ||
1186 1.337 mlelstv raidPtr->copyback_in_progress != 0)
1187 1.266 dyoung return EBUSY;
1188 1.266 dyoung
1189 1.266 dyoung if ((rs->sc_flags & RAIDF_INITED) == 0)
1190 1.333 mlelstv return 0;
1191 1.333 mlelstv
1192 1.333 mlelstv rs->sc_flags &= ~RAIDF_SHUTDOWN;
1193 1.333 mlelstv
1194 1.333 mlelstv if ((error = rf_Shutdown(raidPtr)) != 0)
1195 1.266 dyoung return error;
1196 1.266 dyoung
1197 1.335 mlelstv rs->sc_flags &= ~RAIDF_INITED;
1198 1.335 mlelstv
1199 1.335 mlelstv /* Kill off any queued buffers */
1200 1.335 mlelstv dk_drain(dksc);
1201 1.335 mlelstv bufq_free(dksc->sc_bufq);
1202 1.335 mlelstv
1203 1.266 dyoung /* Detach the disk. */
1204 1.335 mlelstv dkwedge_delall(&dksc->sc_dkdev);
1205 1.335 mlelstv disk_detach(&dksc->sc_dkdev);
1206 1.335 mlelstv disk_destroy(&dksc->sc_dkdev);
1207 1.335 mlelstv dk_detach(dksc);
1208 1.333 mlelstv
1209 1.266 dyoung return 0;
1210 1.266 dyoung }
1211 1.266 dyoung
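/*
 * rf_fail_disk: validate the request and hand it to rf_ReconThread, which
 * fails the component (and handles any subsequent reconstruction)
 * asynchronously.
 */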
1212 1.366 christos int
1213 1.366 christos rf_fail_disk(RF_Raid_t *raidPtr, struct rf_recon_req *rr)
1214 1.366 christos {
1215 1.366 christos struct rf_recon_req_internal *rrint;
1216 1.366 christos
1217 1.366 christos if (raidPtr->Layout.map->faultsTolerated == 0) {
1218 1.366 christos /* Can't do this on a RAID 0!! */
1219 1.366 christos return EINVAL;
1220 1.366 christos }
1221 1.366 christos
1222 1.366 christos if (rr->col < 0 || rr->col >= raidPtr->numCol) {
1223 1.366 christos /* bad column */
1224 1.366 christos return EINVAL;
1225 1.366 christos }
1226 1.366 christos
1227 1.366 christos rf_lock_mutex2(raidPtr->mutex);
1228 1.366 christos if (raidPtr->status == rf_rs_reconstructing) {
1229 1.366 christos /* you can't fail a disk while we're reconstructing! */
1230 1.366 christos /* XXX wrong for RAID6 */
1231 1.366 christos goto out;
1232 1.366 christos }
1233 1.366 christos if ((raidPtr->Disks[rr->col].status == rf_ds_optimal) &&
1234 1.366 christos (raidPtr->numFailures > 0)) {
1235 1.366 christos /* some other component has failed. Let's not make
1236 1.366 christos things worse. XXX wrong for RAID6 */
1237 1.366 christos goto out;
1238 1.366 christos }
1239 1.366 christos if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
1240 1.366 christos /* Can't fail a spared disk! */
1241 1.366 christos goto out;
1242 1.366 christos }
1243 1.366 christos rf_unlock_mutex2(raidPtr->mutex);
1244 1.366 christos
1245 1.366 christos /* make a copy of the recon request so that we don't rely on
1246 1.366 christos * the user's buffer */
1247 1.374 christos rrint = RF_Malloc(sizeof(*rrint));
1248 1.366 christos if (rrint == NULL)
1249 1.366 christos return(ENOMEM);
1250 1.366 christos rrint->col = rr->col;
1251 1.366 christos rrint->flags = rr->flags;
1252 1.366 christos rrint->raidPtr = raidPtr;
1253 1.366 christos
1254 1.366 christos return RF_CREATE_THREAD(raidPtr->recon_thread, rf_ReconThread,
1255 1.366 christos rrint, "raid_recon");
1256 1.366 christos out:
1257 1.366 christos rf_unlock_mutex2(raidPtr->mutex);
1258 1.366 christos return EINVAL;
1259 1.366 christos }
1260 1.366 christos
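/*
 * rf_copyinspecificbuf: copy the layout-specific configuration data in
 * from userland, replacing k_cfg->layoutSpecific with a kernel buffer.
 */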
1261 1.324 mrg static int
1262 1.367 christos rf_copyinspecificbuf(RF_Config_t *k_cfg)
1263 1.367 christos {
1264 1.367 christos /* allocate a buffer for the layout-specific data, and copy it in */
1265 1.367 christos if (k_cfg->layoutSpecificSize == 0)
1266 1.367 christos return 0;
1267 1.367 christos
1268 1.367 christos if (k_cfg->layoutSpecificSize > 10000) {
1269 1.367 christos /* sanity check */
1270 1.367 christos return EINVAL;
1271 1.367 christos }
1272 1.367 christos
1273 1.367 christos u_char *specific_buf;
1274 1.374 christos specific_buf = RF_Malloc(k_cfg->layoutSpecificSize);
1275 1.367 christos if (specific_buf == NULL)
1276 1.367 christos return ENOMEM;
1277 1.367 christos
1278 1.367 christos int retcode = copyin(k_cfg->layoutSpecific, specific_buf,
1279 1.367 christos k_cfg->layoutSpecificSize);
1280 1.367 christos if (retcode) {
1281 1.367 christos RF_Free(specific_buf, k_cfg->layoutSpecificSize);
1282 1.367 christos db1_printf(("%s: retcode=%d copyin.2\n", __func__, retcode));
1283 1.367 christos return retcode;
1284 1.367 christos }
1285 1.367 christos
1286 1.367 christos k_cfg->layoutSpecific = specific_buf;
1287 1.367 christos return 0;
1288 1.367 christos }
1289 1.367 christos
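/*
 * rf_getConfiguration: copy the user's RF_Config_t into a freshly
 * allocated kernel buffer, returned via k_cfg.  Fails if this unit is
 * already configured.
 */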
1290 1.367 christos static int
1291 1.367 christos rf_getConfiguration(struct raid_softc *rs, void *data, RF_Config_t **k_cfg)
1292 1.367 christos {
1293 1.372 christos RF_Config_t *u_cfg = *((RF_Config_t **) data);
1294 1.372 christos
1295 1.367 christos if (rs->sc_r.valid) {
1296 1.367 christos /* There is a valid RAID set running on this unit! */
1297 1.367 christos printf("raid%d: Device already configured!\n", rs->sc_unit);
1298 1.367 christos return EINVAL;
1299 1.367 christos }
1300 1.367 christos
1301 1.367 christos /* copy-in the configuration information */
1302 1.367 christos /* data points to a pointer to the configuration structure */
1303 1.374 christos *k_cfg = RF_Malloc(sizeof(**k_cfg));
1304 1.367 christos if (*k_cfg == NULL) {
1305 1.367 christos return ENOMEM;
1306 1.367 christos }
1307 1.373 christos int retcode = copyin(u_cfg, *k_cfg, sizeof(RF_Config_t));
1308 1.367 christos if (retcode == 0)
1309 1.367 christos return 0;
1310 1.367 christos RF_Free(*k_cfg, sizeof(RF_Config_t));
1311 1.367 christos db1_printf(("%s: retcode=%d copyin.1\n", __func__, retcode));
1312 1.367 christos rs->sc_flags |= RAIDF_SHUTDOWN;
1313 1.367 christos return retcode;
1314 1.367 christos }
1315 1.367 christos
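/*
 * rf_construct: configure a RAID set from k_cfg: sanity-check the
 * configuration, run rf_Configure(), and bring the unit on line.
 */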
1316 1.367 christos int
1317 1.367 christos rf_construct(struct raid_softc *rs, RF_Config_t *k_cfg)
1318 1.367 christos {
1319 1.408 mrg int retcode, i;
1320 1.367 christos RF_Raid_t *raidPtr = &rs->sc_r;
1321 1.367 christos
1322 1.367 christos rs->sc_flags &= ~RAIDF_SHUTDOWN;
1323 1.367 christos
1324 1.367 christos if ((retcode = rf_copyinspecificbuf(k_cfg)) != 0)
1325 1.367 christos goto out;
1326 1.367 christos
1327 1.367 christos /* should do some kind of sanity check on the configuration.
1328 1.367 christos * Store the sum of all the bytes in the last byte? */
1329 1.367 christos
1330 1.408 mrg /* Force nul-termination on all strings. */
1331 1.408 mrg #define ZERO_FINAL(s) do { s[sizeof(s) - 1] = '\0'; } while (0)
1332 1.408 mrg for (i = 0; i < RF_MAXCOL; i++) {
1333 1.408 mrg ZERO_FINAL(k_cfg->devnames[0][i]);
1334 1.408 mrg }
1335 1.408 mrg for (i = 0; i < RF_MAXSPARE; i++) {
1336 1.408 mrg ZERO_FINAL(k_cfg->spare_names[i]);
1337 1.408 mrg }
1338 1.408 mrg for (i = 0; i < RF_MAXDBGV; i++) {
1339 1.408 mrg ZERO_FINAL(k_cfg->debugVars[i]);
1340 1.408 mrg }
1341 1.408 mrg #undef ZERO_FINAL
1342 1.408 mrg
1343 1.408 mrg /* Check some basic limits. */
1344 1.408 mrg if (k_cfg->numCol >= RF_MAXCOL || k_cfg->numCol < 0) {
1345 1.408 mrg retcode = EINVAL;
1346 1.408 mrg goto out;
1347 1.408 mrg }
1348 1.408 mrg if (k_cfg->numSpare >= RF_MAXSPARE || k_cfg->numSpare < 0) {
1349 1.408 mrg retcode = EINVAL;
1350 1.408 mrg goto out;
1351 1.408 mrg }
1352 1.408 mrg
1353 1.367 christos /* configure the system */
1354 1.367 christos
1355 1.367 christos /*
1356 1.367 christos * Clear the entire RAID descriptor, just to make sure
1357 1.367 christos * there is no stale data left in the case of a
1358 1.367 christos * reconfiguration
1359 1.367 christos */
1360 1.367 christos memset(raidPtr, 0, sizeof(*raidPtr));
1361 1.367 christos raidPtr->softc = rs;
1362 1.367 christos raidPtr->raidid = rs->sc_unit;
1363 1.367 christos
1364 1.367 christos retcode = rf_Configure(raidPtr, k_cfg, NULL);
1365 1.367 christos
1366 1.367 christos if (retcode == 0) {
1367 1.367 christos /* allow this many simultaneous IO's to
1368 1.367 christos this RAID device */
1369 1.367 christos raidPtr->openings = RAIDOUTSTANDING;
1370 1.367 christos
1371 1.367 christos raidinit(rs);
1372 1.367 christos raid_wakeup(raidPtr);
1373 1.367 christos rf_markalldirty(raidPtr);
1374 1.367 christos }
1375 1.367 christos
1376 1.367 christos /* free the buffers. No return code here. */
1377 1.367 christos if (k_cfg->layoutSpecificSize) {
1378 1.367 christos RF_Free(k_cfg->layoutSpecific, k_cfg->layoutSpecificSize);
1379 1.367 christos }
1380 1.367 christos out:
1381 1.367 christos RF_Free(k_cfg, sizeof(RF_Config_t));
1382 1.367 christos if (retcode) {
1383 1.367 christos /*
1384 1.367 christos * If configuration failed, set sc_flags so that we
1385 1.367 christos * will detach the device when we close it.
1386 1.367 christos */
1387 1.367 christos rs->sc_flags |= RAIDF_SHUTDOWN;
1388 1.367 christos }
1389 1.367 christos return retcode;
1390 1.367 christos }
1391 1.367 christos
1392 1.367 christos #if RF_DISABLED
1393 1.367 christos static int
1394 1.367 christos rf_set_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
1395 1.367 christos {
1396 1.367 christos
1397 1.367 christos /* XXX check the label for valid stuff... */
1398 1.367 christos /* Note that some things *should not* get modified --
1399 1.367 christos the user should be re-initing the labels instead of
1400 1.367 christos trying to patch things.
1401 1.367 christos */
1402 1.367 christos #ifdef DEBUG
1403 1.367 christos int raidid = raidPtr->raidid;
1404 1.367 christos printf("raid%d: Got component label:\n", raidid);
1405 1.367 christos printf("raid%d: Version: %d\n", raidid, clabel->version);
1406 1.367 christos printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1407 1.367 christos printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1408 1.367 christos printf("raid%d: Column: %d\n", raidid, clabel->column);
1409 1.367 christos printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1410 1.367 christos printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1411 1.367 christos printf("raid%d: Status: %d\n", raidid, clabel->status);
1412 1.367 christos #endif /* DEBUG */
1413 1.367 christos clabel->row = 0;
1414 1.367 christos int column = clabel->column;
1415 1.367 christos
1416 1.367 christos if ((column < 0) || (column >= raidPtr->numCol)) {
1417 1.367 christos return(EINVAL);
1418 1.367 christos }
1419 1.367 christos
1420 1.367 christos /* XXX this isn't allowed to do anything for now :-) */
1421 1.367 christos
1422 1.367 christos /* XXX and before it is, we need to fill in the rest
1423 1.367 christos of the fields!?!?!?! */
1424 1.367 christos memcpy(raidget_component_label(raidPtr, column),
1425 1.367 christos clabel, sizeof(*clabel));
1426 1.367 christos raidflush_component_label(raidPtr, column);
1427 1.367 christos return 0;
1428 1.367 christos }
1429 1.367 christos #endif
1430 1.367 christos
1431 1.367 christos static int
1432 1.367 christos rf_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
1433 1.367 christos {
1434 1.367 christos /*
1435 1.367 christos we only want the serial number from
1436 1.367 christos the above. We get all the rest of the information
1437 1.367 christos from the config that was used to create this RAID
1438 1.367 christos set.
1439 1.367 christos */
1440 1.367 christos
1441 1.367 christos raidPtr->serial_number = clabel->serial_number;
1442 1.367 christos
1443 1.367 christos for (int column = 0; column < raidPtr->numCol; column++) {
1444 1.367 christos RF_RaidDisk_t *diskPtr = &raidPtr->Disks[column];
1445 1.367 christos if (RF_DEAD_DISK(diskPtr->status))
1446 1.367 christos continue;
1447 1.367 christos RF_ComponentLabel_t *ci_label = raidget_component_label(
1448 1.367 christos raidPtr, column);
1449 1.367 christos /* Zeroing this is important. */
1450 1.367 christos memset(ci_label, 0, sizeof(*ci_label));
1451 1.367 christos raid_init_component_label(raidPtr, ci_label);
1452 1.367 christos ci_label->serial_number = raidPtr->serial_number;
 1453 1.367 christos 		ci_label->row = 0; /* we don't pretend to support more */
1454 1.367 christos rf_component_label_set_partitionsize(ci_label,
1455 1.367 christos diskPtr->partitionSize);
1456 1.367 christos ci_label->column = column;
1457 1.367 christos raidflush_component_label(raidPtr, column);
1458 1.367 christos /* XXXjld what about the spares? */
1459 1.367 christos }
1460 1.385 riastrad
1461 1.367 christos return 0;
1462 1.367 christos }
1463 1.367 christos
1464 1.367 christos static int
1465 1.367 christos rf_rebuild_in_place(RF_Raid_t *raidPtr, RF_SingleComponent_t *componentPtr)
1466 1.367 christos {
1467 1.367 christos
1468 1.367 christos if (raidPtr->Layout.map->faultsTolerated == 0) {
1469 1.367 christos /* Can't do this on a RAID 0!! */
1470 1.367 christos return EINVAL;
1471 1.367 christos }
1472 1.367 christos
1473 1.367 christos if (raidPtr->recon_in_progress == 1) {
1474 1.367 christos /* a reconstruct is already in progress! */
1475 1.367 christos return EINVAL;
1476 1.367 christos }
1477 1.367 christos
1478 1.367 christos RF_SingleComponent_t component;
1479 1.367 christos memcpy(&component, componentPtr, sizeof(RF_SingleComponent_t));
1480 1.367 christos component.row = 0; /* we don't support any more */
1481 1.367 christos int column = component.column;
1482 1.367 christos
1483 1.367 christos if ((column < 0) || (column >= raidPtr->numCol)) {
1484 1.367 christos return EINVAL;
1485 1.367 christos }
1486 1.367 christos
1487 1.367 christos rf_lock_mutex2(raidPtr->mutex);
1488 1.367 christos if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
1489 1.367 christos (raidPtr->numFailures > 0)) {
1490 1.367 christos /* XXX 0 above shouldn't be constant!!! */
1491 1.367 christos /* some component other than this has failed.
1492 1.367 christos Let's not make things worse than they already
1493 1.367 christos are... */
1494 1.367 christos printf("raid%d: Unable to reconstruct to disk at:\n",
1495 1.367 christos raidPtr->raidid);
1496 1.367 christos printf("raid%d: Col: %d Too many failures.\n",
1497 1.367 christos raidPtr->raidid, column);
1498 1.367 christos rf_unlock_mutex2(raidPtr->mutex);
1499 1.367 christos return EINVAL;
1500 1.367 christos }
1501 1.367 christos
1502 1.367 christos if (raidPtr->Disks[column].status == rf_ds_reconstructing) {
1503 1.367 christos printf("raid%d: Unable to reconstruct to disk at:\n",
1504 1.367 christos raidPtr->raidid);
1505 1.367 christos printf("raid%d: Col: %d "
1506 1.367 christos "Reconstruction already occurring!\n",
1507 1.367 christos raidPtr->raidid, column);
1508 1.367 christos
1509 1.367 christos rf_unlock_mutex2(raidPtr->mutex);
1510 1.367 christos return EINVAL;
1511 1.367 christos }
1512 1.367 christos
1513 1.367 christos if (raidPtr->Disks[column].status == rf_ds_spared) {
1514 1.367 christos rf_unlock_mutex2(raidPtr->mutex);
1515 1.367 christos return EINVAL;
1516 1.367 christos }
1517 1.367 christos
1518 1.367 christos rf_unlock_mutex2(raidPtr->mutex);
1519 1.367 christos
1520 1.367 christos struct rf_recon_req_internal *rrint;
1521 1.374 christos rrint = RF_Malloc(sizeof(*rrint));
1522 1.367 christos if (rrint == NULL)
1523 1.367 christos return ENOMEM;
1524 1.367 christos
1525 1.367 christos rrint->col = column;
1526 1.367 christos rrint->raidPtr = raidPtr;
1527 1.367 christos
1528 1.367 christos return RF_CREATE_THREAD(raidPtr->recon_thread,
1529 1.367 christos rf_ReconstructInPlaceThread, rrint, "raid_reconip");
1530 1.367 christos }
1531 1.367 christos
1532 1.367 christos static int
1533 1.367 christos rf_check_recon_status(RF_Raid_t *raidPtr, int *data)
1534 1.367 christos {
1535 1.367 christos /*
 1536 1.367 christos 	 * This makes no sense on a RAID 0, or if we are not reconstructing,
1537 1.367 christos * so tell the user it's done.
1538 1.367 christos */
1539 1.367 christos if (raidPtr->Layout.map->faultsTolerated == 0 ||
1540 1.367 christos raidPtr->status != rf_rs_reconstructing) {
1541 1.367 christos *data = 100;
1542 1.367 christos return 0;
1543 1.367 christos }
1544 1.367 christos if (raidPtr->reconControl->numRUsTotal == 0) {
1545 1.367 christos *data = 0;
1546 1.367 christos return 0;
1547 1.367 christos }
1548 1.367 christos *data = (raidPtr->reconControl->numRUsComplete * 100
1549 1.367 christos / raidPtr->reconControl->numRUsTotal);
1550 1.367 christos return 0;
1551 1.367 christos }
1552 1.367 christos
1553 1.408 mrg /*
1554 1.408 mrg * Copy a RF_SingleComponent_t from 'data', ensuring nul-termination
1555 1.408 mrg * on the component_name[] array.
1556 1.408 mrg */
1557 1.408 mrg static void
1558 1.408 mrg rf_copy_single_component(RF_SingleComponent_t *component, void *data)
1559 1.408 mrg {
1560 1.408 mrg
1561 1.408 mrg memcpy(component, data, sizeof *component);
1562 1.408 mrg component->component_name[sizeof(component->component_name) - 1] = '\0';
1563 1.408 mrg }
1564 1.408 mrg
1565 1.367 christos static int
1566 1.225 christos raidioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1567 1.1 oster {
1568 1.9 oster int unit = raidunit(dev);
1569 1.335 mlelstv int part, pmask;
1570 1.1 oster struct raid_softc *rs;
1571 1.335 mlelstv struct dk_softc *dksc;
1572 1.367 christos RF_Config_t *k_cfg;
1573 1.42 oster RF_Raid_t *raidPtr;
1574 1.41 oster RF_AccTotals_t *totals;
1575 1.367 christos RF_SingleComponent_t component;
1576 1.371 oster RF_DeviceConfig_t *d_cfg, *ucfgp;
1577 1.11 oster int retcode = 0;
1578 1.11 oster int column;
1579 1.48 oster RF_ComponentLabel_t *clabel;
1580 1.353 mrg int d;
1581 1.1 oster
1582 1.327 pgoyette if ((rs = raidget(unit, false)) == NULL)
1583 1.300 christos return ENXIO;
1584 1.366 christos
1585 1.335 mlelstv dksc = &rs->sc_dksc;
1586 1.300 christos raidPtr = &rs->sc_r;
1587 1.1 oster
1588 1.276 mrg db1_printf(("raidioctl: %d %d %d %lu\n", (int) dev,
1589 1.366 christos (int) DISKPART(dev), (int) unit, cmd));
1590 1.1 oster
1591 1.410 oster /* Only CONFIGURE and RESCAN can be done without the RAID being initialized. */
1592 1.410 oster switch (cmd) {
1593 1.410 oster case RAIDFRAME_CONFIGURE:
1594 1.410 oster case RAIDFRAME_RESCAN:
1595 1.410 oster break;
1596 1.410 oster default:
1597 1.410 oster if (!rf_inited(rs))
1598 1.410 oster return ENXIO;
1599 1.410 oster }
1600 1.9 oster
1601 1.358 pgoyette switch (cmd) {
1602 1.1 oster /* configure the system */
1603 1.1 oster case RAIDFRAME_CONFIGURE:
1604 1.367 christos if ((retcode = rf_getConfiguration(rs, data, &k_cfg)) != 0)
1605 1.367 christos return retcode;
1606 1.367 christos return rf_construct(rs, k_cfg);
1607 1.9 oster
1608 1.9 oster /* shutdown the system */
1609 1.1 oster case RAIDFRAME_SHUTDOWN:
1610 1.9 oster
1611 1.266 dyoung part = DISKPART(dev);
1612 1.266 dyoung pmask = (1 << part);
1613 1.266 dyoung
1614 1.367 christos if ((retcode = raidlock(rs)) != 0)
1615 1.367 christos return retcode;
1616 1.1 oster
1617 1.337 mlelstv if (DK_BUSY(dksc, pmask) ||
1618 1.337 mlelstv raidPtr->recon_in_progress != 0 ||
1619 1.337 mlelstv raidPtr->parity_rewrite_in_progress != 0 ||
1620 1.337 mlelstv raidPtr->copyback_in_progress != 0)
1621 1.266 dyoung retcode = EBUSY;
1622 1.266 dyoung else {
1623 1.335 mlelstv /* detach and free on close */
1624 1.266 dyoung rs->sc_flags |= RAIDF_SHUTDOWN;
1625 1.266 dyoung retcode = 0;
1626 1.9 oster }
1627 1.11 oster
1628 1.266 dyoung raidunlock(rs);
1629 1.1 oster
1630 1.367 christos return retcode;
1631 1.11 oster case RAIDFRAME_GET_COMPONENT_LABEL:
1632 1.353 mrg return rf_get_component_label(raidPtr, data);
1633 1.11 oster
1634 1.367 christos #if RF_DISABLED
1635 1.11 oster case RAIDFRAME_SET_COMPONENT_LABEL:
1636 1.367 christos return rf_set_component_label(raidPtr, data);
1637 1.367 christos #endif
1638 1.11 oster
1639 1.367 christos case RAIDFRAME_INIT_LABELS:
1640 1.367 christos return rf_init_component_label(raidPtr, data);
1641 1.12 oster
1642 1.48 oster case RAIDFRAME_SET_AUTOCONFIG:
1643 1.78 minoura d = rf_set_autoconfig(raidPtr, *(int *) data);
1644 1.186 perry printf("raid%d: New autoconfig value is: %d\n",
1645 1.123 oster raidPtr->raidid, d);
1646 1.78 minoura *(int *) data = d;
1647 1.367 christos return retcode;
1648 1.48 oster
1649 1.48 oster case RAIDFRAME_SET_ROOT:
1650 1.78 minoura d = rf_set_rootpartition(raidPtr, *(int *) data);
1651 1.186 perry printf("raid%d: New rootpartition value is: %d\n",
1652 1.123 oster raidPtr->raidid, d);
1653 1.78 minoura *(int *) data = d;
1654 1.367 christos return retcode;
1655 1.9 oster
1656 1.1 oster /* initialize all parity */
1657 1.1 oster case RAIDFRAME_REWRITEPARITY:
1658 1.1 oster
1659 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1660 1.17 oster /* Parity for RAID 0 is trivially correct */
1661 1.42 oster raidPtr->parity_good = RF_RAID_CLEAN;
1662 1.367 christos return 0;
1663 1.17 oster }
1664 1.186 perry
1665 1.42 oster if (raidPtr->parity_rewrite_in_progress == 1) {
1666 1.37 oster /* Re-write is already in progress! */
1667 1.367 christos return EINVAL;
1668 1.37 oster }
1669 1.27 oster
1670 1.367 christos return RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1671 1.367 christos rf_RewriteParityThread, raidPtr,"raid_parity");
1672 1.11 oster
1673 1.11 oster case RAIDFRAME_ADD_HOT_SPARE:
1674 1.408 mrg rf_copy_single_component(&component, data);
1675 1.367 christos return rf_add_hot_spare(raidPtr, &component);
1676 1.11 oster
1677 1.11 oster case RAIDFRAME_REMOVE_HOT_SPARE:
1678 1.367 christos return retcode;
1679 1.73 oster
1680 1.73 oster case RAIDFRAME_DELETE_COMPONENT:
1681 1.408 mrg rf_copy_single_component(&component, data);
1682 1.367 christos return rf_delete_component(raidPtr, &component);
1683 1.73 oster
1684 1.73 oster case RAIDFRAME_INCORPORATE_HOT_SPARE:
1685 1.408 mrg rf_copy_single_component(&component, data);
1686 1.367 christos return rf_incorporate_hot_spare(raidPtr, &component);
1687 1.11 oster
1688 1.12 oster case RAIDFRAME_REBUILD_IN_PLACE:
1689 1.367 christos return rf_rebuild_in_place(raidPtr, data);
1690 1.398 oster
1691 1.366 christos case RAIDFRAME_GET_INFO:
1692 1.371 oster ucfgp = *(RF_DeviceConfig_t **)data;
1693 1.374 christos d_cfg = RF_Malloc(sizeof(*d_cfg));
1694 1.41 oster if (d_cfg == NULL)
1695 1.366 christos return ENOMEM;
1696 1.353 mrg retcode = rf_get_info(raidPtr, d_cfg);
1697 1.353 mrg if (retcode == 0) {
1698 1.371 oster retcode = copyout(d_cfg, ucfgp, sizeof(*d_cfg));
1699 1.41 oster }
1700 1.41 oster RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1701 1.366 christos return retcode;
1702 1.9 oster
1703 1.22 oster case RAIDFRAME_CHECK_PARITY:
1704 1.42 oster *(int *) data = raidPtr->parity_good;
1705 1.367 christos return 0;
1706 1.41 oster
1707 1.269 jld case RAIDFRAME_PARITYMAP_STATUS:
1708 1.273 jld if (rf_paritymap_ineligible(raidPtr))
1709 1.273 jld return EINVAL;
1710 1.367 christos rf_paritymap_status(raidPtr->parity_map, data);
1711 1.269 jld return 0;
1712 1.269 jld
1713 1.269 jld case RAIDFRAME_PARITYMAP_SET_PARAMS:
1714 1.273 jld if (rf_paritymap_ineligible(raidPtr))
1715 1.273 jld return EINVAL;
1716 1.269 jld if (raidPtr->parity_map == NULL)
1717 1.269 jld return ENOENT; /* ??? */
1718 1.367 christos if (rf_paritymap_set_params(raidPtr->parity_map, data, 1) != 0)
1719 1.269 jld return EINVAL;
1720 1.269 jld return 0;
1721 1.269 jld
1722 1.269 jld case RAIDFRAME_PARITYMAP_GET_DISABLE:
1723 1.273 jld if (rf_paritymap_ineligible(raidPtr))
1724 1.273 jld return EINVAL;
1725 1.269 jld *(int *) data = rf_paritymap_get_disable(raidPtr);
1726 1.269 jld return 0;
1727 1.269 jld
1728 1.269 jld case RAIDFRAME_PARITYMAP_SET_DISABLE:
1729 1.273 jld if (rf_paritymap_ineligible(raidPtr))
1730 1.273 jld return EINVAL;
1731 1.269 jld rf_paritymap_set_disable(raidPtr, *(int *)data);
1732 1.269 jld /* XXX should errors be passed up? */
1733 1.269 jld return 0;
1734 1.269 jld
1735 1.398 oster case RAIDFRAME_RESCAN:
1736 1.398 oster return rf_rescan();
1737 1.398 oster
1738 1.1 oster case RAIDFRAME_RESET_ACCTOTALS:
1739 1.108 thorpej memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1740 1.367 christos return 0;
1741 1.9 oster
1742 1.1 oster case RAIDFRAME_GET_ACCTOTALS:
1743 1.41 oster totals = (RF_AccTotals_t *) data;
1744 1.42 oster *totals = raidPtr->acc_totals;
1745 1.366 christos return 0;
1746 1.9 oster
1747 1.1 oster case RAIDFRAME_KEEP_ACCTOTALS:
1748 1.42 oster raidPtr->keep_acc_totals = *(int *)data;
1749 1.366 christos return 0;
1750 1.9 oster
1751 1.1 oster case RAIDFRAME_GET_SIZE:
1752 1.42 oster *(int *) data = raidPtr->totalSectors;
1753 1.366 christos return 0;
1754 1.1 oster
1755 1.1 oster case RAIDFRAME_FAIL_DISK:
1756 1.366 christos return rf_fail_disk(raidPtr, data);
1757 1.9 oster
1758 1.9 oster /* invoke a copyback operation after recon on whatever disk
1759 1.9 oster * needs it, if any */
1760 1.9 oster case RAIDFRAME_COPYBACK:
1761 1.24 oster
1762 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1763 1.24 oster /* This makes no sense on a RAID 0!! */
1764 1.367 christos return EINVAL;
1765 1.24 oster }
1766 1.24 oster
1767 1.42 oster if (raidPtr->copyback_in_progress == 1) {
1768 1.37 oster /* Copyback is already in progress! */
1769 1.367 christos return EINVAL;
1770 1.37 oster }
1771 1.27 oster
1772 1.367 christos return RF_CREATE_THREAD(raidPtr->copyback_thread,
1773 1.367 christos rf_CopybackThread, raidPtr, "raid_copyback");
1774 1.9 oster
1775 1.1 oster /* return the percentage completion of reconstruction */
1776 1.37 oster case RAIDFRAME_CHECK_RECON_STATUS:
1777 1.367 christos return rf_check_recon_status(raidPtr, data);
1778 1.367 christos
1779 1.83 oster case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1780 1.353 mrg rf_check_recon_status_ext(raidPtr, data);
1781 1.367 christos return 0;
1782 1.9 oster
1783 1.37 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1784 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1785 1.80 oster /* This makes no sense on a RAID 0, so tell the
1786 1.80 oster user it's done. */
1787 1.80 oster *(int *) data = 100;
1788 1.367 christos return 0;
1789 1.37 oster }
1790 1.42 oster if (raidPtr->parity_rewrite_in_progress == 1) {
1791 1.186 perry *(int *) data = 100 *
1792 1.186 perry raidPtr->parity_rewrite_stripes_done /
1793 1.83 oster raidPtr->Layout.numStripe;
1794 1.37 oster } else {
1795 1.37 oster *(int *) data = 100;
1796 1.37 oster }
1797 1.367 christos return 0;
1798 1.37 oster
1799 1.83 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1800 1.353 mrg rf_check_parityrewrite_status_ext(raidPtr, data);
1801 1.367 christos return 0;
1802 1.83 oster
1803 1.37 oster case RAIDFRAME_CHECK_COPYBACK_STATUS:
1804 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1805 1.37 oster /* This makes no sense on a RAID 0 */
1806 1.83 oster *(int *) data = 100;
1807 1.367 christos return 0;
1808 1.37 oster }
1809 1.42 oster if (raidPtr->copyback_in_progress == 1) {
1810 1.42 oster *(int *) data = 100 * raidPtr->copyback_stripes_done /
1811 1.42 oster raidPtr->Layout.numStripe;
1812 1.37 oster } else {
1813 1.37 oster *(int *) data = 100;
1814 1.37 oster }
1815 1.367 christos return 0;
1816 1.37 oster
1817 1.83 oster case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1818 1.353 mrg rf_check_copyback_status_ext(raidPtr, data);
1819 1.353 mrg return 0;
1820 1.37 oster
1821 1.341 christos case RAIDFRAME_SET_LAST_UNIT:
1822 1.341 christos for (column = 0; column < raidPtr->numCol; column++)
1823 1.341 christos if (raidPtr->Disks[column].status != rf_ds_optimal)
1824 1.341 christos return EBUSY;
1825 1.341 christos
1826 1.341 christos for (column = 0; column < raidPtr->numCol; column++) {
1827 1.341 christos clabel = raidget_component_label(raidPtr, column);
1828 1.341 christos clabel->last_unit = *(int *)data;
1829 1.341 christos raidflush_component_label(raidPtr, column);
1830 1.341 christos }
1831 1.341 christos rs->sc_cflags |= RAIDF_UNIT_CHANGED;
1832 1.341 christos return 0;
1833 1.341 christos
1834 1.9 oster /* the sparetable daemon calls this to wait for the kernel to
1835 1.9 oster * need a spare table. this ioctl does not return until a
1836 1.9 oster * spare table is needed. XXX -- calling mpsleep here in the
1837 1.9 oster * ioctl code is almost certainly wrong and evil. -- XXX XXX
1838 1.9 oster * -- I should either compute the spare table in the kernel,
1839 1.9 oster * or have a different -- XXX XXX -- interface (a different
1840 1.42 oster * character device) for delivering the table -- XXX */
1841 1.367 christos #if RF_DISABLED
1842 1.1 oster case RAIDFRAME_SPARET_WAIT:
1843 1.287 mrg rf_lock_mutex2(rf_sparet_wait_mutex);
1844 1.9 oster while (!rf_sparet_wait_queue)
1845 1.287 mrg rf_wait_cond2(rf_sparet_wait_cv, rf_sparet_wait_mutex);
1846 1.367 christos RF_SparetWait_t *waitreq = rf_sparet_wait_queue;
1847 1.1 oster rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1848 1.287 mrg rf_unlock_mutex2(rf_sparet_wait_mutex);
1849 1.9 oster
1850 1.42 oster /* structure assignment */
1851 1.186 perry *((RF_SparetWait_t *) data) = *waitreq;
1852 1.9 oster
1853 1.1 oster RF_Free(waitreq, sizeof(*waitreq));
1854 1.367 christos return 0;
1855 1.9 oster
1856 1.9 oster /* wakes up a process waiting on SPARET_WAIT and puts an error
 1857 1.9 oster 	 * code in it that will cause the daemon to exit */
1858 1.1 oster case RAIDFRAME_ABORT_SPARET_WAIT:
1859 1.374 christos waitreq = RF_Malloc(sizeof(*waitreq));
1860 1.1 oster waitreq->fcol = -1;
1861 1.287 mrg rf_lock_mutex2(rf_sparet_wait_mutex);
1862 1.1 oster waitreq->next = rf_sparet_wait_queue;
1863 1.1 oster rf_sparet_wait_queue = waitreq;
1864 1.367 christos rf_broadcast_cond2(rf_sparet_wait_cv);
1865 1.287 mrg rf_unlock_mutex2(rf_sparet_wait_mutex);
1866 1.367 christos return 0;
1867 1.1 oster
1868 1.9 oster /* used by the spare table daemon to deliver a spare table
1869 1.9 oster * into the kernel */
1870 1.1 oster case RAIDFRAME_SEND_SPARET:
1871 1.9 oster
1872 1.1 oster /* install the spare table */
1873 1.42 oster retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1874 1.9 oster
1875 1.9 oster /* respond to the requestor. the return status of the spare
1876 1.9 oster * table installation is passed in the "fcol" field */
 1877 1.374 christos 		waitreq = RF_Malloc(sizeof(*waitreq));
1878 1.1 oster waitreq->fcol = retcode;
1879 1.287 mrg rf_lock_mutex2(rf_sparet_wait_mutex);
1880 1.1 oster waitreq->next = rf_sparet_resp_queue;
1881 1.1 oster rf_sparet_resp_queue = waitreq;
1882 1.287 mrg rf_broadcast_cond2(rf_sparet_resp_cv);
1883 1.287 mrg rf_unlock_mutex2(rf_sparet_wait_mutex);
1884 1.9 oster
1885 1.367 christos return retcode;
1886 1.367 christos #endif
1887 1.367 christos default:
1888 1.372 christos /*
1889 1.372 christos * Don't bother trying to load compat modules
1890 1.372 christos * if it is not our ioctl. This is more efficient
1891 1.372 christos * and makes rump tests not depend on compat code
1892 1.372 christos */
1893 1.372 christos if (IOCGROUP(cmd) != 'r')
1894 1.372 christos break;
1895 1.367 christos #ifdef _LP64
1896 1.367 christos if ((l->l_proc->p_flag & PK_32) != 0) {
1897 1.367 christos module_autoload("compat_netbsd32_raid",
1898 1.367 christos MODULE_CLASS_EXEC);
1899 1.376 pgoyette MODULE_HOOK_CALL(raidframe_netbsd32_ioctl_hook,
1900 1.367 christos (rs, cmd, data), enosys(), retcode);
1901 1.367 christos if (retcode != EPASSTHROUGH)
1902 1.367 christos return retcode;
1903 1.367 christos }
1904 1.1 oster #endif
1905 1.367 christos module_autoload("compat_raid_80", MODULE_CLASS_EXEC);
1906 1.376 pgoyette MODULE_HOOK_CALL(raidframe_ioctl_80_hook,
1907 1.367 christos (rs, cmd, data), enosys(), retcode);
1908 1.367 christos if (retcode != EPASSTHROUGH)
1909 1.367 christos return retcode;
1910 1.1 oster
1911 1.367 christos module_autoload("compat_raid_50", MODULE_CLASS_EXEC);
1912 1.376 pgoyette MODULE_HOOK_CALL(raidframe_ioctl_50_hook,
1913 1.367 christos (rs, cmd, data), enosys(), retcode);
1914 1.367 christos if (retcode != EPASSTHROUGH)
1915 1.367 christos return retcode;
1916 1.36 oster break; /* fall through to the os-specific code below */
1917 1.1 oster
1918 1.1 oster }
1919 1.9 oster
1920 1.42 oster if (!raidPtr->valid)
1921 1.389 skrll return EINVAL;
1922 1.9 oster
1923 1.1 oster /*
1924 1.1 oster * Add support for "regular" device ioctls here.
1925 1.1 oster */
1926 1.385 riastrad
1927 1.1 oster switch (cmd) {
1928 1.348 jdolecek case DIOCGCACHE:
1929 1.348 jdolecek retcode = rf_get_component_caches(raidPtr, (int *)data);
1930 1.348 jdolecek break;
1931 1.348 jdolecek
1932 1.252 oster case DIOCCACHESYNC:
1933 1.390 christos retcode = rf_sync_component_caches(raidPtr, *(int *)data);
1934 1.347 jdolecek break;
1935 1.298 buhrow
1936 1.1 oster default:
1937 1.346 jdolecek retcode = dk_ioctl(dksc, dev, cmd, data, flag, l);
1938 1.347 jdolecek break;
1939 1.1 oster }
1940 1.346 jdolecek
1941 1.389 skrll return retcode;
1942 1.1 oster
1943 1.1 oster }
1944 1.1 oster
1945 1.1 oster
1946 1.9 oster /* raidinit -- complete the rest of the initialization for the
1947 1.1 oster RAIDframe device. */
1948 1.1 oster
1949 1.1 oster
1950 1.59 oster static void
1951 1.300 christos raidinit(struct raid_softc *rs)
1952 1.1 oster {
1953 1.262 cegger cfdata_t cf;
1954 1.335 mlelstv unsigned int unit;
1955 1.335 mlelstv struct dk_softc *dksc = &rs->sc_dksc;
1956 1.300 christos RF_Raid_t *raidPtr = &rs->sc_r;
1957 1.335 mlelstv device_t dev;
1958 1.1 oster
1959 1.59 oster unit = raidPtr->raidid;
1960 1.1 oster
1961 1.179 itojun /* XXX doesn't check bounds. */
1962 1.335 mlelstv snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%u", unit);
1963 1.1 oster
1964 1.217 oster /* attach the pseudo device */
1965 1.217 oster cf = malloc(sizeof(*cf), M_RAIDFRAME, M_WAITOK);
1966 1.217 oster cf->cf_name = raid_cd.cd_name;
1967 1.217 oster cf->cf_atname = raid_cd.cd_name;
1968 1.217 oster cf->cf_unit = unit;
1969 1.217 oster cf->cf_fstate = FSTATE_STAR;
1970 1.217 oster
1971 1.335 mlelstv dev = config_attach_pseudo(cf);
1972 1.335 mlelstv if (dev == NULL) {
1973 1.217 oster printf("raid%d: config_attach_pseudo failed\n",
1974 1.270 christos raidPtr->raidid);
1975 1.265 pooka free(cf, M_RAIDFRAME);
1976 1.265 pooka return;
1977 1.217 oster }
1978 1.217 oster
1979 1.335 mlelstv /* provide a backpointer to the real softc */
1980 1.335 mlelstv raidsoftc(dev) = rs;
1981 1.335 mlelstv
1982 1.1 oster /* disk_attach actually creates space for the CPU disklabel, among
1983 1.9 oster * other things, so it's critical to call this *BEFORE* we try putzing
1984 1.9 oster * with disklabels. */
1985 1.335 mlelstv dk_init(dksc, dev, DKTYPE_RAID);
1986 1.335 mlelstv disk_init(&dksc->sc_dkdev, rs->sc_xname, &rf_dkdriver);
1987 1.1 oster
1988 1.1 oster /* XXX There may be a weird interaction here between this, and
1989 1.9 oster * protectedSectors, as used in RAIDframe. */
1990 1.11 oster
1991 1.9 oster rs->sc_size = raidPtr->totalSectors;
1992 1.234 oster
1993 1.335 mlelstv /* Attach dk and disk subsystems */
1994 1.335 mlelstv dk_attach(dksc);
1995 1.335 mlelstv disk_attach(&dksc->sc_dkdev);
1996 1.318 mlelstv rf_set_geometry(rs, raidPtr);
1997 1.318 mlelstv
1998 1.335 mlelstv bufq_alloc(&dksc->sc_bufq, "fcfs", BUFQ_SORT_RAWBLOCK);
1999 1.335 mlelstv
 2000 1.335 mlelstv 	/* mark unit as usable */
2001 1.335 mlelstv rs->sc_flags |= RAIDF_INITED;
2002 1.234 oster
2003 1.335 mlelstv dkwedge_discover(&dksc->sc_dkdev);
2004 1.1 oster }
2005 1.335 mlelstv
2006 1.150 oster #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
2007 1.1 oster /* wake up the daemon & tell it to get us a spare table
2008 1.1 oster * XXX
2009 1.9 oster * the entries in the queues should be tagged with the raidPtr
2010 1.186 perry * so that in the extremely rare case that two recons happen at once,
 2011 1.11 oster  * we know for which device we're requesting a spare table
2012 1.1 oster * XXX
2013 1.186 perry *
2014 1.39 oster * XXX This code is not currently used. GO
2015 1.1 oster */
2016 1.186 perry int
2017 1.169 oster rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
2018 1.9 oster {
2019 1.9 oster int retcode;
2020 1.9 oster
2021 1.287 mrg rf_lock_mutex2(rf_sparet_wait_mutex);
2022 1.9 oster req->next = rf_sparet_wait_queue;
2023 1.9 oster rf_sparet_wait_queue = req;
2024 1.289 mrg rf_broadcast_cond2(rf_sparet_wait_cv);
2025 1.9 oster
2026 1.9 oster /* mpsleep unlocks the mutex */
2027 1.9 oster while (!rf_sparet_resp_queue) {
2028 1.289 mrg rf_wait_cond2(rf_sparet_resp_cv, rf_sparet_wait_mutex);
2029 1.9 oster }
2030 1.9 oster req = rf_sparet_resp_queue;
2031 1.9 oster rf_sparet_resp_queue = req->next;
2032 1.287 mrg rf_unlock_mutex2(rf_sparet_wait_mutex);
2033 1.9 oster
2034 1.9 oster retcode = req->fcol;
2035 1.9 oster RF_Free(req, sizeof(*req)); /* this is not the same req as we
2036 1.9 oster * alloc'd */
2037 1.389 skrll return retcode;
2038 1.1 oster }
2039 1.150 oster #endif
2040 1.39 oster
2041 1.186 perry /* a wrapper around rf_DoAccess that extracts appropriate info from the
2042 1.11 oster * bp & passes it down.
2043 1.1 oster * any calls originating in the kernel must use non-blocking I/O
2044 1.1 oster * do some extra sanity checking to return "appropriate" error values for
2045 1.1 oster * certain conditions (to make some standard utilities work)
2046 1.186 perry *
2047 1.34 oster * Formerly known as: rf_DoAccessKernel
2048 1.1 oster */
2049 1.34 oster void
2050 1.169 oster raidstart(RF_Raid_t *raidPtr)
2051 1.1 oster {
2052 1.1 oster struct raid_softc *rs;
2053 1.335 mlelstv struct dk_softc *dksc;
2054 1.1 oster
2055 1.300 christos rs = raidPtr->softc;
2056 1.335 mlelstv dksc = &rs->sc_dksc;
2057 1.56 oster /* quick check to see if anything has died recently */
2058 1.291 mrg rf_lock_mutex2(raidPtr->mutex);
2059 1.56 oster if (raidPtr->numNewFailures > 0) {
2060 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
2061 1.186 perry rf_update_component_labels(raidPtr,
2062 1.91 oster RF_NORMAL_COMPONENT_UPDATE);
2063 1.291 mrg rf_lock_mutex2(raidPtr->mutex);
2064 1.56 oster raidPtr->numNewFailures--;
2065 1.56 oster }
2066 1.335 mlelstv rf_unlock_mutex2(raidPtr->mutex);
2067 1.56 oster
2068 1.335 mlelstv if ((rs->sc_flags & RAIDF_INITED) == 0) {
2069 1.335 mlelstv printf("raid%d: raidstart not ready\n", raidPtr->raidid);
2070 1.335 mlelstv return;
2071 1.335 mlelstv }
2072 1.34 oster
2073 1.335 mlelstv dk_start(dksc, NULL);
2074 1.335 mlelstv }
2075 1.34 oster
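/*
 * raiddoaccess -- translate a struct buf into a RAIDframe access and
 * hand it to rf_DoAccess().  Returns EAGAIN when no openings are
 * available (so the caller can retry the buf later) and ENOSPC when
 * the request falls outside the RAID set or is not a multiple of the
 * sector size.
 */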
2076 1.335 mlelstv static int
2077 1.335 mlelstv raiddoaccess(RF_Raid_t *raidPtr, struct buf *bp)
2078 1.335 mlelstv {
2079 1.335 mlelstv RF_SectorCount_t num_blocks, pb, sum;
2080 1.335 mlelstv RF_RaidAddr_t raid_addr;
2081 1.335 mlelstv daddr_t blocknum;
2082 1.335 mlelstv int rc;
2083 1.186 perry
2084 1.335 mlelstv rf_lock_mutex2(raidPtr->mutex);
2085 1.335 mlelstv if (raidPtr->openings == 0) {
2086 1.335 mlelstv rf_unlock_mutex2(raidPtr->mutex);
2087 1.335 mlelstv return EAGAIN;
2088 1.335 mlelstv }
2089 1.335 mlelstv rf_unlock_mutex2(raidPtr->mutex);
2090 1.186 perry
2091 1.335 mlelstv blocknum = bp->b_rawblkno;
2092 1.186 perry
2093 1.335 mlelstv db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
2094 1.335 mlelstv (int) blocknum));
2095 1.1 oster
2096 1.335 mlelstv db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
2097 1.335 mlelstv db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
2098 1.1 oster
2099 1.335 mlelstv /* *THIS* is where we adjust what block we're going to...
2100 1.335 mlelstv * but DO NOT TOUCH bp->b_blkno!!! */
2101 1.335 mlelstv raid_addr = blocknum;
2102 1.335 mlelstv
2103 1.335 mlelstv num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
2104 1.335 mlelstv pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
2105 1.335 mlelstv sum = raid_addr + num_blocks + pb;
2106 1.335 mlelstv if (1 || rf_debugKernelAccess) {
2107 1.335 mlelstv db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
2108 1.335 mlelstv (int) raid_addr, (int) sum, (int) num_blocks,
2109 1.335 mlelstv (int) pb, (int) bp->b_resid));
2110 1.335 mlelstv }
2111 1.335 mlelstv if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
2112 1.335 mlelstv || (sum < num_blocks) || (sum < pb)) {
2113 1.335 mlelstv rc = ENOSPC;
2114 1.335 mlelstv goto done;
2115 1.335 mlelstv }
2116 1.335 mlelstv /*
2117 1.335 mlelstv * XXX rf_DoAccess() should do this, not just DoAccessKernel()
2118 1.335 mlelstv */
2119 1.186 perry
2120 1.335 mlelstv if (bp->b_bcount & raidPtr->sectorMask) {
2121 1.335 mlelstv rc = ENOSPC;
2122 1.335 mlelstv goto done;
2123 1.335 mlelstv }
2124 1.335 mlelstv db1_printf(("Calling DoAccess..\n"));
2125 1.99 oster
2126 1.20 oster
2127 1.335 mlelstv rf_lock_mutex2(raidPtr->mutex);
2128 1.335 mlelstv raidPtr->openings--;
2129 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
2130 1.20 oster
2131 1.335 mlelstv /* don't ever condition on bp->b_flags & B_WRITE.
2132 1.335 mlelstv * always condition on B_READ instead */
2133 1.7 explorer
2134 1.335 mlelstv rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
2135 1.335 mlelstv RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
2136 1.396 oster raid_addr, num_blocks,
2137 1.335 mlelstv bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
2138 1.335 mlelstv
2139 1.335 mlelstv done:
2140 1.335 mlelstv return rc;
2141 1.335 mlelstv }
2142 1.7 explorer
2143 1.1 oster /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
2144 1.1 oster
2145 1.186 perry int
2146 1.169 oster rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
2147 1.1 oster {
2148 1.9 oster int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
2149 1.1 oster struct buf *bp;
2150 1.9 oster
2151 1.1 oster req->queue = queue;
2152 1.1 oster bp = req->bp;
2153 1.1 oster
2154 1.1 oster switch (req->type) {
2155 1.9 oster case RF_IO_TYPE_NOP: /* used primarily to unlock a locked queue */
2156 1.1 oster /* XXX need to do something extra here.. */
2157 1.9 oster /* I'm leaving this in, as I've never actually seen it used,
2158 1.9 oster * and I'd like folks to report it... GO */
2159 1.391 mrg printf("%s: WAKEUP CALLED\n", __func__);
2160 1.1 oster queue->numOutstanding++;
2161 1.1 oster
2162 1.197 oster bp->b_flags = 0;
2163 1.207 simonb bp->b_private = req;
2164 1.1 oster
2165 1.194 oster KernelWakeupFunc(bp);
2166 1.1 oster break;
2167 1.9 oster
2168 1.1 oster case RF_IO_TYPE_READ:
2169 1.1 oster case RF_IO_TYPE_WRITE:
2170 1.175 oster #if RF_ACC_TRACE > 0
2171 1.1 oster if (req->tracerec) {
2172 1.1 oster RF_ETIMER_START(req->tracerec->timer);
2173 1.1 oster }
2174 1.175 oster #endif
2175 1.194 oster InitBP(bp, queue->rf_cinfo->ci_vp,
2176 1.197 oster op, queue->rf_cinfo->ci_dev,
2177 1.9 oster req->sectorOffset, req->numSector,
2178 1.9 oster req->buf, KernelWakeupFunc, (void *) req,
2179 1.384 jdolecek queue->raidPtr->logBytesPerSector);
2180 1.1 oster
2181 1.1 oster if (rf_debugKernelAccess) {
2182 1.9 oster db1_printf(("dispatch: bp->b_blkno = %ld\n",
2183 1.9 oster (long) bp->b_blkno));
2184 1.1 oster }
2185 1.1 oster queue->numOutstanding++;
2186 1.1 oster queue->last_deq_sector = req->sectorOffset;
2187 1.9 oster /* acc wouldn't have been let in if there were any pending
2188 1.9 oster * reqs at any other priority */
2189 1.1 oster queue->curPriority = req->priority;
2190 1.1 oster
2191 1.166 oster db1_printf(("Going for %c to unit %d col %d\n",
2192 1.186 perry req->type, queue->raidPtr->raidid,
2193 1.166 oster queue->col));
2194 1.1 oster db1_printf(("sector %d count %d (%d bytes) %d\n",
2195 1.9 oster (int) req->sectorOffset, (int) req->numSector,
2196 1.9 oster (int) (req->numSector <<
2197 1.9 oster queue->raidPtr->logBytesPerSector),
2198 1.9 oster (int) queue->raidPtr->logBytesPerSector));
2199 1.256 oster
2200 1.256 oster /*
2201 1.385 riastrad * XXX: drop lock here since this can block at
2202 1.256 oster * least with backing SCSI devices. Retake it
2203 1.256 oster * to minimize fuss with calling interfaces.
2204 1.256 oster */
2205 1.256 oster
2206 1.256 oster RF_UNLOCK_QUEUE_MUTEX(queue, "unusedparam");
2207 1.247 oster bdev_strategy(bp);
2208 1.256 oster RF_LOCK_QUEUE_MUTEX(queue, "unusedparam");
2209 1.1 oster break;
2210 1.9 oster
2211 1.1 oster default:
2212 1.1 oster panic("bad req->type in rf_DispatchKernelIO");
2213 1.1 oster }
2214 1.1 oster db1_printf(("Exiting from DispatchKernelIO\n"));
2215 1.134 oster
2216 1.389 skrll return 0;
2217 1.1 oster }
 2218 1.9 oster /* this is the callback function associated with an I/O invoked from
2219 1.1 oster kernel code.
2220 1.1 oster */
2221 1.186 perry static void
2222 1.194 oster KernelWakeupFunc(struct buf *bp)
2223 1.9 oster {
2224 1.9 oster RF_DiskQueueData_t *req = NULL;
2225 1.9 oster RF_DiskQueue_t *queue;
2226 1.9 oster
2227 1.9 oster db1_printf(("recovering the request queue:\n"));
2228 1.285 mrg
2229 1.207 simonb req = bp->b_private;
2230 1.1 oster
2231 1.9 oster queue = (RF_DiskQueue_t *) req->queue;
2232 1.1 oster
2233 1.286 mrg rf_lock_mutex2(queue->raidPtr->iodone_lock);
2234 1.285 mrg
2235 1.175 oster #if RF_ACC_TRACE > 0
2236 1.9 oster if (req->tracerec) {
2237 1.9 oster RF_ETIMER_STOP(req->tracerec->timer);
2238 1.9 oster RF_ETIMER_EVAL(req->tracerec->timer);
2239 1.288 mrg rf_lock_mutex2(rf_tracing_mutex);
2240 1.9 oster req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
2241 1.9 oster req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
2242 1.9 oster req->tracerec->num_phys_ios++;
2243 1.288 mrg rf_unlock_mutex2(rf_tracing_mutex);
2244 1.9 oster }
2245 1.175 oster #endif
2246 1.1 oster
2247 1.230 ad /* XXX Ok, let's get aggressive... If b_error is set, let's go
2248 1.9 oster * ballistic, and mark the component as hosed... */
2249 1.36 oster
2250 1.230 ad if (bp->b_error != 0) {
2251 1.9 oster /* Mark the disk as dead */
2252 1.9 oster /* but only mark it once... */
2253 1.186 perry /* and only if it wouldn't leave this RAID set
2254 1.183 oster completely broken */
2255 1.193 oster if (((queue->raidPtr->Disks[queue->col].status ==
2256 1.193 oster rf_ds_optimal) ||
2257 1.193 oster (queue->raidPtr->Disks[queue->col].status ==
2258 1.385 riastrad rf_ds_used_spare)) &&
2259 1.193 oster (queue->raidPtr->numFailures <
2260 1.204 simonb queue->raidPtr->Layout.map->faultsTolerated)) {
2261 1.322 prlw1 printf("raid%d: IO Error (%d). Marking %s as failed.\n",
2262 1.136 oster queue->raidPtr->raidid,
2263 1.322 prlw1 bp->b_error,
2264 1.166 oster queue->raidPtr->Disks[queue->col].devname);
2265 1.166 oster queue->raidPtr->Disks[queue->col].status =
2266 1.9 oster rf_ds_failed;
2267 1.166 oster queue->raidPtr->status = rf_rs_degraded;
2268 1.9 oster queue->raidPtr->numFailures++;
2269 1.56 oster queue->raidPtr->numNewFailures++;
2270 1.9 oster } else { /* Disk is already dead... */
2271 1.9 oster /* printf("Disk already marked as dead!\n"); */
2272 1.9 oster }
2273 1.4 oster
2274 1.9 oster }
2275 1.4 oster
2276 1.143 oster /* Fill in the error value */
2277 1.230 ad req->error = bp->b_error;
2278 1.143 oster
2279 1.143 oster /* Drop this one on the "finished" queue... */
2280 1.143 oster TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
2281 1.143 oster
2282 1.143 oster /* Let the raidio thread know there is work to be done. */
2283 1.286 mrg rf_signal_cond2(queue->raidPtr->iodone_cv);
2284 1.143 oster
2285 1.286 mrg rf_unlock_mutex2(queue->raidPtr->iodone_lock);
2286 1.1 oster }
2287 1.1 oster
2288 1.1 oster
2289 1.1 oster /*
2290 1.1 oster * initialize a buf structure for doing an I/O in the kernel.
2291 1.1 oster */
2292 1.186 perry static void
2293 1.169 oster InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
2294 1.225 christos RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
2295 1.384 jdolecek void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector)
2296 1.9 oster {
2297 1.384 jdolecek bp->b_flags = rw_flag | (bp->b_flags & rf_b_pass);
2298 1.242 ad bp->b_oflags = 0;
2299 1.242 ad bp->b_cflags = 0;
2300 1.9 oster bp->b_bcount = numSect << logBytesPerSector;
2301 1.9 oster bp->b_bufsize = bp->b_bcount;
2302 1.9 oster bp->b_error = 0;
2303 1.9 oster bp->b_dev = dev;
2304 1.187 christos bp->b_data = bf;
2305 1.275 mrg bp->b_blkno = startSect << logBytesPerSector >> DEV_BSHIFT;
2306 1.9 oster bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
2307 1.1 oster if (bp->b_bcount == 0) {
2308 1.141 provos panic("bp->b_bcount is zero in InitBP!!");
2309 1.1 oster }
2310 1.9 oster bp->b_iodone = cbFunc;
2311 1.207 simonb bp->b_private = cbArg;
2312 1.1 oster }
2313 1.1 oster
2314 1.1 oster /*
2315 1.1 oster * Wait interruptibly for an exclusive lock.
2316 1.1 oster *
2317 1.1 oster * XXX
2318 1.1 oster * Several drivers do this; it should be abstracted and made MP-safe.
2319 1.1 oster * (Hmm... where have we seen this warning before :-> GO )
2320 1.1 oster */
2321 1.1 oster static int
2322 1.169 oster raidlock(struct raid_softc *rs)
2323 1.1 oster {
2324 1.9 oster int error;
2325 1.1 oster
2326 1.335 mlelstv error = 0;
2327 1.327 pgoyette mutex_enter(&rs->sc_mutex);
2328 1.1 oster while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2329 1.1 oster rs->sc_flags |= RAIDF_WANTED;
2330 1.327 pgoyette error = cv_wait_sig(&rs->sc_cv, &rs->sc_mutex);
2331 1.327 pgoyette if (error != 0)
2332 1.335 mlelstv goto done;
2333 1.1 oster }
2334 1.1 oster rs->sc_flags |= RAIDF_LOCKED;
2335 1.335 mlelstv done:
2336 1.327 pgoyette mutex_exit(&rs->sc_mutex);
2337 1.389 skrll return error;
2338 1.1 oster }
2339 1.1 oster /*
2340 1.1 oster * Unlock and wake up any waiters.
2341 1.1 oster */
2342 1.1 oster static void
2343 1.169 oster raidunlock(struct raid_softc *rs)
2344 1.1 oster {
2345 1.1 oster
2346 1.327 pgoyette mutex_enter(&rs->sc_mutex);
2347 1.1 oster rs->sc_flags &= ~RAIDF_LOCKED;
2348 1.1 oster if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2349 1.1 oster rs->sc_flags &= ~RAIDF_WANTED;
2350 1.327 pgoyette cv_broadcast(&rs->sc_cv);
2351 1.1 oster }
2352 1.327 pgoyette mutex_exit(&rs->sc_mutex);
2353 1.11 oster }
2354 1.186 perry
2355 1.11 oster
2356 1.11 oster #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2357 1.11 oster #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
2358 1.269 jld #define RF_PARITY_MAP_SIZE RF_PARITYMAP_NBYTE
2359 1.11 oster
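/*
 * Helpers describing the on-disk layout of the protected area of each
 * component: the component label lives RF_COMPONENT_INFO_OFFSET bytes
 * in and occupies at least RF_COMPONENT_INFO_SIZE bytes (rounded up to
 * one sector), with the parity map stored immediately after it.
 */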
2360 1.276 mrg static daddr_t
2361 1.276 mrg rf_component_info_offset(void)
2362 1.276 mrg {
2363 1.276 mrg
2364 1.276 mrg return RF_COMPONENT_INFO_OFFSET;
2365 1.276 mrg }
2366 1.276 mrg
2367 1.276 mrg static daddr_t
2368 1.276 mrg rf_component_info_size(unsigned secsize)
2369 1.276 mrg {
2370 1.276 mrg daddr_t info_size;
2371 1.276 mrg
2372 1.276 mrg KASSERT(secsize);
2373 1.276 mrg if (secsize > RF_COMPONENT_INFO_SIZE)
2374 1.276 mrg info_size = secsize;
2375 1.276 mrg else
2376 1.276 mrg info_size = RF_COMPONENT_INFO_SIZE;
2377 1.276 mrg
2378 1.276 mrg return info_size;
2379 1.276 mrg }
2380 1.276 mrg
2381 1.276 mrg static daddr_t
2382 1.276 mrg rf_parity_map_offset(RF_Raid_t *raidPtr)
2383 1.276 mrg {
2384 1.276 mrg daddr_t map_offset;
2385 1.276 mrg
2386 1.276 mrg KASSERT(raidPtr->bytesPerSector);
2387 1.276 mrg if (raidPtr->bytesPerSector > RF_COMPONENT_INFO_SIZE)
2388 1.276 mrg map_offset = raidPtr->bytesPerSector;
2389 1.276 mrg else
2390 1.276 mrg map_offset = RF_COMPONENT_INFO_SIZE;
2391 1.276 mrg map_offset += rf_component_info_offset();
2392 1.276 mrg
2393 1.276 mrg return map_offset;
2394 1.276 mrg }
2395 1.276 mrg
2396 1.276 mrg static daddr_t
2397 1.276 mrg rf_parity_map_size(RF_Raid_t *raidPtr)
2398 1.276 mrg {
2399 1.276 mrg daddr_t map_size;
2400 1.276 mrg
2401 1.276 mrg if (raidPtr->bytesPerSector > RF_PARITY_MAP_SIZE)
2402 1.276 mrg map_size = raidPtr->bytesPerSector;
2403 1.276 mrg else
2404 1.276 mrg map_size = RF_PARITY_MAP_SIZE;
2405 1.276 mrg
2406 1.276 mrg return map_size;
2407 1.276 mrg }
2408 1.276 mrg
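/*
 * Set the clean/dirty flag in a single component's label and write the
 * label back to that component.
 */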
2409 1.186 perry int
2410 1.269 jld raidmarkclean(RF_Raid_t *raidPtr, RF_RowCol_t col)
2411 1.12 oster {
2412 1.269 jld RF_ComponentLabel_t *clabel;
2413 1.269 jld
2414 1.269 jld clabel = raidget_component_label(raidPtr, col);
2415 1.269 jld clabel->clean = RF_RAID_CLEAN;
2416 1.269 jld raidflush_component_label(raidPtr, col);
2417 1.12 oster return(0);
2418 1.12 oster }
2419 1.12 oster
2420 1.12 oster
2421 1.186 perry int
2422 1.269 jld raidmarkdirty(RF_Raid_t *raidPtr, RF_RowCol_t col)
2423 1.11 oster {
2424 1.269 jld RF_ComponentLabel_t *clabel;
2425 1.269 jld
2426 1.269 jld clabel = raidget_component_label(raidPtr, col);
2427 1.269 jld clabel->clean = RF_RAID_DIRTY;
2428 1.269 jld raidflush_component_label(raidPtr, col);
2429 1.11 oster return(0);
2430 1.11 oster }
2431 1.11 oster
2432 1.11 oster int
2433 1.269 jld raidfetch_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2434 1.269 jld {
2435 1.276 mrg KASSERT(raidPtr->bytesPerSector);
2436 1.394 mrg
2437 1.276 mrg return raidread_component_label(raidPtr->bytesPerSector,
2438 1.276 mrg raidPtr->Disks[col].dev,
2439 1.385 riastrad raidPtr->raid_cinfo[col].ci_vp,
2440 1.269 jld &raidPtr->raid_cinfo[col].ci_label);
2441 1.269 jld }
2442 1.269 jld
2443 1.269 jld RF_ComponentLabel_t *
2444 1.269 jld raidget_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2445 1.269 jld {
2446 1.269 jld return &raidPtr->raid_cinfo[col].ci_label;
2447 1.269 jld }
2448 1.269 jld
2449 1.269 jld int
2450 1.269 jld raidflush_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2451 1.269 jld {
2452 1.269 jld RF_ComponentLabel_t *label;
2453 1.269 jld
2454 1.269 jld label = &raidPtr->raid_cinfo[col].ci_label;
2455 1.269 jld label->mod_counter = raidPtr->mod_counter;
2456 1.269 jld #ifndef RF_NO_PARITY_MAP
2457 1.269 jld label->parity_map_modcount = label->mod_counter;
2458 1.269 jld #endif
2459 1.276 mrg return raidwrite_component_label(raidPtr->bytesPerSector,
2460 1.276 mrg raidPtr->Disks[col].dev,
2461 1.269 jld raidPtr->raid_cinfo[col].ci_vp, label);
2462 1.269 jld }
2463 1.269 jld
2464 1.394 mrg /*
2465 1.394 mrg * Swap the label endianness.
2466 1.394 mrg *
2467 1.394 mrg * Everything in the component label is 4-byte-swapped except the version,
2468 1.394 mrg * which is kept in the byte-swapped version at all times, and indicates
2469 1.394 mrg * for the writer that a swap is necessary.
2470 1.394 mrg *
2471 1.394 mrg * For reads it is expected that out_label == clabel, but writes expect
2472 1.394 mrg * separate labels so only the re-swapped label is written out to disk,
2473 1.394 mrg * leaving the swapped-except-version internally.
2474 1.394 mrg *
2475 1.394 mrg * Only support swapping label version 2.
2476 1.394 mrg */
2477 1.394 mrg static void
2478 1.394 mrg rf_swap_label(RF_ComponentLabel_t *clabel, RF_ComponentLabel_t *out_label)
2479 1.394 mrg {
2480 1.394 mrg int *in, *out, *in_last;
2481 1.394 mrg
2482 1.394 mrg KASSERT(clabel->version == bswap32(RF_COMPONENT_LABEL_VERSION));
2483 1.394 mrg
2484 1.394 mrg /* Don't swap the label, but do copy it. */
2485 1.394 mrg out_label->version = clabel->version;
2486 1.394 mrg
2487 1.394 mrg in = &clabel->serial_number;
2488 1.394 mrg in_last = &clabel->future_use2[42];
2489 1.394 mrg out = &out_label->serial_number;
2490 1.394 mrg
2491 1.394 mrg for (; in < in_last; in++, out++)
2492 1.394 mrg *out = bswap32(*in);
2493 1.394 mrg }
2494 1.269 jld
2495 1.269 jld static int
2496 1.276 mrg raidread_component_label(unsigned secsize, dev_t dev, struct vnode *b_vp,
2497 1.269 jld RF_ComponentLabel_t *clabel)
2498 1.269 jld {
2499 1.394 mrg int error;
2500 1.394 mrg
2501 1.394 mrg error = raidread_component_area(dev, b_vp, clabel,
2502 1.269 jld sizeof(RF_ComponentLabel_t),
2503 1.276 mrg rf_component_info_offset(),
2504 1.276 mrg rf_component_info_size(secsize));
2505 1.394 mrg
2506 1.394 mrg if (error == 0 &&
2507 1.394 mrg clabel->version == bswap32(RF_COMPONENT_LABEL_VERSION)) {
2508 1.394 mrg rf_swap_label(clabel, clabel);
2509 1.394 mrg }
2510 1.394 mrg
2511 1.394 mrg return error;
2512 1.269 jld }
2513 1.269 jld
2514 1.269 jld /* ARGSUSED */
2515 1.269 jld static int
2516 1.269 jld raidread_component_area(dev_t dev, struct vnode *b_vp, void *data,
2517 1.269 jld size_t msize, daddr_t offset, daddr_t dsize)
2518 1.11 oster {
2519 1.11 oster struct buf *bp;
2520 1.11 oster int error;
2521 1.186 perry
2522 1.11 oster /* XXX should probably ensure that we don't try to do this if
2523 1.186 perry someone has changed rf_protected_sectors. */
2524 1.11 oster
2525 1.98 oster if (b_vp == NULL) {
2526 1.98 oster /* For whatever reason, this component is not valid.
2527 1.98 oster Don't try to read a component label from it. */
2528 1.98 oster return(EINVAL);
2529 1.98 oster }
2530 1.98 oster
2531 1.11 oster /* get a block of the appropriate size... */
2532 1.269 jld bp = geteblk((int)dsize);
2533 1.11 oster bp->b_dev = dev;
2534 1.11 oster
2535 1.11 oster /* get our ducks in a row for the read */
2536 1.269 jld bp->b_blkno = offset / DEV_BSIZE;
2537 1.269 jld bp->b_bcount = dsize;
2538 1.100 chs bp->b_flags |= B_READ;
2539 1.269 jld bp->b_resid = dsize;
2540 1.11 oster
2541 1.331 mlelstv bdev_strategy(bp);
2542 1.340 christos error = biowait(bp);
2543 1.11 oster
2544 1.11 oster if (!error) {
2545 1.269 jld memcpy(data, bp->b_data, msize);
2546 1.204 simonb }
2547 1.11 oster
2548 1.233 ad brelse(bp, 0);
2549 1.11 oster return(error);
2550 1.11 oster }
2551 1.269 jld
2552 1.269 jld static int
2553 1.276 mrg raidwrite_component_label(unsigned secsize, dev_t dev, struct vnode *b_vp,
2554 1.276 mrg RF_ComponentLabel_t *clabel)
2555 1.269 jld {
2556 1.394 mrg RF_ComponentLabel_t *clabel_write = clabel;
2557 1.394 mrg RF_ComponentLabel_t lclabel;
2558 1.394 mrg int error;
2559 1.394 mrg
2560 1.394 mrg if (clabel->version == bswap32(RF_COMPONENT_LABEL_VERSION)) {
2561 1.394 mrg clabel_write = &lclabel;
2562 1.394 mrg rf_swap_label(clabel, clabel_write);
2563 1.394 mrg }
2564 1.394 mrg error = raidwrite_component_area(dev, b_vp, clabel_write,
2565 1.269 jld sizeof(RF_ComponentLabel_t),
2566 1.276 mrg rf_component_info_offset(),
2567 1.276 mrg rf_component_info_size(secsize), 0);
2568 1.394 mrg
2569 1.394 mrg return error;
2570 1.269 jld }
2571 1.269 jld
2572 1.11 oster /* ARGSUSED */
2573 1.269 jld static int
2574 1.385 riastrad raidwrite_component_area(dev_t dev, struct vnode *b_vp, void *data,
2575 1.269 jld size_t msize, daddr_t offset, daddr_t dsize, int asyncp)
2576 1.11 oster {
2577 1.11 oster struct buf *bp;
2578 1.11 oster int error;
2579 1.11 oster
2580 1.11 oster /* get a block of the appropriate size... */
2581 1.269 jld bp = geteblk((int)dsize);
2582 1.11 oster bp->b_dev = dev;
2583 1.11 oster
2584 1.11 oster /* get our ducks in a row for the write */
2585 1.269 jld bp->b_blkno = offset / DEV_BSIZE;
2586 1.269 jld bp->b_bcount = dsize;
2587 1.269 jld bp->b_flags |= B_WRITE | (asyncp ? B_ASYNC : 0);
2588 1.269 jld bp->b_resid = dsize;
2589 1.11 oster
2590 1.269 jld memset(bp->b_data, 0, dsize);
2591 1.269 jld memcpy(bp->b_data, data, msize);
2592 1.11 oster
2593 1.331 mlelstv bdev_strategy(bp);
2594 1.269 jld if (asyncp)
2595 1.269 jld return 0;
2596 1.340 christos error = biowait(bp);
2597 1.233 ad brelse(bp, 0);
2598 1.11 oster if (error) {
2599 1.48 oster #if 1
2600 1.11 oster printf("Failed to write RAID component info!\n");
2601 1.48 oster #endif
2602 1.11 oster }
2603 1.11 oster
2604 1.11 oster return(error);
2605 1.1 oster }
2606 1.12 oster
2607 1.186 perry void
2608 1.269 jld rf_paritymap_kern_write(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
2609 1.269 jld {
2610 1.269 jld int c;
2611 1.269 jld
2612 1.269 jld for (c = 0; c < raidPtr->numCol; c++) {
2613 1.269 jld /* Skip dead disks. */
2614 1.269 jld if (RF_DEAD_DISK(raidPtr->Disks[c].status))
2615 1.269 jld continue;
2616 1.269 jld /* XXXjld: what if an error occurs here? */
2617 1.269 jld raidwrite_component_area(raidPtr->Disks[c].dev,
2618 1.269 jld raidPtr->raid_cinfo[c].ci_vp, map,
2619 1.269 jld RF_PARITYMAP_NBYTE,
2620 1.276 mrg rf_parity_map_offset(raidPtr),
2621 1.276 mrg rf_parity_map_size(raidPtr), 0);
2622 1.269 jld }
2623 1.269 jld }
2624 1.269 jld
2625 1.269 jld void
2626 1.269 jld rf_paritymap_kern_read(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
2627 1.269 jld {
2628 1.269 jld struct rf_paritymap_ondisk tmp;
 2629 1.272 oster 	int c, first;
2630 1.269 jld
2631 1.272 oster first=1;
2632 1.269 jld for (c = 0; c < raidPtr->numCol; c++) {
2633 1.269 jld /* Skip dead disks. */
2634 1.269 jld if (RF_DEAD_DISK(raidPtr->Disks[c].status))
2635 1.269 jld continue;
2636 1.269 jld raidread_component_area(raidPtr->Disks[c].dev,
2637 1.269 jld raidPtr->raid_cinfo[c].ci_vp, &tmp,
2638 1.269 jld RF_PARITYMAP_NBYTE,
2639 1.276 mrg rf_parity_map_offset(raidPtr),
2640 1.276 mrg rf_parity_map_size(raidPtr));
2641 1.272 oster if (first) {
2642 1.269 jld memcpy(map, &tmp, sizeof(*map));
2643 1.272 oster first = 0;
2644 1.269 jld } else {
2645 1.269 jld rf_paritymap_merge(map, &tmp);
2646 1.269 jld }
2647 1.269 jld }
2648 1.269 jld }
2649 1.269 jld
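/*
 * Bump the mod counter and mark the component label dirty on each live
 * component and on any used spares; components that have been spared
 * out are left alone.
 */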
2650 1.269 jld void
2651 1.169 oster rf_markalldirty(RF_Raid_t *raidPtr)
2652 1.12 oster {
2653 1.269 jld RF_ComponentLabel_t *clabel;
2654 1.146 oster int sparecol;
2655 1.166 oster int c;
2656 1.166 oster int j;
2657 1.166 oster int scol = -1;
2658 1.12 oster
2659 1.12 oster raidPtr->mod_counter++;
2660 1.166 oster for (c = 0; c < raidPtr->numCol; c++) {
2661 1.166 oster /* we don't want to touch (at all) a disk that has
2662 1.166 oster failed */
2663 1.166 oster if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
2664 1.269 jld clabel = raidget_component_label(raidPtr, c);
2665 1.269 jld if (clabel->status == rf_ds_spared) {
2666 1.186 perry /* XXX do something special...
2667 1.186 perry but whatever you do, don't
2668 1.166 oster try to access it!! */
2669 1.166 oster } else {
2670 1.269 jld raidmarkdirty(raidPtr, c);
2671 1.12 oster }
2672 1.166 oster }
2673 1.186 perry }
2674 1.146 oster
2675 1.12 oster for( c = 0; c < raidPtr->numSpare ; c++) {
2676 1.12 oster sparecol = raidPtr->numCol + c;
2677 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2678 1.186 perry /*
2679 1.186 perry
2680 1.186 perry we claim this disk is "optimal" if it's
2681 1.186 perry rf_ds_used_spare, as that means it should be
2682 1.186 perry directly substitutable for the disk it replaced.
2683 1.12 oster We note that too...
2684 1.12 oster
2685 1.12 oster */
2686 1.12 oster
2687 1.166 oster for(j=0;j<raidPtr->numCol;j++) {
2688 1.166 oster if (raidPtr->Disks[j].spareCol == sparecol) {
2689 1.166 oster scol = j;
2690 1.166 oster break;
2691 1.12 oster }
2692 1.12 oster }
2693 1.186 perry
2694 1.269 jld clabel = raidget_component_label(raidPtr, sparecol);
2695 1.12 oster /* make sure status is noted */
2696 1.146 oster
2697 1.269 jld raid_init_component_label(raidPtr, clabel);
2698 1.146 oster
2699 1.269 jld clabel->row = 0;
2700 1.269 jld clabel->column = scol;
2701 1.146 oster /* Note: we *don't* change status from rf_ds_used_spare
2702 1.146 oster to rf_ds_optimal */
2703 1.146 oster /* clabel.status = rf_ds_optimal; */
2704 1.186 perry
2705 1.269 jld raidmarkdirty(raidPtr, sparecol);
2706 1.12 oster }
2707 1.12 oster }
2708 1.12 oster }
2709 1.12 oster
2710 1.13 oster
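/*
 * Write updated component labels out to every optimal component and
 * every used spare; on the final update, also mark them clean if the
 * parity is known to be good.
 */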
2711 1.13 oster void
2712 1.169 oster rf_update_component_labels(RF_Raid_t *raidPtr, int final)
2713 1.13 oster {
2714 1.269 jld RF_ComponentLabel_t *clabel;
2715 1.13 oster int sparecol;
2716 1.166 oster int c;
2717 1.166 oster int j;
2718 1.166 oster int scol;
2719 1.341 christos struct raid_softc *rs = raidPtr->softc;
2720 1.13 oster
2721 1.13 oster scol = -1;
2722 1.13 oster
2723 1.186 perry /* XXX should do extra checks to make sure things really are clean,
2724 1.13 oster rather than blindly setting the clean bit... */
2725 1.13 oster
2726 1.13 oster raidPtr->mod_counter++;
2727 1.13 oster
2728 1.166 oster for (c = 0; c < raidPtr->numCol; c++) {
2729 1.166 oster if (raidPtr->Disks[c].status == rf_ds_optimal) {
2730 1.269 jld clabel = raidget_component_label(raidPtr, c);
2731 1.201 oster /* make sure status is noted */
2732 1.269 jld clabel->status = rf_ds_optimal;
2733 1.385 riastrad
2734 1.214 oster /* note what unit we are configured as */
2735 1.341 christos if ((rs->sc_cflags & RAIDF_UNIT_CHANGED) == 0)
2736 1.341 christos clabel->last_unit = raidPtr->raidid;
2737 1.214 oster
2738 1.269 jld raidflush_component_label(raidPtr, c);
2739 1.166 oster if (final == RF_FINAL_COMPONENT_UPDATE) {
2740 1.166 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2741 1.269 jld raidmarkclean(raidPtr, c);
2742 1.91 oster }
2743 1.166 oster }
2744 1.186 perry }
2745 1.166 oster /* else we don't touch it.. */
2746 1.186 perry }
2747 1.63 oster
2748 1.63 oster for( c = 0; c < raidPtr->numSpare ; c++) {
2749 1.63 oster sparecol = raidPtr->numCol + c;
2750 1.110 oster /* Need to ensure that the reconstruct actually completed! */
2751 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2752 1.186 perry 			/*
2753 1.186 perry 			 * We claim this disk is "optimal" if it's
2754 1.186 perry 			 * rf_ds_used_spare, as that means it should be
2755 1.186 perry 			 * directly substitutable for the disk it
2756 1.186 perry 			 * replaced.  We note that too...
2757 1.63 oster 			 */
2760 1.63 oster
2761 1.166 oster for(j=0;j<raidPtr->numCol;j++) {
2762 1.166 oster if (raidPtr->Disks[j].spareCol == sparecol) {
2763 1.166 oster scol = j;
2764 1.166 oster break;
2765 1.63 oster }
2766 1.63 oster }
2767 1.186 perry
2768 1.63 oster /* XXX shouldn't *really* need this... */
2769 1.269 jld clabel = raidget_component_label(raidPtr, sparecol);
2770 1.63 oster /* make sure status is noted */
2771 1.63 oster
2772 1.269 jld raid_init_component_label(raidPtr, clabel);
2773 1.269 jld
2774 1.269 jld clabel->column = scol;
2775 1.269 jld clabel->status = rf_ds_optimal;
2776 1.341 christos if ((rs->sc_cflags & RAIDF_UNIT_CHANGED) == 0)
2777 1.341 christos clabel->last_unit = raidPtr->raidid;
2778 1.63 oster
2779 1.269 jld raidflush_component_label(raidPtr, sparecol);
2780 1.91 oster if (final == RF_FINAL_COMPONENT_UPDATE) {
2781 1.13 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2782 1.269 jld raidmarkclean(raidPtr, sparecol);
2783 1.13 oster }
2784 1.13 oster }
2785 1.13 oster }
2786 1.13 oster }
2787 1.68 oster }
2788 1.68 oster
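/*
 * Close one component's vnode.  Components opened by the autoconfig code
 * are closed with VOP_CLOSE() and released with vput(); manually
 * configured components go through vn_close() instead.
 */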
2789 1.68 oster void
2790 1.169 oster rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
2791 1.69 oster {
2792 1.69 oster
2793 1.69 oster if (vp != NULL) {
2794 1.69 oster if (auto_configured == 1) {
2795 1.96 oster vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2796 1.238 pooka VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2797 1.69 oster vput(vp);
2798 1.186 perry
2799 1.186 perry } else {
2800 1.244 ad (void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred);
2801 1.69 oster }
2802 1.186 perry }
2803 1.69 oster }
2804 1.69 oster
2805 1.69 oster
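/*
 * Close the vnodes of all components (including spares) and clear their
 * auto_configured flags.
 */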
2806 1.69 oster void
2807 1.169 oster rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
2808 1.68 oster {
2809 1.186 perry int r,c;
2810 1.69 oster struct vnode *vp;
2811 1.69 oster int acd;
2812 1.68 oster
2813 1.68 oster
2814 1.68 oster /* We take this opportunity to close the vnodes like we should.. */
2815 1.68 oster
2816 1.166 oster for (c = 0; c < raidPtr->numCol; c++) {
2817 1.166 oster vp = raidPtr->raid_cinfo[c].ci_vp;
2818 1.166 oster acd = raidPtr->Disks[c].auto_configured;
2819 1.166 oster rf_close_component(raidPtr, vp, acd);
2820 1.166 oster raidPtr->raid_cinfo[c].ci_vp = NULL;
2821 1.166 oster raidPtr->Disks[c].auto_configured = 0;
2822 1.68 oster }
2823 1.166 oster
2824 1.68 oster for (r = 0; r < raidPtr->numSpare; r++) {
2825 1.166 oster vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
2826 1.166 oster acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
2827 1.69 oster rf_close_component(raidPtr, vp, acd);
2828 1.166 oster raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
2829 1.166 oster raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
2830 1.68 oster }
2831 1.37 oster }
2832 1.63 oster
2833 1.37 oster
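/*
 * Kernel thread body for a requested component failure: fail the
 * indicated component (optionally initiating and/or forcing a
 * reconstruction), then free the request and exit.
 */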
2834 1.393 mrg static void
2835 1.353 mrg rf_ReconThread(struct rf_recon_req_internal *req)
2836 1.37 oster {
2837 1.37 oster int s;
2838 1.37 oster RF_Raid_t *raidPtr;
2839 1.37 oster
2840 1.37 oster s = splbio();
2841 1.37 oster raidPtr = (RF_Raid_t *) req->raidPtr;
2842 1.37 oster raidPtr->recon_in_progress = 1;
2843 1.37 oster
2844 1.398 oster if (req->flags & RF_FDFLAGS_RECON_FORCE) {
2845 1.398 oster raidPtr->forceRecon = 1;
2846 1.398 oster }
2847 1.398 oster
2848 1.166 oster rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
2849 1.37 oster ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2850 1.37 oster
2851 1.398 oster if (req->flags & RF_FDFLAGS_RECON_FORCE) {
2852 1.398 oster raidPtr->forceRecon = 0;
2853 1.398 oster }
2854 1.398 oster
2855 1.37 oster RF_Free(req, sizeof(*req));
2856 1.37 oster
2857 1.37 oster raidPtr->recon_in_progress = 0;
2858 1.37 oster splx(s);
2859 1.37 oster
2860 1.37 oster /* That's all... */
2861 1.204 simonb kthread_exit(0); /* does not return */
2862 1.37 oster }
2863 1.37 oster
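/*
 * Kernel thread body: re-write the parity for the entire set.  On
 * success the in-core parity status is marked clean; anyone waiting in
 * a shutdown for the rewrite to finish is woken up.
 */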
2864 1.393 mrg static void
2865 1.169 oster rf_RewriteParityThread(RF_Raid_t *raidPtr)
2866 1.37 oster {
2867 1.37 oster int retcode;
2868 1.37 oster int s;
2869 1.37 oster
2870 1.184 oster raidPtr->parity_rewrite_stripes_done = 0;
2871 1.37 oster raidPtr->parity_rewrite_in_progress = 1;
2872 1.37 oster s = splbio();
2873 1.37 oster retcode = rf_RewriteParity(raidPtr);
2874 1.37 oster splx(s);
2875 1.37 oster if (retcode) {
2876 1.279 christos printf("raid%d: Error re-writing parity (%d)!\n",
2877 1.279 christos raidPtr->raidid, retcode);
2878 1.37 oster } else {
2879 1.37 oster /* set the clean bit! If we shutdown correctly,
2880 1.37 oster the clean bit on each component label will get
2881 1.37 oster set */
2882 1.37 oster raidPtr->parity_good = RF_RAID_CLEAN;
2883 1.37 oster }
2884 1.37 oster raidPtr->parity_rewrite_in_progress = 0;
2885 1.85 oster
2886 1.85 oster /* Anyone waiting for us to stop? If so, inform them... */
2887 1.85 oster if (raidPtr->waitShutdown) {
2888 1.357 mrg rf_lock_mutex2(raidPtr->rad_lock);
2889 1.357 mrg cv_broadcast(&raidPtr->parity_rewrite_cv);
2890 1.357 mrg rf_unlock_mutex2(raidPtr->rad_lock);
2891 1.85 oster }
2892 1.37 oster
2893 1.37 oster /* That's all... */
2894 1.204 simonb kthread_exit(0); /* does not return */
2895 1.37 oster }
2896 1.37 oster
2897 1.37 oster
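/*
 * Kernel thread body: copy reconstructed data back from the spare to
 * the replaced component, then exit.
 */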
2898 1.393 mrg static void
2899 1.169 oster rf_CopybackThread(RF_Raid_t *raidPtr)
2900 1.37 oster {
2901 1.37 oster int s;
2902 1.37 oster
2903 1.37 oster raidPtr->copyback_in_progress = 1;
2904 1.37 oster s = splbio();
2905 1.37 oster rf_CopybackReconstructedData(raidPtr);
2906 1.37 oster splx(s);
2907 1.37 oster raidPtr->copyback_in_progress = 0;
2908 1.37 oster
2909 1.37 oster /* That's all... */
2910 1.204 simonb kthread_exit(0); /* does not return */
2911 1.37 oster }
2912 1.37 oster
2913 1.37 oster
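/*
 * Kernel thread body: reconstruct the indicated component "in place"
 * (onto the component itself rather than onto a hot spare), optionally
 * forcing the reconstruction.
 */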
2914 1.393 mrg static void
2915 1.353 mrg rf_ReconstructInPlaceThread(struct rf_recon_req_internal *req)
2916 1.37 oster {
2917 1.37 oster int s;
2918 1.37 oster RF_Raid_t *raidPtr;
2919 1.186 perry
2920 1.37 oster s = splbio();
2921 1.37 oster raidPtr = req->raidPtr;
2922 1.37 oster raidPtr->recon_in_progress = 1;
2923 1.398 oster
2924 1.398 oster if (req->flags & RF_FDFLAGS_RECON_FORCE) {
2925 1.398 oster raidPtr->forceRecon = 1;
2926 1.398 oster }
2927 1.398 oster
2928 1.166 oster rf_ReconstructInPlace(raidPtr, req->col);
2929 1.398 oster
2930 1.398 oster if (req->flags & RF_FDFLAGS_RECON_FORCE) {
2931 1.398 oster raidPtr->forceRecon = 0;
2932 1.398 oster }
2933 1.398 oster
2934 1.37 oster RF_Free(req, sizeof(*req));
2935 1.37 oster raidPtr->recon_in_progress = 0;
2936 1.37 oster splx(s);
2937 1.37 oster
2938 1.37 oster /* That's all... */
2939 1.204 simonb kthread_exit(0); /* does not return */
2940 1.48 oster }
2941 1.48 oster
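/*
 * Read the component label from the given device and, if it looks
 * reasonable, prepend a new RF_AutoConfig_t entry to ac_list.  If the
 * label is missing or bogus, close and release the vnode and return the
 * list unchanged.
 */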
2942 1.213 christos static RF_AutoConfig_t *
2943 1.213 christos rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
2944 1.276 mrg const char *cname, RF_SectorCount_t size, uint64_t numsecs,
2945 1.276 mrg unsigned secsize)
2946 1.213 christos {
2947 1.213 christos int good_one = 0;
2948 1.385 riastrad RF_ComponentLabel_t *clabel;
2949 1.213 christos RF_AutoConfig_t *ac;
2950 1.213 christos
2951 1.379 chs clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_WAITOK);
2952 1.213 christos
2953 1.276 mrg if (!raidread_component_label(secsize, dev, vp, clabel)) {
2954 1.276 mrg /* Got the label. Does it look reasonable? */
2955 1.385 riastrad if (rf_reasonable_label(clabel, numsecs) &&
2956 1.282 enami (rf_component_label_partitionsize(clabel) <= size)) {
2957 1.224 oster #ifdef DEBUG
2958 1.276 mrg printf("Component on: %s: %llu\n",
2959 1.213 christos cname, (unsigned long long)size);
2960 1.276 mrg rf_print_component_label(clabel);
2961 1.213 christos #endif
2962 1.276 mrg /* if it's reasonable, add it, else ignore it. */
2963 1.276 mrg ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
2964 1.379 chs M_WAITOK);
2965 1.276 mrg strlcpy(ac->devname, cname, sizeof(ac->devname));
2966 1.276 mrg ac->dev = dev;
2967 1.276 mrg ac->vp = vp;
2968 1.276 mrg ac->clabel = clabel;
2969 1.276 mrg ac->next = ac_list;
2970 1.276 mrg ac_list = ac;
2971 1.276 mrg good_one = 1;
2972 1.276 mrg }
2973 1.213 christos }
2974 1.213 christos if (!good_one) {
2975 1.213 christos /* cleanup */
2976 1.213 christos free(clabel, M_RAIDFRAME);
2977 1.213 christos vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2978 1.238 pooka VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2979 1.213 christos vput(vp);
2980 1.213 christos }
2981 1.213 christos return ac_list;
2982 1.213 christos }
2983 1.213 christos
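/*
 * Scan all disk devices in the system (wedges first, then everything
 * else) looking for RAID components, and return the resulting list of
 * RF_AutoConfig_t entries for the autoconfiguration code to sort out.
 */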
2984 1.393 mrg static RF_AutoConfig_t *
2985 1.259 cegger rf_find_raid_components(void)
2986 1.48 oster {
2987 1.48 oster struct vnode *vp;
2988 1.48 oster struct disklabel label;
2989 1.261 dyoung device_t dv;
2990 1.268 dyoung deviter_t di;
2991 1.48 oster dev_t dev;
2992 1.296 buhrow int bmajor, bminor, wedge, rf_part_found;
2993 1.48 oster int error;
2994 1.48 oster int i;
2995 1.48 oster RF_AutoConfig_t *ac_list;
2996 1.276 mrg uint64_t numsecs;
2997 1.276 mrg unsigned secsize;
2998 1.335 mlelstv int dowedges;
2999 1.48 oster
3000 1.48 oster /* initialize the AutoConfig list */
3001 1.48 oster ac_list = NULL;
3002 1.48 oster
3003 1.335 mlelstv /*
3004 1.335 mlelstv 	 * We begin by trolling through *all* the devices on the system *twice*:
3005 1.335 mlelstv 	 * first we scan for wedges, then for other devices.  This avoids
3006 1.335 mlelstv 	 * using a raw partition instead of a wedge that covers the whole disk.
3007 1.335 mlelstv */
3008 1.48 oster
3009 1.335 mlelstv for (dowedges=1; dowedges>=0; --dowedges) {
3010 1.335 mlelstv for (dv = deviter_first(&di, DEVITER_F_ROOT_FIRST); dv != NULL;
3011 1.335 mlelstv dv = deviter_next(&di)) {
3012 1.48 oster
3013 1.393 mrg /* we are only interested in disks */
3014 1.335 mlelstv if (device_class(dv) != DV_DISK)
3015 1.335 mlelstv continue;
3016 1.48 oster
3017 1.393 mrg /* we don't care about floppies */
3018 1.335 mlelstv if (device_is_a(dv, "fd")) {
3019 1.335 mlelstv continue;
3020 1.335 mlelstv }
3021 1.129 oster
3022 1.393 mrg /* we don't care about CDs. */
3023 1.335 mlelstv if (device_is_a(dv, "cd")) {
3024 1.335 mlelstv continue;
3025 1.335 mlelstv }
3026 1.129 oster
3027 1.393 mrg /* we don't care about md. */
3028 1.335 mlelstv if (device_is_a(dv, "md")) {
3029 1.335 mlelstv continue;
3030 1.335 mlelstv }
3031 1.248 oster
3032 1.335 mlelstv /* hdfd is the Atari/Hades floppy driver */
3033 1.335 mlelstv if (device_is_a(dv, "hdfd")) {
3034 1.335 mlelstv continue;
3035 1.335 mlelstv }
3036 1.206 thorpej
3037 1.335 mlelstv /* fdisa is the Atari/Milan floppy driver */
3038 1.335 mlelstv if (device_is_a(dv, "fdisa")) {
3039 1.335 mlelstv continue;
3040 1.335 mlelstv }
3041 1.186 perry
3042 1.393 mrg /* we don't care about spiflash */
3043 1.393 mrg if (device_is_a(dv, "spiflash")) {
3044 1.393 mrg continue;
3045 1.393 mrg }
3046 1.393 mrg
3047 1.335 mlelstv /* are we in the wedges pass ? */
3048 1.335 mlelstv wedge = device_is_a(dv, "dk");
3049 1.335 mlelstv if (wedge != dowedges) {
3050 1.335 mlelstv continue;
3051 1.335 mlelstv }
3052 1.48 oster
3053 1.335 mlelstv /* need to find the device_name_to_block_device_major stuff */
3054 1.335 mlelstv bmajor = devsw_name2blk(device_xname(dv), NULL, 0);
3055 1.296 buhrow
3056 1.335 mlelstv rf_part_found = 0; /*No raid partition as yet*/
3057 1.48 oster
3058 1.335 mlelstv /* get a vnode for the raw partition of this disk */
3059 1.335 mlelstv bminor = minor(device_unit(dv));
3060 1.335 mlelstv dev = wedge ? makedev(bmajor, bminor) :
3061 1.335 mlelstv MAKEDISKDEV(bmajor, bminor, RAW_PART);
3062 1.335 mlelstv if (bdevvp(dev, &vp))
3063 1.335 mlelstv panic("RAID can't alloc vnode");
3064 1.48 oster
3065 1.375 hannken vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3066 1.335 mlelstv error = VOP_OPEN(vp, FREAD | FSILENT, NOCRED);
3067 1.48 oster
3068 1.335 mlelstv if (error) {
3069 1.335 mlelstv /* "Who cares." Continue looking
3070 1.335 mlelstv 				   for something that exists */
3071 1.335 mlelstv vput(vp);
3072 1.335 mlelstv continue;
3073 1.335 mlelstv }
3074 1.48 oster
3075 1.335 mlelstv error = getdisksize(vp, &numsecs, &secsize);
3076 1.213 christos if (error) {
3077 1.339 mlelstv /*
3078 1.339 mlelstv * Pseudo devices like vnd and cgd can be
3079 1.339 mlelstv * opened but may still need some configuration.
3080 1.339 mlelstv * Ignore these quietly.
3081 1.339 mlelstv */
3082 1.339 mlelstv if (error != ENXIO)
3083 1.339 mlelstv printf("RAIDframe: can't get disk size"
3084 1.339 mlelstv " for dev %s (%d)\n",
3085 1.339 mlelstv device_xname(dv), error);
3086 1.241 oster VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
3087 1.241 oster vput(vp);
3088 1.213 christos continue;
3089 1.213 christos }
3090 1.335 mlelstv if (wedge) {
3091 1.335 mlelstv struct dkwedge_info dkw;
3092 1.335 mlelstv error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
3093 1.335 mlelstv NOCRED);
3094 1.335 mlelstv if (error) {
3095 1.335 mlelstv printf("RAIDframe: can't get wedge info for "
3096 1.335 mlelstv "dev %s (%d)\n", device_xname(dv), error);
3097 1.335 mlelstv VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
3098 1.335 mlelstv vput(vp);
3099 1.335 mlelstv continue;
3100 1.335 mlelstv }
3101 1.213 christos
3102 1.335 mlelstv if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0) {
3103 1.335 mlelstv VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
3104 1.335 mlelstv vput(vp);
3105 1.335 mlelstv continue;
3106 1.335 mlelstv }
3107 1.385 riastrad
3108 1.410.4.1 martin VOP_UNLOCK(vp);
3109 1.335 mlelstv ac_list = rf_get_component(ac_list, dev, vp,
3110 1.335 mlelstv device_xname(dv), dkw.dkw_size, numsecs, secsize);
3111 1.335 mlelstv 				rf_part_found = 1; /* There is a raid component on this disk */
3112 1.228 christos continue;
3113 1.241 oster }
3114 1.213 christos
3115 1.335 mlelstv /* Ok, the disk exists. Go get the disklabel. */
3116 1.335 mlelstv error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED);
3117 1.335 mlelstv if (error) {
3118 1.335 mlelstv /*
3119 1.335 mlelstv * XXX can't happen - open() would
3120 1.335 mlelstv * have errored out (or faked up one)
3121 1.335 mlelstv */
3122 1.335 mlelstv if (error != ENOTTY)
3123 1.335 mlelstv printf("RAIDframe: can't get label for dev "
3124 1.335 mlelstv "%s (%d)\n", device_xname(dv), error);
3125 1.335 mlelstv }
3126 1.48 oster
3127 1.335 mlelstv /* don't need this any more. We'll allocate it again
3128 1.335 mlelstv a little later if we really do... */
3129 1.335 mlelstv VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
3130 1.335 mlelstv vput(vp);
3131 1.48 oster
3132 1.335 mlelstv if (error)
3133 1.48 oster continue;
3134 1.48 oster
3135 1.335 mlelstv 			rf_part_found = 0; /* No raid partitions yet */
3136 1.335 mlelstv for (i = 0; i < label.d_npartitions; i++) {
3137 1.335 mlelstv char cname[sizeof(ac_list->devname)];
3138 1.335 mlelstv
3139 1.335 mlelstv /* We only support partitions marked as RAID */
3140 1.335 mlelstv if (label.d_partitions[i].p_fstype != FS_RAID)
3141 1.335 mlelstv continue;
3142 1.335 mlelstv
3143 1.335 mlelstv dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
3144 1.335 mlelstv if (bdevvp(dev, &vp))
3145 1.335 mlelstv panic("RAID can't alloc vnode");
3146 1.335 mlelstv
3147 1.375 hannken vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3148 1.335 mlelstv error = VOP_OPEN(vp, FREAD, NOCRED);
3149 1.335 mlelstv if (error) {
3150 1.400 oster /* Not quite a 'whatever'. In
3151 1.400 oster * this situation we know
3152 1.400 oster * there is a FS_RAID
3153 1.400 oster * partition, but we can't
3154 1.400 oster * open it. The most likely
3155 1.400 oster * reason is that the
3156 1.400 oster * partition is already in
3157 1.400 oster * use by another RAID set.
3158 1.400 oster * So note that we've already
3159 1.400 oster * found a partition on this
3160 1.400 oster * disk so we don't attempt
3161 1.400 oster * to use the raw disk later. */
3162 1.400 oster rf_part_found = 1;
3163 1.335 mlelstv vput(vp);
3164 1.335 mlelstv continue;
3165 1.335 mlelstv }
3166 1.375 hannken VOP_UNLOCK(vp);
3167 1.335 mlelstv snprintf(cname, sizeof(cname), "%s%c",
3168 1.335 mlelstv device_xname(dv), 'a' + i);
3169 1.335 mlelstv ac_list = rf_get_component(ac_list, dev, vp, cname,
3170 1.335 mlelstv label.d_partitions[i].p_size, numsecs, secsize);
3171 1.335 mlelstv 				rf_part_found = 1; /* There is at least one raid partition on this disk */
3172 1.48 oster }
3173 1.296 buhrow
3174 1.335 mlelstv /*
3175 1.335 mlelstv 			 * If there is no raid component on this disk, either in a
3176 1.335 mlelstv 			 * disklabel or inside a wedge, check the raw partition as
3177 1.335 mlelstv 			 * well, as it is possible to configure raid components on
3178 1.335 mlelstv 			 * raw disk devices.
3179 1.335 mlelstv */
3180 1.296 buhrow
3181 1.335 mlelstv if (!rf_part_found) {
3182 1.335 mlelstv char cname[sizeof(ac_list->devname)];
3183 1.296 buhrow
3184 1.335 mlelstv dev = MAKEDISKDEV(bmajor, device_unit(dv), RAW_PART);
3185 1.335 mlelstv if (bdevvp(dev, &vp))
3186 1.335 mlelstv panic("RAID can't alloc vnode");
3187 1.335 mlelstv
3188 1.375 hannken vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3189 1.375 hannken
3190 1.335 mlelstv error = VOP_OPEN(vp, FREAD, NOCRED);
3191 1.335 mlelstv if (error) {
3192 1.335 mlelstv /* Whatever... */
3193 1.335 mlelstv vput(vp);
3194 1.335 mlelstv continue;
3195 1.335 mlelstv }
3196 1.375 hannken VOP_UNLOCK(vp);
3197 1.335 mlelstv snprintf(cname, sizeof(cname), "%s%c",
3198 1.335 mlelstv device_xname(dv), 'a' + RAW_PART);
3199 1.335 mlelstv ac_list = rf_get_component(ac_list, dev, vp, cname,
3200 1.335 mlelstv label.d_partitions[RAW_PART].p_size, numsecs, secsize);
3201 1.296 buhrow }
3202 1.48 oster }
3203 1.335 mlelstv deviter_release(&di);
3204 1.48 oster }
3205 1.213 christos return ac_list;
3206 1.48 oster }
3207 1.186 perry
3208 1.292 oster int
3209 1.284 mrg rf_reasonable_label(RF_ComponentLabel_t *clabel, uint64_t numsecs)
3210 1.48 oster {
3211 1.186 perry
3212 1.393 mrg if ((clabel->version==RF_COMPONENT_LABEL_VERSION_1 ||
3213 1.394 mrg clabel->version==RF_COMPONENT_LABEL_VERSION ||
3214 1.394 mrg clabel->version == bswap32(RF_COMPONENT_LABEL_VERSION)) &&
3215 1.393 mrg (clabel->clean == RF_RAID_CLEAN ||
3216 1.393 mrg clabel->clean == RF_RAID_DIRTY) &&
3217 1.186 perry clabel->row >=0 &&
3218 1.186 perry clabel->column >= 0 &&
3219 1.48 oster clabel->num_rows > 0 &&
3220 1.48 oster clabel->num_columns > 0 &&
3221 1.186 perry clabel->row < clabel->num_rows &&
3222 1.48 oster clabel->column < clabel->num_columns &&
3223 1.48 oster clabel->blockSize > 0 &&
3224 1.282 enami /*
3225 1.282 enami * numBlocksHi may contain garbage, but it is ok since
3226 1.282 enami * the type is unsigned. If it is really garbage,
3227 1.282 enami * rf_fix_old_label_size() will fix it.
3228 1.282 enami */
3229 1.282 enami rf_component_label_numblocks(clabel) > 0) {
3230 1.284 mrg /*
3231 1.284 mrg * label looks reasonable enough...
3232 1.284 mrg * let's make sure it has no old garbage.
3233 1.284 mrg */
3234 1.292 oster if (numsecs)
3235 1.292 oster rf_fix_old_label_size(clabel, numsecs);
3236 1.48 oster return(1);
3237 1.48 oster }
3238 1.48 oster return(0);
3239 1.48 oster }
3240 1.48 oster
3241 1.48 oster
3242 1.278 mrg /*
3243 1.278 mrg * For reasons yet unknown, some old component labels have garbage in
3244 1.278 mrg * the newer numBlocksHi region, and this causes lossage. Since those
3245 1.278 mrg * disks will also have numsecs set to less than 32 bits of sectors,
3246 1.299 oster * we can determine when this corruption has occurred, and fix it.
3247 1.284 mrg *
3248 1.284 mrg * The exact same problem, with the same unknown reason, happens to
3249 1.284 mrg * the partitionSizeHi member as well.
3250 1.278 mrg */
3251 1.278 mrg static void
3252 1.278 mrg rf_fix_old_label_size(RF_ComponentLabel_t *clabel, uint64_t numsecs)
3253 1.278 mrg {
3254 1.278 mrg
3255 1.284 mrg if (numsecs < ((uint64_t)1 << 32)) {
3256 1.284 mrg if (clabel->numBlocksHi) {
3257 1.284 mrg printf("WARNING: total sectors < 32 bits, yet "
3258 1.284 mrg "numBlocksHi set\n"
3259 1.284 mrg "WARNING: resetting numBlocksHi to zero.\n");
3260 1.284 mrg clabel->numBlocksHi = 0;
3261 1.284 mrg }
3262 1.284 mrg
3263 1.284 mrg if (clabel->partitionSizeHi) {
3264 1.284 mrg printf("WARNING: total sectors < 32 bits, yet "
3265 1.284 mrg "partitionSizeHi set\n"
3266 1.284 mrg "WARNING: resetting partitionSizeHi to zero.\n");
3267 1.284 mrg clabel->partitionSizeHi = 0;
3268 1.284 mrg }
3269 1.278 mrg }
3270 1.278 mrg }
3271 1.278 mrg
3272 1.278 mrg
3273 1.224 oster #ifdef DEBUG
3274 1.48 oster void
3275 1.169 oster rf_print_component_label(RF_ComponentLabel_t *clabel)
3276 1.48 oster {
3277 1.282 enami uint64_t numBlocks;
3278 1.308 christos static const char *rp[] = {
3279 1.308 christos "No", "Force", "Soft", "*invalid*"
3280 1.308 christos };
3281 1.308 christos
3282 1.275 mrg
3283 1.282 enami numBlocks = rf_component_label_numblocks(clabel);
3284 1.275 mrg
3285 1.48 oster printf(" Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
3286 1.186 perry clabel->row, clabel->column,
3287 1.48 oster clabel->num_rows, clabel->num_columns);
3288 1.48 oster printf(" Version: %d Serial Number: %d Mod Counter: %d\n",
3289 1.48 oster clabel->version, clabel->serial_number,
3290 1.48 oster clabel->mod_counter);
3291 1.48 oster printf(" Clean: %s Status: %d\n",
3292 1.271 dyoung clabel->clean ? "Yes" : "No", clabel->status);
3293 1.48 oster printf(" sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
3294 1.48 oster clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
3295 1.275 mrg printf(" RAID Level: %c blocksize: %d numBlocks: %"PRIu64"\n",
3296 1.275 mrg (char) clabel->parityConfig, clabel->blockSize, numBlocks);
3297 1.271 dyoung printf(" Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No");
3298 1.308 christos printf(" Root partition: %s\n", rp[clabel->root_partition & 3]);
3299 1.271 dyoung printf(" Last configured as: raid%d\n", clabel->last_unit);
3300 1.51 oster #if 0
3301 1.51 oster printf(" Config order: %d\n", clabel->config_order);
3302 1.51 oster #endif
3303 1.186 perry
3304 1.48 oster }
3305 1.133 oster #endif
3306 1.48 oster
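/*
 * Sort the discovered components into configuration sets: each component
 * joins the first existing set whose label it matches (as judged by
 * rf_does_it_fit()), or else starts a new set of its own.
 */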
3307 1.393 mrg static RF_ConfigSet_t *
3308 1.169 oster rf_create_auto_sets(RF_AutoConfig_t *ac_list)
3309 1.48 oster {
3310 1.48 oster RF_AutoConfig_t *ac;
3311 1.48 oster RF_ConfigSet_t *config_sets;
3312 1.48 oster RF_ConfigSet_t *cset;
3313 1.48 oster RF_AutoConfig_t *ac_next;
3314 1.48 oster
3315 1.48 oster
3316 1.48 oster config_sets = NULL;
3317 1.48 oster
3318 1.48 oster /* Go through the AutoConfig list, and figure out which components
3319 1.48 oster belong to what sets. */
3320 1.48 oster ac = ac_list;
3321 1.48 oster while(ac!=NULL) {
3322 1.48 oster /* we're going to putz with ac->next, so save it here
3323 1.48 oster for use at the end of the loop */
3324 1.48 oster ac_next = ac->next;
3325 1.48 oster
3326 1.48 oster if (config_sets == NULL) {
3327 1.48 oster /* will need at least this one... */
3328 1.379 chs config_sets = malloc(sizeof(RF_ConfigSet_t),
3329 1.379 chs M_RAIDFRAME, M_WAITOK);
3330 1.48 oster /* this one is easy :) */
3331 1.48 oster config_sets->ac = ac;
3332 1.48 oster config_sets->next = NULL;
3333 1.51 oster config_sets->rootable = 0;
3334 1.48 oster ac->next = NULL;
3335 1.48 oster } else {
3336 1.48 oster /* which set does this component fit into? */
3337 1.48 oster cset = config_sets;
3338 1.48 oster while(cset!=NULL) {
3339 1.49 oster if (rf_does_it_fit(cset, ac)) {
3340 1.86 oster /* looks like it matches... */
3341 1.86 oster ac->next = cset->ac;
3342 1.86 oster cset->ac = ac;
3343 1.48 oster break;
3344 1.48 oster }
3345 1.48 oster cset = cset->next;
3346 1.48 oster }
3347 1.48 oster if (cset==NULL) {
3348 1.48 oster /* didn't find a match above... new set..*/
3349 1.379 chs cset = malloc(sizeof(RF_ConfigSet_t),
3350 1.379 chs M_RAIDFRAME, M_WAITOK);
3351 1.48 oster cset->ac = ac;
3352 1.48 oster ac->next = NULL;
3353 1.48 oster cset->next = config_sets;
3354 1.51 oster cset->rootable = 0;
3355 1.48 oster config_sets = cset;
3356 1.48 oster }
3357 1.48 oster }
3358 1.48 oster ac = ac_next;
3359 1.48 oster }
3360 1.48 oster
3361 1.48 oster
3362 1.48 oster return(config_sets);
3363 1.48 oster }
3364 1.48 oster
3365 1.48 oster static int
3366 1.169 oster rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
3367 1.48 oster {
3368 1.48 oster RF_ComponentLabel_t *clabel1, *clabel2;
3369 1.48 oster
3370 1.48 oster /* If this one matches the *first* one in the set, that's good
3371 1.48 oster enough, since the other members of the set would have been
3372 1.48 oster through here too... */
3373 1.60 oster /* note that we are not checking partitionSize here..
3374 1.60 oster
3375 1.60 oster Note that we are also not checking the mod_counters here.
3376 1.299 oster If everything else matches except the mod_counter, that's
3377 1.60 oster good enough for this test. We will deal with the mod_counters
3378 1.186 perry a little later in the autoconfiguration process.
3379 1.60 oster
3380 1.60 oster (clabel1->mod_counter == clabel2->mod_counter) &&
3381 1.81 oster
3382 1.81 oster The reason we don't check for this is that failed disks
3383 1.81 oster will have lower modification counts. If those disks are
3384 1.81 oster not added to the set they used to belong to, then they will
3385 1.81 oster form their own set, which may result in 2 different sets,
3386 1.81 oster for example, competing to be configured at raid0, and
3387 1.81 oster perhaps competing to be the root filesystem set. If the
3388 1.81 oster wrong ones get configured, or both attempt to become /,
3389 1.81 oster 	   weird behaviour and/or serious lossage will occur. Thus we
3390 1.81 oster need to bring them into the fold here, and kick them out at
3391 1.81 oster a later point.
3392 1.60 oster
3393 1.60 oster */
3394 1.48 oster
3395 1.48 oster clabel1 = cset->ac->clabel;
3396 1.48 oster clabel2 = ac->clabel;
3397 1.48 oster if ((clabel1->version == clabel2->version) &&
3398 1.48 oster (clabel1->serial_number == clabel2->serial_number) &&
3399 1.48 oster (clabel1->num_rows == clabel2->num_rows) &&
3400 1.48 oster (clabel1->num_columns == clabel2->num_columns) &&
3401 1.48 oster (clabel1->sectPerSU == clabel2->sectPerSU) &&
3402 1.48 oster (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
3403 1.48 oster (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
3404 1.48 oster (clabel1->parityConfig == clabel2->parityConfig) &&
3405 1.48 oster (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
3406 1.48 oster (clabel1->blockSize == clabel2->blockSize) &&
3407 1.282 enami rf_component_label_numblocks(clabel1) ==
3408 1.282 enami rf_component_label_numblocks(clabel2) &&
3409 1.48 oster (clabel1->autoconfigure == clabel2->autoconfigure) &&
3410 1.48 oster (clabel1->root_partition == clabel2->root_partition) &&
3411 1.48 oster (clabel1->last_unit == clabel2->last_unit) &&
3412 1.48 oster (clabel1->config_order == clabel2->config_order)) {
3413 1.48 oster 		/* if it gets here, it almost *has* to be a match */
3414 1.48 oster } else {
3415 1.186 perry /* it's not consistent with somebody in the set..
3416 1.48 oster punt */
3417 1.48 oster return(0);
3418 1.48 oster }
3419 1.48 oster /* all was fine.. it must fit... */
3420 1.48 oster return(1);
3421 1.48 oster }
3422 1.48 oster
3423 1.393 mrg static int
3424 1.169 oster rf_have_enough_components(RF_ConfigSet_t *cset)
3425 1.48 oster {
3426 1.51 oster RF_AutoConfig_t *ac;
3427 1.51 oster RF_AutoConfig_t *auto_config;
3428 1.51 oster RF_ComponentLabel_t *clabel;
3429 1.166 oster int c;
3430 1.51 oster int num_cols;
3431 1.51 oster int num_missing;
3432 1.86 oster int mod_counter;
3433 1.87 oster int mod_counter_found;
3434 1.88 oster int even_pair_failed;
3435 1.88 oster char parity_type;
3436 1.186 perry
3437 1.51 oster
3438 1.48 oster /* check to see that we have enough 'live' components
3439 1.48 oster of this set. If so, we can configure it if necessary */
3440 1.48 oster
3441 1.51 oster num_cols = cset->ac->clabel->num_columns;
3442 1.88 oster parity_type = cset->ac->clabel->parityConfig;
3443 1.51 oster
3444 1.51 oster /* XXX Check for duplicate components!?!?!? */
3445 1.51 oster
3446 1.86 oster /* Determine what the mod_counter is supposed to be for this set. */
3447 1.86 oster
3448 1.87 oster mod_counter_found = 0;
3449 1.101 oster mod_counter = 0;
3450 1.86 oster ac = cset->ac;
3451 1.86 oster while(ac!=NULL) {
3452 1.87 oster if (mod_counter_found==0) {
3453 1.86 oster mod_counter = ac->clabel->mod_counter;
3454 1.87 oster mod_counter_found = 1;
3455 1.87 oster } else {
3456 1.87 oster if (ac->clabel->mod_counter > mod_counter) {
3457 1.87 oster mod_counter = ac->clabel->mod_counter;
3458 1.87 oster }
3459 1.86 oster }
3460 1.86 oster ac = ac->next;
3461 1.86 oster }
3462 1.86 oster
3463 1.51 oster num_missing = 0;
3464 1.51 oster auto_config = cset->ac;
3465 1.51 oster
3466 1.166 oster even_pair_failed = 0;
3467 1.166 oster for(c=0; c<num_cols; c++) {
3468 1.166 oster ac = auto_config;
3469 1.166 oster while(ac!=NULL) {
3470 1.186 perry if ((ac->clabel->column == c) &&
3471 1.166 oster (ac->clabel->mod_counter == mod_counter)) {
3472 1.166 oster /* it's this one... */
3473 1.224 oster #ifdef DEBUG
3474 1.166 oster printf("Found: %s at %d\n",
3475 1.166 oster ac->devname,c);
3476 1.51 oster #endif
3477 1.166 oster break;
3478 1.51 oster }
3479 1.166 oster ac=ac->next;
3480 1.166 oster }
3481 1.166 oster if (ac==NULL) {
3482 1.51 oster /* Didn't find one here! */
3483 1.88 oster /* special case for RAID 1, especially
3484 1.88 oster where there are more than 2
3485 1.88 oster components (where RAIDframe treats
3486 1.88 oster things a little differently :( ) */
3487 1.166 oster if (parity_type == '1') {
3488 1.166 oster if (c%2 == 0) { /* even component */
3489 1.166 oster even_pair_failed = 1;
3490 1.166 oster } else { /* odd component. If
3491 1.166 oster we're failed, and
3492 1.166 oster so is the even
3493 1.166 oster component, it's
3494 1.166 oster "Good Night, Charlie" */
3495 1.166 oster if (even_pair_failed == 1) {
3496 1.166 oster return(0);
3497 1.88 oster }
3498 1.88 oster }
3499 1.166 oster } else {
3500 1.166 oster /* normal accounting */
3501 1.166 oster num_missing++;
3502 1.88 oster }
3503 1.166 oster }
3504 1.166 oster if ((parity_type == '1') && (c%2 == 1)) {
3505 1.88 oster /* Just did an even component, and we didn't
3506 1.186 perry bail.. reset the even_pair_failed flag,
3507 1.88 oster and go on to the next component.... */
3508 1.166 oster even_pair_failed = 0;
3509 1.51 oster }
3510 1.51 oster }
3511 1.51 oster
3512 1.51 oster clabel = cset->ac->clabel;
3513 1.51 oster
3514 1.51 oster if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
3515 1.51 oster ((clabel->parityConfig == '4') && (num_missing > 1)) ||
3516 1.51 oster ((clabel->parityConfig == '5') && (num_missing > 1))) {
3517 1.51 oster /* XXX this needs to be made *much* more general */
3518 1.51 oster /* Too many failures */
3519 1.51 oster return(0);
3520 1.51 oster }
3521 1.51 oster /* otherwise, all is well, and we've got enough to take a kick
3522 1.51 oster at autoconfiguring this set */
3523 1.51 oster return(1);
3524 1.48 oster }
3525 1.48 oster
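/*
 * Build an RF_Config_t for this set from the component labels, filling
 * in the geometry, the queueing parameters, and the device name for
 * each column.
 */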
3526 1.393 mrg static void
3527 1.169 oster rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
3528 1.222 christos RF_Raid_t *raidPtr)
3529 1.48 oster {
3530 1.48 oster RF_ComponentLabel_t *clabel;
3531 1.77 oster int i;
3532 1.48 oster
3533 1.48 oster clabel = ac->clabel;
3534 1.48 oster
3535 1.48 oster /* 1. Fill in the common stuff */
3536 1.48 oster config->numCol = clabel->num_columns;
3537 1.48 oster config->numSpare = 0; /* XXX should this be set here? */
3538 1.48 oster config->sectPerSU = clabel->sectPerSU;
3539 1.48 oster config->SUsPerPU = clabel->SUsPerPU;
3540 1.48 oster config->SUsPerRU = clabel->SUsPerRU;
3541 1.48 oster config->parityConfig = clabel->parityConfig;
3542 1.48 oster /* XXX... */
3543 1.48 oster strcpy(config->diskQueueType,"fifo");
3544 1.48 oster config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3545 1.48 oster config->layoutSpecificSize = 0; /* XXX ?? */
3546 1.48 oster
3547 1.48 oster while(ac!=NULL) {
3548 1.48 oster /* row/col values will be in range due to the checks
3549 1.48 oster 		   in rf_reasonable_label() */
3550 1.166 oster strcpy(config->devnames[0][ac->clabel->column],
3551 1.48 oster ac->devname);
3552 1.48 oster ac = ac->next;
3553 1.48 oster }
3554 1.48 oster
3555 1.77 oster for(i=0;i<RF_MAXDBGV;i++) {
3556 1.163 fvdl config->debugVars[i][0] = 0;
3557 1.77 oster }
3558 1.48 oster }
3559 1.48 oster
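/*
 * Set the autoconfigure flag in the component labels of all live
 * components (and used spares) and flush the labels.  Returns the new
 * value.
 */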
3560 1.393 mrg static int
3561 1.169 oster rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
3562 1.48 oster {
3563 1.269 jld RF_ComponentLabel_t *clabel;
3564 1.166 oster int column;
3565 1.148 oster int sparecol;
3566 1.48 oster
3567 1.54 oster raidPtr->autoconfigure = new_value;
3568 1.166 oster
3569 1.166 oster for(column=0; column<raidPtr->numCol; column++) {
3570 1.166 oster if (raidPtr->Disks[column].status == rf_ds_optimal) {
3571 1.269 jld clabel = raidget_component_label(raidPtr, column);
3572 1.269 jld clabel->autoconfigure = new_value;
3573 1.269 jld raidflush_component_label(raidPtr, column);
3574 1.48 oster }
3575 1.48 oster }
3576 1.148 oster for(column = 0; column < raidPtr->numSpare ; column++) {
3577 1.148 oster sparecol = raidPtr->numCol + column;
3578 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3579 1.269 jld clabel = raidget_component_label(raidPtr, sparecol);
3580 1.269 jld clabel->autoconfigure = new_value;
3581 1.269 jld raidflush_component_label(raidPtr, sparecol);
3582 1.148 oster }
3583 1.148 oster }
3584 1.48 oster return(new_value);
3585 1.48 oster }
3586 1.48 oster
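/*
 * As above, but for the root_partition setting in the component labels.
 */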
3587 1.393 mrg static int
3588 1.169 oster rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
3589 1.48 oster {
3590 1.269 jld RF_ComponentLabel_t *clabel;
3591 1.166 oster int column;
3592 1.148 oster int sparecol;
3593 1.48 oster
3594 1.54 oster raidPtr->root_partition = new_value;
3595 1.166 oster for(column=0; column<raidPtr->numCol; column++) {
3596 1.166 oster if (raidPtr->Disks[column].status == rf_ds_optimal) {
3597 1.269 jld clabel = raidget_component_label(raidPtr, column);
3598 1.269 jld clabel->root_partition = new_value;
3599 1.269 jld raidflush_component_label(raidPtr, column);
3600 1.148 oster }
3601 1.148 oster }
3602 1.148 oster for(column = 0; column < raidPtr->numSpare ; column++) {
3603 1.148 oster sparecol = raidPtr->numCol + column;
3604 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3605 1.269 jld clabel = raidget_component_label(raidPtr, sparecol);
3606 1.269 jld clabel->root_partition = new_value;
3607 1.269 jld raidflush_component_label(raidPtr, sparecol);
3608 1.48 oster }
3609 1.48 oster }
3610 1.48 oster return(new_value);
3611 1.48 oster }
3612 1.48 oster
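/*
 * Close and release the vnodes of every component in the config set.
 */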
3613 1.393 mrg static void
3614 1.169 oster rf_release_all_vps(RF_ConfigSet_t *cset)
3615 1.48 oster {
3616 1.48 oster RF_AutoConfig_t *ac;
3617 1.186 perry
3618 1.48 oster ac = cset->ac;
3619 1.48 oster while(ac!=NULL) {
3620 1.48 oster /* Close the vp, and give it back */
3621 1.48 oster if (ac->vp) {
3622 1.96 oster vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3623 1.335 mlelstv VOP_CLOSE(ac->vp, FREAD | FWRITE, NOCRED);
3624 1.48 oster vput(ac->vp);
3625 1.86 oster ac->vp = NULL;
3626 1.48 oster }
3627 1.48 oster ac = ac->next;
3628 1.48 oster }
3629 1.48 oster }
3630 1.48 oster
3631 1.48 oster
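/*
 * Free the component labels, the RF_AutoConfig_t entries, and the
 * config set itself.
 */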
3632 1.393 mrg static void
3633 1.169 oster rf_cleanup_config_set(RF_ConfigSet_t *cset)
3634 1.48 oster {
3635 1.48 oster RF_AutoConfig_t *ac;
3636 1.48 oster RF_AutoConfig_t *next_ac;
3637 1.186 perry
3638 1.48 oster ac = cset->ac;
3639 1.48 oster while(ac!=NULL) {
3640 1.48 oster next_ac = ac->next;
3641 1.48 oster /* nuke the label */
3642 1.48 oster free(ac->clabel, M_RAIDFRAME);
3643 1.48 oster /* cleanup the config structure */
3644 1.48 oster free(ac, M_RAIDFRAME);
3645 1.48 oster /* "next.." */
3646 1.48 oster ac = next_ac;
3647 1.48 oster }
3648 1.48 oster /* and, finally, nuke the config set */
3649 1.48 oster free(cset, M_RAIDFRAME);
3650 1.48 oster }
3651 1.48 oster
3652 1.48 oster
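/*
 * Initialize a component label from the current state of the RAID set:
 * geometry, serial number, mod counter, and configuration flags.
 */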
3653 1.48 oster void
3654 1.169 oster raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
3655 1.48 oster {
3656 1.394 mrg /* avoid over-writing byteswapped version. */
3657 1.394 mrg if (clabel->version != bswap32(RF_COMPONENT_LABEL_VERSION))
3658 1.394 mrg clabel->version = RF_COMPONENT_LABEL_VERSION;
3659 1.57 oster clabel->serial_number = raidPtr->serial_number;
3660 1.48 oster clabel->mod_counter = raidPtr->mod_counter;
3661 1.269 jld
3662 1.166 oster clabel->num_rows = 1;
3663 1.48 oster clabel->num_columns = raidPtr->numCol;
3664 1.48 oster clabel->clean = RF_RAID_DIRTY; /* not clean */
3665 1.48 oster clabel->status = rf_ds_optimal; /* "It's good!" */
3666 1.186 perry
3667 1.48 oster clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
3668 1.48 oster clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
3669 1.48 oster clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
3670 1.54 oster
3671 1.54 oster clabel->blockSize = raidPtr->bytesPerSector;
3672 1.282 enami rf_component_label_set_numblocks(clabel, raidPtr->sectorsPerDisk);
3673 1.54 oster
3674 1.48 oster /* XXX not portable */
3675 1.48 oster clabel->parityConfig = raidPtr->Layout.map->parityConfig;
3676 1.54 oster clabel->maxOutstanding = raidPtr->maxOutstanding;
3677 1.54 oster clabel->autoconfigure = raidPtr->autoconfigure;
3678 1.54 oster clabel->root_partition = raidPtr->root_partition;
3679 1.48 oster clabel->last_unit = raidPtr->raidid;
3680 1.54 oster clabel->config_order = raidPtr->config_order;
3681 1.269 jld
3682 1.269 jld #ifndef RF_NO_PARITY_MAP
3683 1.269 jld rf_paritymap_init_label(raidPtr->parity_map, clabel);
3684 1.269 jld #endif
3685 1.51 oster }
3686 1.51 oster
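/*
 * Auto-configure a config set: pick a unit number (preferably the one
 * the set was last configured as), build a configuration from the
 * component labels, and configure the set.  Returns the softc on
 * success, or NULL if configuration fails.
 */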
3687 1.393 mrg static struct raid_softc *
3688 1.300 christos rf_auto_config_set(RF_ConfigSet_t *cset)
3689 1.51 oster {
3690 1.51 oster RF_Raid_t *raidPtr;
3691 1.51 oster RF_Config_t *config;
3692 1.51 oster int raidID;
3693 1.300 christos struct raid_softc *sc;
3694 1.51 oster
3695 1.224 oster #ifdef DEBUG
3696 1.72 oster printf("RAID autoconfigure\n");
3697 1.127 oster #endif
3698 1.51 oster
3699 1.51 oster /* 1. Create a config structure */
3700 1.379 chs config = malloc(sizeof(*config), M_RAIDFRAME, M_WAITOK|M_ZERO);
3701 1.77 oster
3702 1.186 perry /*
3703 1.186 perry 	   2. Figure out what RAID ID this one is supposed to live at.
3704 1.51 oster See if we can get the same RAID dev that it was configured
3705 1.186 perry on last time..
3706 1.51 oster */
3707 1.51 oster
3708 1.51 oster raidID = cset->ac->clabel->last_unit;
3709 1.327 pgoyette for (sc = raidget(raidID, false); sc && sc->sc_r.valid != 0;
3710 1.327 pgoyette sc = raidget(++raidID, false))
3711 1.300 christos continue;
3712 1.224 oster #ifdef DEBUG
3713 1.72 oster printf("Configuring raid%d:\n",raidID);
3714 1.127 oster #endif
3715 1.127 oster
3716 1.327 pgoyette if (sc == NULL)
3717 1.327 pgoyette sc = raidget(raidID, true);
3718 1.300 christos raidPtr = &sc->sc_r;
3719 1.51 oster
3720 1.51 oster /* XXX all this stuff should be done SOMEWHERE ELSE! */
3721 1.302 christos raidPtr->softc = sc;
3722 1.51 oster raidPtr->raidid = raidID;
3723 1.51 oster raidPtr->openings = RAIDOUTSTANDING;
3724 1.51 oster
3725 1.51 oster /* 3. Build the configuration structure */
3726 1.51 oster rf_create_configuration(cset->ac, config, raidPtr);
3727 1.51 oster
3728 1.51 oster /* 4. Do the configuration */
3729 1.300 christos if (rf_Configure(raidPtr, config, cset->ac) == 0) {
3730 1.300 christos raidinit(sc);
3731 1.186 perry
3732 1.300 christos rf_markalldirty(raidPtr);
3733 1.300 christos raidPtr->autoconfigure = 1; /* XXX do this here? */
3734 1.308 christos switch (cset->ac->clabel->root_partition) {
3735 1.308 christos case 1: /* Force Root */
3736 1.308 christos case 2: /* Soft Root: root when boot partition part of raid */
3737 1.308 christos /*
3738 1.308 christos * everything configured just fine. Make a note
3739 1.308 christos * that this set is eligible to be root,
3740 1.308 christos * or forced to be root
3741 1.308 christos */
3742 1.308 christos cset->rootable = cset->ac->clabel->root_partition;
3743 1.54 oster /* XXX do this here? */
3744 1.308 christos raidPtr->root_partition = cset->rootable;
3745 1.308 christos break;
3746 1.308 christos default:
3747 1.308 christos break;
3748 1.51 oster }
3749 1.300 christos } else {
3750 1.300 christos raidput(sc);
3751 1.300 christos sc = NULL;
3752 1.51 oster }
3753 1.51 oster
3754 1.51 oster /* 5. Cleanup */
3755 1.51 oster free(config, M_RAIDFRAME);
3756 1.300 christos return sc;
3757 1.99 oster }
3758 1.99 oster
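/*
 * Initialize a per-set memory pool.  The wait channel is formatted as
 * "raid<unit>_<pool_name>", and the pool is primed to xmin items with a
 * high-water mark of xmax.
 */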
3759 1.99 oster void
3760 1.395 oster rf_pool_init(RF_Raid_t *raidPtr, char *w_chan, struct pool *p, size_t size, const char *pool_name,
3761 1.187 christos size_t xmin, size_t xmax)
3762 1.177 oster {
3763 1.352 christos
3764 1.395 oster /* Format: raid%d_foo */
3765 1.395 oster snprintf(w_chan, RF_MAX_POOLNAMELEN, "raid%d_%s", raidPtr->raidid, pool_name);
3766 1.395 oster
3767 1.227 ad pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
3768 1.187 christos pool_sethiwat(p, xmax);
3769 1.382 chs pool_prime(p, xmin);
3770 1.177 oster }
3771 1.190 oster
3772 1.395 oster
3773 1.190 oster /*
3774 1.335 mlelstv * rf_buf_queue_check(RF_Raid_t raidPtr) -- looks into the buffer queue
3775 1.335 mlelstv * to see if there is IO pending and if that IO could possibly be done
3776 1.335 mlelstv * for a given RAID set. Returns 0 if IO is waiting and can be done, 1
3777 1.190 oster * otherwise.
3778 1.190 oster *
3779 1.190 oster */
3780 1.190 oster int
3781 1.300 christos rf_buf_queue_check(RF_Raid_t *raidPtr)
3782 1.190 oster {
3783 1.335 mlelstv struct raid_softc *rs;
3784 1.335 mlelstv struct dk_softc *dksc;
3785 1.335 mlelstv
3786 1.335 mlelstv rs = raidPtr->softc;
3787 1.335 mlelstv dksc = &rs->sc_dksc;
3788 1.335 mlelstv
3789 1.335 mlelstv if ((rs->sc_flags & RAIDF_INITED) == 0)
3790 1.335 mlelstv return 1;
3791 1.335 mlelstv
3792 1.335 mlelstv if (dk_strategy_pending(dksc) && raidPtr->openings > 0) {
3793 1.190 oster /* there is work to do */
3794 1.190 oster return 0;
3795 1.335 mlelstv }
3796 1.190 oster /* default is nothing to do */
3797 1.190 oster return 1;
3798 1.190 oster }
3799 1.213 christos
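/*
 * Query a component's size and sector size and fill in the blockSize,
 * numBlocks and partitionSize fields of the RF_RaidDisk_t, reserving
 * rf_protectedSectors out of the raw size.
 */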
3800 1.213 christos int
3801 1.294 oster rf_getdisksize(struct vnode *vp, RF_RaidDisk_t *diskPtr)
3802 1.213 christos {
3803 1.275 mrg uint64_t numsecs;
3804 1.275 mrg unsigned secsize;
3805 1.213 christos int error;
3806 1.213 christos
3807 1.275 mrg error = getdisksize(vp, &numsecs, &secsize);
3808 1.213 christos if (error == 0) {
3809 1.275 mrg diskPtr->blockSize = secsize;
3810 1.275 mrg diskPtr->numBlocks = numsecs - rf_protectedSectors;
3811 1.275 mrg diskPtr->partitionSize = numsecs;
3812 1.213 christos return 0;
3813 1.213 christos }
3814 1.213 christos return error;
3815 1.213 christos }
3816 1.217 oster
3817 1.217 oster static int
3818 1.261 dyoung raid_match(device_t self, cfdata_t cfdata, void *aux)
3819 1.217 oster {
3820 1.217 oster return 1;
3821 1.217 oster }
3822 1.217 oster
3823 1.217 oster static void
3824 1.261 dyoung raid_attach(device_t parent, device_t self, void *aux)
3825 1.217 oster {
3826 1.217 oster }
3827 1.217 oster
3828 1.217 oster
3829 1.217 oster static int
3830 1.261 dyoung raid_detach(device_t self, int flags)
3831 1.217 oster {
3832 1.266 dyoung int error;
3833 1.335 mlelstv struct raid_softc *rs = raidsoftc(self);
3834 1.303 christos
3835 1.303 christos if (rs == NULL)
3836 1.303 christos return ENXIO;
3837 1.266 dyoung
3838 1.266 dyoung if ((error = raidlock(rs)) != 0)
3839 1.389 skrll return error;
3840 1.217 oster
3841 1.266 dyoung error = raid_detach_unlocked(rs);
3842 1.266 dyoung
3843 1.332 mlelstv raidunlock(rs);
3844 1.332 mlelstv
3845 1.332 mlelstv /* XXX raid can be referenced here */
3846 1.332 mlelstv
3847 1.332 mlelstv if (error)
3848 1.332 mlelstv return error;
3849 1.332 mlelstv
3850 1.332 mlelstv /* Free the softc */
3851 1.332 mlelstv raidput(rs);
3852 1.332 mlelstv
3853 1.332 mlelstv return 0;
3854 1.217 oster }
3855 1.217 oster
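/*
 * Derive a disk geometry for the logical RAID device from its total
 * size and layout, and register it via disk_set_info().
 */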
3856 1.234 oster static void
3857 1.304 christos rf_set_geometry(struct raid_softc *rs, RF_Raid_t *raidPtr)
3858 1.234 oster {
3859 1.335 mlelstv struct dk_softc *dksc = &rs->sc_dksc;
3860 1.335 mlelstv struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
3861 1.304 christos
3862 1.304 christos memset(dg, 0, sizeof(*dg));
3863 1.304 christos
3864 1.304 christos dg->dg_secperunit = raidPtr->totalSectors;
3865 1.304 christos dg->dg_secsize = raidPtr->bytesPerSector;
3866 1.304 christos dg->dg_nsectors = raidPtr->Layout.dataSectorsPerStripe;
3867 1.304 christos dg->dg_ntracks = 4 * raidPtr->numCol;
3868 1.304 christos
3869 1.335 mlelstv disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
3870 1.234 oster }
3871 1.252 oster
3872 1.348 jdolecek /*
3873 1.348 jdolecek * Get cache info for all the components (including spares).
3874 1.348 jdolecek * Returns intersection of all the cache flags of all disks, or first
3875 1.348 jdolecek * error if any encountered.
3876 1.348 jdolecek * XXXfua feature flags can change as spares are added - lock down somehow
3877 1.348 jdolecek */
3878 1.348 jdolecek static int
3879 1.348 jdolecek rf_get_component_caches(RF_Raid_t *raidPtr, int *data)
3880 1.348 jdolecek {
3881 1.348 jdolecek int c;
3882 1.348 jdolecek int error;
3883 1.348 jdolecek int dkwhole = 0, dkpart;
3884 1.385 riastrad
3885 1.348 jdolecek for (c = 0; c < raidPtr->numCol + raidPtr->numSpare; c++) {
3886 1.348 jdolecek /*
3887 1.348 jdolecek * Check any non-dead disk, even when currently being
3888 1.348 jdolecek * reconstructed.
3889 1.348 jdolecek */
3890 1.410.4.2 martin if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
3891 1.348 jdolecek error = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp,
3892 1.348 jdolecek DIOCGCACHE, &dkpart, FREAD, NOCRED);
3893 1.348 jdolecek if (error) {
3894 1.348 jdolecek if (error != ENODEV) {
3895 1.348 jdolecek printf("raid%d: get cache for component %s failed\n",
3896 1.348 jdolecek raidPtr->raidid,
3897 1.348 jdolecek raidPtr->Disks[c].devname);
3898 1.348 jdolecek }
3899 1.348 jdolecek
3900 1.348 jdolecek return error;
3901 1.348 jdolecek }
3902 1.348 jdolecek
3903 1.348 jdolecek if (c == 0)
3904 1.348 jdolecek dkwhole = dkpart;
3905 1.348 jdolecek else
3906 1.348 jdolecek dkwhole = DKCACHE_COMBINE(dkwhole, dkpart);
3907 1.348 jdolecek }
3908 1.348 jdolecek }
3909 1.348 jdolecek
3910 1.349 jdolecek *data = dkwhole;
3911 1.348 jdolecek
3912 1.348 jdolecek return 0;
3913 1.348 jdolecek }
3914 1.348 jdolecek
3915 1.385 riastrad /*
3916 1.252 oster * Implement forwarding of the DIOCCACHESYNC ioctl to each of the components.
3917 1.252 oster * We end up returning whatever error was returned by the first cache flush
3918 1.252 oster * that fails.
3919 1.252 oster */
3920 1.252 oster
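/*
 * Flush the write cache of a single component.  The DIOCCACHESYNC ioctl
 * is retried a few times on failure; ENODEV is returned immediately
 * since retrying won't help.
 */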
3921 1.386 christos static int
3922 1.390 christos rf_sync_component_cache(RF_Raid_t *raidPtr, int c, int force)
3923 1.386 christos {
3924 1.386 christos int e = 0;
3925 1.386 christos for (int i = 0; i < 5; i++) {
3926 1.386 christos e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp, DIOCCACHESYNC,
3927 1.386 christos &force, FWRITE, NOCRED);
3928 1.386 christos if (!e || e == ENODEV)
3929 1.386 christos return e;
3930 1.386 christos printf("raid%d: cache flush[%d] to component %s failed (%d)\n",
3931 1.386 christos raidPtr->raidid, i, raidPtr->Disks[c].devname, e);
3932 1.386 christos }
3933 1.387 christos return e;
3934 1.386 christos }
3935 1.386 christos
3936 1.269 jld int
3937 1.390 christos rf_sync_component_caches(RF_Raid_t *raidPtr, int force)
3938 1.252 oster {
3939 1.386 christos int c, error;
3940 1.385 riastrad
3941 1.252 oster error = 0;
3942 1.252 oster for (c = 0; c < raidPtr->numCol; c++) {
3943 1.252 oster if (raidPtr->Disks[c].status == rf_ds_optimal) {
3944 1.390 christos int e = rf_sync_component_cache(raidPtr, c, force);
3945 1.387 christos if (e && !error)
3946 1.386 christos error = e;
3947 1.252 oster }
3948 1.252 oster }
3949 1.252 oster
3950 1.386 christos for (c = 0; c < raidPtr->numSpare ; c++) {
3951 1.386 christos int sparecol = raidPtr->numCol + c;
3952 1.252 oster /* Need to ensure that the reconstruct actually completed! */
3953 1.252 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3954 1.390 christos int e = rf_sync_component_cache(raidPtr, sparecol,
3955 1.390 christos force);
3956 1.387 christos if (e && !error)
3957 1.386 christos error = e;
3958 1.252 oster }
3959 1.252 oster }
3960 1.252 oster return error;
3961 1.252 oster }
3962 1.327 pgoyette
3963 1.353 mrg /* Fill in info with the current status */
3964 1.353 mrg void
3965 1.353 mrg rf_check_recon_status_ext(RF_Raid_t *raidPtr, RF_ProgressInfo_t *info)
3966 1.353 mrg {
3967 1.353 mrg
3968 1.401 riastrad memset(info, 0, sizeof(*info));
3969 1.401 riastrad
3970 1.353 mrg if (raidPtr->status != rf_rs_reconstructing) {
3971 1.353 mrg info->total = 100;
3972 1.353 mrg info->completed = 100;
3973 1.353 mrg } else {
3974 1.353 mrg info->total = raidPtr->reconControl->numRUsTotal;
3975 1.353 mrg info->completed = raidPtr->reconControl->numRUsComplete;
3976 1.353 mrg }
3977 1.353 mrg info->remaining = info->total - info->completed;
3978 1.353 mrg }
3979 1.353 mrg
3980 1.353 mrg /* Fill in info with the current status */
3981 1.353 mrg void
3982 1.353 mrg rf_check_parityrewrite_status_ext(RF_Raid_t *raidPtr, RF_ProgressInfo_t *info)
3983 1.353 mrg {
3984 1.353 mrg
3985 1.401 riastrad memset(info, 0, sizeof(*info));
3986 1.401 riastrad
3987 1.353 mrg if (raidPtr->parity_rewrite_in_progress == 1) {
3988 1.353 mrg info->total = raidPtr->Layout.numStripe;
3989 1.353 mrg info->completed = raidPtr->parity_rewrite_stripes_done;
3990 1.353 mrg } else {
3991 1.353 mrg info->completed = 100;
3992 1.353 mrg info->total = 100;
3993 1.353 mrg }
3994 1.353 mrg info->remaining = info->total - info->completed;
3995 1.353 mrg }
3996 1.353 mrg
3997 1.353 mrg /* Fill in info with the current status */
3998 1.353 mrg void
3999 1.353 mrg rf_check_copyback_status_ext(RF_Raid_t *raidPtr, RF_ProgressInfo_t *info)
4000 1.353 mrg {
4001 1.353 mrg
4002 1.401 riastrad memset(info, 0, sizeof(*info));
4003 1.401 riastrad
4004 1.353 mrg if (raidPtr->copyback_in_progress == 1) {
4005 1.353 mrg info->total = raidPtr->Layout.numStripe;
4006 1.353 mrg info->completed = raidPtr->copyback_stripes_done;
4007 1.353 mrg info->remaining = info->total - info->completed;
4008 1.353 mrg } else {
4009 1.353 mrg info->remaining = 0;
4010 1.353 mrg info->completed = 100;
4011 1.353 mrg info->total = 100;
4012 1.353 mrg }
4013 1.353 mrg }
4014 1.353 mrg
4015 1.353 mrg /* Fill in config with the current info */
4016 1.353 mrg int
4017 1.353 mrg rf_get_info(RF_Raid_t *raidPtr, RF_DeviceConfig_t *config)
4018 1.353 mrg {
4019 1.353 mrg int d, i, j;
4020 1.353 mrg
4021 1.353 mrg if (!raidPtr->valid)
4022 1.389 skrll return ENODEV;
4023 1.353 mrg config->cols = raidPtr->numCol;
4024 1.353 mrg config->ndevs = raidPtr->numCol;
4025 1.353 mrg if (config->ndevs >= RF_MAX_DISKS)
4026 1.389 skrll return ENOMEM;
4027 1.353 mrg config->nspares = raidPtr->numSpare;
4028 1.353 mrg if (config->nspares >= RF_MAX_DISKS)
4029 1.389 skrll return ENOMEM;
4030 1.353 mrg config->maxqdepth = raidPtr->maxQueueDepth;
4031 1.353 mrg d = 0;
4032 1.353 mrg for (j = 0; j < config->cols; j++) {
4033 1.353 mrg config->devs[d] = raidPtr->Disks[j];
4034 1.353 mrg d++;
4035 1.353 mrg }
4036 1.353 mrg for (j = config->cols, i = 0; i < config->nspares; i++, j++) {
4037 1.353 mrg config->spares[i] = raidPtr->Disks[j];
4038 1.353 mrg if (config->spares[i].status == rf_ds_rebuilding_spare) {
4039 1.353 mrg /* XXX: raidctl(8) expects to see this as a used spare */
4040 1.353 mrg config->spares[i].status = rf_ds_used_spare;
4041 1.353 mrg }
4042 1.353 mrg }
4043 1.353 mrg return 0;
4044 1.353 mrg }
4045 1.353 mrg
4046 1.353 mrg int
4047 1.353 mrg rf_get_component_label(RF_Raid_t *raidPtr, void *data)
4048 1.353 mrg {
4049 1.353 mrg RF_ComponentLabel_t *clabel = (RF_ComponentLabel_t *)data;
4050 1.353 mrg RF_ComponentLabel_t *raid_clabel;
4051 1.353 mrg int column = clabel->column;
4052 1.353 mrg
4053 1.353 mrg if ((column < 0) || (column >= raidPtr->numCol + raidPtr->numSpare))
4054 1.353 mrg return EINVAL;
4055 1.353 mrg raid_clabel = raidget_component_label(raidPtr, column);
4056 1.353 mrg memcpy(clabel, raid_clabel, sizeof *clabel);
4057 1.394 mrg /* Fix-up for userland. */
4058 1.394 mrg if (clabel->version == bswap32(RF_COMPONENT_LABEL_VERSION))
4059 1.394 mrg clabel->version = RF_COMPONENT_LABEL_VERSION;
4060 1.353 mrg
4061 1.353 mrg return 0;
4062 1.353 mrg }
4063 1.353 mrg
4064 1.327 pgoyette /*
4065 1.327 pgoyette * Module interface
4066 1.327 pgoyette */
4067 1.327 pgoyette
4068 1.356 pgoyette MODULE(MODULE_CLASS_DRIVER, raid, "dk_subr,bufq_fcfs");
4069 1.327 pgoyette
4070 1.327 pgoyette #ifdef _MODULE
4071 1.327 pgoyette CFDRIVER_DECL(raid, DV_DISK, NULL);
4072 1.327 pgoyette #endif
4073 1.327 pgoyette
4074 1.327 pgoyette static int raid_modcmd(modcmd_t, void *);
4075 1.327 pgoyette static int raid_modcmd_init(void);
4076 1.327 pgoyette static int raid_modcmd_fini(void);
4077 1.327 pgoyette
4078 1.327 pgoyette static int
4079 1.327 pgoyette raid_modcmd(modcmd_t cmd, void *data)
4080 1.327 pgoyette {
4081 1.327 pgoyette int error;
4082 1.327 pgoyette
4083 1.327 pgoyette error = 0;
4084 1.327 pgoyette switch (cmd) {
4085 1.327 pgoyette case MODULE_CMD_INIT:
4086 1.327 pgoyette error = raid_modcmd_init();
4087 1.327 pgoyette break;
4088 1.327 pgoyette case MODULE_CMD_FINI:
4089 1.327 pgoyette error = raid_modcmd_fini();
4090 1.327 pgoyette break;
4091 1.327 pgoyette default:
4092 1.327 pgoyette error = ENOTTY;
4093 1.327 pgoyette break;
4094 1.327 pgoyette }
4095 1.327 pgoyette return error;
4096 1.327 pgoyette }
4097 1.327 pgoyette
4098 1.327 pgoyette static int
4099 1.327 pgoyette raid_modcmd_init(void)
4100 1.327 pgoyette {
4101 1.327 pgoyette int error;
4102 1.327 pgoyette int bmajor, cmajor;
4103 1.327 pgoyette
4104 1.327 pgoyette mutex_init(&raid_lock, MUTEX_DEFAULT, IPL_NONE);
4105 1.327 pgoyette mutex_enter(&raid_lock);
4106 1.327 pgoyette #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
4107 1.327 pgoyette rf_init_mutex2(rf_sparet_wait_mutex, IPL_VM);
4108 1.327 pgoyette rf_init_cond2(rf_sparet_wait_cv, "sparetw");
4109 1.327 pgoyette rf_init_cond2(rf_sparet_resp_cv, "rfgst");
4110 1.327 pgoyette
4111 1.327 pgoyette rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
4112 1.327 pgoyette #endif
4113 1.327 pgoyette
4114 1.327 pgoyette bmajor = cmajor = -1;
4115 1.327 pgoyette error = devsw_attach("raid", &raid_bdevsw, &bmajor,
4116 1.327 pgoyette &raid_cdevsw, &cmajor);
4117 1.327 pgoyette if (error != 0 && error != EEXIST) {
4118 1.327 pgoyette aprint_error("%s: devsw_attach failed %d\n", __func__, error);
4119 1.327 pgoyette mutex_exit(&raid_lock);
4120 1.327 pgoyette return error;
4121 1.327 pgoyette }
4122 1.327 pgoyette #ifdef _MODULE
4123 1.327 pgoyette error = config_cfdriver_attach(&raid_cd);
4124 1.327 pgoyette if (error != 0) {
4125 1.327 pgoyette aprint_error("%s: config_cfdriver_attach failed %d\n",
4126 1.327 pgoyette __func__, error);
4127 1.327 pgoyette devsw_detach(&raid_bdevsw, &raid_cdevsw);
4128 1.327 pgoyette mutex_exit(&raid_lock);
4129 1.327 pgoyette return error;
4130 1.327 pgoyette }
4131 1.327 pgoyette #endif
4132 1.327 pgoyette error = config_cfattach_attach(raid_cd.cd_name, &raid_ca);
4133 1.327 pgoyette if (error != 0) {
4134 1.327 pgoyette aprint_error("%s: config_cfattach_attach failed %d\n",
4135 1.327 pgoyette __func__, error);
4136 1.327 pgoyette #ifdef _MODULE
4137 1.327 pgoyette config_cfdriver_detach(&raid_cd);
4138 1.327 pgoyette #endif
4139 1.327 pgoyette devsw_detach(&raid_bdevsw, &raid_cdevsw);
4140 1.327 pgoyette mutex_exit(&raid_lock);
4141 1.327 pgoyette return error;
4142 1.327 pgoyette }
4143 1.327 pgoyette
4144 1.327 pgoyette raidautoconfigdone = false;
4145 1.327 pgoyette
4146 1.327 pgoyette mutex_exit(&raid_lock);
4147 1.327 pgoyette
4148 1.327 pgoyette if (error == 0) {
4149 1.327 pgoyette if (rf_BootRaidframe(true) == 0)
4150 1.327 pgoyette aprint_verbose("Kernelized RAIDframe activated\n");
4151 1.327 pgoyette else
4152 1.327 pgoyette panic("Serious error activating RAID!!");
4153 1.327 pgoyette }
4154 1.327 pgoyette
4155 1.327 pgoyette /*
4156 1.327 pgoyette * Register a finalizer which will be used to auto-config RAID
4157 1.327 pgoyette * sets once all real hardware devices have been found.
4158 1.327 pgoyette */
4159 1.327 pgoyette error = config_finalize_register(NULL, rf_autoconfig);
4160 1.327 pgoyette if (error != 0) {
4161 1.327 pgoyette aprint_error("WARNING: unable to register RAIDframe "
4162 1.327 pgoyette "finalizer\n");
4163 1.329 pgoyette error = 0;
4164 1.327 pgoyette }
4165 1.327 pgoyette
4166 1.327 pgoyette return error;
4167 1.327 pgoyette }
4168 1.327 pgoyette
4169 1.327 pgoyette static int
4170 1.327 pgoyette raid_modcmd_fini(void)
4171 1.327 pgoyette {
4172 1.327 pgoyette int error;
4173 1.327 pgoyette
4174 1.327 pgoyette mutex_enter(&raid_lock);
4175 1.327 pgoyette
4176 1.327 pgoyette /* Don't allow unload if raid device(s) exist. */
4177 1.327 pgoyette if (!LIST_EMPTY(&raids)) {
4178 1.327 pgoyette mutex_exit(&raid_lock);
4179 1.327 pgoyette return EBUSY;
4180 1.327 pgoyette }
4181 1.327 pgoyette
4182 1.327 pgoyette error = config_cfattach_detach(raid_cd.cd_name, &raid_ca);
4183 1.327 pgoyette if (error != 0) {
4184 1.335 mlelstv aprint_error("%s: cannot detach cfattach\n",__func__);
4185 1.327 pgoyette mutex_exit(&raid_lock);
4186 1.327 pgoyette return error;
4187 1.327 pgoyette }
4188 1.327 pgoyette #ifdef _MODULE
4189 1.327 pgoyette error = config_cfdriver_detach(&raid_cd);
4190 1.327 pgoyette if (error != 0) {
4191 1.335 mlelstv aprint_error("%s: cannot detach cfdriver\n",__func__);
4192 1.327 pgoyette config_cfattach_attach(raid_cd.cd_name, &raid_ca);
4193 1.327 pgoyette mutex_exit(&raid_lock);
4194 1.327 pgoyette return error;
4195 1.327 pgoyette }
4196 1.327 pgoyette #endif
4197 1.404 riastrad devsw_detach(&raid_bdevsw, &raid_cdevsw);
4198 1.327 pgoyette rf_BootRaidframe(false);
4199 1.327 pgoyette #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
4200 1.327 pgoyette rf_destroy_mutex2(rf_sparet_wait_mutex);
4201 1.327 pgoyette rf_destroy_cond2(rf_sparet_wait_cv);
4202 1.327 pgoyette rf_destroy_cond2(rf_sparet_resp_cv);
4203 1.327 pgoyette #endif
4204 1.327 pgoyette mutex_exit(&raid_lock);
4205 1.327 pgoyette mutex_destroy(&raid_lock);
4206 1.327 pgoyette
4207 1.327 pgoyette return error;
4208 1.327 pgoyette }
4209