1 1.298.2.3 tls /* $NetBSD: rf_netbsdkintf.c,v 1.298.2.5 2017/12/03 11:37:31 jdolecek Exp $ */
2 1.281 rmind
3 1.1 oster /*-
4 1.295 erh * Copyright (c) 1996, 1997, 1998, 2008-2011 The NetBSD Foundation, Inc.
5 1.1 oster * All rights reserved.
6 1.1 oster *
7 1.1 oster * This code is derived from software contributed to The NetBSD Foundation
8 1.1 oster * by Greg Oster; Jason R. Thorpe.
9 1.1 oster *
10 1.1 oster * Redistribution and use in source and binary forms, with or without
11 1.1 oster * modification, are permitted provided that the following conditions
12 1.1 oster * are met:
13 1.1 oster * 1. Redistributions of source code must retain the above copyright
14 1.1 oster * notice, this list of conditions and the following disclaimer.
15 1.1 oster * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 oster * notice, this list of conditions and the following disclaimer in the
17 1.1 oster * documentation and/or other materials provided with the distribution.
18 1.1 oster *
19 1.1 oster * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 oster * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 oster * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 oster * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 oster * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 oster * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 oster * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 oster * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 oster * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 oster * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 oster * POSSIBILITY OF SUCH DAMAGE.
30 1.1 oster */
31 1.1 oster
32 1.1 oster /*
33 1.281 rmind * Copyright (c) 1988 University of Utah.
34 1.1 oster * Copyright (c) 1990, 1993
35 1.1 oster * The Regents of the University of California. All rights reserved.
36 1.1 oster *
37 1.1 oster * This code is derived from software contributed to Berkeley by
38 1.1 oster * the Systems Programming Group of the University of Utah Computer
39 1.1 oster * Science Department.
40 1.1 oster *
41 1.1 oster * Redistribution and use in source and binary forms, with or without
42 1.1 oster * modification, are permitted provided that the following conditions
43 1.1 oster * are met:
44 1.1 oster * 1. Redistributions of source code must retain the above copyright
45 1.1 oster * notice, this list of conditions and the following disclaimer.
46 1.1 oster * 2. Redistributions in binary form must reproduce the above copyright
47 1.1 oster * notice, this list of conditions and the following disclaimer in the
48 1.1 oster * documentation and/or other materials provided with the distribution.
49 1.162 agc * 3. Neither the name of the University nor the names of its contributors
50 1.162 agc * may be used to endorse or promote products derived from this software
51 1.162 agc * without specific prior written permission.
52 1.162 agc *
53 1.162 agc * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 1.162 agc * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 1.162 agc * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 1.162 agc * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 1.162 agc * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 1.162 agc * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 1.162 agc * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 1.162 agc * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 1.162 agc * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 1.162 agc * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 1.162 agc * SUCH DAMAGE.
64 1.162 agc *
65 1.162 agc * from: Utah $Hdr: cd.c 1.6 90/11/28$
66 1.162 agc *
67 1.162 agc * @(#)cd.c 8.2 (Berkeley) 11/16/93
68 1.162 agc */
69 1.162 agc
70 1.162 agc /*
71 1.1 oster * Copyright (c) 1995 Carnegie-Mellon University.
72 1.1 oster * All rights reserved.
73 1.1 oster *
74 1.1 oster * Authors: Mark Holland, Jim Zelenka
75 1.1 oster *
76 1.1 oster * Permission to use, copy, modify and distribute this software and
77 1.1 oster * its documentation is hereby granted, provided that both the copyright
78 1.1 oster * notice and this permission notice appear in all copies of the
79 1.1 oster * software, derivative works or modified versions, and any portions
80 1.1 oster * thereof, and that both notices appear in supporting documentation.
81 1.1 oster *
82 1.1 oster * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
83 1.1 oster * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
84 1.1 oster * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
85 1.1 oster *
86 1.1 oster * Carnegie Mellon requests users of this software to return to
87 1.1 oster *
88 1.1 oster * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
89 1.1 oster * School of Computer Science
90 1.1 oster * Carnegie Mellon University
91 1.1 oster * Pittsburgh PA 15213-3890
92 1.1 oster *
93 1.1 oster * any improvements or extensions that they make and grant Carnegie the
94 1.1 oster * rights to redistribute these changes.
95 1.1 oster */
96 1.1 oster
97 1.1 oster /***********************************************************
98 1.1 oster *
99 1.1 oster * rf_kintf.c -- the kernel interface routines for RAIDframe
100 1.1 oster *
101 1.1 oster ***********************************************************/
102 1.112 lukem
103 1.112 lukem #include <sys/cdefs.h>
104 1.298.2.3 tls __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.298.2.5 2017/12/03 11:37:31 jdolecek Exp $");
105 1.251 ad
106 1.251 ad #ifdef _KERNEL_OPT
107 1.254 christos #include "opt_compat_netbsd.h"
108 1.251 ad #include "opt_raid_autoconfig.h"
109 1.251 ad #endif
110 1.1 oster
111 1.113 lukem #include <sys/param.h>
112 1.1 oster #include <sys/errno.h>
113 1.1 oster #include <sys/pool.h>
114 1.152 thorpej #include <sys/proc.h>
115 1.1 oster #include <sys/queue.h>
116 1.1 oster #include <sys/disk.h>
117 1.1 oster #include <sys/device.h>
118 1.1 oster #include <sys/stat.h>
119 1.1 oster #include <sys/ioctl.h>
120 1.1 oster #include <sys/fcntl.h>
121 1.1 oster #include <sys/systm.h>
122 1.1 oster #include <sys/vnode.h>
123 1.1 oster #include <sys/disklabel.h>
124 1.1 oster #include <sys/conf.h>
125 1.1 oster #include <sys/buf.h>
126 1.182 yamt #include <sys/bufq.h>
127 1.65 oster #include <sys/reboot.h>
128 1.208 elad #include <sys/kauth.h>
129 1.298.2.5 jdolecek #include <sys/module.h>
130 1.8 oster
131 1.234 oster #include <prop/proplib.h>
132 1.234 oster
133 1.110 oster #include <dev/raidframe/raidframevar.h>
134 1.110 oster #include <dev/raidframe/raidframeio.h>
135 1.269 jld #include <dev/raidframe/rf_paritymap.h>
136 1.251 ad
137 1.1 oster #include "rf_raid.h"
138 1.44 oster #include "rf_copyback.h"
139 1.1 oster #include "rf_dag.h"
140 1.1 oster #include "rf_dagflags.h"
141 1.99 oster #include "rf_desc.h"
142 1.1 oster #include "rf_diskqueue.h"
143 1.1 oster #include "rf_etimer.h"
144 1.1 oster #include "rf_general.h"
145 1.1 oster #include "rf_kintf.h"
146 1.1 oster #include "rf_options.h"
147 1.1 oster #include "rf_driver.h"
148 1.1 oster #include "rf_parityscan.h"
149 1.1 oster #include "rf_threadstuff.h"
150 1.1 oster
151 1.254 christos #ifdef COMPAT_50
152 1.254 christos #include "rf_compat50.h"
153 1.254 christos #endif
154 1.254 christos
155 1.298.2.5 jdolecek #include "ioconf.h"
156 1.298.2.5 jdolecek
157 1.133 oster #ifdef DEBUG
158 1.9 oster int rf_kdebug_level = 0;
159 1.1 oster #define db1_printf(a) if (rf_kdebug_level > 0) printf a
160 1.9 oster #else /* DEBUG */
161 1.1 oster #define db1_printf(a) { }
162 1.9 oster #endif /* DEBUG */
163 1.1 oster
164 1.298.2.5 jdolecek #ifdef DEBUG_ROOT
165 1.298.2.5 jdolecek #define DPRINTF(a, ...) printf(a, __VA_ARGS__)
166 1.298.2.5 jdolecek #else
167 1.298.2.5 jdolecek #define DPRINTF(a, ...)
168 1.298.2.5 jdolecek #endif
169 1.298.2.5 jdolecek
170 1.249 oster #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
171 1.289 mrg static rf_declare_mutex2(rf_sparet_wait_mutex);
172 1.287 mrg static rf_declare_cond2(rf_sparet_wait_cv);
173 1.287 mrg static rf_declare_cond2(rf_sparet_resp_cv);
174 1.1 oster
175 1.10 oster static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a
176 1.10 oster * spare table */
177 1.10 oster static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from
178 1.10 oster * installation process */
179 1.249 oster #endif
180 1.153 thorpej
181 1.153 thorpej MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
182 1.10 oster
183 1.1 oster /* prototypes */
184 1.187 christos static void KernelWakeupFunc(struct buf *);
185 1.187 christos static void InitBP(struct buf *, struct vnode *, unsigned,
186 1.225 christos dev_t, RF_SectorNum_t, RF_SectorCount_t, void *, void (*) (struct buf *),
187 1.187 christos void *, int, struct proc *);
188 1.298.2.3 tls struct raid_softc;
189 1.298.2.3 tls static void raidinit(struct raid_softc *);
190 1.298.2.5 jdolecek static int raiddoaccess(RF_Raid_t *raidPtr, struct buf *bp);
191 1.298.2.5 jdolecek static int rf_get_component_caches(RF_Raid_t *raidPtr, int *);
192 1.1 oster
193 1.261 dyoung static int raid_match(device_t, cfdata_t, void *);
194 1.261 dyoung static void raid_attach(device_t, device_t, void *);
195 1.261 dyoung static int raid_detach(device_t, int);
196 1.130 gehenna
197 1.269 jld static int raidread_component_area(dev_t, struct vnode *, void *, size_t,
198 1.269 jld daddr_t, daddr_t);
199 1.269 jld static int raidwrite_component_area(dev_t, struct vnode *, void *, size_t,
200 1.269 jld daddr_t, daddr_t, int);
201 1.269 jld
202 1.276 mrg static int raidwrite_component_label(unsigned,
203 1.276 mrg dev_t, struct vnode *, RF_ComponentLabel_t *);
204 1.276 mrg static int raidread_component_label(unsigned,
205 1.276 mrg dev_t, struct vnode *, RF_ComponentLabel_t *);
206 1.269 jld
207 1.298.2.5 jdolecek static int raid_diskstart(device_t, struct buf *bp);
208 1.298.2.5 jdolecek static int raid_dumpblocks(device_t, void *, daddr_t, int);
209 1.298.2.5 jdolecek static int raid_lastclose(device_t);
210 1.298.2.5 jdolecek
211 1.298.2.5 jdolecek static dev_type_open(raidopen);
212 1.298.2.5 jdolecek static dev_type_close(raidclose);
213 1.298.2.5 jdolecek static dev_type_read(raidread);
214 1.298.2.5 jdolecek static dev_type_write(raidwrite);
215 1.298.2.5 jdolecek static dev_type_ioctl(raidioctl);
216 1.298.2.5 jdolecek static dev_type_strategy(raidstrategy);
217 1.298.2.5 jdolecek static dev_type_dump(raiddump);
218 1.298.2.5 jdolecek static dev_type_size(raidsize);
219 1.130 gehenna
220 1.130 gehenna const struct bdevsw raid_bdevsw = {
221 1.298.2.4 tls .d_open = raidopen,
222 1.298.2.4 tls .d_close = raidclose,
223 1.298.2.4 tls .d_strategy = raidstrategy,
224 1.298.2.4 tls .d_ioctl = raidioctl,
225 1.298.2.4 tls .d_dump = raiddump,
226 1.298.2.4 tls .d_psize = raidsize,
227 1.298.2.4 tls .d_discard = nodiscard,
228 1.298.2.4 tls .d_flag = D_DISK
229 1.130 gehenna };
230 1.130 gehenna
231 1.130 gehenna const struct cdevsw raid_cdevsw = {
232 1.298.2.4 tls .d_open = raidopen,
233 1.298.2.4 tls .d_close = raidclose,
234 1.298.2.4 tls .d_read = raidread,
235 1.298.2.4 tls .d_write = raidwrite,
236 1.298.2.4 tls .d_ioctl = raidioctl,
237 1.298.2.4 tls .d_stop = nostop,
238 1.298.2.4 tls .d_tty = notty,
239 1.298.2.4 tls .d_poll = nopoll,
240 1.298.2.4 tls .d_mmap = nommap,
241 1.298.2.4 tls .d_kqfilter = nokqfilter,
242 1.298.2.4 tls .d_discard = nodiscard,
243 1.298.2.4 tls .d_flag = D_DISK
244 1.130 gehenna };
245 1.1 oster
246 1.298.2.1 tls static void raidminphys(struct buf *);
247 1.298.2.1 tls
248 1.298.2.5 jdolecek static struct dkdriver rf_dkdriver = {
249 1.298.2.5 jdolecek .d_open = raidopen,
250 1.298.2.5 jdolecek .d_close = raidclose,
251 1.298.2.5 jdolecek .d_strategy = raidstrategy,
252 1.298.2.5 jdolecek .d_diskstart = raid_diskstart,
253 1.298.2.5 jdolecek .d_dumpblocks = raid_dumpblocks,
254 1.298.2.5 jdolecek .d_lastclose = raid_lastclose,
255 1.298.2.5 jdolecek .d_minphys = raidminphys
256 1.298.2.5 jdolecek };
257 1.235 oster
258 1.10 oster struct raid_softc {
259 1.298.2.5 jdolecek struct dk_softc sc_dksc;
260 1.298.2.3 tls int sc_unit;
261 1.10 oster int sc_flags; /* flags */
262 1.10 oster int sc_cflags; /* configuration flags */
263 1.298.2.5 jdolecek kmutex_t sc_mutex; /* interlock mutex */
264 1.298.2.5 jdolecek kcondvar_t sc_cv; /* and the condvar */
265 1.212 oster uint64_t sc_size; /* size of the raid device */
266 1.10 oster char sc_xname[20]; /* XXX external name */
267 1.298.2.3 tls RF_Raid_t sc_r;
268 1.298.2.3 tls LIST_ENTRY(raid_softc) sc_link;
269 1.10 oster };
270 1.1 oster /* sc_flags */
271 1.298.2.5 jdolecek #define RAIDF_INITED 0x01 /* unit has been initialized */
272 1.298.2.5 jdolecek #define RAIDF_SHUTDOWN 0x02 /* unit is being shutdown */
273 1.298.2.5 jdolecek #define RAIDF_DETACH 0x04 /* detach after final close */
274 1.298.2.5 jdolecek #define RAIDF_WANTED 0x08 /* someone waiting to obtain a lock */
275 1.298.2.5 jdolecek #define RAIDF_LOCKED 0x10 /* unit is locked */
276 1.298.2.5 jdolecek #define RAIDF_UNIT_CHANGED 0x20 /* unit is being changed */
277 1.1 oster
278 1.1 oster #define raidunit(x) DISKUNIT(x)
279 1.298.2.5 jdolecek #define raidsoftc(dev) (((struct raid_softc *)device_private(dev))->sc_r.softc)
280 1.1 oster
281 1.202 oster extern struct cfdriver raid_cd;
282 1.266 dyoung CFATTACH_DECL3_NEW(raid, sizeof(struct raid_softc),
283 1.266 dyoung raid_match, raid_attach, raid_detach, NULL, NULL, NULL,
284 1.266 dyoung DVF_DETACH_SHUTDOWN);
285 1.202 oster
286 1.186 perry /*
287 1.186 perry * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
288 1.186 perry * Be aware that large numbers can allow the driver to consume a lot of
289 1.28 oster * kernel memory, especially on writes, and in degraded mode reads.
290 1.186 perry *
291 1.186 perry * For example: with a stripe width of 64 blocks (32k) and 5 disks,
292 1.186 perry * a single 64K write will typically require 64K for the old data,
293 1.186 perry * 64K for the old parity, and 64K for the new parity, for a total
294 1.28 oster * of 192K (if the parity buffer is not re-used immediately).
295 1.110 oster * Even if it is used immediately, that's still 128K, which when multiplied
296 1.28 oster * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
297 1.186 perry *
298 1.28 oster * Now in degraded mode, for example, a 64K read on the above setup may
299 1.186 perry * require data reconstruction, which will require *all* of the 4 remaining
300 1.28 oster * disks to participate -- 4 * 32K/disk == 128K again.
301 1.20 oster */
302 1.20 oster
303 1.20 oster #ifndef RAIDOUTSTANDING
304 1.28 oster #define RAIDOUTSTANDING 6
305 1.20 oster #endif
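/*
 * A rough sketch of the arithmetic above (an approximation, not exact
 * accounting): with the parity buffer reused immediately, each outstanding
 * full-stripe write needs about 2 * stripe-width bytes of scratch space
 * plus the incoming data itself, i.e. roughly
 *
 *	RAIDOUTSTANDING * (2 * stripe_bytes + io_bytes)
 *
 * so the default of 6, with the 64K example above, works out to about
 * 6 * (128K + 64K) = 1152K of peak kernel memory.
 */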
306 1.20 oster
307 1.1 oster #define RAIDLABELDEV(dev) \
308 1.1 oster (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
309 1.1 oster
310 1.1 oster /* declared here, and made public, for the benefit of KVM stuff.. */
311 1.9 oster
312 1.104 oster static int raidlock(struct raid_softc *);
313 1.104 oster static void raidunlock(struct raid_softc *);
314 1.1 oster
315 1.266 dyoung static int raid_detach_unlocked(struct raid_softc *);
316 1.266 dyoung
317 1.104 oster static void rf_markalldirty(RF_Raid_t *);
318 1.298.2.3 tls static void rf_set_geometry(struct raid_softc *, RF_Raid_t *);
319 1.48 oster
320 1.104 oster void rf_ReconThread(struct rf_recon_req *);
321 1.104 oster void rf_RewriteParityThread(RF_Raid_t *raidPtr);
322 1.104 oster void rf_CopybackThread(RF_Raid_t *raidPtr);
323 1.104 oster void rf_ReconstructInPlaceThread(struct rf_recon_req *);
324 1.261 dyoung int rf_autoconfig(device_t);
325 1.142 thorpej void rf_buildroothack(RF_ConfigSet_t *);
326 1.104 oster
327 1.104 oster RF_AutoConfig_t *rf_find_raid_components(void);
328 1.104 oster RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
329 1.104 oster static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
330 1.292 oster int rf_reasonable_label(RF_ComponentLabel_t *, uint64_t);
331 1.104 oster void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
332 1.104 oster int rf_set_autoconfig(RF_Raid_t *, int);
333 1.104 oster int rf_set_rootpartition(RF_Raid_t *, int);
334 1.104 oster void rf_release_all_vps(RF_ConfigSet_t *);
335 1.104 oster void rf_cleanup_config_set(RF_ConfigSet_t *);
336 1.104 oster int rf_have_enough_components(RF_ConfigSet_t *);
337 1.298.2.3 tls struct raid_softc *rf_auto_config_set(RF_ConfigSet_t *);
338 1.278 mrg static void rf_fix_old_label_size(RF_ComponentLabel_t *, uint64_t);
339 1.48 oster
340 1.295 erh /*
341 1.295 erh * Debugging, mostly. Set to 0 to not allow autoconfig to take place.
342 1.295 erh * Note that this is overridden by having RAID_AUTOCONFIG as an option
343 1.295 erh * in the kernel config file.
344 1.295 erh */
345 1.295 erh #ifdef RAID_AUTOCONFIG
346 1.295 erh int raidautoconfig = 1;
347 1.295 erh #else
348 1.295 erh int raidautoconfig = 0;
349 1.295 erh #endif
350 1.295 erh static bool raidautoconfigdone = false;
351 1.37 oster
352 1.177 oster struct RF_Pools_s rf_pools;
353 1.177 oster
354 1.298.2.3 tls static LIST_HEAD(, raid_softc) raids = LIST_HEAD_INITIALIZER(raids);
355 1.298.2.3 tls static kmutex_t raid_lock;
356 1.1 oster
357 1.298.2.3 tls static struct raid_softc *
358 1.298.2.3 tls raidcreate(int unit) {
359 1.298.2.3 tls struct raid_softc *sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
360 1.298.2.3 tls sc->sc_unit = unit;
361 1.298.2.5 jdolecek cv_init(&sc->sc_cv, "raidunit");
362 1.298.2.5 jdolecek mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_NONE);
363 1.298.2.3 tls return sc;
364 1.298.2.3 tls }
365 1.50 oster
366 1.298.2.3 tls static void
367 1.298.2.3 tls raiddestroy(struct raid_softc *sc) {
368 1.298.2.5 jdolecek cv_destroy(&sc->sc_cv);
369 1.298.2.5 jdolecek mutex_destroy(&sc->sc_mutex);
370 1.298.2.3 tls kmem_free(sc, sizeof(*sc));
371 1.298.2.3 tls }
372 1.1 oster
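/*
 * Find the softc for RAID unit `unit' on the global list. If it is not
 * there and `create' is true, allocate a fresh one and insert it;
 * otherwise return NULL.
 */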
373 1.298.2.3 tls static struct raid_softc *
374 1.298.2.5 jdolecek raidget(int unit, bool create) {
375 1.298.2.3 tls struct raid_softc *sc;
376 1.298.2.3 tls if (unit < 0) {
377 1.298.2.3 tls #ifdef DIAGNOSTIC
378 1.298.2.3 tls panic("%s: unit %d!", __func__, unit);
379 1.298.2.3 tls #endif
380 1.298.2.3 tls return NULL;
381 1.1 oster }
382 1.298.2.3 tls mutex_enter(&raid_lock);
383 1.298.2.3 tls LIST_FOREACH(sc, &raids, sc_link) {
384 1.298.2.3 tls if (sc->sc_unit == unit) {
385 1.298.2.3 tls mutex_exit(&raid_lock);
386 1.298.2.3 tls return sc;
387 1.298.2.3 tls }
388 1.298.2.3 tls }
389 1.298.2.3 tls mutex_exit(&raid_lock);
390 1.298.2.5 jdolecek if (!create)
391 1.298.2.5 jdolecek return NULL;
392 1.298.2.3 tls if ((sc = raidcreate(unit)) == NULL)
393 1.298.2.3 tls return NULL;
394 1.298.2.3 tls mutex_enter(&raid_lock);
395 1.298.2.3 tls LIST_INSERT_HEAD(&raids, sc, sc_link);
396 1.298.2.3 tls mutex_exit(&raid_lock);
397 1.298.2.3 tls return sc;
398 1.298.2.3 tls }
399 1.298.2.3 tls
400 1.298.2.3 tls static void
401 1.298.2.3 tls raidput(struct raid_softc *sc) {
402 1.298.2.3 tls mutex_enter(&raid_lock);
403 1.298.2.3 tls LIST_REMOVE(sc, sc_link);
404 1.298.2.3 tls mutex_exit(&raid_lock);
405 1.298.2.3 tls raiddestroy(sc);
406 1.298.2.3 tls }
407 1.298.2.3 tls
408 1.298.2.3 tls void
409 1.298.2.3 tls raidattach(int num)
410 1.298.2.3 tls {
411 1.62 oster
412 1.142 thorpej /*
413 1.298.2.5 jdolecek * Device attachment and associated initialization now occurs
414 1.298.2.5 jdolecek * as part of the module initialization.
415 1.142 thorpej */
416 1.142 thorpej }
417 1.142 thorpej
418 1.142 thorpej int
419 1.261 dyoung rf_autoconfig(device_t self)
420 1.142 thorpej {
421 1.142 thorpej RF_AutoConfig_t *ac_list;
422 1.142 thorpej RF_ConfigSet_t *config_sets;
423 1.142 thorpej
424 1.295 erh if (!raidautoconfig || raidautoconfigdone == true)
425 1.142 thorpej return (0);
426 1.142 thorpej
427 1.142 thorpej /* XXX This code can only be run once. */
428 1.295 erh raidautoconfigdone = true;
429 1.142 thorpej
430 1.298.2.4 tls #ifdef __HAVE_CPU_BOOTCONF
431 1.298.2.4 tls /*
432 1.298.2.4 tls * 0. Find the boot device first, if needed, so we can use it later.
433 1.298.2.4 tls * This needs to be done before we autoconfigure any RAID sets,
434 1.298.2.4 tls * because if we use wedges we are not going to be able to open
435 1.298.2.4 tls * the boot device later.
436 1.298.2.4 tls */
437 1.298.2.4 tls if (booted_device == NULL)
438 1.298.2.4 tls cpu_bootconf();
439 1.298.2.4 tls #endif
440 1.48 oster /* 1. locate all RAID components on the system */
441 1.258 ad aprint_debug("Searching for RAID components...\n");
442 1.48 oster ac_list = rf_find_raid_components();
443 1.48 oster
444 1.142 thorpej /* 2. Sort them into their respective sets. */
445 1.48 oster config_sets = rf_create_auto_sets(ac_list);
446 1.48 oster
447 1.142 thorpej /*
448 1.298.2.2 tls * 3. Evaluate each set and configure the valid ones.
449 1.142 thorpej * This gets done in rf_buildroothack().
450 1.142 thorpej */
451 1.142 thorpej rf_buildroothack(config_sets);
452 1.48 oster
453 1.213 christos return 1;
454 1.48 oster }
455 1.48 oster
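/*
 * Return non-zero if any component of the set `r' lives on the boot
 * device `bdv': each component name (with a leading "/dev/" stripped,
 * and dk wedges resolved to their parent device) is compared against
 * the boot device's name.
 */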
456 1.298.2.4 tls static int
457 1.298.2.4 tls rf_containsboot(RF_Raid_t *r, device_t bdv) {
458 1.298.2.4 tls const char *bootname = device_xname(bdv);
459 1.298.2.4 tls size_t len = strlen(bootname);
460 1.298.2.4 tls
461 1.298.2.4 tls for (int col = 0; col < r->numCol; col++) {
462 1.298.2.4 tls const char *devname = r->Disks[col].devname;
463 1.298.2.4 tls devname += sizeof("/dev/") - 1;
464 1.298.2.4 tls if (strncmp(devname, "dk", 2) == 0) {
465 1.298.2.4 tls const char *parent =
466 1.298.2.4 tls dkwedge_get_parent_name(r->Disks[col].dev);
467 1.298.2.4 tls if (parent != NULL)
468 1.298.2.4 tls devname = parent;
469 1.298.2.4 tls }
470 1.298.2.4 tls if (strncmp(devname, bootname, len) == 0) {
471 1.298.2.4 tls struct raid_softc *sc = r->softc;
472 1.298.2.4 tls aprint_debug("raid%d includes boot device %s\n",
473 1.298.2.4 tls sc->sc_unit, devname);
474 1.298.2.4 tls return 1;
475 1.298.2.4 tls }
476 1.298.2.4 tls }
477 1.298.2.4 tls return 0;
478 1.298.2.4 tls }
479 1.298.2.4 tls
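/*
 * Configure each auto-configurable set found by rf_autoconfig(). If the
 * user did not hardwire a root spec and exactly one configured set is
 * rootable (or, when several are, exactly one contains the boot device),
 * point booted_device at that raid unit so setroot() can use it;
 * otherwise fall back to asking the user.
 */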
480 1.48 oster void
481 1.142 thorpej rf_buildroothack(RF_ConfigSet_t *config_sets)
482 1.48 oster {
483 1.48 oster RF_ConfigSet_t *cset;
484 1.48 oster RF_ConfigSet_t *next_cset;
485 1.51 oster int num_root;
486 1.298.2.3 tls struct raid_softc *sc, *rsc;
487 1.298.2.5 jdolecek struct dk_softc *dksc;
488 1.48 oster
489 1.298.2.3 tls sc = rsc = NULL;
490 1.51 oster num_root = 0;
491 1.48 oster cset = config_sets;
492 1.271 dyoung while (cset != NULL) {
493 1.48 oster next_cset = cset->next;
494 1.186 perry if (rf_have_enough_components(cset) &&
495 1.298.2.3 tls cset->ac->clabel->autoconfigure == 1) {
496 1.298.2.3 tls sc = rf_auto_config_set(cset);
497 1.298.2.3 tls if (sc != NULL) {
498 1.298.2.3 tls aprint_debug("raid%d: configured ok\n",
499 1.298.2.3 tls sc->sc_unit);
500 1.51 oster if (cset->rootable) {
501 1.298.2.3 tls rsc = sc;
502 1.51 oster num_root++;
503 1.51 oster }
504 1.51 oster } else {
505 1.51 oster /* The autoconfig didn't work :( */
506 1.298.2.3 tls aprint_debug("Autoconfig failed\n");
507 1.51 oster rf_release_all_vps(cset);
508 1.48 oster }
509 1.48 oster } else {
510 1.186 perry /* we're not autoconfiguring this set...
511 1.48 oster release the associated resources */
512 1.49 oster rf_release_all_vps(cset);
513 1.48 oster }
514 1.48 oster /* cleanup */
515 1.49 oster rf_cleanup_config_set(cset);
516 1.48 oster cset = next_cset;
517 1.48 oster }
518 1.298.2.5 jdolecek dksc = &rsc->sc_dksc;
519 1.122 oster
520 1.223 oster /* if the user has specified what the root device should be
521 1.223 oster then we don't touch booted_device or boothowto... */
522 1.223 oster
523 1.223 oster if (rootspec != NULL)
524 1.223 oster return;
525 1.223 oster
526 1.122 oster /* we found something bootable... */
527 1.122 oster
528 1.298.2.4 tls /*
529 1.298.2.4 tls * XXX: The following code assumes that the root raid
530 1.298.2.4 tls * is the first ('a') partition. This is about the best
531 1.298.2.4 tls * we can do with a BSD disklabel, but we might be able
532 1.298.2.4 tls * to do better with a GPT label, by setting a specified
533 1.298.2.4 tls * attribute to indicate the root partition. We can then
534 1.298.2.4 tls * stash the partition number in the r->root_partition
535 1.298.2.4 tls * high bits (the bottom 2 bits are already used). For
536 1.298.2.4 tls * now we just set booted_partition to 0 when we override
537 1.298.2.4 tls * root.
538 1.298.2.4 tls */
539 1.122 oster if (num_root == 1) {
540 1.298.2.4 tls device_t candidate_root;
541 1.298.2.5 jdolecek if (dksc->sc_dkdev.dk_nwedges != 0) {
542 1.297 christos char cname[sizeof(cset->ac->devname)];
543 1.298.2.5 jdolecek /* XXX: assume partition 'a' first */
544 1.297 christos snprintf(cname, sizeof(cname), "%s%c",
545 1.298.2.5 jdolecek device_xname(dksc->sc_dev), 'a');
546 1.298.2.4 tls candidate_root = dkwedge_find_by_wname(cname);
547 1.298.2.5 jdolecek DPRINTF("%s: candidate wedge root=%s\n", __func__,
548 1.298.2.5 jdolecek cname);
549 1.298.2.5 jdolecek if (candidate_root == NULL) {
550 1.298.2.5 jdolecek /*
551 1.298.2.5 jdolecek * If that is not found, because we don't use
552 1.298.2.5 jdolecek * disklabel, return the first dk child
553 1.298.2.5 jdolecek * XXX: we can skip the 'a' check above
554 1.298.2.5 jdolecek * and always do this...
555 1.298.2.5 jdolecek */
556 1.298.2.5 jdolecek size_t i = 0;
557 1.298.2.5 jdolecek candidate_root = dkwedge_find_by_parent(
558 1.298.2.5 jdolecek device_xname(dksc->sc_dev), &i);
559 1.298.2.5 jdolecek }
560 1.298.2.5 jdolecek DPRINTF("%s: candidate wedge root=%p\n", __func__,
561 1.298.2.5 jdolecek candidate_root);
562 1.297 christos } else
563 1.298.2.5 jdolecek candidate_root = dksc->sc_dev;
564 1.298.2.5 jdolecek DPRINTF("%s: candidate root=%p\n", __func__, candidate_root);
565 1.298.2.5 jdolecek DPRINTF("%s: booted_device=%p root_partition=%d "
566 1.298.2.5 jdolecek "contains_boot=%d\n", __func__, booted_device,
567 1.298.2.5 jdolecek rsc->sc_r.root_partition,
568 1.298.2.5 jdolecek rf_containsboot(&rsc->sc_r, booted_device));
569 1.298.2.4 tls if (booted_device == NULL ||
570 1.298.2.4 tls rsc->sc_r.root_partition == 1 ||
571 1.298.2.4 tls rf_containsboot(&rsc->sc_r, booted_device)) {
572 1.298.2.4 tls booted_device = candidate_root;
573 1.298.2.5 jdolecek booted_method = "raidframe/single";
574 1.298.2.4 tls booted_partition = 0; /* XXX assume 'a' */
575 1.298.2.4 tls }
576 1.122 oster } else if (num_root > 1) {
577 1.298.2.5 jdolecek DPRINTF("%s: many roots=%d, %p\n", __func__, num_root,
578 1.298.2.5 jdolecek booted_device);
579 1.226 oster
580 1.226 oster /*
581 1.226 oster * Maybe the MD code can help. If it cannot, then
582 1.226 oster * setroot() will discover that we have no
583 1.226 oster * booted_device and will ask the user if nothing was
584 1.226 oster * hardwired in the kernel config file.
585 1.226 oster */
586 1.226 oster if (booted_device == NULL)
587 1.226 oster return;
588 1.226 oster
589 1.226 oster num_root = 0;
590 1.298.2.3 tls mutex_enter(&raid_lock);
591 1.298.2.3 tls LIST_FOREACH(sc, &raids, sc_link) {
592 1.298.2.3 tls RF_Raid_t *r = &sc->sc_r;
593 1.298.2.3 tls if (r->valid == 0)
594 1.226 oster continue;
595 1.226 oster
596 1.298.2.3 tls if (r->root_partition == 0)
597 1.226 oster continue;
598 1.226 oster
599 1.298.2.4 tls if (rf_containsboot(r, booted_device)) {
600 1.226 oster num_root++;
601 1.298.2.3 tls rsc = sc;
602 1.298.2.5 jdolecek dksc = &rsc->sc_dksc;
603 1.226 oster }
604 1.226 oster }
605 1.298.2.3 tls mutex_exit(&raid_lock);
606 1.295 erh
607 1.226 oster if (num_root == 1) {
608 1.298.2.5 jdolecek booted_device = dksc->sc_dev;
609 1.298.2.5 jdolecek booted_method = "raidframe/multi";
610 1.298.2.4 tls booted_partition = 0; /* XXX assume 'a' */
611 1.226 oster } else {
612 1.226 oster /* we can't guess.. require the user to answer... */
613 1.226 oster boothowto |= RB_ASKNAME;
614 1.226 oster }
615 1.51 oster }
616 1.1 oster }
617 1.1 oster
618 1.298.2.5 jdolecek static int
619 1.169 oster raidsize(dev_t dev)
620 1.1 oster {
621 1.1 oster struct raid_softc *rs;
622 1.298.2.5 jdolecek struct dk_softc *dksc;
623 1.298.2.5 jdolecek unsigned int unit;
624 1.1 oster
625 1.1 oster unit = raidunit(dev);
626 1.298.2.5 jdolecek if ((rs = raidget(unit, false)) == NULL)
627 1.298.2.3 tls return -1;
628 1.298.2.5 jdolecek dksc = &rs->sc_dksc;
629 1.298.2.5 jdolecek
630 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
631 1.298.2.5 jdolecek return -1;
632 1.1 oster
633 1.298.2.5 jdolecek return dk_size(dksc, dev);
634 1.298.2.5 jdolecek }
635 1.1 oster
636 1.298.2.5 jdolecek static int
637 1.298.2.5 jdolecek raiddump(dev_t dev, daddr_t blkno, void *va, size_t size)
638 1.298.2.5 jdolecek {
639 1.298.2.5 jdolecek unsigned int unit;
640 1.298.2.5 jdolecek struct raid_softc *rs;
641 1.298.2.5 jdolecek struct dk_softc *dksc;
642 1.1 oster
643 1.298.2.5 jdolecek unit = raidunit(dev);
644 1.298.2.5 jdolecek if ((rs = raidget(unit, false)) == NULL)
645 1.298.2.5 jdolecek return ENXIO;
646 1.298.2.5 jdolecek dksc = &rs->sc_dksc;
647 1.1 oster
648 1.298.2.5 jdolecek if ((rs->sc_flags & RAIDF_INITED) == 0)
649 1.298.2.5 jdolecek return ENODEV;
650 1.1 oster
651 1.298.2.5 jdolecek /*
652 1.298.2.5 jdolecek Note that blkno is relative to this particular partition.
653 1.298.2.5 jdolecek By adding RF_PROTECTED_SECTORS, we get a value that
654 1.298.2.5 jdolecek is relative to the partition used for the underlying component.
655 1.298.2.5 jdolecek */
656 1.298.2.5 jdolecek blkno += RF_PROTECTED_SECTORS;
657 1.1 oster
658 1.298.2.5 jdolecek return dk_dump(dksc, dev, blkno, va, size);
659 1.1 oster }
660 1.1 oster
661 1.298.2.5 jdolecek static int
662 1.298.2.5 jdolecek raid_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
663 1.1 oster {
664 1.298.2.5 jdolecek struct raid_softc *rs = raidsoftc(dev);
665 1.231 oster const struct bdevsw *bdev;
666 1.231 oster RF_Raid_t *raidPtr;
667 1.298.2.5 jdolecek int c, sparecol, j, scol, dumpto;
668 1.231 oster int error = 0;
669 1.231 oster
670 1.298.2.3 tls raidPtr = &rs->sc_r;
671 1.231 oster
672 1.231 oster /* we only support dumping to RAID 1 sets */
673 1.231 oster if (raidPtr->Layout.numDataCol != 1 ||
674 1.231 oster raidPtr->Layout.numParityCol != 1)
675 1.231 oster return EINVAL;
676 1.231 oster
677 1.231 oster if ((error = raidlock(rs)) != 0)
678 1.231 oster return error;
679 1.231 oster
680 1.231 oster /* figure out what device is alive.. */
681 1.231 oster
682 1.231 oster /*
683 1.231 oster Look for a component to dump to. The preference for the
684 1.231 oster component to dump to is as follows:
685 1.231 oster 1) the master
686 1.231 oster 2) a used_spare of the master
687 1.231 oster 3) the slave
688 1.231 oster 4) a used_spare of the slave
689 1.231 oster */
690 1.231 oster
691 1.231 oster dumpto = -1;
692 1.231 oster for (c = 0; c < raidPtr->numCol; c++) {
693 1.231 oster if (raidPtr->Disks[c].status == rf_ds_optimal) {
694 1.231 oster /* this might be the one */
695 1.231 oster dumpto = c;
696 1.231 oster break;
697 1.231 oster }
698 1.231 oster }
699 1.231 oster
700 1.231 oster /*
701 1.231 oster At this point we have possibly selected a live master or a
702 1.231 oster live slave. We now check to see if there is a spared
703 1.231 oster master (or a spared slave), if we didn't find a live master
704 1.231 oster or a live slave.
705 1.231 oster */
706 1.231 oster
707 1.231 oster for (c = 0; c < raidPtr->numSpare; c++) {
708 1.231 oster sparecol = raidPtr->numCol + c;
709 1.231 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
710 1.231 oster /* How about this one? */
711 1.231 oster scol = -1;
712 1.231 oster for(j=0;j<raidPtr->numCol;j++) {
713 1.231 oster if (raidPtr->Disks[j].spareCol == sparecol) {
714 1.231 oster scol = j;
715 1.231 oster break;
716 1.231 oster }
717 1.231 oster }
718 1.231 oster if (scol == 0) {
719 1.231 oster /*
720 1.231 oster We must have found a spared master!
721 1.231 oster We'll take that over anything else
722 1.231 oster found so far. (We couldn't have
723 1.231 oster found a real master before, since
724 1.231 oster this is a used spare, and it's
725 1.231 oster saying that it's replacing the
726 1.231 oster master.) On reboot (with
727 1.231 oster autoconfiguration turned on)
728 1.231 oster sparecol will become the 1st
729 1.231 oster component (component0) of this set.
730 1.231 oster */
731 1.231 oster dumpto = sparecol;
732 1.231 oster break;
733 1.231 oster } else if (scol != -1) {
734 1.231 oster /*
735 1.231 oster Must be a spared slave. We'll dump
736 1.231 oster to that if we haven't found anything
737 1.231 oster else so far.
738 1.231 oster */
739 1.231 oster if (dumpto == -1)
740 1.231 oster dumpto = sparecol;
741 1.231 oster }
742 1.231 oster }
743 1.231 oster }
744 1.231 oster
745 1.231 oster if (dumpto == -1) {
746 1.231 oster /* we couldn't find any live components to dump to!?!?
747 1.231 oster */
748 1.231 oster error = EINVAL;
749 1.231 oster goto out;
750 1.231 oster }
751 1.231 oster
752 1.231 oster bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);
753 1.298.2.5 jdolecek if (bdev == NULL) {
754 1.298.2.5 jdolecek error = ENXIO;
755 1.298.2.5 jdolecek goto out;
756 1.298.2.5 jdolecek }
757 1.231 oster
758 1.231 oster error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
759 1.298.2.5 jdolecek blkno, va, nblk * raidPtr->bytesPerSector);
760 1.231 oster
761 1.231 oster out:
762 1.231 oster raidunlock(rs);
763 1.231 oster
764 1.231 oster return error;
765 1.1 oster }
766 1.298.2.5 jdolecek
767 1.1 oster /* ARGSUSED */
768 1.298.2.5 jdolecek static int
769 1.222 christos raidopen(dev_t dev, int flags, int fmt,
770 1.222 christos struct lwp *l)
771 1.1 oster {
772 1.9 oster int unit = raidunit(dev);
773 1.1 oster struct raid_softc *rs;
774 1.298.2.5 jdolecek struct dk_softc *dksc;
775 1.9 oster int error = 0;
776 1.298.2.5 jdolecek int part, pmask;
777 1.9 oster
778 1.298.2.5 jdolecek if ((rs = raidget(unit, true)) == NULL)
779 1.298.2.3 tls return ENXIO;
780 1.1 oster if ((error = raidlock(rs)) != 0)
781 1.9 oster return (error);
782 1.266 dyoung
783 1.266 dyoung if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0) {
784 1.266 dyoung error = EBUSY;
785 1.266 dyoung goto bad;
786 1.266 dyoung }
787 1.266 dyoung
788 1.298.2.5 jdolecek dksc = &rs->sc_dksc;
789 1.1 oster
790 1.1 oster part = DISKPART(dev);
791 1.1 oster pmask = (1 << part);
792 1.1 oster
793 1.298.2.5 jdolecek if (!DK_BUSY(dksc, pmask) &&
794 1.13 oster ((rs->sc_flags & RAIDF_INITED) != 0)) {
795 1.13 oster /* First one... mark things as dirty... Note that we *MUST*
796 1.13 oster have done a configure before this. I DO NOT WANT TO BE
797 1.13 oster SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
798 1.13 oster THAT THEY BELONG TOGETHER!!!!! */
799 1.13 oster /* XXX should check to see if we're only open for reading
800 1.13 oster here... If so, we needn't do this, but then need some
801 1.13 oster other way of keeping track of what's happened.. */
802 1.13 oster
803 1.298.2.3 tls rf_markalldirty(&rs->sc_r);
804 1.13 oster }
805 1.13 oster
806 1.298.2.5 jdolecek if ((rs->sc_flags & RAIDF_INITED) != 0)
807 1.298.2.5 jdolecek error = dk_open(dksc, dev, flags, fmt, l);
808 1.1 oster
809 1.213 christos bad:
810 1.1 oster raidunlock(rs);
811 1.1 oster
812 1.9 oster return (error);
813 1.1 oster
814 1.1 oster
815 1.1 oster }
816 1.298.2.5 jdolecek
817 1.298.2.5 jdolecek static int
818 1.298.2.5 jdolecek raid_lastclose(device_t self)
819 1.298.2.5 jdolecek {
820 1.298.2.5 jdolecek struct raid_softc *rs = raidsoftc(self);
821 1.298.2.5 jdolecek
822 1.298.2.5 jdolecek /* Last one... device is not unconfigured yet.
823 1.298.2.5 jdolecek Device shutdown has taken care of setting the
824 1.298.2.5 jdolecek clean bits if RAIDF_INITED is not set;
825 1.298.2.5 jdolecek mark things as clean here... */
826 1.298.2.5 jdolecek
827 1.298.2.5 jdolecek rf_update_component_labels(&rs->sc_r,
828 1.298.2.5 jdolecek RF_FINAL_COMPONENT_UPDATE);
829 1.298.2.5 jdolecek
830 1.298.2.5 jdolecek /* pass to unlocked code */
831 1.298.2.5 jdolecek if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0)
832 1.298.2.5 jdolecek rs->sc_flags |= RAIDF_DETACH;
833 1.298.2.5 jdolecek
834 1.298.2.5 jdolecek return 0;
835 1.298.2.5 jdolecek }
836 1.298.2.5 jdolecek
837 1.1 oster /* ARGSUSED */
838 1.298.2.5 jdolecek static int
839 1.222 christos raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
840 1.1 oster {
841 1.9 oster int unit = raidunit(dev);
842 1.1 oster struct raid_softc *rs;
843 1.298.2.5 jdolecek struct dk_softc *dksc;
844 1.298.2.5 jdolecek cfdata_t cf;
845 1.298.2.5 jdolecek int error = 0, do_detach = 0, do_put = 0;
846 1.1 oster
847 1.298.2.5 jdolecek if ((rs = raidget(unit, false)) == NULL)
848 1.298.2.3 tls return ENXIO;
849 1.298.2.5 jdolecek dksc = &rs->sc_dksc;
850 1.1 oster
851 1.1 oster if ((error = raidlock(rs)) != 0)
852 1.1 oster return (error);
853 1.1 oster
854 1.298.2.5 jdolecek if ((rs->sc_flags & RAIDF_INITED) != 0) {
855 1.298.2.5 jdolecek error = dk_close(dksc, dev, flags, fmt, l);
856 1.298.2.5 jdolecek if ((rs->sc_flags & RAIDF_DETACH) != 0)
857 1.298.2.5 jdolecek do_detach = 1;
858 1.298.2.5 jdolecek } else if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0)
859 1.298.2.5 jdolecek do_put = 1;
860 1.1 oster
861 1.298.2.5 jdolecek raidunlock(rs);
862 1.1 oster
863 1.298.2.5 jdolecek if (do_detach) {
864 1.298.2.5 jdolecek /* free the pseudo device attach bits */
865 1.298.2.5 jdolecek cf = device_cfdata(dksc->sc_dev);
866 1.298.2.5 jdolecek error = config_detach(dksc->sc_dev, 0);
867 1.298.2.5 jdolecek if (error == 0)
868 1.298.2.5 jdolecek free(cf, M_RAIDFRAME);
869 1.298.2.5 jdolecek } else if (do_put) {
870 1.298.2.5 jdolecek raidput(rs);
871 1.1 oster }
872 1.147 oster
873 1.298.2.5 jdolecek return (error);
874 1.1 oster
875 1.298.2.5 jdolecek }
876 1.1 oster
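/*
 * Signal iodone_cv so that whoever is sleeping on it (the per-set
 * RAIDframe I/O thread) re-checks for newly queued work.
 */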
877 1.298.2.5 jdolecek static void
878 1.298.2.5 jdolecek raid_wakeup(RF_Raid_t *raidPtr)
879 1.298.2.5 jdolecek {
880 1.298.2.5 jdolecek rf_lock_mutex2(raidPtr->iodone_lock);
881 1.298.2.5 jdolecek rf_signal_cond2(raidPtr->iodone_cv);
882 1.298.2.5 jdolecek rf_unlock_mutex2(raidPtr->iodone_lock);
883 1.1 oster }
884 1.1 oster
885 1.298.2.5 jdolecek static void
886 1.169 oster raidstrategy(struct buf *bp)
887 1.1 oster {
888 1.298.2.5 jdolecek unsigned int unit;
889 1.298.2.3 tls struct raid_softc *rs;
890 1.298.2.5 jdolecek struct dk_softc *dksc;
891 1.298.2.5 jdolecek RF_Raid_t *raidPtr;
892 1.1 oster
893 1.298.2.5 jdolecek unit = raidunit(bp->b_dev);
894 1.298.2.5 jdolecek if ((rs = raidget(unit, false)) == NULL) {
895 1.30 oster bp->b_error = ENXIO;
896 1.298.2.5 jdolecek goto fail;
897 1.30 oster }
898 1.298.2.3 tls if ((rs->sc_flags & RAIDF_INITED) == 0) {
899 1.298.2.3 tls bp->b_error = ENXIO;
900 1.298.2.5 jdolecek goto fail;
901 1.1 oster }
902 1.298.2.5 jdolecek dksc = &rs->sc_dksc;
903 1.298.2.3 tls raidPtr = &rs->sc_r;
904 1.298.2.5 jdolecek
905 1.298.2.5 jdolecek /* Queue IO only */
906 1.298.2.5 jdolecek if (dk_strategy_defer(dksc, bp))
907 1.196 yamt goto done;
908 1.1 oster
909 1.298.2.5 jdolecek /* schedule the IO to happen at the next convenient time */
910 1.298.2.5 jdolecek raid_wakeup(raidPtr);
911 1.1 oster
912 1.298.2.5 jdolecek done:
913 1.298.2.5 jdolecek return;
914 1.285 mrg
915 1.298.2.5 jdolecek fail:
916 1.298.2.5 jdolecek bp->b_resid = bp->b_bcount;
917 1.298.2.5 jdolecek biodone(bp);
918 1.298.2.5 jdolecek }
919 1.298.2.5 jdolecek
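/*
 * dkdriver d_diskstart hook: the dk(9) framework hands us the next
 * queued buffer, which we pass on to RAIDframe via raiddoaccess().
 */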
920 1.298.2.5 jdolecek static int
921 1.298.2.5 jdolecek raid_diskstart(device_t dev, struct buf *bp)
922 1.298.2.5 jdolecek {
923 1.298.2.5 jdolecek struct raid_softc *rs = raidsoftc(dev);
924 1.298.2.5 jdolecek RF_Raid_t *raidPtr;
925 1.298.2.5 jdolecek
926 1.298.2.5 jdolecek raidPtr = &rs->sc_r;
927 1.298.2.5 jdolecek if (!raidPtr->valid) {
928 1.298.2.5 jdolecek db1_printf(("raid is not valid..\n"));
929 1.298.2.5 jdolecek return ENODEV;
930 1.298.2.5 jdolecek }
931 1.1 oster
932 1.298.2.5 jdolecek /* XXX */
933 1.1 oster bp->b_resid = 0;
934 1.34 oster
935 1.298.2.5 jdolecek return raiddoaccess(raidPtr, bp);
936 1.298.2.5 jdolecek }
937 1.34 oster
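/*
 * Called when RAIDframe has finished with a buffer: complete it via
 * dk_done(), give back an "opening" on the set, and wake the I/O
 * thread so more queued work can be issued.
 */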
938 1.298.2.5 jdolecek void
939 1.298.2.5 jdolecek raiddone(RF_Raid_t *raidPtr, struct buf *bp)
940 1.298.2.5 jdolecek {
941 1.298.2.5 jdolecek struct raid_softc *rs;
942 1.298.2.5 jdolecek struct dk_softc *dksc;
943 1.34 oster
944 1.298.2.5 jdolecek rs = raidPtr->softc;
945 1.298.2.5 jdolecek dksc = &rs->sc_dksc;
946 1.196 yamt
947 1.298.2.5 jdolecek dk_done(dksc, bp);
948 1.298.2.5 jdolecek
949 1.298.2.5 jdolecek rf_lock_mutex2(raidPtr->mutex);
950 1.298.2.5 jdolecek raidPtr->openings++;
951 1.298.2.5 jdolecek rf_unlock_mutex2(raidPtr->mutex);
952 1.298.2.5 jdolecek
953 1.298.2.5 jdolecek /* schedule more IO */
954 1.298.2.5 jdolecek raid_wakeup(raidPtr);
955 1.1 oster }
956 1.298.2.5 jdolecek
957 1.1 oster /* ARGSUSED */
958 1.298.2.5 jdolecek static int
959 1.222 christos raidread(dev_t dev, struct uio *uio, int flags)
960 1.1 oster {
961 1.9 oster int unit = raidunit(dev);
962 1.1 oster struct raid_softc *rs;
963 1.1 oster
964 1.298.2.5 jdolecek if ((rs = raidget(unit, false)) == NULL)
965 1.298.2.3 tls return ENXIO;
966 1.1 oster
967 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
968 1.1 oster return (ENXIO);
969 1.1 oster
970 1.298.2.1 tls return (physio(raidstrategy, NULL, dev, B_READ, raidminphys, uio));
971 1.1 oster
972 1.1 oster }
973 1.298.2.5 jdolecek
974 1.1 oster /* ARGSUSED */
975 1.298.2.5 jdolecek static int
976 1.222 christos raidwrite(dev_t dev, struct uio *uio, int flags)
977 1.1 oster {
978 1.9 oster int unit = raidunit(dev);
979 1.1 oster struct raid_softc *rs;
980 1.1 oster
981 1.298.2.5 jdolecek if ((rs = raidget(unit, false)) == NULL)
982 1.298.2.3 tls return ENXIO;
983 1.1 oster
984 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
985 1.1 oster return (ENXIO);
986 1.147 oster
987 1.298.2.1 tls return (physio(raidstrategy, NULL, dev, B_WRITE, raidminphys, uio));
988 1.1 oster
989 1.1 oster }
990 1.1 oster
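/*
 * Tear down a configured set: refuse while the device is busy or a
 * reconstruction, parity rewrite or copyback is in progress; otherwise
 * shut the RAIDframe engine down, drain and free the buffer queue, and
 * detach the disk.
 */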
991 1.266 dyoung static int
992 1.266 dyoung raid_detach_unlocked(struct raid_softc *rs)
993 1.266 dyoung {
994 1.298.2.5 jdolecek struct dk_softc *dksc = &rs->sc_dksc;
995 1.266 dyoung RF_Raid_t *raidPtr;
996 1.298.2.5 jdolecek int error;
997 1.266 dyoung
998 1.298.2.3 tls raidPtr = &rs->sc_r;
999 1.266 dyoung
1000 1.298.2.5 jdolecek if (DK_BUSY(dksc, 0) ||
1001 1.298.2.5 jdolecek raidPtr->recon_in_progress != 0 ||
1002 1.298.2.5 jdolecek raidPtr->parity_rewrite_in_progress != 0 ||
1003 1.298.2.5 jdolecek raidPtr->copyback_in_progress != 0)
1004 1.266 dyoung return EBUSY;
1005 1.266 dyoung
1006 1.266 dyoung if ((rs->sc_flags & RAIDF_INITED) == 0)
1007 1.298.2.5 jdolecek return 0;
1008 1.298.2.5 jdolecek
1009 1.298.2.5 jdolecek rs->sc_flags &= ~RAIDF_SHUTDOWN;
1010 1.298.2.5 jdolecek
1011 1.298.2.5 jdolecek if ((error = rf_Shutdown(raidPtr)) != 0)
1012 1.266 dyoung return error;
1013 1.266 dyoung
1014 1.298.2.5 jdolecek rs->sc_flags &= ~RAIDF_INITED;
1015 1.298.2.5 jdolecek
1016 1.298.2.5 jdolecek /* Kill off any queued buffers */
1017 1.298.2.5 jdolecek dk_drain(dksc);
1018 1.298.2.5 jdolecek bufq_free(dksc->sc_bufq);
1019 1.266 dyoung
1020 1.298.2.5 jdolecek /* Detach the disk. */
1021 1.298.2.5 jdolecek dkwedge_delall(&dksc->sc_dkdev);
1022 1.298.2.5 jdolecek disk_detach(&dksc->sc_dkdev);
1023 1.298.2.5 jdolecek disk_destroy(&dksc->sc_dkdev);
1024 1.298.2.5 jdolecek dk_detach(dksc);
1025 1.290 mrg
1026 1.266 dyoung return 0;
1027 1.266 dyoung }
1028 1.266 dyoung
1029 1.298.2.5 jdolecek static int
1030 1.225 christos raidioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1031 1.1 oster {
1032 1.9 oster int unit = raidunit(dev);
1033 1.9 oster int error = 0;
1034 1.298.2.5 jdolecek int part, pmask;
1035 1.1 oster struct raid_softc *rs;
1036 1.298.2.5 jdolecek struct dk_softc *dksc;
1037 1.1 oster RF_Config_t *k_cfg, *u_cfg;
1038 1.42 oster RF_Raid_t *raidPtr;
1039 1.48 oster RF_RaidDisk_t *diskPtr;
1040 1.41 oster RF_AccTotals_t *totals;
1041 1.41 oster RF_DeviceConfig_t *d_cfg, **ucfgp;
1042 1.1 oster u_char *specific_buf;
1043 1.11 oster int retcode = 0;
1044 1.11 oster int column;
1045 1.269 jld /* int raidid; */
1046 1.1 oster struct rf_recon_req *rrcopy, *rr;
1047 1.48 oster RF_ComponentLabel_t *clabel;
1048 1.209 oster RF_ComponentLabel_t *ci_label;
1049 1.48 oster RF_ComponentLabel_t **clabel_ptr;
1050 1.12 oster RF_SingleComponent_t *sparePtr,*componentPtr;
1051 1.12 oster RF_SingleComponent_t component;
1052 1.83 oster RF_ProgressInfo_t progressInfo, **progressInfoPtr;
1053 1.41 oster int i, j, d;
1054 1.1 oster
1055 1.298.2.5 jdolecek if ((rs = raidget(unit, false)) == NULL)
1056 1.298.2.3 tls return ENXIO;
1057 1.298.2.5 jdolecek dksc = &rs->sc_dksc;
1058 1.298.2.3 tls raidPtr = &rs->sc_r;
1059 1.1 oster
1060 1.276 mrg db1_printf(("raidioctl: %d %d %d %lu\n", (int) dev,
1061 1.276 mrg (int) DISKPART(dev), (int) unit, cmd));
1062 1.1 oster
1063 1.1 oster /* Must be initialized for these... */
1064 1.1 oster switch (cmd) {
1065 1.1 oster case RAIDFRAME_REWRITEPARITY:
1066 1.1 oster case RAIDFRAME_GET_INFO:
1067 1.1 oster case RAIDFRAME_RESET_ACCTOTALS:
1068 1.1 oster case RAIDFRAME_GET_ACCTOTALS:
1069 1.1 oster case RAIDFRAME_KEEP_ACCTOTALS:
1070 1.1 oster case RAIDFRAME_GET_SIZE:
1071 1.1 oster case RAIDFRAME_FAIL_DISK:
1072 1.1 oster case RAIDFRAME_COPYBACK:
1073 1.37 oster case RAIDFRAME_CHECK_RECON_STATUS:
1074 1.83 oster case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1075 1.11 oster case RAIDFRAME_GET_COMPONENT_LABEL:
1076 1.11 oster case RAIDFRAME_SET_COMPONENT_LABEL:
1077 1.11 oster case RAIDFRAME_ADD_HOT_SPARE:
1078 1.11 oster case RAIDFRAME_REMOVE_HOT_SPARE:
1079 1.11 oster case RAIDFRAME_INIT_LABELS:
1080 1.12 oster case RAIDFRAME_REBUILD_IN_PLACE:
1081 1.23 oster case RAIDFRAME_CHECK_PARITY:
1082 1.37 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1083 1.83 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1084 1.37 oster case RAIDFRAME_CHECK_COPYBACK_STATUS:
1085 1.83 oster case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1086 1.48 oster case RAIDFRAME_SET_AUTOCONFIG:
1087 1.48 oster case RAIDFRAME_SET_ROOT:
1088 1.73 oster case RAIDFRAME_DELETE_COMPONENT:
1089 1.73 oster case RAIDFRAME_INCORPORATE_HOT_SPARE:
1090 1.269 jld case RAIDFRAME_PARITYMAP_STATUS:
1091 1.269 jld case RAIDFRAME_PARITYMAP_GET_DISABLE:
1092 1.269 jld case RAIDFRAME_PARITYMAP_SET_DISABLE:
1093 1.269 jld case RAIDFRAME_PARITYMAP_SET_PARAMS:
1094 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
1095 1.1 oster return (ENXIO);
1096 1.1 oster }
1097 1.9 oster
1098 1.1 oster switch (cmd) {
1099 1.254 christos #ifdef COMPAT_50
1100 1.254 christos case RAIDFRAME_GET_INFO50:
1101 1.254 christos return rf_get_info50(raidPtr, data);
1102 1.254 christos
1103 1.254 christos case RAIDFRAME_CONFIGURE50:
1104 1.254 christos if ((retcode = rf_config50(raidPtr, unit, data, &k_cfg)) != 0)
1105 1.254 christos return retcode;
1106 1.254 christos goto config;
1107 1.254 christos #endif
1108 1.1 oster /* configure the system */
1109 1.1 oster case RAIDFRAME_CONFIGURE:
1110 1.48 oster
1111 1.48 oster if (raidPtr->valid) {
1112 1.48 oster /* There is a valid RAID set running on this unit! */
1113 1.48 oster printf("raid%d: Device already configured!\n",unit);
1114 1.66 oster return(EINVAL);
1115 1.48 oster }
1116 1.48 oster
1117 1.1 oster /* copy-in the configuration information */
1118 1.1 oster /* data points to a pointer to the configuration structure */
1119 1.43 oster
1120 1.9 oster u_cfg = *((RF_Config_t **) data);
1121 1.9 oster RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
1122 1.1 oster if (k_cfg == NULL) {
1123 1.9 oster return (ENOMEM);
1124 1.1 oster }
1125 1.156 dsl retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
1126 1.1 oster if (retcode) {
1127 1.33 oster RF_Free(k_cfg, sizeof(RF_Config_t));
1128 1.46 oster db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
1129 1.9 oster retcode));
1130 1.298.2.5 jdolecek goto no_config;
1131 1.1 oster }
1132 1.254 christos goto config;
1133 1.254 christos config:
1134 1.298.2.5 jdolecek rs->sc_flags &= ~RAIDF_SHUTDOWN;
1135 1.298.2.5 jdolecek
1136 1.9 oster /* allocate a buffer for the layout-specific data, and copy it
1137 1.9 oster * in */
1138 1.1 oster if (k_cfg->layoutSpecificSize) {
1139 1.9 oster if (k_cfg->layoutSpecificSize > 10000) {
1140 1.1 oster /* sanity check */
1141 1.33 oster RF_Free(k_cfg, sizeof(RF_Config_t));
1142 1.298.2.5 jdolecek retcode = EINVAL;
1143 1.298.2.5 jdolecek goto no_config;
1144 1.1 oster }
1145 1.9 oster RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
1146 1.9 oster (u_char *));
1147 1.1 oster if (specific_buf == NULL) {
1148 1.9 oster RF_Free(k_cfg, sizeof(RF_Config_t));
1149 1.298.2.5 jdolecek retcode = ENOMEM;
1150 1.298.2.5 jdolecek goto no_config;
1151 1.1 oster }
1152 1.156 dsl retcode = copyin(k_cfg->layoutSpecific, specific_buf,
1153 1.9 oster k_cfg->layoutSpecificSize);
1154 1.1 oster if (retcode) {
1155 1.33 oster RF_Free(k_cfg, sizeof(RF_Config_t));
1156 1.186 perry RF_Free(specific_buf,
1157 1.42 oster k_cfg->layoutSpecificSize);
1158 1.46 oster db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
1159 1.9 oster retcode));
1160 1.298.2.5 jdolecek goto no_config;
1161 1.1 oster }
1162 1.9 oster } else
1163 1.9 oster specific_buf = NULL;
1164 1.1 oster k_cfg->layoutSpecific = specific_buf;
1165 1.9 oster
1166 1.9 oster /* should do some kind of sanity check on the configuration.
1167 1.9 oster * Store the sum of all the bytes in the last byte? */
1168 1.1 oster
1169 1.1 oster /* configure the system */
1170 1.1 oster
1171 1.48 oster /*
1172 1.48 oster * Clear the entire RAID descriptor, just to make sure
1173 1.186 perry * there is no stale data left in the case of a
1174 1.186 perry * reconfiguration
1175 1.48 oster */
1176 1.277 christos memset(raidPtr, 0, sizeof(*raidPtr));
1177 1.298.2.3 tls raidPtr->softc = rs;
1178 1.42 oster raidPtr->raidid = unit;
1179 1.20 oster
1180 1.48 oster retcode = rf_Configure(raidPtr, k_cfg, NULL);
1181 1.1 oster
1182 1.40 oster if (retcode == 0) {
1183 1.37 oster
1184 1.186 perry /* allow this many simultaneous IO's to
1185 1.40 oster this RAID device */
1186 1.42 oster raidPtr->openings = RAIDOUTSTANDING;
1187 1.186 perry
1188 1.298.2.3 tls raidinit(rs);
1189 1.298.2.5 jdolecek raid_wakeup(raidPtr);
1190 1.59 oster rf_markalldirty(raidPtr);
1191 1.9 oster }
1192 1.1 oster /* free the buffers. No return code here. */
1193 1.1 oster if (k_cfg->layoutSpecificSize) {
1194 1.9 oster RF_Free(specific_buf, k_cfg->layoutSpecificSize);
1195 1.1 oster }
1196 1.9 oster RF_Free(k_cfg, sizeof(RF_Config_t));
1197 1.9 oster
1198 1.298.2.5 jdolecek no_config:
1199 1.298.2.5 jdolecek /*
1200 1.298.2.5 jdolecek * If configuration failed, set sc_flags so that we
1201 1.298.2.5 jdolecek * will detach the device when we close it.
1202 1.298.2.5 jdolecek */
1203 1.298.2.5 jdolecek if (retcode != 0)
1204 1.298.2.5 jdolecek rs->sc_flags |= RAIDF_SHUTDOWN;
1205 1.9 oster return (retcode);
1206 1.9 oster
1207 1.9 oster /* shutdown the system */
1208 1.1 oster case RAIDFRAME_SHUTDOWN:
1209 1.9 oster
1210 1.266 dyoung part = DISKPART(dev);
1211 1.266 dyoung pmask = (1 << part);
1212 1.266 dyoung
1213 1.9 oster if ((error = raidlock(rs)) != 0)
1214 1.9 oster return (error);
1215 1.1 oster
1216 1.298.2.5 jdolecek if (DK_BUSY(dksc, pmask) ||
1217 1.298.2.5 jdolecek raidPtr->recon_in_progress != 0 ||
1218 1.298.2.5 jdolecek raidPtr->parity_rewrite_in_progress != 0 ||
1219 1.298.2.5 jdolecek raidPtr->copyback_in_progress != 0)
1220 1.266 dyoung retcode = EBUSY;
1221 1.266 dyoung else {
1222 1.298.2.5 jdolecek /* detach and free on close */
1223 1.266 dyoung rs->sc_flags |= RAIDF_SHUTDOWN;
1224 1.266 dyoung retcode = 0;
1225 1.9 oster }
1226 1.11 oster
1227 1.266 dyoung raidunlock(rs);
1228 1.1 oster
1229 1.9 oster return (retcode);
1230 1.11 oster case RAIDFRAME_GET_COMPONENT_LABEL:
1231 1.48 oster clabel_ptr = (RF_ComponentLabel_t **) data;
1232 1.11 oster /* need to read the component label for the disk indicated
1233 1.48 oster by row,column in clabel */
1234 1.11 oster
1235 1.269 jld /*
1236 1.269 jld * Perhaps there should be an option to skip the in-core
1237 1.269 jld * copy and hit the disk, as with disklabel(8).
1238 1.269 jld */
1239 1.269 jld RF_Malloc(clabel, sizeof(*clabel), (RF_ComponentLabel_t *));
1240 1.11 oster
1241 1.277 christos retcode = copyin(*clabel_ptr, clabel, sizeof(*clabel));
1242 1.11 oster
1243 1.11 oster if (retcode) {
1244 1.277 christos RF_Free(clabel, sizeof(*clabel));
1245 1.277 christos return retcode;
1246 1.11 oster }
1247 1.11 oster
1248 1.166 oster clabel->row = 0; /* Don't allow looking at anything else.*/
1249 1.166 oster
1250 1.48 oster column = clabel->column;
1251 1.26 oster
1252 1.166 oster if ((column < 0) || (column >= raidPtr->numCol +
1253 1.277 christos raidPtr->numSpare)) {
1254 1.277 christos RF_Free(clabel, sizeof(*clabel));
1255 1.277 christos return EINVAL;
1256 1.11 oster }
1257 1.11 oster
1258 1.269 jld RF_Free(clabel, sizeof(*clabel));
1259 1.269 jld
1260 1.269 jld clabel = raidget_component_label(raidPtr, column);
1261 1.11 oster
1262 1.277 christos return copyout(clabel, *clabel_ptr, sizeof(**clabel_ptr));
1263 1.11 oster
1264 1.269 jld #if 0
1265 1.11 oster case RAIDFRAME_SET_COMPONENT_LABEL:
1266 1.48 oster clabel = (RF_ComponentLabel_t *) data;
1267 1.11 oster
1268 1.11 oster /* XXX check the label for valid stuff... */
1269 1.11 oster /* Note that some things *should not* get modified --
1270 1.186 perry the user should be re-initing the labels instead of
1271 1.11 oster trying to patch things.
1272 1.11 oster */
1273 1.11 oster
1274 1.123 oster raidid = raidPtr->raidid;
1275 1.224 oster #ifdef DEBUG
1276 1.123 oster printf("raid%d: Got component label:\n", raidid);
1277 1.123 oster printf("raid%d: Version: %d\n", raidid, clabel->version);
1278 1.123 oster printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1279 1.123 oster printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1280 1.123 oster printf("raid%d: Column: %d\n", raidid, clabel->column);
1281 1.123 oster printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1282 1.123 oster printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1283 1.123 oster printf("raid%d: Status: %d\n", raidid, clabel->status);
1284 1.174 oster #endif
1285 1.166 oster clabel->row = 0;
1286 1.48 oster column = clabel->column;
1287 1.12 oster
1288 1.166 oster if ((column < 0) || (column >= raidPtr->numCol)) {
1289 1.12 oster return(EINVAL);
1290 1.11 oster }
1291 1.12 oster
1292 1.12 oster /* XXX this isn't allowed to do anything for now :-) */
1293 1.48 oster
1294 1.48 oster /* XXX and before it is, we need to fill in the rest
1295 1.48 oster of the fields!?!?!?! */
1296 1.269 jld memcpy(raidget_component_label(raidPtr, column),
1297 1.269 jld clabel, sizeof(*clabel));
1298 1.269 jld raidflush_component_label(raidPtr, column);
1299 1.269 jld return (0);
1300 1.12 oster #endif
1301 1.11 oster
1302 1.186 perry case RAIDFRAME_INIT_LABELS:
1303 1.48 oster clabel = (RF_ComponentLabel_t *) data;
1304 1.186 perry /*
1305 1.11 oster we only want the serial number from
1306 1.11 oster the above. We get all the rest of the information
1307 1.11 oster from the config that was used to create this RAID
1308 1.186 perry set.
1309 1.11 oster */
1310 1.12 oster
1311 1.48 oster raidPtr->serial_number = clabel->serial_number;
1312 1.186 perry
1313 1.166 oster for(column=0;column<raidPtr->numCol;column++) {
1314 1.166 oster diskPtr = &raidPtr->Disks[column];
1315 1.166 oster if (!RF_DEAD_DISK(diskPtr->status)) {
1316 1.269 jld ci_label = raidget_component_label(raidPtr,
1317 1.269 jld column);
1318 1.269 jld /* Zeroing this is important. */
1319 1.269 jld memset(ci_label, 0, sizeof(*ci_label));
1320 1.269 jld raid_init_component_label(raidPtr, ci_label);
1321 1.269 jld ci_label->serial_number =
1322 1.269 jld raidPtr->serial_number;
1323 1.269 jld ci_label->row = 0; /* we don't pretend to support more */
1324 1.282 enami rf_component_label_set_partitionsize(ci_label,
1325 1.282 enami diskPtr->partitionSize);
1326 1.209 oster ci_label->column = column;
1327 1.269 jld raidflush_component_label(raidPtr, column);
1328 1.11 oster }
1329 1.269 jld /* XXXjld what about the spares? */
1330 1.11 oster }
1331 1.209 oster
1332 1.11 oster return (retcode);
1333 1.48 oster case RAIDFRAME_SET_AUTOCONFIG:
1334 1.78 minoura d = rf_set_autoconfig(raidPtr, *(int *) data);
1335 1.186 perry printf("raid%d: New autoconfig value is: %d\n",
1336 1.123 oster raidPtr->raidid, d);
1337 1.78 minoura *(int *) data = d;
1338 1.48 oster return (retcode);
1339 1.48 oster
1340 1.48 oster case RAIDFRAME_SET_ROOT:
1341 1.78 minoura d = rf_set_rootpartition(raidPtr, *(int *) data);
1342 1.186 perry printf("raid%d: New rootpartition value is: %d\n",
1343 1.123 oster raidPtr->raidid, d);
1344 1.78 minoura *(int *) data = d;
1345 1.48 oster return (retcode);
1346 1.9 oster
1347 1.1 oster /* initialize all parity */
1348 1.1 oster case RAIDFRAME_REWRITEPARITY:
1349 1.1 oster
1350 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1351 1.17 oster /* Parity for RAID 0 is trivially correct */
1352 1.42 oster raidPtr->parity_good = RF_RAID_CLEAN;
1353 1.17 oster return(0);
1354 1.17 oster }
1355 1.186 perry
1356 1.42 oster if (raidPtr->parity_rewrite_in_progress == 1) {
1357 1.37 oster /* Re-write is already in progress! */
1358 1.37 oster return(EINVAL);
1359 1.37 oster }
1360 1.27 oster
1361 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1362 1.37 oster rf_RewriteParityThread,
1363 1.42 oster raidPtr,"raid_parity");
1364 1.9 oster return (retcode);
1365 1.9 oster
1366 1.11 oster
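	/* add a hot spare to this RAID set */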
1367 1.11 oster case RAIDFRAME_ADD_HOT_SPARE:
1368 1.12 oster sparePtr = (RF_SingleComponent_t *) data;
1369 1.209 oster memcpy( &component, sparePtr, sizeof(RF_SingleComponent_t));
1370 1.209 oster retcode = rf_add_hot_spare(raidPtr, &component);
1371 1.11 oster return(retcode);
1372 1.11 oster
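	/* removing a hot spare is not currently implemented; this case
	   falls straight through to the return below */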
1373 1.11 oster case RAIDFRAME_REMOVE_HOT_SPARE:
1374 1.73 oster return(retcode);
1375 1.73 oster
1376 1.73 oster case RAIDFRAME_DELETE_COMPONENT:
1377 1.73 oster componentPtr = (RF_SingleComponent_t *)data;
1378 1.186 perry memcpy( &component, componentPtr,
1379 1.73 oster sizeof(RF_SingleComponent_t));
1380 1.73 oster retcode = rf_delete_component(raidPtr, &component);
1381 1.73 oster return(retcode);
1382 1.73 oster
1383 1.73 oster case RAIDFRAME_INCORPORATE_HOT_SPARE:
1384 1.73 oster componentPtr = (RF_SingleComponent_t *)data;
1385 1.186 perry memcpy( &component, componentPtr,
1386 1.73 oster sizeof(RF_SingleComponent_t));
1387 1.73 oster retcode = rf_incorporate_hot_spare(raidPtr, &component);
1388 1.11 oster return(retcode);
1389 1.11 oster
1390 1.12 oster case RAIDFRAME_REBUILD_IN_PLACE:
1391 1.24 oster
1392 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1393 1.24 oster /* Can't do this on a RAID 0!! */
1394 1.24 oster return(EINVAL);
1395 1.24 oster }
1396 1.24 oster
1397 1.42 oster if (raidPtr->recon_in_progress == 1) {
1398 1.37 oster /* a reconstruct is already in progress! */
1399 1.37 oster return(EINVAL);
1400 1.37 oster }
1401 1.37 oster
1402 1.12 oster componentPtr = (RF_SingleComponent_t *) data;
1403 1.186 perry memcpy( &component, componentPtr,
1404 1.12 oster sizeof(RF_SingleComponent_t));
1405 1.166 oster component.row = 0; /* we don't support any more */
1406 1.12 oster column = component.column;
1407 1.147 oster
1408 1.166 oster if ((column < 0) || (column >= raidPtr->numCol)) {
1409 1.12 oster return(EINVAL);
1410 1.12 oster }
1411 1.37 oster
1412 1.291 mrg rf_lock_mutex2(raidPtr->mutex);
1413 1.166 oster if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
1414 1.186 perry (raidPtr->numFailures > 0)) {
1415 1.149 oster /* XXX 0 above shouldn't be constant!!! */
1416 1.149 oster /* some component other than this has failed.
1417 1.149 oster Let's not make things worse than they already
1418 1.149 oster are... */
1419 1.149 oster printf("raid%d: Unable to reconstruct to disk at:\n",
1420 1.149 oster raidPtr->raidid);
1421 1.166 oster printf("raid%d: Col: %d Too many failures.\n",
1422 1.166 oster raidPtr->raidid, column);
1423 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1424 1.149 oster return (EINVAL);
1425 1.149 oster }
1426 1.186 perry if (raidPtr->Disks[column].status ==
1427 1.149 oster rf_ds_reconstructing) {
1428 1.149 oster printf("raid%d: Unable to reconstruct to disk at:\n",
1429 1.149 oster raidPtr->raidid);
1430 1.298.2.2 tls printf("raid%d: Col: %d Reconstruction already occurring!\n", raidPtr->raidid, column);
1431 1.186 perry
1432 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1433 1.149 oster return (EINVAL);
1434 1.149 oster }
1435 1.166 oster if (raidPtr->Disks[column].status == rf_ds_spared) {
1436 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1437 1.149 oster return (EINVAL);
1438 1.149 oster }
1439 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1440 1.149 oster
1441 1.37 oster RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1442 1.38 oster if (rrcopy == NULL)
1443 1.38 oster return(ENOMEM);
1444 1.37 oster
1445 1.42 oster rrcopy->raidPtr = (void *) raidPtr;
1446 1.37 oster rrcopy->col = column;
1447 1.37 oster
1448 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1449 1.37 oster rf_ReconstructInPlaceThread,
1450 1.37 oster rrcopy,"raid_reconip");
1451 1.12 oster return(retcode);
1452 1.12 oster
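	/* return a snapshot of the device configuration (components and spares) */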
1453 1.1 oster case RAIDFRAME_GET_INFO:
1454 1.42 oster if (!raidPtr->valid)
1455 1.41 oster return (ENODEV);
1456 1.41 oster ucfgp = (RF_DeviceConfig_t **) data;
1457 1.41 oster RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1458 1.41 oster (RF_DeviceConfig_t *));
1459 1.41 oster if (d_cfg == NULL)
1460 1.41 oster return (ENOMEM);
1461 1.166 oster d_cfg->rows = 1; /* there is only 1 row now */
1462 1.42 oster d_cfg->cols = raidPtr->numCol;
1463 1.166 oster d_cfg->ndevs = raidPtr->numCol;
1464 1.41 oster if (d_cfg->ndevs >= RF_MAX_DISKS) {
1465 1.41 oster RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1466 1.41 oster return (ENOMEM);
1467 1.41 oster }
1468 1.42 oster d_cfg->nspares = raidPtr->numSpare;
1469 1.41 oster if (d_cfg->nspares >= RF_MAX_DISKS) {
1470 1.41 oster RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1471 1.41 oster return (ENOMEM);
1472 1.41 oster }
1473 1.42 oster d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1474 1.41 oster d = 0;
1475 1.166 oster for (j = 0; j < d_cfg->cols; j++) {
1476 1.166 oster d_cfg->devs[d] = raidPtr->Disks[j];
1477 1.166 oster d++;
1478 1.41 oster }
1479 1.41 oster for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1480 1.166 oster d_cfg->spares[i] = raidPtr->Disks[j];
1481 1.298.2.5 jdolecek if (d_cfg->spares[i].status == rf_ds_rebuilding_spare) {
1482 1.298.2.5 jdolecek /* XXX: raidctl(8) expects to see this as a used spare */
1483 1.298.2.5 jdolecek d_cfg->spares[i].status = rf_ds_used_spare;
1484 1.298.2.5 jdolecek }
1485 1.41 oster }
1486 1.156 dsl retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
1487 1.41 oster RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1488 1.41 oster
1489 1.41 oster return (retcode);
1490 1.9 oster
1491 1.22 oster case RAIDFRAME_CHECK_PARITY:
1492 1.42 oster *(int *) data = raidPtr->parity_good;
1493 1.22 oster return (0);
1494 1.41 oster
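	/* parity map status and tuning ioctls; all of them reject sets
	   that cannot maintain a parity map */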
1495 1.269 jld case RAIDFRAME_PARITYMAP_STATUS:
1496 1.273 jld if (rf_paritymap_ineligible(raidPtr))
1497 1.273 jld return EINVAL;
1498 1.269 jld rf_paritymap_status(raidPtr->parity_map,
1499 1.269 jld (struct rf_pmstat *)data);
1500 1.269 jld return 0;
1501 1.269 jld
1502 1.269 jld case RAIDFRAME_PARITYMAP_SET_PARAMS:
1503 1.273 jld if (rf_paritymap_ineligible(raidPtr))
1504 1.273 jld return EINVAL;
1505 1.269 jld if (raidPtr->parity_map == NULL)
1506 1.269 jld return ENOENT; /* ??? */
1507 1.269 jld if (0 != rf_paritymap_set_params(raidPtr->parity_map,
1508 1.269 jld (struct rf_pmparams *)data, 1))
1509 1.269 jld return EINVAL;
1510 1.269 jld return 0;
1511 1.269 jld
1512 1.269 jld case RAIDFRAME_PARITYMAP_GET_DISABLE:
1513 1.273 jld if (rf_paritymap_ineligible(raidPtr))
1514 1.273 jld return EINVAL;
1515 1.269 jld *(int *) data = rf_paritymap_get_disable(raidPtr);
1516 1.269 jld return 0;
1517 1.269 jld
1518 1.269 jld case RAIDFRAME_PARITYMAP_SET_DISABLE:
1519 1.273 jld if (rf_paritymap_ineligible(raidPtr))
1520 1.273 jld return EINVAL;
1521 1.269 jld rf_paritymap_set_disable(raidPtr, *(int *)data);
1522 1.269 jld /* XXX should errors be passed up? */
1523 1.269 jld return 0;
1524 1.269 jld
1525 1.1 oster case RAIDFRAME_RESET_ACCTOTALS:
1526 1.108 thorpej memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1527 1.41 oster return (0);
1528 1.9 oster
1529 1.1 oster case RAIDFRAME_GET_ACCTOTALS:
1530 1.41 oster totals = (RF_AccTotals_t *) data;
1531 1.42 oster *totals = raidPtr->acc_totals;
1532 1.41 oster return (0);
1533 1.9 oster
1534 1.1 oster case RAIDFRAME_KEEP_ACCTOTALS:
1535 1.42 oster raidPtr->keep_acc_totals = *(int *)data;
1536 1.41 oster return (0);
1537 1.9 oster
1538 1.1 oster case RAIDFRAME_GET_SIZE:
1539 1.42 oster *(int *) data = raidPtr->totalSectors;
1540 1.9 oster return (0);
1541 1.1 oster
1542 1.1 oster /* fail a disk & optionally start reconstruction */
1543 1.1 oster case RAIDFRAME_FAIL_DISK:
1544 1.24 oster
1545 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1546 1.24 oster /* Can't do this on a RAID 0!! */
1547 1.24 oster return(EINVAL);
1548 1.24 oster }
1549 1.24 oster
1550 1.1 oster rr = (struct rf_recon_req *) data;
1551 1.166 oster rr->row = 0;
1552 1.166 oster if (rr->col < 0 || rr->col >= raidPtr->numCol)
1553 1.9 oster return (EINVAL);
1554 1.149 oster
1555 1.149 oster
1556 1.291 mrg rf_lock_mutex2(raidPtr->mutex);
1557 1.185 oster if (raidPtr->status == rf_rs_reconstructing) {
1558 1.185 oster /* you can't fail a disk while we're reconstructing! */
1559 1.185 oster /* XXX wrong for RAID6 */
1560 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1561 1.185 oster return (EINVAL);
1562 1.185 oster }
1563 1.186 perry if ((raidPtr->Disks[rr->col].status ==
1564 1.186 perry rf_ds_optimal) && (raidPtr->numFailures > 0)) {
1565 1.149 oster /* some other component has failed. Let's not make
1566 1.149 oster things worse. XXX wrong for RAID6 */
1567 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1568 1.149 oster return (EINVAL);
1569 1.149 oster }
1570 1.166 oster if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
1571 1.149 oster /* Can't fail a spared disk! */
1572 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1573 1.149 oster return (EINVAL);
1574 1.149 oster }
1575 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1576 1.1 oster
1577 1.9 oster /* make a copy of the recon request so that we don't rely on
1578 1.9 oster * the user's buffer */
1579 1.1 oster RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1580 1.38 oster if (rrcopy == NULL)
1581 1.38 oster return(ENOMEM);
1582 1.118 wiz memcpy(rrcopy, rr, sizeof(*rr));
1583 1.42 oster rrcopy->raidPtr = (void *) raidPtr;
1584 1.1 oster
1585 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1586 1.37 oster rf_ReconThread,
1587 1.37 oster rrcopy,"raid_recon");
1588 1.9 oster return (0);
1589 1.9 oster
1590 1.9 oster /* invoke a copyback operation after recon on whatever disk
1591 1.9 oster * needs it, if any */
1592 1.9 oster case RAIDFRAME_COPYBACK:
1593 1.24 oster
1594 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1595 1.24 oster /* This makes no sense on a RAID 0!! */
1596 1.24 oster return(EINVAL);
1597 1.24 oster }
1598 1.24 oster
1599 1.42 oster if (raidPtr->copyback_in_progress == 1) {
1600 1.37 oster /* Copyback is already in progress! */
1601 1.37 oster return(EINVAL);
1602 1.37 oster }
1603 1.27 oster
1604 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1605 1.37 oster rf_CopybackThread,
1606 1.42 oster raidPtr,"raid_copyback");
1607 1.37 oster return (retcode);
1608 1.9 oster
1609 1.1 oster /* return the percentage completion of reconstruction */
1610 1.37 oster case RAIDFRAME_CHECK_RECON_STATUS:
1611 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1612 1.71 oster /* This makes no sense on a RAID 0, so tell the
1613 1.71 oster user it's done. */
1614 1.71 oster *(int *) data = 100;
1615 1.71 oster return(0);
1616 1.24 oster }
1617 1.166 oster if (raidPtr->status != rf_rs_reconstructing)
1618 1.1 oster *(int *) data = 100;
1619 1.171 oster else {
1620 1.171 oster if (raidPtr->reconControl->numRUsTotal > 0) {
1621 1.171 oster *(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
1622 1.171 oster } else {
1623 1.171 oster *(int *) data = 0;
1624 1.171 oster }
1625 1.171 oster }
1626 1.9 oster return (0);
1627 1.83 oster case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1628 1.83 oster progressInfoPtr = (RF_ProgressInfo_t **) data;
1629 1.166 oster if (raidPtr->status != rf_rs_reconstructing) {
1630 1.83 oster progressInfo.remaining = 0;
1631 1.83 oster progressInfo.completed = 100;
1632 1.83 oster progressInfo.total = 100;
1633 1.83 oster } else {
1634 1.186 perry progressInfo.total =
1635 1.166 oster raidPtr->reconControl->numRUsTotal;
1636 1.186 perry progressInfo.completed =
1637 1.166 oster raidPtr->reconControl->numRUsComplete;
1638 1.83 oster progressInfo.remaining = progressInfo.total -
1639 1.83 oster progressInfo.completed;
1640 1.83 oster }
1641 1.156 dsl retcode = copyout(&progressInfo, *progressInfoPtr,
1642 1.83 oster sizeof(RF_ProgressInfo_t));
1643 1.83 oster return (retcode);
1644 1.9 oster
1645 1.37 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1646 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1647 1.80 oster /* This makes no sense on a RAID 0, so tell the
1648 1.80 oster user it's done. */
1649 1.80 oster *(int *) data = 100;
1650 1.80 oster return(0);
1651 1.37 oster }
1652 1.42 oster if (raidPtr->parity_rewrite_in_progress == 1) {
1653 1.186 perry *(int *) data = 100 *
1654 1.186 perry raidPtr->parity_rewrite_stripes_done /
1655 1.83 oster raidPtr->Layout.numStripe;
1656 1.37 oster } else {
1657 1.37 oster *(int *) data = 100;
1658 1.37 oster }
1659 1.37 oster return (0);
1660 1.37 oster
1661 1.83 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1662 1.83 oster progressInfoPtr = (RF_ProgressInfo_t **) data;
1663 1.83 oster if (raidPtr->parity_rewrite_in_progress == 1) {
1664 1.83 oster progressInfo.total = raidPtr->Layout.numStripe;
1665 1.186 perry progressInfo.completed =
1666 1.83 oster raidPtr->parity_rewrite_stripes_done;
1667 1.83 oster progressInfo.remaining = progressInfo.total -
1668 1.83 oster progressInfo.completed;
1669 1.83 oster } else {
1670 1.83 oster progressInfo.remaining = 0;
1671 1.83 oster progressInfo.completed = 100;
1672 1.83 oster progressInfo.total = 100;
1673 1.83 oster }
1674 1.156 dsl retcode = copyout(&progressInfo, *progressInfoPtr,
1675 1.83 oster sizeof(RF_ProgressInfo_t));
1676 1.83 oster return (retcode);
1677 1.83 oster
1678 1.37 oster case RAIDFRAME_CHECK_COPYBACK_STATUS:
1679 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1680 1.37 oster /* This makes no sense on a RAID 0 */
1681 1.83 oster *(int *) data = 100;
1682 1.83 oster return(0);
1683 1.37 oster }
1684 1.42 oster if (raidPtr->copyback_in_progress == 1) {
1685 1.42 oster *(int *) data = 100 * raidPtr->copyback_stripes_done /
1686 1.42 oster raidPtr->Layout.numStripe;
1687 1.37 oster } else {
1688 1.37 oster *(int *) data = 100;
1689 1.37 oster }
1690 1.37 oster return (0);
1691 1.37 oster
1692 1.83 oster case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1693 1.93 oster progressInfoPtr = (RF_ProgressInfo_t **) data;
1694 1.83 oster if (raidPtr->copyback_in_progress == 1) {
1695 1.83 oster progressInfo.total = raidPtr->Layout.numStripe;
1696 1.186 perry progressInfo.completed =
1697 1.93 oster raidPtr->copyback_stripes_done;
1698 1.83 oster progressInfo.remaining = progressInfo.total -
1699 1.83 oster progressInfo.completed;
1700 1.83 oster } else {
1701 1.83 oster progressInfo.remaining = 0;
1702 1.83 oster progressInfo.completed = 100;
1703 1.83 oster progressInfo.total = 100;
1704 1.83 oster }
1705 1.156 dsl retcode = copyout(&progressInfo, *progressInfoPtr,
1706 1.83 oster sizeof(RF_ProgressInfo_t));
1707 1.83 oster return (retcode);
1708 1.37 oster
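	/* update last_unit in all component labels; only allowed when
	   every component is optimal */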
1709 1.298.2.5 jdolecek case RAIDFRAME_SET_LAST_UNIT:
1710 1.298.2.5 jdolecek for (column = 0; column < raidPtr->numCol; column++)
1711 1.298.2.5 jdolecek if (raidPtr->Disks[column].status != rf_ds_optimal)
1712 1.298.2.5 jdolecek return EBUSY;
1713 1.298.2.5 jdolecek
1714 1.298.2.5 jdolecek for (column = 0; column < raidPtr->numCol; column++) {
1715 1.298.2.5 jdolecek clabel = raidget_component_label(raidPtr, column);
1716 1.298.2.5 jdolecek clabel->last_unit = *(int *)data;
1717 1.298.2.5 jdolecek raidflush_component_label(raidPtr, column);
1718 1.298.2.5 jdolecek }
1719 1.298.2.5 jdolecek rs->sc_cflags |= RAIDF_UNIT_CHANGED;
1720 1.298.2.5 jdolecek return 0;
1721 1.298.2.5 jdolecek
1722 1.9 oster /* the sparetable daemon calls this to wait for the kernel to
1723 1.9 oster * need a spare table. this ioctl does not return until a
1724 1.9 oster * spare table is needed. XXX -- calling mpsleep here in the
1725 1.9 oster * ioctl code is almost certainly wrong and evil. -- XXX XXX
1726 1.9 oster * -- I should either compute the spare table in the kernel,
1727 1.9 oster * or have a different -- XXX XXX -- interface (a different
1728 1.42 oster * character device) for delivering the table -- XXX */
1729 1.250 oster #if 0
1730 1.1 oster case RAIDFRAME_SPARET_WAIT:
1731 1.287 mrg rf_lock_mutex2(rf_sparet_wait_mutex);
1732 1.9 oster while (!rf_sparet_wait_queue)
1733 1.287 mrg rf_wait_cond2(rf_sparet_wait_cv, rf_sparet_wait_mutex);
1734 1.1 oster waitreq = rf_sparet_wait_queue;
1735 1.1 oster rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1736 1.287 mrg rf_unlock_mutex2(rf_sparet_wait_mutex);
1737 1.9 oster
1738 1.42 oster /* structure assignment */
1739 1.186 perry *((RF_SparetWait_t *) data) = *waitreq;
1740 1.9 oster
1741 1.1 oster RF_Free(waitreq, sizeof(*waitreq));
1742 1.9 oster return (0);
1743 1.9 oster
1744 1.9 oster /* wakes up a process waiting on SPARET_WAIT and puts an error
1745 1.9 oster * code in it that will cause the daemon to exit */
1746 1.1 oster case RAIDFRAME_ABORT_SPARET_WAIT:
1747 1.1 oster RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1748 1.1 oster waitreq->fcol = -1;
1749 1.287 mrg rf_lock_mutex2(rf_sparet_wait_mutex);
1750 1.1 oster waitreq->next = rf_sparet_wait_queue;
1751 1.1 oster rf_sparet_wait_queue = waitreq;
1752 1.287 mrg rf_broadcast_cond2(rf_sparet_wait_cv);
1753 1.287 mrg rf_unlock_mutex2(rf_sparet_wait_mutex);
1754 1.9 oster return (0);
1755 1.1 oster
1756 1.9 oster /* used by the spare table daemon to deliver a spare table
1757 1.9 oster * into the kernel */
1758 1.1 oster case RAIDFRAME_SEND_SPARET:
1759 1.9 oster
1760 1.1 oster /* install the spare table */
1761 1.42 oster retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1762 1.9 oster
1763 1.9 oster /* respond to the requestor. the return status of the spare
1764 1.9 oster * table installation is passed in the "fcol" field */
1765 1.1 oster RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1766 1.1 oster waitreq->fcol = retcode;
1767 1.287 mrg rf_lock_mutex2(rf_sparet_wait_mutex);
1768 1.1 oster waitreq->next = rf_sparet_resp_queue;
1769 1.1 oster rf_sparet_resp_queue = waitreq;
1770 1.287 mrg rf_broadcast_cond2(rf_sparet_resp_cv);
1771 1.287 mrg rf_unlock_mutex2(rf_sparet_wait_mutex);
1772 1.9 oster
1773 1.9 oster return (retcode);
1774 1.1 oster #endif
1775 1.1 oster
1776 1.9 oster default:
1777 1.36 oster break; /* fall through to the os-specific code below */
1778 1.1 oster
1779 1.1 oster }
1780 1.9 oster
1781 1.42 oster if (!raidPtr->valid)
1782 1.9 oster return (EINVAL);
1783 1.9 oster
1784 1.1 oster /*
1785 1.1 oster * Add support for "regular" device ioctls here.
1786 1.1 oster */
1787 1.263 haad
1788 1.1 oster switch (cmd) {
1789 1.298.2.5 jdolecek case DIOCGCACHE:
1790 1.298.2.5 jdolecek retcode = rf_get_component_caches(raidPtr, (int *)data);
1791 1.102 fvdl break;
1792 1.1 oster
1793 1.298.2.5 jdolecek case DIOCCACHESYNC:
1794 1.298.2.5 jdolecek retcode = rf_sync_component_caches(raidPtr);
1795 1.1 oster break;
1796 1.1 oster
1797 1.298.2.5 jdolecek default:
1798 1.298.2.5 jdolecek retcode = dk_ioctl(dksc, dev, cmd, data, flag, l);
1799 1.1 oster break;
1800 1.102 fvdl }
1801 1.1 oster
1802 1.9 oster return (retcode);
1803 1.1 oster
1804 1.1 oster }
1805 1.1 oster
1806 1.1 oster
1807 1.9 oster /* raidinit -- complete the rest of the initialization for the
1808 1.1 oster RAIDframe device. */
1809 1.1 oster
1810 1.1 oster
1811 1.59 oster static void
1812 1.298.2.3 tls raidinit(struct raid_softc *rs)
1813 1.1 oster {
1814 1.262 cegger cfdata_t cf;
1815 1.298.2.5 jdolecek unsigned int unit;
1816 1.298.2.5 jdolecek struct dk_softc *dksc = &rs->sc_dksc;
1817 1.298.2.3 tls RF_Raid_t *raidPtr = &rs->sc_r;
1818 1.298.2.5 jdolecek device_t dev;
1819 1.1 oster
1820 1.59 oster unit = raidPtr->raidid;
1821 1.1 oster
1822 1.179 itojun /* XXX doesn't check bounds. */
1823 1.298.2.5 jdolecek snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%u", unit);
1824 1.1 oster
1825 1.217 oster /* attach the pseudo device */
1826 1.217 oster cf = malloc(sizeof(*cf), M_RAIDFRAME, M_WAITOK);
1827 1.217 oster cf->cf_name = raid_cd.cd_name;
1828 1.217 oster cf->cf_atname = raid_cd.cd_name;
1829 1.217 oster cf->cf_unit = unit;
1830 1.217 oster cf->cf_fstate = FSTATE_STAR;
1831 1.217 oster
1832 1.298.2.5 jdolecek dev = config_attach_pseudo(cf);
1833 1.298.2.5 jdolecek if (dev == NULL) {
1834 1.217 oster printf("raid%d: config_attach_pseudo failed\n",
1835 1.270 christos raidPtr->raidid);
1836 1.265 pooka free(cf, M_RAIDFRAME);
1837 1.265 pooka return;
1838 1.217 oster }
1839 1.217 oster
1840 1.298.2.5 jdolecek /* provide a backpointer to the real softc */
1841 1.298.2.5 jdolecek raidsoftc(dev) = rs;
1842 1.298.2.5 jdolecek
1843 1.1 oster /* disk_attach actually creates space for the CPU disklabel, among
1844 1.9 oster * other things, so it's critical to call this *BEFORE* we try putzing
1845 1.9 oster * with disklabels. */
1846 1.298.2.5 jdolecek dk_init(dksc, dev, DKTYPE_RAID);
1847 1.298.2.5 jdolecek disk_init(&dksc->sc_dkdev, rs->sc_xname, &rf_dkdriver);
1848 1.1 oster
1849 1.1 oster /* XXX There may be a weird interaction here between this and
1850 1.9 oster * protectedSectors, as used in RAIDframe. */
1851 1.11 oster
1852 1.9 oster rs->sc_size = raidPtr->totalSectors;
1853 1.234 oster
1854 1.298.2.5 jdolecek /* Attach dk and disk subsystems */
1855 1.298.2.5 jdolecek dk_attach(dksc);
1856 1.298.2.5 jdolecek disk_attach(&dksc->sc_dkdev);
1857 1.298.2.3 tls rf_set_geometry(rs, raidPtr);
1858 1.234 oster
1859 1.298.2.5 jdolecek bufq_alloc(&dksc->sc_bufq, "fcfs", BUFQ_SORT_RAWBLOCK);
1860 1.298.2.5 jdolecek
1861 1.298.2.5 jdolecek /* mark unit as usable */
1862 1.298.2.5 jdolecek rs->sc_flags |= RAIDF_INITED;
1863 1.298.2.5 jdolecek
1864 1.298.2.5 jdolecek dkwedge_discover(&dksc->sc_dkdev);
1865 1.1 oster }
1866 1.298.2.5 jdolecek
1867 1.150 oster #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
1868 1.1 oster /* wake up the daemon & tell it to get us a spare table
1869 1.1 oster * XXX
1870 1.9 oster * the entries in the queues should be tagged with the raidPtr
1871 1.186 perry * so that in the extremely rare case that two recons happen at once,
1872 1.11 oster * we know for which device we're requesting a spare table
1873 1.1 oster * XXX
1874 1.186 perry *
1875 1.39 oster * XXX This code is not currently used. GO
1876 1.1 oster */
1877 1.186 perry int
1878 1.169 oster rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
1879 1.9 oster {
1880 1.9 oster int retcode;
1881 1.9 oster
1882 1.287 mrg rf_lock_mutex2(rf_sparet_wait_mutex);
1883 1.9 oster req->next = rf_sparet_wait_queue;
1884 1.9 oster rf_sparet_wait_queue = req;
1885 1.289 mrg rf_broadcast_cond2(rf_sparet_wait_cv);
1886 1.9 oster
1887 1.9 oster /* the wait releases the mutex while we sleep */
1888 1.9 oster while (!rf_sparet_resp_queue) {
1889 1.289 mrg rf_wait_cond2(rf_sparet_resp_cv, rf_sparet_wait_mutex);
1890 1.9 oster }
1891 1.9 oster req = rf_sparet_resp_queue;
1892 1.9 oster rf_sparet_resp_queue = req->next;
1893 1.287 mrg rf_unlock_mutex2(rf_sparet_wait_mutex);
1894 1.9 oster
1895 1.9 oster retcode = req->fcol;
1896 1.9 oster RF_Free(req, sizeof(*req)); /* this is not the same req as we
1897 1.9 oster * alloc'd */
1898 1.9 oster return (retcode);
1899 1.1 oster }
1900 1.150 oster #endif
1901 1.39 oster
1902 1.186 perry /* a wrapper around rf_DoAccess that extracts appropriate info from the
1903 1.11 oster * bp & passes it down.
1904 1.1 oster * any calls originating in the kernel must use non-blocking I/O;
1905 1.1 oster * we do some extra sanity checking to return "appropriate" error values for
1906 1.1 oster * certain conditions (to make some standard utilities work)
1907 1.186 perry *
1908 1.34 oster * Formerly known as: rf_DoAccessKernel
1909 1.1 oster */
1910 1.34 oster void
1911 1.169 oster raidstart(RF_Raid_t *raidPtr)
1912 1.1 oster {
1913 1.1 oster struct raid_softc *rs;
1914 1.298.2.5 jdolecek struct dk_softc *dksc;
1915 1.1 oster
1916 1.298.2.3 tls rs = raidPtr->softc;
1917 1.298.2.5 jdolecek dksc = &rs->sc_dksc;
1918 1.56 oster /* quick check to see if anything has died recently */
1919 1.291 mrg rf_lock_mutex2(raidPtr->mutex);
1920 1.56 oster if (raidPtr->numNewFailures > 0) {
1921 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1922 1.186 perry rf_update_component_labels(raidPtr,
1923 1.91 oster RF_NORMAL_COMPONENT_UPDATE);
1924 1.291 mrg rf_lock_mutex2(raidPtr->mutex);
1925 1.56 oster raidPtr->numNewFailures--;
1926 1.56 oster }
1927 1.298.2.5 jdolecek rf_unlock_mutex2(raidPtr->mutex);
1928 1.56 oster
1929 1.298.2.5 jdolecek if ((rs->sc_flags & RAIDF_INITED) == 0) {
1930 1.298.2.5 jdolecek printf("raid%d: raidstart not ready\n", raidPtr->raidid);
1931 1.298.2.5 jdolecek return;
1932 1.298.2.5 jdolecek }
1933 1.186 perry
1934 1.298.2.5 jdolecek dk_start(dksc, NULL);
1935 1.298.2.5 jdolecek }
1936 1.186 perry
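/* raiddoaccess -- bounds-check a buf against the RAID set and hand it to
 * rf_DoAccess() as a non-blocking, asynchronous access.  Returns EAGAIN
 * if the array currently has no openings. */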
1937 1.298.2.5 jdolecek static int
1938 1.298.2.5 jdolecek raiddoaccess(RF_Raid_t *raidPtr, struct buf *bp)
1939 1.298.2.5 jdolecek {
1940 1.298.2.5 jdolecek RF_SectorCount_t num_blocks, pb, sum;
1941 1.298.2.5 jdolecek RF_RaidAddr_t raid_addr;
1942 1.298.2.5 jdolecek daddr_t blocknum;
1943 1.298.2.5 jdolecek int do_async;
1944 1.298.2.5 jdolecek int rc;
1945 1.1 oster
1946 1.298.2.5 jdolecek rf_lock_mutex2(raidPtr->mutex);
1947 1.298.2.5 jdolecek if (raidPtr->openings == 0) {
1948 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1949 1.298.2.5 jdolecek return EAGAIN;
1950 1.298.2.5 jdolecek }
1951 1.298.2.5 jdolecek rf_unlock_mutex2(raidPtr->mutex);
1952 1.1 oster
1953 1.298.2.5 jdolecek blocknum = bp->b_rawblkno;
1954 1.186 perry
1955 1.298.2.5 jdolecek db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
1956 1.298.2.5 jdolecek (int) blocknum));
1957 1.99 oster
1958 1.298.2.5 jdolecek db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
1959 1.298.2.5 jdolecek db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
1960 1.20 oster
1961 1.298.2.5 jdolecek /* *THIS* is where we adjust what block we're going to...
1962 1.298.2.5 jdolecek * but DO NOT TOUCH bp->b_blkno!!! */
1963 1.298.2.5 jdolecek raid_addr = blocknum;
1964 1.298.2.5 jdolecek
1965 1.298.2.5 jdolecek num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
1966 1.298.2.5 jdolecek pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
1967 1.298.2.5 jdolecek sum = raid_addr + num_blocks + pb;
1968 1.298.2.5 jdolecek if (1 || rf_debugKernelAccess) {
1969 1.298.2.5 jdolecek db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
1970 1.298.2.5 jdolecek (int) raid_addr, (int) sum, (int) num_blocks,
1971 1.298.2.5 jdolecek (int) pb, (int) bp->b_resid));
1972 1.298.2.5 jdolecek }
1973 1.298.2.5 jdolecek if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
1974 1.298.2.5 jdolecek || (sum < num_blocks) || (sum < pb)) {
1975 1.298.2.5 jdolecek rc = ENOSPC;
1976 1.298.2.5 jdolecek goto done;
1977 1.298.2.5 jdolecek }
1978 1.298.2.5 jdolecek /*
1979 1.298.2.5 jdolecek * XXX rf_DoAccess() should do this, not just DoAccessKernel()
1980 1.298.2.5 jdolecek */
1981 1.20 oster
1982 1.298.2.5 jdolecek if (bp->b_bcount & raidPtr->sectorMask) {
1983 1.298.2.5 jdolecek rc = ENOSPC;
1984 1.298.2.5 jdolecek goto done;
1985 1.20 oster }
1986 1.298.2.5 jdolecek db1_printf(("Calling DoAccess..\n"));
1987 1.298.2.5 jdolecek
1988 1.298.2.5 jdolecek
1989 1.298.2.5 jdolecek rf_lock_mutex2(raidPtr->mutex);
1990 1.298.2.5 jdolecek raidPtr->openings--;
1991 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1992 1.20 oster
1993 1.298.2.5 jdolecek /*
1994 1.298.2.5 jdolecek * Everything is async.
1995 1.298.2.5 jdolecek */
1996 1.298.2.5 jdolecek do_async = 1;
1997 1.298.2.5 jdolecek
1998 1.298.2.5 jdolecek /* don't ever condition on bp->b_flags & B_WRITE.
1999 1.298.2.5 jdolecek * always condition on B_READ instead */
2000 1.20 oster
2001 1.298.2.5 jdolecek rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
2002 1.298.2.5 jdolecek RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
2003 1.298.2.5 jdolecek do_async, raid_addr, num_blocks,
2004 1.298.2.5 jdolecek bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
2005 1.7 explorer
2006 1.298.2.5 jdolecek done:
2007 1.298.2.5 jdolecek return rc;
2008 1.298.2.5 jdolecek }
2009 1.7 explorer
2010 1.1 oster /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
2011 1.1 oster
2012 1.186 perry int
2013 1.169 oster rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
2014 1.1 oster {
2015 1.9 oster int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
2016 1.1 oster struct buf *bp;
2017 1.9 oster
2018 1.1 oster req->queue = queue;
2019 1.1 oster bp = req->bp;
2020 1.1 oster
2021 1.1 oster switch (req->type) {
2022 1.9 oster case RF_IO_TYPE_NOP: /* used primarily to unlock a locked queue */
2023 1.1 oster /* XXX need to do something extra here.. */
2024 1.9 oster /* I'm leaving this in, as I've never actually seen it used,
2025 1.9 oster * and I'd like folks to report it... GO */
2026 1.1 oster printf(("WAKEUP CALLED\n"));
2027 1.1 oster queue->numOutstanding++;
2028 1.1 oster
2029 1.197 oster bp->b_flags = 0;
2030 1.207 simonb bp->b_private = req;
2031 1.1 oster
2032 1.194 oster KernelWakeupFunc(bp);
2033 1.1 oster break;
2034 1.9 oster
2035 1.1 oster case RF_IO_TYPE_READ:
2036 1.1 oster case RF_IO_TYPE_WRITE:
2037 1.175 oster #if RF_ACC_TRACE > 0
2038 1.1 oster if (req->tracerec) {
2039 1.1 oster RF_ETIMER_START(req->tracerec->timer);
2040 1.1 oster }
2041 1.175 oster #endif
2042 1.194 oster InitBP(bp, queue->rf_cinfo->ci_vp,
2043 1.197 oster op, queue->rf_cinfo->ci_dev,
2044 1.9 oster req->sectorOffset, req->numSector,
2045 1.9 oster req->buf, KernelWakeupFunc, (void *) req,
2046 1.9 oster queue->raidPtr->logBytesPerSector, req->b_proc);
2047 1.1 oster
2048 1.1 oster if (rf_debugKernelAccess) {
2049 1.9 oster db1_printf(("dispatch: bp->b_blkno = %ld\n",
2050 1.9 oster (long) bp->b_blkno));
2051 1.1 oster }
2052 1.1 oster queue->numOutstanding++;
2053 1.1 oster queue->last_deq_sector = req->sectorOffset;
2054 1.9 oster /* acc wouldn't have been let in if there were any pending
2055 1.9 oster * reqs at any other priority */
2056 1.1 oster queue->curPriority = req->priority;
2057 1.1 oster
2058 1.166 oster db1_printf(("Going for %c to unit %d col %d\n",
2059 1.186 perry req->type, queue->raidPtr->raidid,
2060 1.166 oster queue->col));
2061 1.1 oster db1_printf(("sector %d count %d (%d bytes) %d\n",
2062 1.9 oster (int) req->sectorOffset, (int) req->numSector,
2063 1.9 oster (int) (req->numSector <<
2064 1.9 oster queue->raidPtr->logBytesPerSector),
2065 1.9 oster (int) queue->raidPtr->logBytesPerSector));
2066 1.256 oster
2067 1.256 oster /*
2068 1.256 oster * XXX: drop lock here since this can block at
2069 1.256 oster * least with backing SCSI devices. Retake it
2070 1.256 oster * to minimize fuss with calling interfaces.
2071 1.256 oster */
2072 1.256 oster
2073 1.256 oster RF_UNLOCK_QUEUE_MUTEX(queue, "unusedparam");
2074 1.247 oster bdev_strategy(bp);
2075 1.256 oster RF_LOCK_QUEUE_MUTEX(queue, "unusedparam");
2076 1.1 oster break;
2077 1.9 oster
2078 1.1 oster default:
2079 1.1 oster panic("bad req->type in rf_DispatchKernelIO");
2080 1.1 oster }
2081 1.1 oster db1_printf(("Exiting from DispatchKernelIO\n"));
2082 1.134 oster
2083 1.9 oster return (0);
2084 1.1 oster }
2085 1.9 oster /* this is the callback function associated with an I/O invoked from
2086 1.1 oster kernel code.
2087 1.1 oster */
2088 1.186 perry static void
2089 1.194 oster KernelWakeupFunc(struct buf *bp)
2090 1.9 oster {
2091 1.9 oster RF_DiskQueueData_t *req = NULL;
2092 1.9 oster RF_DiskQueue_t *queue;
2093 1.9 oster
2094 1.9 oster db1_printf(("recovering the request queue:\n"));
2095 1.285 mrg
2096 1.207 simonb req = bp->b_private;
2097 1.1 oster
2098 1.9 oster queue = (RF_DiskQueue_t *) req->queue;
2099 1.1 oster
2100 1.286 mrg rf_lock_mutex2(queue->raidPtr->iodone_lock);
2101 1.285 mrg
2102 1.175 oster #if RF_ACC_TRACE > 0
2103 1.9 oster if (req->tracerec) {
2104 1.9 oster RF_ETIMER_STOP(req->tracerec->timer);
2105 1.9 oster RF_ETIMER_EVAL(req->tracerec->timer);
2106 1.288 mrg rf_lock_mutex2(rf_tracing_mutex);
2107 1.9 oster req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
2108 1.9 oster req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
2109 1.9 oster req->tracerec->num_phys_ios++;
2110 1.288 mrg rf_unlock_mutex2(rf_tracing_mutex);
2111 1.9 oster }
2112 1.175 oster #endif
2113 1.1 oster
2114 1.230 ad /* XXX Ok, let's get aggressive... If b_error is set, let's go
2115 1.9 oster * ballistic, and mark the component as hosed... */
2116 1.36 oster
2117 1.230 ad if (bp->b_error != 0) {
2118 1.9 oster /* Mark the disk as dead */
2119 1.9 oster /* but only mark it once... */
2120 1.186 perry /* and only if it wouldn't leave this RAID set
2121 1.183 oster completely broken */
2122 1.193 oster if (((queue->raidPtr->Disks[queue->col].status ==
2123 1.193 oster rf_ds_optimal) ||
2124 1.193 oster (queue->raidPtr->Disks[queue->col].status ==
2125 1.193 oster rf_ds_used_spare)) &&
2126 1.193 oster (queue->raidPtr->numFailures <
2127 1.204 simonb queue->raidPtr->Layout.map->faultsTolerated)) {
2128 1.298.2.5 jdolecek printf("raid%d: IO Error (%d). Marking %s as failed.\n",
2129 1.136 oster queue->raidPtr->raidid,
2130 1.298.2.5 jdolecek bp->b_error,
2131 1.166 oster queue->raidPtr->Disks[queue->col].devname);
2132 1.166 oster queue->raidPtr->Disks[queue->col].status =
2133 1.9 oster rf_ds_failed;
2134 1.166 oster queue->raidPtr->status = rf_rs_degraded;
2135 1.9 oster queue->raidPtr->numFailures++;
2136 1.56 oster queue->raidPtr->numNewFailures++;
2137 1.9 oster } else { /* Disk is already dead... */
2138 1.9 oster /* printf("Disk already marked as dead!\n"); */
2139 1.9 oster }
2140 1.4 oster
2141 1.9 oster }
2142 1.4 oster
2143 1.143 oster /* Fill in the error value */
2144 1.230 ad req->error = bp->b_error;
2145 1.143 oster
2146 1.143 oster /* Drop this one on the "finished" queue... */
2147 1.143 oster TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
2148 1.143 oster
2149 1.143 oster /* Let the raidio thread know there is work to be done. */
2150 1.286 mrg rf_signal_cond2(queue->raidPtr->iodone_cv);
2151 1.143 oster
2152 1.286 mrg rf_unlock_mutex2(queue->raidPtr->iodone_lock);
2153 1.1 oster }
2154 1.1 oster
2155 1.1 oster
2156 1.1 oster /*
2157 1.1 oster * initialize a buf structure for doing an I/O in the kernel.
2158 1.1 oster */
2159 1.186 perry static void
2160 1.169 oster InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
2161 1.225 christos RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
2162 1.169 oster void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
2163 1.169 oster struct proc *b_proc)
2164 1.9 oster {
2165 1.9 oster /* bp->b_flags = B_PHYS | rw_flag; */
2166 1.242 ad bp->b_flags = rw_flag; /* XXX need B_PHYS here too??? */
2167 1.242 ad bp->b_oflags = 0;
2168 1.242 ad bp->b_cflags = 0;
2169 1.9 oster bp->b_bcount = numSect << logBytesPerSector;
2170 1.298.2.5 jdolecek bp->b_bufsize = bp->b_bcount;
2171 1.298.2.5 jdolecek bp->b_error = 0;
2172 1.298.2.5 jdolecek bp->b_dev = dev;
2173 1.298.2.5 jdolecek bp->b_data = bf;
2174 1.298.2.5 jdolecek bp->b_blkno = startSect << logBytesPerSector >> DEV_BSHIFT;
2175 1.298.2.5 jdolecek bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
2176 1.298.2.5 jdolecek if (bp->b_bcount == 0) {
2177 1.298.2.5 jdolecek panic("bp->b_bcount is zero in InitBP!!");
2178 1.1 oster }
2179 1.298.2.5 jdolecek bp->b_proc = b_proc;
2180 1.298.2.5 jdolecek bp->b_iodone = cbFunc;
2181 1.298.2.5 jdolecek bp->b_private = cbArg;
2182 1.1 oster }
2183 1.1 oster
2184 1.1 oster /*
2185 1.1 oster * Wait interruptibly for an exclusive lock.
2186 1.1 oster *
2187 1.1 oster * XXX
2188 1.1 oster * Several drivers do this; it should be abstracted and made MP-safe.
2189 1.1 oster * (Hmm... where have we seen this warning before :-> GO )
2190 1.1 oster */
2191 1.1 oster static int
2192 1.169 oster raidlock(struct raid_softc *rs)
2193 1.1 oster {
2194 1.9 oster int error;
2195 1.1 oster
2196 1.298.2.5 jdolecek error = 0;
2197 1.298.2.5 jdolecek mutex_enter(&rs->sc_mutex);
2198 1.1 oster while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2199 1.1 oster rs->sc_flags |= RAIDF_WANTED;
2200 1.298.2.5 jdolecek error = cv_wait_sig(&rs->sc_cv, &rs->sc_mutex);
2201 1.298.2.5 jdolecek if (error != 0)
2202 1.298.2.5 jdolecek goto done;
2203 1.1 oster }
2204 1.1 oster rs->sc_flags |= RAIDF_LOCKED;
2205 1.298.2.5 jdolecek done:
2206 1.298.2.5 jdolecek mutex_exit(&rs->sc_mutex);
2207 1.298.2.5 jdolecek return (error);
2208 1.1 oster }
2209 1.1 oster /*
2210 1.1 oster * Unlock and wake up any waiters.
2211 1.1 oster */
2212 1.1 oster static void
2213 1.169 oster raidunlock(struct raid_softc *rs)
2214 1.1 oster {
2215 1.1 oster
2216 1.298.2.5 jdolecek mutex_enter(&rs->sc_mutex);
2217 1.1 oster rs->sc_flags &= ~RAIDF_LOCKED;
2218 1.1 oster if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2219 1.1 oster rs->sc_flags &= ~RAIDF_WANTED;
2220 1.298.2.5 jdolecek cv_broadcast(&rs->sc_cv);
2221 1.1 oster }
2222 1.298.2.5 jdolecek mutex_exit(&rs->sc_mutex);
2223 1.11 oster }
2224 1.186 perry
2225 1.11 oster
2226 1.11 oster #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2227 1.11 oster #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
2228 1.269 jld #define RF_PARITY_MAP_SIZE RF_PARITYMAP_NBYTE
2229 1.11 oster
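/* On-disk layout helpers: the component label and parity map areas are each
 * at least one sector (or the compiled-in size above, whichever is larger),
 * and the parity map area follows the component label area. */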
2230 1.276 mrg static daddr_t
2231 1.276 mrg rf_component_info_offset(void)
2232 1.276 mrg {
2233 1.276 mrg
2234 1.276 mrg return RF_COMPONENT_INFO_OFFSET;
2235 1.276 mrg }
2236 1.276 mrg
2237 1.276 mrg static daddr_t
2238 1.276 mrg rf_component_info_size(unsigned secsize)
2239 1.276 mrg {
2240 1.276 mrg daddr_t info_size;
2241 1.276 mrg
2242 1.276 mrg KASSERT(secsize);
2243 1.276 mrg if (secsize > RF_COMPONENT_INFO_SIZE)
2244 1.276 mrg info_size = secsize;
2245 1.276 mrg else
2246 1.276 mrg info_size = RF_COMPONENT_INFO_SIZE;
2247 1.276 mrg
2248 1.276 mrg return info_size;
2249 1.276 mrg }
2250 1.276 mrg
2251 1.276 mrg static daddr_t
2252 1.276 mrg rf_parity_map_offset(RF_Raid_t *raidPtr)
2253 1.276 mrg {
2254 1.276 mrg daddr_t map_offset;
2255 1.276 mrg
2256 1.276 mrg KASSERT(raidPtr->bytesPerSector);
2257 1.276 mrg if (raidPtr->bytesPerSector > RF_COMPONENT_INFO_SIZE)
2258 1.276 mrg map_offset = raidPtr->bytesPerSector;
2259 1.276 mrg else
2260 1.276 mrg map_offset = RF_COMPONENT_INFO_SIZE;
2261 1.276 mrg map_offset += rf_component_info_offset();
2262 1.276 mrg
2263 1.276 mrg return map_offset;
2264 1.276 mrg }
2265 1.276 mrg
2266 1.276 mrg static daddr_t
2267 1.276 mrg rf_parity_map_size(RF_Raid_t *raidPtr)
2268 1.276 mrg {
2269 1.276 mrg daddr_t map_size;
2270 1.276 mrg
2271 1.276 mrg if (raidPtr->bytesPerSector > RF_PARITY_MAP_SIZE)
2272 1.276 mrg map_size = raidPtr->bytesPerSector;
2273 1.276 mrg else
2274 1.276 mrg map_size = RF_PARITY_MAP_SIZE;
2275 1.276 mrg
2276 1.276 mrg return map_size;
2277 1.276 mrg }
2278 1.276 mrg
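/* Mark a component's label as clean and write it back to disk;
 * raidmarkdirty below does the same with the dirty flag. */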
2279 1.186 perry int
2280 1.269 jld raidmarkclean(RF_Raid_t *raidPtr, RF_RowCol_t col)
2281 1.12 oster {
2282 1.269 jld RF_ComponentLabel_t *clabel;
2283 1.269 jld
2284 1.269 jld clabel = raidget_component_label(raidPtr, col);
2285 1.269 jld clabel->clean = RF_RAID_CLEAN;
2286 1.269 jld raidflush_component_label(raidPtr, col);
2287 1.12 oster return(0);
2288 1.12 oster }
2289 1.12 oster
2290 1.12 oster
2291 1.186 perry int
2292 1.269 jld raidmarkdirty(RF_Raid_t *raidPtr, RF_RowCol_t col)
2293 1.11 oster {
2294 1.269 jld RF_ComponentLabel_t *clabel;
2295 1.269 jld
2296 1.269 jld clabel = raidget_component_label(raidPtr, col);
2297 1.269 jld clabel->clean = RF_RAID_DIRTY;
2298 1.269 jld raidflush_component_label(raidPtr, col);
2299 1.11 oster return(0);
2300 1.11 oster }
2301 1.11 oster
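/* Read the on-disk component label for 'col' into the in-core copy. */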
2302 1.11 oster int
2303 1.269 jld raidfetch_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2304 1.269 jld {
2305 1.276 mrg KASSERT(raidPtr->bytesPerSector);
2306 1.276 mrg return raidread_component_label(raidPtr->bytesPerSector,
2307 1.276 mrg raidPtr->Disks[col].dev,
2308 1.269 jld raidPtr->raid_cinfo[col].ci_vp,
2309 1.269 jld &raidPtr->raid_cinfo[col].ci_label);
2310 1.269 jld }
2311 1.269 jld
2312 1.269 jld RF_ComponentLabel_t *
2313 1.269 jld raidget_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2314 1.269 jld {
2315 1.269 jld return &raidPtr->raid_cinfo[col].ci_label;
2316 1.269 jld }
2317 1.269 jld
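/* Write the in-core component label for 'col' back to disk, stamping it
 * with the current mod counter. */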
2318 1.269 jld int
2319 1.269 jld raidflush_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2320 1.269 jld {
2321 1.269 jld RF_ComponentLabel_t *label;
2322 1.269 jld
2323 1.269 jld label = &raidPtr->raid_cinfo[col].ci_label;
2324 1.269 jld label->mod_counter = raidPtr->mod_counter;
2325 1.269 jld #ifndef RF_NO_PARITY_MAP
2326 1.269 jld label->parity_map_modcount = label->mod_counter;
2327 1.269 jld #endif
2328 1.276 mrg return raidwrite_component_label(raidPtr->bytesPerSector,
2329 1.276 mrg raidPtr->Disks[col].dev,
2330 1.269 jld raidPtr->raid_cinfo[col].ci_vp, label);
2331 1.269 jld }
2332 1.269 jld
2333 1.269 jld
2334 1.269 jld static int
2335 1.276 mrg raidread_component_label(unsigned secsize, dev_t dev, struct vnode *b_vp,
2336 1.269 jld RF_ComponentLabel_t *clabel)
2337 1.269 jld {
2338 1.269 jld return raidread_component_area(dev, b_vp, clabel,
2339 1.269 jld sizeof(RF_ComponentLabel_t),
2340 1.276 mrg rf_component_info_offset(),
2341 1.276 mrg rf_component_info_size(secsize));
2342 1.269 jld }
2343 1.269 jld
2344 1.269 jld /* ARGSUSED */
2345 1.269 jld static int
2346 1.269 jld raidread_component_area(dev_t dev, struct vnode *b_vp, void *data,
2347 1.269 jld size_t msize, daddr_t offset, daddr_t dsize)
2348 1.11 oster {
2349 1.11 oster struct buf *bp;
2350 1.11 oster int error;
2351 1.186 perry
2352 1.11 oster /* XXX should probably ensure that we don't try to do this if
2353 1.186 perry someone has changed rf_protected_sectors. */
2354 1.11 oster
2355 1.98 oster if (b_vp == NULL) {
2356 1.98 oster /* For whatever reason, this component is not valid.
2357 1.98 oster Don't try to read a component label from it. */
2358 1.98 oster return(EINVAL);
2359 1.98 oster }
2360 1.98 oster
2361 1.11 oster /* get a block of the appropriate size... */
2362 1.269 jld bp = geteblk((int)dsize);
2363 1.11 oster bp->b_dev = dev;
2364 1.11 oster
2365 1.11 oster /* get our ducks in a row for the read */
2366 1.269 jld bp->b_blkno = offset / DEV_BSIZE;
2367 1.269 jld bp->b_bcount = dsize;
2368 1.100 chs bp->b_flags |= B_READ;
2369 1.269 jld bp->b_resid = dsize;
2370 1.11 oster
2371 1.298.2.5 jdolecek bdev_strategy(bp);
2372 1.186 perry error = biowait(bp);
2373 1.11 oster
2374 1.11 oster if (!error) {
2375 1.269 jld memcpy(data, bp->b_data, msize);
2376 1.204 simonb }
2377 1.11 oster
2378 1.233 ad brelse(bp, 0);
2379 1.11 oster return(error);
2380 1.11 oster }
2381 1.269 jld
2382 1.269 jld
2383 1.269 jld static int
2384 1.276 mrg raidwrite_component_label(unsigned secsize, dev_t dev, struct vnode *b_vp,
2385 1.276 mrg RF_ComponentLabel_t *clabel)
2386 1.269 jld {
2387 1.269 jld return raidwrite_component_area(dev, b_vp, clabel,
2388 1.269 jld sizeof(RF_ComponentLabel_t),
2389 1.276 mrg rf_component_info_offset(),
2390 1.276 mrg rf_component_info_size(secsize), 0);
2391 1.269 jld }
2392 1.269 jld
2393 1.11 oster /* ARGSUSED */
2394 1.269 jld static int
2395 1.269 jld raidwrite_component_area(dev_t dev, struct vnode *b_vp, void *data,
2396 1.269 jld size_t msize, daddr_t offset, daddr_t dsize, int asyncp)
2397 1.11 oster {
2398 1.11 oster struct buf *bp;
2399 1.11 oster int error;
2400 1.11 oster
2401 1.11 oster /* get a block of the appropriate size... */
2402 1.269 jld bp = geteblk((int)dsize);
2403 1.11 oster bp->b_dev = dev;
2404 1.11 oster
2405 1.11 oster /* get our ducks in a row for the write */
2406 1.269 jld bp->b_blkno = offset / DEV_BSIZE;
2407 1.269 jld bp->b_bcount = dsize;
2408 1.269 jld bp->b_flags |= B_WRITE | (asyncp ? B_ASYNC : 0);
2409 1.269 jld bp->b_resid = dsize;
2410 1.11 oster
2411 1.269 jld memset(bp->b_data, 0, dsize);
2412 1.269 jld memcpy(bp->b_data, data, msize);
2413 1.11 oster
2414 1.298.2.5 jdolecek bdev_strategy(bp);
2415 1.269 jld if (asyncp)
2416 1.269 jld return 0;
2417 1.186 perry error = biowait(bp);
2418 1.233 ad brelse(bp, 0);
2419 1.11 oster if (error) {
2420 1.48 oster #if 1
2421 1.11 oster printf("Failed to write RAID component info!\n");
2422 1.48 oster #endif
2423 1.11 oster }
2424 1.11 oster
2425 1.11 oster return(error);
2426 1.1 oster }
2427 1.12 oster
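/* Write the in-core parity map to every live component. */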
2428 1.186 perry void
2429 1.269 jld rf_paritymap_kern_write(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
2430 1.269 jld {
2431 1.269 jld int c;
2432 1.269 jld
2433 1.269 jld for (c = 0; c < raidPtr->numCol; c++) {
2434 1.269 jld /* Skip dead disks. */
2435 1.269 jld if (RF_DEAD_DISK(raidPtr->Disks[c].status))
2436 1.269 jld continue;
2437 1.269 jld /* XXXjld: what if an error occurs here? */
2438 1.269 jld raidwrite_component_area(raidPtr->Disks[c].dev,
2439 1.269 jld raidPtr->raid_cinfo[c].ci_vp, map,
2440 1.269 jld RF_PARITYMAP_NBYTE,
2441 1.276 mrg rf_parity_map_offset(raidPtr),
2442 1.276 mrg rf_parity_map_size(raidPtr), 0);
2443 1.269 jld }
2444 1.269 jld }
2445 1.269 jld
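/* Read the parity map from every live component and merge the copies into
 * a single map via rf_paritymap_merge(). */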
2446 1.269 jld void
2447 1.269 jld rf_paritymap_kern_read(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
2448 1.269 jld {
2449 1.269 jld struct rf_paritymap_ondisk tmp;
2450 1.272 oster int c,first;
2451 1.269 jld
2452 1.272 oster first=1;
2453 1.269 jld for (c = 0; c < raidPtr->numCol; c++) {
2454 1.269 jld /* Skip dead disks. */
2455 1.269 jld if (RF_DEAD_DISK(raidPtr->Disks[c].status))
2456 1.269 jld continue;
2457 1.269 jld raidread_component_area(raidPtr->Disks[c].dev,
2458 1.269 jld raidPtr->raid_cinfo[c].ci_vp, &tmp,
2459 1.269 jld RF_PARITYMAP_NBYTE,
2460 1.276 mrg rf_parity_map_offset(raidPtr),
2461 1.276 mrg rf_parity_map_size(raidPtr));
2462 1.272 oster if (first) {
2463 1.269 jld memcpy(map, &tmp, sizeof(*map));
2464 1.272 oster first = 0;
2465 1.269 jld } else {
2466 1.269 jld rf_paritymap_merge(map, &tmp);
2467 1.269 jld }
2468 1.269 jld }
2469 1.269 jld }
2470 1.269 jld
2471 1.269 jld void
2472 1.169 oster rf_markalldirty(RF_Raid_t *raidPtr)
2473 1.12 oster {
2474 1.269 jld RF_ComponentLabel_t *clabel;
2475 1.146 oster int sparecol;
2476 1.166 oster int c;
2477 1.166 oster int j;
2478 1.166 oster int scol = -1;
2479 1.12 oster
2480 1.12 oster raidPtr->mod_counter++;
2481 1.166 oster for (c = 0; c < raidPtr->numCol; c++) {
2482 1.166 oster /* we don't want to touch (at all) a disk that has
2483 1.166 oster failed */
2484 1.166 oster if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
2485 1.269 jld clabel = raidget_component_label(raidPtr, c);
2486 1.269 jld if (clabel->status == rf_ds_spared) {
2487 1.186 perry /* XXX do something special...
2488 1.186 perry but whatever you do, don't
2489 1.166 oster try to access it!! */
2490 1.166 oster } else {
2491 1.269 jld raidmarkdirty(raidPtr, c);
2492 1.12 oster }
2493 1.166 oster }
2494 1.186 perry }
2495 1.146 oster
2496 1.12 oster for( c = 0; c < raidPtr->numSpare ; c++) {
2497 1.12 oster sparecol = raidPtr->numCol + c;
2498 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2499 1.186 perry /*
2500 1.186 perry
2501 1.186 perry we claim this disk is "optimal" if it's
2502 1.186 perry rf_ds_used_spare, as that means it should be
2503 1.186 perry directly substitutable for the disk it replaced.
2504 1.12 oster We note that too...
2505 1.12 oster
2506 1.12 oster */
2507 1.12 oster
2508 1.166 oster for(j=0;j<raidPtr->numCol;j++) {
2509 1.166 oster if (raidPtr->Disks[j].spareCol == sparecol) {
2510 1.166 oster scol = j;
2511 1.166 oster break;
2512 1.12 oster }
2513 1.12 oster }
2514 1.186 perry
2515 1.269 jld clabel = raidget_component_label(raidPtr, sparecol);
2516 1.12 oster /* make sure status is noted */
2517 1.146 oster
2518 1.269 jld raid_init_component_label(raidPtr, clabel);
2519 1.146 oster
2520 1.269 jld clabel->row = 0;
2521 1.269 jld clabel->column = scol;
2522 1.146 oster /* Note: we *don't* change status from rf_ds_used_spare
2523 1.146 oster to rf_ds_optimal */
2524 1.146 oster /* clabel.status = rf_ds_optimal; */
2525 1.186 perry
2526 1.269 jld raidmarkdirty(raidPtr, sparecol);
2527 1.12 oster }
2528 1.12 oster }
2529 1.12 oster }
2530 1.12 oster
2531 1.13 oster
2532 1.13 oster void
2533 1.169 oster rf_update_component_labels(RF_Raid_t *raidPtr, int final)
2534 1.13 oster {
2535 1.269 jld RF_ComponentLabel_t *clabel;
2536 1.13 oster int sparecol;
2537 1.166 oster int c;
2538 1.166 oster int j;
2539 1.166 oster int scol;
2540 1.298.2.5 jdolecek struct raid_softc *rs = raidPtr->softc;
2541 1.13 oster
2542 1.13 oster scol = -1;
2543 1.13 oster
2544 1.186 perry /* XXX should do extra checks to make sure things really are clean,
2545 1.13 oster rather than blindly setting the clean bit... */
2546 1.13 oster
2547 1.13 oster raidPtr->mod_counter++;
2548 1.13 oster
2549 1.166 oster for (c = 0; c < raidPtr->numCol; c++) {
2550 1.166 oster if (raidPtr->Disks[c].status == rf_ds_optimal) {
2551 1.269 jld clabel = raidget_component_label(raidPtr, c);
2552 1.201 oster /* make sure status is noted */
2553 1.269 jld clabel->status = rf_ds_optimal;
2554 1.201 oster
2555 1.214 oster /* note what unit we are configured as */
2556 1.298.2.5 jdolecek if ((rs->sc_cflags & RAIDF_UNIT_CHANGED) == 0)
2557 1.298.2.5 jdolecek clabel->last_unit = raidPtr->raidid;
2558 1.214 oster
2559 1.269 jld raidflush_component_label(raidPtr, c);
2560 1.166 oster if (final == RF_FINAL_COMPONENT_UPDATE) {
2561 1.166 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2562 1.269 jld raidmarkclean(raidPtr, c);
2563 1.91 oster }
2564 1.166 oster }
2565 1.186 perry }
2566 1.166 oster /* else we don't touch it.. */
2567 1.186 perry }
2568 1.63 oster
2569 1.63 oster for( c = 0; c < raidPtr->numSpare ; c++) {
2570 1.63 oster sparecol = raidPtr->numCol + c;
2571 1.110 oster /* Need to ensure that the reconstruct actually completed! */
2572 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2573 1.186 perry /*
2574 1.186 perry
2575 1.186 perry we claim this disk is "optimal" if it's
2576 1.186 perry rf_ds_used_spare, as that means it should be
2577 1.186 perry directly substitutable for the disk it replaced.
2578 1.63 oster We note that too...
2579 1.63 oster
2580 1.63 oster */
2581 1.63 oster
2582 1.166 oster for(j=0;j<raidPtr->numCol;j++) {
2583 1.166 oster if (raidPtr->Disks[j].spareCol == sparecol) {
2584 1.166 oster scol = j;
2585 1.166 oster break;
2586 1.63 oster }
2587 1.63 oster }
2588 1.186 perry
2589 1.63 oster /* XXX shouldn't *really* need this... */
2590 1.269 jld clabel = raidget_component_label(raidPtr, sparecol);
2591 1.63 oster /* make sure status is noted */
2592 1.63 oster
2593 1.269 jld raid_init_component_label(raidPtr, clabel);
2594 1.269 jld
2595 1.269 jld clabel->column = scol;
2596 1.269 jld clabel->status = rf_ds_optimal;
2597 1.298.2.5 jdolecek if ((rs->sc_cflags & RAIDF_UNIT_CHANGED) == 0)
2598 1.298.2.5 jdolecek clabel->last_unit = raidPtr->raidid;
2599 1.63 oster
2600 1.269 jld raidflush_component_label(raidPtr, sparecol);
2601 1.91 oster if (final == RF_FINAL_COMPONENT_UPDATE) {
2602 1.13 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2603 1.269 jld raidmarkclean(raidPtr, sparecol);
2604 1.13 oster }
2605 1.13 oster }
2606 1.13 oster }
2607 1.13 oster }
2608 1.68 oster }
2609 1.68 oster
2610 1.68 oster void
2611 1.169 oster rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
2612 1.69 oster {
2613 1.69 oster
2614 1.69 oster if (vp != NULL) {
2615 1.69 oster if (auto_configured == 1) {
2616 1.96 oster vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2617 1.238 pooka VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2618 1.69 oster vput(vp);
2619 1.186 perry
2620 1.186 perry } else {
2621 1.244 ad (void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred);
2622 1.69 oster }
2623 1.186 perry }
2624 1.69 oster }
2625 1.69 oster
2626 1.69 oster
2627 1.69 oster void
2628 1.169 oster rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
2629 1.68 oster {
2630 1.186 perry int r,c;
2631 1.69 oster struct vnode *vp;
2632 1.69 oster int acd;
2633 1.68 oster
2634 1.68 oster
2635 1.68 oster /* We take this opportunity to close the vnodes like we should.. */
2636 1.68 oster
2637 1.166 oster for (c = 0; c < raidPtr->numCol; c++) {
2638 1.166 oster vp = raidPtr->raid_cinfo[c].ci_vp;
2639 1.166 oster acd = raidPtr->Disks[c].auto_configured;
2640 1.166 oster rf_close_component(raidPtr, vp, acd);
2641 1.166 oster raidPtr->raid_cinfo[c].ci_vp = NULL;
2642 1.166 oster raidPtr->Disks[c].auto_configured = 0;
2643 1.68 oster }
2644 1.166 oster
2645 1.68 oster for (r = 0; r < raidPtr->numSpare; r++) {
2646 1.166 oster vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
2647 1.166 oster acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
2648 1.69 oster rf_close_component(raidPtr, vp, acd);
2649 1.166 oster raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
2650 1.166 oster raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
2651 1.68 oster }
2652 1.37 oster }
2653 1.63 oster
2654 1.37 oster
2655 1.186 perry void
2656 1.169 oster rf_ReconThread(struct rf_recon_req *req)
2657 1.37 oster {
2658 1.37 oster int s;
2659 1.37 oster RF_Raid_t *raidPtr;
2660 1.37 oster
2661 1.37 oster s = splbio();
2662 1.37 oster raidPtr = (RF_Raid_t *) req->raidPtr;
2663 1.37 oster raidPtr->recon_in_progress = 1;
2664 1.37 oster
2665 1.166 oster rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
2666 1.37 oster ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2667 1.37 oster
2668 1.37 oster RF_Free(req, sizeof(*req));
2669 1.37 oster
2670 1.37 oster raidPtr->recon_in_progress = 0;
2671 1.37 oster splx(s);
2672 1.37 oster
2673 1.37 oster /* That's all... */
2674 1.204 simonb kthread_exit(0); /* does not return */
2675 1.37 oster }
2676 1.37 oster
2677 1.37 oster void
2678 1.169 oster rf_RewriteParityThread(RF_Raid_t *raidPtr)
2679 1.37 oster {
2680 1.37 oster int retcode;
2681 1.37 oster int s;
2682 1.37 oster
2683 1.184 oster raidPtr->parity_rewrite_stripes_done = 0;
2684 1.37 oster raidPtr->parity_rewrite_in_progress = 1;
2685 1.37 oster s = splbio();
2686 1.37 oster retcode = rf_RewriteParity(raidPtr);
2687 1.37 oster splx(s);
2688 1.37 oster if (retcode) {
2689 1.279 christos printf("raid%d: Error re-writing parity (%d)!\n",
2690 1.279 christos raidPtr->raidid, retcode);
2691 1.37 oster } else {
2692 1.37 oster /* set the clean bit! If we shutdown correctly,
2693 1.37 oster the clean bit on each component label will get
2694 1.37 oster set */
2695 1.37 oster raidPtr->parity_good = RF_RAID_CLEAN;
2696 1.37 oster }
2697 1.37 oster raidPtr->parity_rewrite_in_progress = 0;
2698 1.85 oster
2699 1.85 oster /* Anyone waiting for us to stop? If so, inform them... */
2700 1.85 oster if (raidPtr->waitShutdown) {
2701 1.85 oster wakeup(&raidPtr->parity_rewrite_in_progress);
2702 1.85 oster }
2703 1.37 oster
2704 1.37 oster /* That's all... */
2705 1.204 simonb kthread_exit(0); /* does not return */
2706 1.37 oster }
2707 1.37 oster
2708 1.37 oster
2709 1.37 oster void
2710 1.169 oster rf_CopybackThread(RF_Raid_t *raidPtr)
2711 1.37 oster {
2712 1.37 oster int s;
2713 1.37 oster
2714 1.37 oster raidPtr->copyback_in_progress = 1;
2715 1.37 oster s = splbio();
2716 1.37 oster rf_CopybackReconstructedData(raidPtr);
2717 1.37 oster splx(s);
2718 1.37 oster raidPtr->copyback_in_progress = 0;
2719 1.37 oster
2720 1.37 oster /* That's all... */
2721 1.204 simonb kthread_exit(0); /* does not return */
2722 1.37 oster }
2723 1.37 oster
2724 1.37 oster
2725 1.37 oster void
2726 1.169 oster rf_ReconstructInPlaceThread(struct rf_recon_req *req)
2727 1.37 oster {
2728 1.37 oster int s;
2729 1.37 oster RF_Raid_t *raidPtr;
2730 1.186 perry
2731 1.37 oster s = splbio();
2732 1.37 oster raidPtr = req->raidPtr;
2733 1.37 oster raidPtr->recon_in_progress = 1;
2734 1.166 oster rf_ReconstructInPlace(raidPtr, req->col);
2735 1.37 oster RF_Free(req, sizeof(*req));
2736 1.37 oster raidPtr->recon_in_progress = 0;
2737 1.37 oster splx(s);
2738 1.37 oster
2739 1.37 oster /* That's all... */
2740 1.204 simonb kthread_exit(0); /* does not return */
2741 1.48 oster }
2742 1.48 oster
2743 1.213 christos static RF_AutoConfig_t *
2744 1.213 christos rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
2745 1.276 mrg const char *cname, RF_SectorCount_t size, uint64_t numsecs,
2746 1.276 mrg unsigned secsize)
2747 1.213 christos {
2748 1.213 christos int good_one = 0;
2749 1.213 christos RF_ComponentLabel_t *clabel;
2750 1.213 christos RF_AutoConfig_t *ac;
2751 1.213 christos
2752 1.213 christos clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_NOWAIT);
2753 1.213 christos if (clabel == NULL) {
2754 1.213 christos oomem:
2755 1.213 christos while(ac_list) {
2756 1.213 christos ac = ac_list;
2757 1.213 christos if (ac->clabel)
2758 1.213 christos free(ac->clabel, M_RAIDFRAME);
2759 1.213 christos ac_list = ac_list->next;
2760 1.213 christos free(ac, M_RAIDFRAME);
2761 1.213 christos }
2762 1.213 christos printf("RAID auto config: out of memory!\n");
2763 1.213 christos return NULL; /* XXX probably should panic? */
2764 1.213 christos }
2765 1.213 christos
2766 1.276 mrg if (!raidread_component_label(secsize, dev, vp, clabel)) {
2767 1.276 mrg /* Got the label. Does it look reasonable? */
2768 1.284 mrg if (rf_reasonable_label(clabel, numsecs) &&
2769 1.282 enami (rf_component_label_partitionsize(clabel) <= size)) {
2770 1.224 oster #ifdef DEBUG
2771 1.276 mrg printf("Component on: %s: %llu\n",
2772 1.213 christos cname, (unsigned long long)size);
2773 1.276 mrg rf_print_component_label(clabel);
2774 1.213 christos #endif
2775 1.276 mrg /* if it's reasonable, add it, else ignore it. */
2776 1.276 mrg ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
2777 1.213 christos M_NOWAIT);
2778 1.276 mrg if (ac == NULL) {
2779 1.276 mrg free(clabel, M_RAIDFRAME);
2780 1.276 mrg goto oomem;
2781 1.276 mrg }
2782 1.276 mrg strlcpy(ac->devname, cname, sizeof(ac->devname));
2783 1.276 mrg ac->dev = dev;
2784 1.276 mrg ac->vp = vp;
2785 1.276 mrg ac->clabel = clabel;
2786 1.276 mrg ac->next = ac_list;
2787 1.276 mrg ac_list = ac;
2788 1.276 mrg good_one = 1;
2789 1.276 mrg }
2790 1.213 christos }
2791 1.213 christos if (!good_one) {
2792 1.213 christos /* cleanup */
2793 1.213 christos free(clabel, M_RAIDFRAME);
2794 1.213 christos vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2795 1.238 pooka VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2796 1.213 christos vput(vp);
2797 1.213 christos }
2798 1.213 christos return ac_list;
2799 1.213 christos }
2800 1.213 christos
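/*
 * Scan all disk devices in the system for RAIDframe components.
 * Wedges are examined first, then disklabel partitions marked FS_RAID,
 * and finally the raw partition of any disk on which no RAID partition
 * was found.  Returns the list of RF_AutoConfig_t entries built up by
 * rf_get_component().
 */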
2801 1.48 oster RF_AutoConfig_t *
2802 1.259 cegger rf_find_raid_components(void)
2803 1.48 oster {
2804 1.48 oster struct vnode *vp;
2805 1.48 oster struct disklabel label;
2806 1.261 dyoung device_t dv;
2807 1.268 dyoung deviter_t di;
2808 1.48 oster dev_t dev;
2809 1.296 buhrow int bmajor, bminor, wedge, rf_part_found;
2810 1.48 oster int error;
2811 1.48 oster int i;
2812 1.48 oster RF_AutoConfig_t *ac_list;
2813 1.276 mrg uint64_t numsecs;
2814 1.276 mrg unsigned secsize;
2815 1.298.2.5 jdolecek int dowedges;
2816 1.48 oster
2817 1.48 oster /* initialize the AutoConfig list */
2818 1.48 oster ac_list = NULL;
2819 1.48 oster
2820 1.298.2.5 jdolecek /*
2821 1.298.2.5 jdolecek 	 * We begin by trolling through *all* the devices on the system, *twice*:
2822 1.298.2.5 jdolecek 	 * first we scan for wedges, then for other devices.  This avoids using
2823 1.298.2.5 jdolecek 	 * a raw partition instead of a wedge that covers the whole disk.
2824 1.298.2.5 jdolecek */
2825 1.48 oster
2826 1.298.2.5 jdolecek for (dowedges=1; dowedges>=0; --dowedges) {
2827 1.298.2.5 jdolecek for (dv = deviter_first(&di, DEVITER_F_ROOT_FIRST); dv != NULL;
2828 1.298.2.5 jdolecek dv = deviter_next(&di)) {
2829 1.48 oster
2830 1.298.2.5 jdolecek /* we are only interested in disks... */
2831 1.298.2.5 jdolecek if (device_class(dv) != DV_DISK)
2832 1.298.2.5 jdolecek continue;
2833 1.129 oster
2834 1.298.2.5 jdolecek /* we don't care about floppies... */
2835 1.298.2.5 jdolecek if (device_is_a(dv, "fd")) {
2836 1.298.2.5 jdolecek continue;
2837 1.298.2.5 jdolecek }
2838 1.129 oster
2839 1.298.2.5 jdolecek /* we don't care about CD's... */
2840 1.298.2.5 jdolecek if (device_is_a(dv, "cd")) {
2841 1.298.2.5 jdolecek continue;
2842 1.298.2.5 jdolecek }
2843 1.248 oster
2844 1.298.2.5 jdolecek /* we don't care about md's... */
2845 1.298.2.5 jdolecek if (device_is_a(dv, "md")) {
2846 1.298.2.5 jdolecek continue;
2847 1.298.2.5 jdolecek }
2848 1.206 thorpej
2849 1.298.2.5 jdolecek /* hdfd is the Atari/Hades floppy driver */
2850 1.298.2.5 jdolecek if (device_is_a(dv, "hdfd")) {
2851 1.298.2.5 jdolecek continue;
2852 1.298.2.5 jdolecek }
2853 1.186 perry
2854 1.298.2.5 jdolecek /* fdisa is the Atari/Milan floppy driver */
2855 1.298.2.5 jdolecek if (device_is_a(dv, "fdisa")) {
2856 1.298.2.5 jdolecek continue;
2857 1.298.2.5 jdolecek }
2858 1.48 oster
2859 1.298.2.5 jdolecek 			/* are we in the wedges pass? */
2860 1.298.2.5 jdolecek wedge = device_is_a(dv, "dk");
2861 1.298.2.5 jdolecek if (wedge != dowedges) {
2862 1.298.2.5 jdolecek continue;
2863 1.298.2.5 jdolecek }
2864 1.296 buhrow
2865 1.298.2.5 jdolecek 			/* look up the block device major number for this device name */
2866 1.298.2.5 jdolecek bmajor = devsw_name2blk(device_xname(dv), NULL, 0);
2867 1.48 oster
2868 1.298.2.5 jdolecek 			rf_part_found = 0; /* No raid partition as yet */
2869 1.48 oster
2870 1.298.2.5 jdolecek /* get a vnode for the raw partition of this disk */
2871 1.298.2.5 jdolecek bminor = minor(device_unit(dv));
2872 1.298.2.5 jdolecek dev = wedge ? makedev(bmajor, bminor) :
2873 1.298.2.5 jdolecek MAKEDISKDEV(bmajor, bminor, RAW_PART);
2874 1.298.2.5 jdolecek if (bdevvp(dev, &vp))
2875 1.298.2.5 jdolecek panic("RAID can't alloc vnode");
2876 1.48 oster
2877 1.298.2.5 jdolecek error = VOP_OPEN(vp, FREAD | FSILENT, NOCRED);
2878 1.48 oster
2879 1.213 christos if (error) {
2880 1.298.2.5 jdolecek 				/* "Who cares."  Continue looking
2881 1.298.2.5 jdolecek 				   for something that exists. */
2882 1.241 oster vput(vp);
2883 1.213 christos continue;
2884 1.213 christos }
2885 1.213 christos
2886 1.298.2.5 jdolecek error = getdisksize(vp, &numsecs, &secsize);
2887 1.298.2.5 jdolecek if (error) {
2888 1.298.2.5 jdolecek /*
2889 1.298.2.5 jdolecek * Pseudo devices like vnd and cgd can be
2890 1.298.2.5 jdolecek * opened but may still need some configuration.
2891 1.298.2.5 jdolecek * Ignore these quietly.
2892 1.298.2.5 jdolecek */
2893 1.298.2.5 jdolecek if (error != ENXIO)
2894 1.298.2.5 jdolecek printf("RAIDframe: can't get disk size"
2895 1.298.2.5 jdolecek " for dev %s (%d)\n",
2896 1.298.2.5 jdolecek device_xname(dv), error);
2897 1.241 oster vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2898 1.241 oster VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2899 1.241 oster vput(vp);
2900 1.228 christos continue;
2901 1.241 oster }
2902 1.298.2.5 jdolecek if (wedge) {
2903 1.298.2.5 jdolecek struct dkwedge_info dkw;
2904 1.298.2.5 jdolecek error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
2905 1.298.2.5 jdolecek NOCRED);
2906 1.298.2.5 jdolecek if (error) {
2907 1.298.2.5 jdolecek printf("RAIDframe: can't get wedge info for "
2908 1.298.2.5 jdolecek "dev %s (%d)\n", device_xname(dv), error);
2909 1.298.2.5 jdolecek vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2910 1.298.2.5 jdolecek VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2911 1.298.2.5 jdolecek vput(vp);
2912 1.298.2.5 jdolecek continue;
2913 1.298.2.5 jdolecek }
2914 1.48 oster
2915 1.298.2.5 jdolecek if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0) {
2916 1.298.2.5 jdolecek vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2917 1.298.2.5 jdolecek VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2918 1.298.2.5 jdolecek vput(vp);
2919 1.298.2.5 jdolecek continue;
2920 1.298.2.5 jdolecek }
2921 1.298.2.5 jdolecek
2922 1.298.2.5 jdolecek ac_list = rf_get_component(ac_list, dev, vp,
2923 1.298.2.5 jdolecek device_xname(dv), dkw.dkw_size, numsecs, secsize);
2924 1.298.2.5 jdolecek 				rf_part_found = 1; /* There is a raid component on this disk */
2925 1.298.2.5 jdolecek continue;
2926 1.298.2.5 jdolecek }
2927 1.48 oster
2928 1.298.2.5 jdolecek /* Ok, the disk exists. Go get the disklabel. */
2929 1.298.2.5 jdolecek error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED);
2930 1.298.2.5 jdolecek if (error) {
2931 1.298.2.5 jdolecek /*
2932 1.298.2.5 jdolecek * XXX can't happen - open() would
2933 1.298.2.5 jdolecek * have errored out (or faked up one)
2934 1.298.2.5 jdolecek */
2935 1.298.2.5 jdolecek if (error != ENOTTY)
2936 1.298.2.5 jdolecek printf("RAIDframe: can't get label for dev "
2937 1.298.2.5 jdolecek "%s (%d)\n", device_xname(dv), error);
2938 1.298.2.5 jdolecek }
2939 1.181 thorpej
2940 1.298.2.5 jdolecek /* don't need this any more. We'll allocate it again
2941 1.298.2.5 jdolecek a little later if we really do... */
2942 1.298.2.5 jdolecek vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2943 1.298.2.5 jdolecek VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2944 1.298.2.5 jdolecek vput(vp);
2945 1.213 christos
2946 1.298.2.5 jdolecek if (error)
2947 1.48 oster continue;
2948 1.48 oster
2949 1.298.2.5 jdolecek 			rf_part_found = 0; /* No raid partitions yet */
2950 1.298.2.5 jdolecek for (i = 0; i < label.d_npartitions; i++) {
2951 1.298.2.5 jdolecek char cname[sizeof(ac_list->devname)];
2952 1.298.2.5 jdolecek
2953 1.298.2.5 jdolecek /* We only support partitions marked as RAID */
2954 1.298.2.5 jdolecek if (label.d_partitions[i].p_fstype != FS_RAID)
2955 1.298.2.5 jdolecek continue;
2956 1.298.2.5 jdolecek
2957 1.298.2.5 jdolecek dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
2958 1.298.2.5 jdolecek if (bdevvp(dev, &vp))
2959 1.298.2.5 jdolecek panic("RAID can't alloc vnode");
2960 1.298.2.5 jdolecek
2961 1.298.2.5 jdolecek error = VOP_OPEN(vp, FREAD, NOCRED);
2962 1.298.2.5 jdolecek if (error) {
2963 1.298.2.5 jdolecek /* Whatever... */
2964 1.298.2.5 jdolecek vput(vp);
2965 1.298.2.5 jdolecek continue;
2966 1.298.2.5 jdolecek }
2967 1.298.2.5 jdolecek snprintf(cname, sizeof(cname), "%s%c",
2968 1.298.2.5 jdolecek device_xname(dv), 'a' + i);
2969 1.298.2.5 jdolecek ac_list = rf_get_component(ac_list, dev, vp, cname,
2970 1.298.2.5 jdolecek label.d_partitions[i].p_size, numsecs, secsize);
2971 1.296 buhrow 				rf_part_found = 1; /* At least one raid partition on this disk */
2972 1.298.2.5 jdolecek }
2973 1.296 buhrow
2974 1.298.2.5 jdolecek /*
2975 1.298.2.5 jdolecek 		 * If there is no raid component on this disk, either in a
2976 1.298.2.5 jdolecek 		 * disklabel or inside a wedge, check the raw partition as well,
2977 1.298.2.5 jdolecek 		 * as it is possible to configure raid components on raw disk
2978 1.298.2.5 jdolecek 		 * devices.
2979 1.298.2.5 jdolecek */
2980 1.296 buhrow
2981 1.298.2.5 jdolecek if (!rf_part_found) {
2982 1.298.2.5 jdolecek char cname[sizeof(ac_list->devname)];
2983 1.296 buhrow
2984 1.298.2.5 jdolecek dev = MAKEDISKDEV(bmajor, device_unit(dv), RAW_PART);
2985 1.298.2.5 jdolecek if (bdevvp(dev, &vp))
2986 1.298.2.5 jdolecek panic("RAID can't alloc vnode");
2987 1.298.2.5 jdolecek
2988 1.298.2.5 jdolecek error = VOP_OPEN(vp, FREAD, NOCRED);
2989 1.298.2.5 jdolecek if (error) {
2990 1.298.2.5 jdolecek /* Whatever... */
2991 1.298.2.5 jdolecek vput(vp);
2992 1.298.2.5 jdolecek continue;
2993 1.298.2.5 jdolecek }
2994 1.298.2.5 jdolecek snprintf(cname, sizeof(cname), "%s%c",
2995 1.298.2.5 jdolecek device_xname(dv), 'a' + RAW_PART);
2996 1.298.2.5 jdolecek ac_list = rf_get_component(ac_list, dev, vp, cname,
2997 1.298.2.5 jdolecek label.d_partitions[RAW_PART].p_size, numsecs, secsize);
2998 1.296 buhrow }
2999 1.48 oster }
3000 1.298.2.5 jdolecek deviter_release(&di);
3001 1.48 oster }
3002 1.213 christos return ac_list;
3003 1.48 oster }
3004 1.186 perry
3005 1.213 christos
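/*
 * Sanity-check a component label: version, clean/dirty marker, row and
 * column ranges, block size and block count must all be plausible.
 * If numsecs is non-zero, also repair old labels that carry garbage in
 * the 64-bit size extension fields.  Returns 1 if the label is usable.
 */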
3006 1.292 oster int
3007 1.284 mrg rf_reasonable_label(RF_ComponentLabel_t *clabel, uint64_t numsecs)
3008 1.48 oster {
3009 1.186 perry
3010 1.48 oster if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
3011 1.48 oster (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
3012 1.48 oster ((clabel->clean == RF_RAID_CLEAN) ||
3013 1.48 oster (clabel->clean == RF_RAID_DIRTY)) &&
3014 1.186 perry clabel->row >=0 &&
3015 1.186 perry clabel->column >= 0 &&
3016 1.48 oster clabel->num_rows > 0 &&
3017 1.48 oster clabel->num_columns > 0 &&
3018 1.186 perry clabel->row < clabel->num_rows &&
3019 1.48 oster clabel->column < clabel->num_columns &&
3020 1.48 oster clabel->blockSize > 0 &&
3021 1.282 enami /*
3022 1.282 enami * numBlocksHi may contain garbage, but it is ok since
3023 1.282 enami * the type is unsigned. If it is really garbage,
3024 1.282 enami * rf_fix_old_label_size() will fix it.
3025 1.282 enami */
3026 1.282 enami rf_component_label_numblocks(clabel) > 0) {
3027 1.284 mrg /*
3028 1.284 mrg * label looks reasonable enough...
3029 1.284 mrg * let's make sure it has no old garbage.
3030 1.284 mrg */
3031 1.292 oster if (numsecs)
3032 1.292 oster rf_fix_old_label_size(clabel, numsecs);
3033 1.48 oster return(1);
3034 1.48 oster }
3035 1.48 oster return(0);
3036 1.48 oster }
3037 1.48 oster
3038 1.48 oster
3039 1.278 mrg /*
3040 1.278 mrg * For reasons yet unknown, some old component labels have garbage in
3041 1.278 mrg * the newer numBlocksHi region, and this causes lossage. Since those
3042 1.278 mrg * disks will also have numsecs set to less than 32 bits of sectors,
3043 1.298.2.2 tls * we can determine when this corruption has occurred, and fix it.
3044 1.284 mrg *
3045 1.284 mrg * The exact same problem, with the same unknown reason, happens to
3046 1.284 mrg * the partitionSizeHi member as well.
3047 1.278 mrg */
3048 1.278 mrg static void
3049 1.278 mrg rf_fix_old_label_size(RF_ComponentLabel_t *clabel, uint64_t numsecs)
3050 1.278 mrg {
3051 1.278 mrg
3052 1.284 mrg if (numsecs < ((uint64_t)1 << 32)) {
3053 1.284 mrg if (clabel->numBlocksHi) {
3054 1.284 mrg printf("WARNING: total sectors < 32 bits, yet "
3055 1.284 mrg "numBlocksHi set\n"
3056 1.284 mrg "WARNING: resetting numBlocksHi to zero.\n");
3057 1.284 mrg clabel->numBlocksHi = 0;
3058 1.284 mrg }
3059 1.284 mrg
3060 1.284 mrg if (clabel->partitionSizeHi) {
3061 1.284 mrg printf("WARNING: total sectors < 32 bits, yet "
3062 1.284 mrg "partitionSizeHi set\n"
3063 1.284 mrg "WARNING: resetting partitionSizeHi to zero.\n");
3064 1.284 mrg clabel->partitionSizeHi = 0;
3065 1.284 mrg }
3066 1.278 mrg }
3067 1.278 mrg }
3068 1.278 mrg
3069 1.278 mrg
3070 1.224 oster #ifdef DEBUG
3071 1.48 oster void
3072 1.169 oster rf_print_component_label(RF_ComponentLabel_t *clabel)
3073 1.48 oster {
3074 1.282 enami uint64_t numBlocks;
3075 1.298.2.4 tls static const char *rp[] = {
3076 1.298.2.4 tls "No", "Force", "Soft", "*invalid*"
3077 1.298.2.4 tls };
3078 1.298.2.4 tls
3080 1.282 enami numBlocks = rf_component_label_numblocks(clabel);
3081 1.275 mrg
3082 1.48 oster printf(" Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
3083 1.186 perry clabel->row, clabel->column,
3084 1.48 oster clabel->num_rows, clabel->num_columns);
3085 1.48 oster printf(" Version: %d Serial Number: %d Mod Counter: %d\n",
3086 1.48 oster clabel->version, clabel->serial_number,
3087 1.48 oster clabel->mod_counter);
3088 1.48 oster printf(" Clean: %s Status: %d\n",
3089 1.271 dyoung clabel->clean ? "Yes" : "No", clabel->status);
3090 1.48 oster printf(" sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
3091 1.48 oster clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
3092 1.275 mrg printf(" RAID Level: %c blocksize: %d numBlocks: %"PRIu64"\n",
3093 1.275 mrg (char) clabel->parityConfig, clabel->blockSize, numBlocks);
3094 1.271 dyoung printf(" Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No");
3095 1.298.2.4 tls printf(" Root partition: %s\n", rp[clabel->root_partition & 3]);
3096 1.271 dyoung printf(" Last configured as: raid%d\n", clabel->last_unit);
3097 1.51 oster #if 0
3098 1.51 oster printf(" Config order: %d\n", clabel->config_order);
3099 1.51 oster #endif
3100 1.186 perry
3101 1.48 oster }
3102 1.133 oster #endif
3103 1.48 oster
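/*
 * Partition the list of auto-configured components into configuration
 * sets, grouping components whose labels match (see rf_does_it_fit()).
 * Returns the list of sets.
 */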
3104 1.48 oster RF_ConfigSet_t *
3105 1.169 oster rf_create_auto_sets(RF_AutoConfig_t *ac_list)
3106 1.48 oster {
3107 1.48 oster RF_AutoConfig_t *ac;
3108 1.48 oster RF_ConfigSet_t *config_sets;
3109 1.48 oster RF_ConfigSet_t *cset;
3110 1.48 oster RF_AutoConfig_t *ac_next;
3111 1.48 oster
3112 1.48 oster
3113 1.48 oster config_sets = NULL;
3114 1.48 oster
3115 1.48 oster /* Go through the AutoConfig list, and figure out which components
3116 1.48 oster belong to what sets. */
3117 1.48 oster ac = ac_list;
3118 1.48 oster while(ac!=NULL) {
3119 1.48 oster /* we're going to putz with ac->next, so save it here
3120 1.48 oster for use at the end of the loop */
3121 1.48 oster ac_next = ac->next;
3122 1.48 oster
3123 1.48 oster if (config_sets == NULL) {
3124 1.48 oster /* will need at least this one... */
3125 1.48 oster config_sets = (RF_ConfigSet_t *)
3126 1.186 perry malloc(sizeof(RF_ConfigSet_t),
3127 1.48 oster M_RAIDFRAME, M_NOWAIT);
3128 1.48 oster if (config_sets == NULL) {
3129 1.141 provos panic("rf_create_auto_sets: No memory!");
3130 1.48 oster }
3131 1.48 oster /* this one is easy :) */
3132 1.48 oster config_sets->ac = ac;
3133 1.48 oster config_sets->next = NULL;
3134 1.51 oster config_sets->rootable = 0;
3135 1.48 oster ac->next = NULL;
3136 1.48 oster } else {
3137 1.48 oster /* which set does this component fit into? */
3138 1.48 oster cset = config_sets;
3139 1.48 oster while(cset!=NULL) {
3140 1.49 oster if (rf_does_it_fit(cset, ac)) {
3141 1.86 oster /* looks like it matches... */
3142 1.86 oster ac->next = cset->ac;
3143 1.86 oster cset->ac = ac;
3144 1.48 oster break;
3145 1.48 oster }
3146 1.48 oster cset = cset->next;
3147 1.48 oster }
3148 1.48 oster if (cset==NULL) {
3149 1.48 oster /* didn't find a match above... new set..*/
3150 1.48 oster cset = (RF_ConfigSet_t *)
3151 1.186 perry malloc(sizeof(RF_ConfigSet_t),
3152 1.48 oster M_RAIDFRAME, M_NOWAIT);
3153 1.48 oster if (cset == NULL) {
3154 1.141 provos panic("rf_create_auto_sets: No memory!");
3155 1.48 oster }
3156 1.48 oster cset->ac = ac;
3157 1.48 oster ac->next = NULL;
3158 1.48 oster cset->next = config_sets;
3159 1.51 oster cset->rootable = 0;
3160 1.48 oster config_sets = cset;
3161 1.48 oster }
3162 1.48 oster }
3163 1.48 oster ac = ac_next;
3164 1.48 oster }
3165 1.48 oster
3166 1.48 oster
3167 1.48 oster return(config_sets);
3168 1.48 oster }
3169 1.48 oster
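/*
 * Compare a component's label against the first member of a config
 * set; everything except partition size and mod_counter must match
 * for the component to be considered part of that set.
 */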
3170 1.48 oster static int
3171 1.169 oster rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
3172 1.48 oster {
3173 1.48 oster RF_ComponentLabel_t *clabel1, *clabel2;
3174 1.48 oster
3175 1.48 oster /* If this one matches the *first* one in the set, that's good
3176 1.48 oster enough, since the other members of the set would have been
3177 1.48 oster through here too... */
3178 1.60 oster /* note that we are not checking partitionSize here..
3179 1.60 oster
3180 1.60 oster Note that we are also not checking the mod_counters here.
3181 1.298.2.2 tls If everything else matches except the mod_counter, that's
3182 1.60 oster good enough for this test. We will deal with the mod_counters
3183 1.186 perry a little later in the autoconfiguration process.
3184 1.60 oster
3185 1.60 oster (clabel1->mod_counter == clabel2->mod_counter) &&
3186 1.81 oster
3187 1.81 oster The reason we don't check for this is that failed disks
3188 1.81 oster will have lower modification counts. If those disks are
3189 1.81 oster not added to the set they used to belong to, then they will
3190 1.81 oster form their own set, which may result in 2 different sets,
3191 1.81 oster for example, competing to be configured at raid0, and
3192 1.81 oster perhaps competing to be the root filesystem set. If the
3193 1.81 oster wrong ones get configured, or both attempt to become /,
3194 1.81 oster 	   weird behaviour and/or serious lossage will occur. Thus we
3195 1.81 oster need to bring them into the fold here, and kick them out at
3196 1.81 oster a later point.
3197 1.60 oster
3198 1.60 oster */
3199 1.48 oster
3200 1.48 oster clabel1 = cset->ac->clabel;
3201 1.48 oster clabel2 = ac->clabel;
3202 1.48 oster if ((clabel1->version == clabel2->version) &&
3203 1.48 oster (clabel1->serial_number == clabel2->serial_number) &&
3204 1.48 oster (clabel1->num_rows == clabel2->num_rows) &&
3205 1.48 oster (clabel1->num_columns == clabel2->num_columns) &&
3206 1.48 oster (clabel1->sectPerSU == clabel2->sectPerSU) &&
3207 1.48 oster (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
3208 1.48 oster (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
3209 1.48 oster (clabel1->parityConfig == clabel2->parityConfig) &&
3210 1.48 oster (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
3211 1.48 oster (clabel1->blockSize == clabel2->blockSize) &&
3212 1.282 enami rf_component_label_numblocks(clabel1) ==
3213 1.282 enami rf_component_label_numblocks(clabel2) &&
3214 1.48 oster (clabel1->autoconfigure == clabel2->autoconfigure) &&
3215 1.48 oster (clabel1->root_partition == clabel2->root_partition) &&
3216 1.48 oster (clabel1->last_unit == clabel2->last_unit) &&
3217 1.48 oster (clabel1->config_order == clabel2->config_order)) {
3218 1.48 oster 		/* if it gets here, it almost *has* to be a match */
3219 1.48 oster } else {
3220 1.186 perry /* it's not consistent with somebody in the set..
3221 1.48 oster punt */
3222 1.48 oster return(0);
3223 1.48 oster }
3224 1.48 oster /* all was fine.. it must fit... */
3225 1.48 oster return(1);
3226 1.48 oster }
3227 1.48 oster
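/*
 * Decide whether a configuration set has enough live components (those
 * carrying the highest mod_counter) to be configured.  RAID 1 is
 * special-cased: only losing both members of an even/odd pair is fatal.
 */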
3228 1.48 oster int
3229 1.169 oster rf_have_enough_components(RF_ConfigSet_t *cset)
3230 1.48 oster {
3231 1.51 oster RF_AutoConfig_t *ac;
3232 1.51 oster RF_AutoConfig_t *auto_config;
3233 1.51 oster RF_ComponentLabel_t *clabel;
3234 1.166 oster int c;
3235 1.51 oster int num_cols;
3236 1.51 oster int num_missing;
3237 1.86 oster int mod_counter;
3238 1.87 oster int mod_counter_found;
3239 1.88 oster int even_pair_failed;
3240 1.88 oster char parity_type;
3241 1.186 perry
3242 1.51 oster
3243 1.48 oster /* check to see that we have enough 'live' components
3244 1.48 oster of this set. If so, we can configure it if necessary */
3245 1.48 oster
3246 1.51 oster num_cols = cset->ac->clabel->num_columns;
3247 1.88 oster parity_type = cset->ac->clabel->parityConfig;
3248 1.51 oster
3249 1.51 oster /* XXX Check for duplicate components!?!?!? */
3250 1.51 oster
3251 1.86 oster /* Determine what the mod_counter is supposed to be for this set. */
3252 1.86 oster
3253 1.87 oster mod_counter_found = 0;
3254 1.101 oster mod_counter = 0;
3255 1.86 oster ac = cset->ac;
3256 1.86 oster while(ac!=NULL) {
3257 1.87 oster if (mod_counter_found==0) {
3258 1.86 oster mod_counter = ac->clabel->mod_counter;
3259 1.87 oster mod_counter_found = 1;
3260 1.87 oster } else {
3261 1.87 oster if (ac->clabel->mod_counter > mod_counter) {
3262 1.87 oster mod_counter = ac->clabel->mod_counter;
3263 1.87 oster }
3264 1.86 oster }
3265 1.86 oster ac = ac->next;
3266 1.86 oster }
3267 1.86 oster
3268 1.51 oster num_missing = 0;
3269 1.51 oster auto_config = cset->ac;
3270 1.51 oster
3271 1.166 oster even_pair_failed = 0;
3272 1.166 oster for(c=0; c<num_cols; c++) {
3273 1.166 oster ac = auto_config;
3274 1.166 oster while(ac!=NULL) {
3275 1.186 perry if ((ac->clabel->column == c) &&
3276 1.166 oster (ac->clabel->mod_counter == mod_counter)) {
3277 1.166 oster /* it's this one... */
3278 1.224 oster #ifdef DEBUG
3279 1.166 oster printf("Found: %s at %d\n",
3280 1.166 oster ac->devname,c);
3281 1.51 oster #endif
3282 1.166 oster break;
3283 1.51 oster }
3284 1.166 oster ac=ac->next;
3285 1.166 oster }
3286 1.166 oster if (ac==NULL) {
3287 1.51 oster /* Didn't find one here! */
3288 1.88 oster /* special case for RAID 1, especially
3289 1.88 oster where there are more than 2
3290 1.88 oster components (where RAIDframe treats
3291 1.88 oster things a little differently :( ) */
3292 1.166 oster if (parity_type == '1') {
3293 1.166 oster if (c%2 == 0) { /* even component */
3294 1.166 oster even_pair_failed = 1;
3295 1.166 oster } else { /* odd component. If
3296 1.166 oster we're failed, and
3297 1.166 oster so is the even
3298 1.166 oster component, it's
3299 1.166 oster "Good Night, Charlie" */
3300 1.166 oster if (even_pair_failed == 1) {
3301 1.166 oster return(0);
3302 1.88 oster }
3303 1.88 oster }
3304 1.166 oster } else {
3305 1.166 oster /* normal accounting */
3306 1.166 oster num_missing++;
3307 1.88 oster }
3308 1.166 oster }
3309 1.166 oster if ((parity_type == '1') && (c%2 == 1)) {
3310 1.88 oster /* Just did an even component, and we didn't
3311 1.186 perry bail.. reset the even_pair_failed flag,
3312 1.88 oster and go on to the next component.... */
3313 1.166 oster even_pair_failed = 0;
3314 1.51 oster }
3315 1.51 oster }
3316 1.51 oster
3317 1.51 oster clabel = cset->ac->clabel;
3318 1.51 oster
3319 1.51 oster if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
3320 1.51 oster ((clabel->parityConfig == '4') && (num_missing > 1)) ||
3321 1.51 oster ((clabel->parityConfig == '5') && (num_missing > 1))) {
3322 1.51 oster /* XXX this needs to be made *much* more general */
3323 1.51 oster /* Too many failures */
3324 1.51 oster return(0);
3325 1.51 oster }
3326 1.51 oster /* otherwise, all is well, and we've got enough to take a kick
3327 1.51 oster at autoconfiguring this set */
3328 1.51 oster return(1);
3329 1.48 oster }
3330 1.48 oster
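/*
 * Build an RF_Config_t from the component labels in an auto-config
 * list, filling in the geometry, queueing parameters and component
 * device names.
 */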
3331 1.48 oster void
3332 1.169 oster rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
3333 1.222 christos RF_Raid_t *raidPtr)
3334 1.48 oster {
3335 1.48 oster RF_ComponentLabel_t *clabel;
3336 1.77 oster int i;
3337 1.48 oster
3338 1.48 oster clabel = ac->clabel;
3339 1.48 oster
3340 1.48 oster /* 1. Fill in the common stuff */
3341 1.166 oster config->numRow = clabel->num_rows = 1;
3342 1.48 oster config->numCol = clabel->num_columns;
3343 1.48 oster config->numSpare = 0; /* XXX should this be set here? */
3344 1.48 oster config->sectPerSU = clabel->sectPerSU;
3345 1.48 oster config->SUsPerPU = clabel->SUsPerPU;
3346 1.48 oster config->SUsPerRU = clabel->SUsPerRU;
3347 1.48 oster config->parityConfig = clabel->parityConfig;
3348 1.48 oster /* XXX... */
3349 1.48 oster strcpy(config->diskQueueType,"fifo");
3350 1.48 oster config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3351 1.48 oster config->layoutSpecificSize = 0; /* XXX ?? */
3352 1.48 oster
3353 1.48 oster while(ac!=NULL) {
3354 1.48 oster /* row/col values will be in range due to the checks
3355 1.48 oster 		   in rf_reasonable_label() */
3356 1.166 oster strcpy(config->devnames[0][ac->clabel->column],
3357 1.48 oster ac->devname);
3358 1.48 oster ac = ac->next;
3359 1.48 oster }
3360 1.48 oster
3361 1.77 oster for(i=0;i<RF_MAXDBGV;i++) {
3362 1.163 fvdl config->debugVars[i][0] = 0;
3363 1.77 oster }
3364 1.48 oster }
3365 1.48 oster
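/*
 * Set the autoconfigure flag in the component labels of all optimal
 * components and used spares, flushing each label back to disk.
 */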
3366 1.48 oster int
3367 1.169 oster rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
3368 1.48 oster {
3369 1.269 jld RF_ComponentLabel_t *clabel;
3370 1.166 oster int column;
3371 1.148 oster int sparecol;
3372 1.48 oster
3373 1.54 oster raidPtr->autoconfigure = new_value;
3374 1.166 oster
3375 1.166 oster for(column=0; column<raidPtr->numCol; column++) {
3376 1.166 oster if (raidPtr->Disks[column].status == rf_ds_optimal) {
3377 1.269 jld clabel = raidget_component_label(raidPtr, column);
3378 1.269 jld clabel->autoconfigure = new_value;
3379 1.269 jld raidflush_component_label(raidPtr, column);
3380 1.48 oster }
3381 1.48 oster }
3382 1.148 oster for(column = 0; column < raidPtr->numSpare ; column++) {
3383 1.148 oster sparecol = raidPtr->numCol + column;
3384 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3385 1.269 jld clabel = raidget_component_label(raidPtr, sparecol);
3386 1.269 jld clabel->autoconfigure = new_value;
3387 1.269 jld raidflush_component_label(raidPtr, sparecol);
3388 1.148 oster }
3389 1.148 oster }
3390 1.48 oster return(new_value);
3391 1.48 oster }
3392 1.48 oster
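/*
 * Set the root_partition value in the component labels of all optimal
 * components and used spares, flushing each label back to disk.
 */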
3393 1.48 oster int
3394 1.169 oster rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
3395 1.48 oster {
3396 1.269 jld RF_ComponentLabel_t *clabel;
3397 1.166 oster int column;
3398 1.148 oster int sparecol;
3399 1.48 oster
3400 1.54 oster raidPtr->root_partition = new_value;
3401 1.166 oster for(column=0; column<raidPtr->numCol; column++) {
3402 1.166 oster if (raidPtr->Disks[column].status == rf_ds_optimal) {
3403 1.269 jld clabel = raidget_component_label(raidPtr, column);
3404 1.269 jld clabel->root_partition = new_value;
3405 1.269 jld raidflush_component_label(raidPtr, column);
3406 1.148 oster }
3407 1.148 oster }
3408 1.148 oster for(column = 0; column < raidPtr->numSpare ; column++) {
3409 1.148 oster sparecol = raidPtr->numCol + column;
3410 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3411 1.269 jld clabel = raidget_component_label(raidPtr, sparecol);
3412 1.269 jld clabel->root_partition = new_value;
3413 1.269 jld raidflush_component_label(raidPtr, sparecol);
3414 1.48 oster }
3415 1.48 oster }
3416 1.48 oster return(new_value);
3417 1.48 oster }
3418 1.48 oster
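/*
 * Close and release the vnode of every component in a config set.
 */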
3419 1.48 oster void
3420 1.169 oster rf_release_all_vps(RF_ConfigSet_t *cset)
3421 1.48 oster {
3422 1.48 oster RF_AutoConfig_t *ac;
3423 1.186 perry
3424 1.48 oster ac = cset->ac;
3425 1.48 oster while(ac!=NULL) {
3426 1.48 oster /* Close the vp, and give it back */
3427 1.48 oster if (ac->vp) {
3428 1.96 oster vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3429 1.298.2.5 jdolecek VOP_CLOSE(ac->vp, FREAD | FWRITE, NOCRED);
3430 1.48 oster vput(ac->vp);
3431 1.86 oster ac->vp = NULL;
3432 1.48 oster }
3433 1.48 oster ac = ac->next;
3434 1.48 oster }
3435 1.48 oster }
3436 1.48 oster
3437 1.48 oster
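/*
 * Free the component labels and auto-config entries of a config set,
 * then the set itself.
 */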
3438 1.48 oster void
3439 1.169 oster rf_cleanup_config_set(RF_ConfigSet_t *cset)
3440 1.48 oster {
3441 1.48 oster RF_AutoConfig_t *ac;
3442 1.48 oster RF_AutoConfig_t *next_ac;
3443 1.186 perry
3444 1.48 oster ac = cset->ac;
3445 1.48 oster while(ac!=NULL) {
3446 1.48 oster next_ac = ac->next;
3447 1.48 oster /* nuke the label */
3448 1.48 oster free(ac->clabel, M_RAIDFRAME);
3449 1.48 oster /* cleanup the config structure */
3450 1.48 oster free(ac, M_RAIDFRAME);
3451 1.48 oster /* "next.." */
3452 1.48 oster ac = next_ac;
3453 1.48 oster }
3454 1.48 oster /* and, finally, nuke the config set */
3455 1.48 oster free(cset, M_RAIDFRAME);
3456 1.48 oster }
3457 1.48 oster
3458 1.48 oster
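/*
 * Initialize a component label from the current in-core state of the
 * RAID set (geometry, serial number, mod counter, configuration flags),
 * including the parity map portion when parity maps are enabled.
 */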
3459 1.48 oster void
3460 1.169 oster raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
3461 1.48 oster {
3462 1.48 oster /* current version number */
3463 1.186 perry clabel->version = RF_COMPONENT_LABEL_VERSION;
3464 1.57 oster clabel->serial_number = raidPtr->serial_number;
3465 1.48 oster clabel->mod_counter = raidPtr->mod_counter;
3466 1.269 jld
3467 1.166 oster clabel->num_rows = 1;
3468 1.48 oster clabel->num_columns = raidPtr->numCol;
3469 1.48 oster clabel->clean = RF_RAID_DIRTY; /* not clean */
3470 1.48 oster clabel->status = rf_ds_optimal; /* "It's good!" */
3471 1.186 perry
3472 1.48 oster clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
3473 1.48 oster clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
3474 1.48 oster clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
3475 1.54 oster
3476 1.54 oster clabel->blockSize = raidPtr->bytesPerSector;
3477 1.282 enami rf_component_label_set_numblocks(clabel, raidPtr->sectorsPerDisk);
3478 1.54 oster
3479 1.48 oster /* XXX not portable */
3480 1.48 oster clabel->parityConfig = raidPtr->Layout.map->parityConfig;
3481 1.54 oster clabel->maxOutstanding = raidPtr->maxOutstanding;
3482 1.54 oster clabel->autoconfigure = raidPtr->autoconfigure;
3483 1.54 oster clabel->root_partition = raidPtr->root_partition;
3484 1.48 oster clabel->last_unit = raidPtr->raidid;
3485 1.54 oster clabel->config_order = raidPtr->config_order;
3486 1.269 jld
3487 1.269 jld #ifndef RF_NO_PARITY_MAP
3488 1.269 jld rf_paritymap_init_label(raidPtr->parity_map, clabel);
3489 1.269 jld #endif
3490 1.51 oster }
3491 1.51 oster
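/*
 * Configure a RAID set from an auto-config set: find (or create) a
 * softc at the unit recorded in the label, or the next free unit,
 * build the configuration and bring the set up.  Returns the softc
 * on success, NULL on failure.
 */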
3492 1.298.2.3 tls struct raid_softc *
3493 1.298.2.3 tls rf_auto_config_set(RF_ConfigSet_t *cset)
3494 1.51 oster {
3495 1.51 oster RF_Raid_t *raidPtr;
3496 1.51 oster RF_Config_t *config;
3497 1.51 oster int raidID;
3498 1.298.2.3 tls struct raid_softc *sc;
3499 1.51 oster
3500 1.224 oster #ifdef DEBUG
3501 1.72 oster printf("RAID autoconfigure\n");
3502 1.127 oster #endif
3503 1.51 oster
3504 1.51 oster /* 1. Create a config structure */
3505 1.298.2.3 tls config = malloc(sizeof(*config), M_RAIDFRAME, M_NOWAIT|M_ZERO);
3506 1.298.2.3 tls if (config == NULL) {
3507 1.298.2.5 jdolecek printf("%s: Out of mem - config!?!?\n", __func__);
3508 1.51 oster /* XXX do something more intelligent here. */
3509 1.298.2.3 tls return NULL;
3510 1.51 oster }
3511 1.77 oster
3512 1.186 perry /*
3513 1.186 perry 2. Figure out what RAID ID this one is supposed to live at
3514 1.51 oster See if we can get the same RAID dev that it was configured
3515 1.186 perry on last time..
3516 1.51 oster */
3517 1.51 oster
3518 1.51 oster raidID = cset->ac->clabel->last_unit;
3519 1.298.2.5 jdolecek for (sc = raidget(raidID, false); sc && sc->sc_r.valid != 0;
3520 1.298.2.5 jdolecek sc = raidget(++raidID, false))
3521 1.298.2.3 tls continue;
3522 1.224 oster #ifdef DEBUG
3523 1.72 oster printf("Configuring raid%d:\n",raidID);
3524 1.127 oster #endif
3525 1.127 oster
3526 1.298.2.5 jdolecek if (sc == NULL)
3527 1.298.2.5 jdolecek sc = raidget(raidID, true);
3528 1.298.2.5 jdolecek if (sc == NULL) {
3529 1.298.2.5 jdolecek printf("%s: Out of mem - softc!?!?\n", __func__);
3530 1.298.2.5 jdolecek /* XXX do something more intelligent here. */
3531 1.298.2.5 jdolecek free(config, M_RAIDFRAME);
3532 1.298.2.5 jdolecek return NULL;
3533 1.298.2.5 jdolecek }
3534 1.298.2.5 jdolecek
3535 1.298.2.3 tls raidPtr = &sc->sc_r;
3536 1.51 oster
3537 1.51 oster /* XXX all this stuff should be done SOMEWHERE ELSE! */
3538 1.298.2.3 tls raidPtr->softc = sc;
3539 1.51 oster raidPtr->raidid = raidID;
3540 1.51 oster raidPtr->openings = RAIDOUTSTANDING;
3541 1.51 oster
3542 1.51 oster /* 3. Build the configuration structure */
3543 1.51 oster rf_create_configuration(cset->ac, config, raidPtr);
3544 1.51 oster
3545 1.51 oster /* 4. Do the configuration */
3546 1.298.2.3 tls if (rf_Configure(raidPtr, config, cset->ac) == 0) {
3547 1.298.2.3 tls raidinit(sc);
3548 1.59 oster
3549 1.298.2.3 tls rf_markalldirty(raidPtr);
3550 1.298.2.3 tls raidPtr->autoconfigure = 1; /* XXX do this here? */
3551 1.298.2.4 tls switch (cset->ac->clabel->root_partition) {
3552 1.298.2.4 tls case 1: /* Force Root */
3553 1.298.2.4 tls case 2: /* Soft Root: root when boot partition part of raid */
3554 1.298.2.4 tls /*
3555 1.298.2.4 tls * everything configured just fine. Make a note
3556 1.298.2.4 tls * that this set is eligible to be root,
3557 1.298.2.4 tls * or forced to be root
3558 1.298.2.4 tls */
3559 1.298.2.4 tls cset->rootable = cset->ac->clabel->root_partition;
3560 1.54 oster /* XXX do this here? */
3561 1.298.2.4 tls raidPtr->root_partition = cset->rootable;
3562 1.298.2.4 tls break;
3563 1.298.2.4 tls default:
3564 1.298.2.4 tls break;
3565 1.51 oster }
3566 1.298.2.3 tls } else {
3567 1.298.2.3 tls raidput(sc);
3568 1.298.2.3 tls sc = NULL;
3569 1.51 oster }
3570 1.51 oster
3571 1.51 oster /* 5. Cleanup */
3572 1.51 oster free(config, M_RAIDFRAME);
3573 1.298.2.3 tls return sc;
3574 1.99 oster }
3575 1.99 oster
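/*
 * Initialize a pool at IPL_BIO, pre-allocating xmin items and capping
 * the number of idle items kept around at xmax.
 */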
3576 1.99 oster void
3577 1.187 christos rf_pool_init(struct pool *p, size_t size, const char *w_chan,
3578 1.187 christos size_t xmin, size_t xmax)
3579 1.177 oster {
3580 1.298.2.5 jdolecek int error;
3581 1.298.2.5 jdolecek
3582 1.227 ad pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
3583 1.187 christos pool_sethiwat(p, xmax);
3584 1.298.2.5 jdolecek if ((error = pool_prime(p, xmin)) != 0)
3585 1.298.2.5 jdolecek panic("%s: failed to prime pool: %d", __func__, error);
3586 1.187 christos pool_setlowat(p, xmin);
3587 1.177 oster }
3588 1.190 oster
3589 1.190 oster /*
3590 1.298.2.5 jdolecek  * rf_buf_queue_check(RF_Raid_t *raidPtr) -- looks into the buffer queue
3591 1.298.2.5 jdolecek * to see if there is IO pending and if that IO could possibly be done
3592 1.298.2.5 jdolecek * for a given RAID set. Returns 0 if IO is waiting and can be done, 1
3593 1.190 oster * otherwise.
3594 1.190 oster *
3595 1.190 oster */
3596 1.190 oster int
3597 1.298.2.3 tls rf_buf_queue_check(RF_Raid_t *raidPtr)
3598 1.190 oster {
3599 1.298.2.5 jdolecek struct raid_softc *rs;
3600 1.298.2.5 jdolecek struct dk_softc *dksc;
3601 1.298.2.5 jdolecek
3602 1.298.2.5 jdolecek rs = raidPtr->softc;
3603 1.298.2.5 jdolecek dksc = &rs->sc_dksc;
3604 1.298.2.5 jdolecek
3605 1.298.2.5 jdolecek if ((rs->sc_flags & RAIDF_INITED) == 0)
3606 1.298.2.5 jdolecek return 1;
3607 1.298.2.5 jdolecek
3608 1.298.2.5 jdolecek if (dk_strategy_pending(dksc) && raidPtr->openings > 0) {
3609 1.190 oster /* there is work to do */
3610 1.190 oster return 0;
3611 1.298.2.5 jdolecek }
3612 1.190 oster /* default is nothing to do */
3613 1.190 oster return 1;
3614 1.190 oster }
3615 1.213 christos
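/*
 * Fill in blockSize, numBlocks and partitionSize for a component from
 * its vnode; numBlocks excludes the rf_protectedSectors reserved area.
 */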
3616 1.213 christos int
3617 1.294 oster rf_getdisksize(struct vnode *vp, RF_RaidDisk_t *diskPtr)
3618 1.213 christos {
3619 1.275 mrg uint64_t numsecs;
3620 1.275 mrg unsigned secsize;
3621 1.213 christos int error;
3622 1.213 christos
3623 1.275 mrg error = getdisksize(vp, &numsecs, &secsize);
3624 1.213 christos if (error == 0) {
3625 1.275 mrg diskPtr->blockSize = secsize;
3626 1.275 mrg diskPtr->numBlocks = numsecs - rf_protectedSectors;
3627 1.275 mrg diskPtr->partitionSize = numsecs;
3628 1.213 christos return 0;
3629 1.213 christos }
3630 1.213 christos return error;
3631 1.213 christos }
3632 1.217 oster
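/*
 * Autoconf match/attach hooks for the raid pseudo-device; there is no
 * real hardware to probe or attach, so both are trivial.
 */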
3633 1.217 oster static int
3634 1.261 dyoung raid_match(device_t self, cfdata_t cfdata, void *aux)
3635 1.217 oster {
3636 1.217 oster return 1;
3637 1.217 oster }
3638 1.217 oster
3639 1.217 oster static void
3640 1.261 dyoung raid_attach(device_t parent, device_t self, void *aux)
3641 1.217 oster {
3642 1.217 oster }
3643 1.217 oster
3644 1.217 oster
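/*
 * Detach a raid unit: take the per-unit lock, tear the unit down via
 * raid_detach_unlocked(), and release the softc on success.
 */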
3645 1.217 oster static int
3646 1.261 dyoung raid_detach(device_t self, int flags)
3647 1.217 oster {
3648 1.266 dyoung int error;
3649 1.298.2.5 jdolecek struct raid_softc *rs = raidsoftc(self);
3650 1.298.2.3 tls
3651 1.298.2.3 tls if (rs == NULL)
3652 1.298.2.3 tls return ENXIO;
3653 1.266 dyoung
3654 1.266 dyoung if ((error = raidlock(rs)) != 0)
3655 1.266 dyoung return (error);
3656 1.217 oster
3657 1.266 dyoung error = raid_detach_unlocked(rs);
3658 1.266 dyoung
3659 1.266 dyoung raidunlock(rs);
3660 1.217 oster
3661 1.298.2.5 jdolecek /* XXX raid can be referenced here */
3662 1.298.2.3 tls
3663 1.298.2.5 jdolecek if (error)
3664 1.298.2.5 jdolecek return error;
3665 1.298.2.5 jdolecek
3666 1.298.2.5 jdolecek /* Free the softc */
3667 1.298.2.5 jdolecek raidput(rs);
3668 1.298.2.5 jdolecek
3669 1.298.2.5 jdolecek return 0;
3670 1.217 oster }
3671 1.217 oster
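/*
 * Fill in the (synthetic) geometry of the RAID set and publish it to
 * the disk(9) layer via disk_set_info().
 */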
3672 1.234 oster static void
3673 1.298.2.3 tls rf_set_geometry(struct raid_softc *rs, RF_Raid_t *raidPtr)
3674 1.234 oster {
3675 1.298.2.5 jdolecek struct dk_softc *dksc = &rs->sc_dksc;
3676 1.298.2.5 jdolecek struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
3677 1.298.2.3 tls
3678 1.298.2.3 tls memset(dg, 0, sizeof(*dg));
3679 1.298.2.3 tls
3680 1.298.2.3 tls dg->dg_secperunit = raidPtr->totalSectors;
3681 1.298.2.3 tls dg->dg_secsize = raidPtr->bytesPerSector;
3682 1.298.2.3 tls dg->dg_nsectors = raidPtr->Layout.dataSectorsPerStripe;
3683 1.298.2.3 tls dg->dg_ntracks = 4 * raidPtr->numCol;
3684 1.298.2.3 tls
3685 1.298.2.5 jdolecek disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
3686 1.298.2.5 jdolecek }
3687 1.298.2.5 jdolecek
3688 1.298.2.5 jdolecek /*
3689 1.298.2.5 jdolecek * Get cache info for all the components (including spares).
3690 1.298.2.5 jdolecek * Returns intersection of all the cache flags of all disks, or first
3691 1.298.2.5 jdolecek * error if any encountered.
3692 1.298.2.5 jdolecek * XXXfua feature flags can change as spares are added - lock down somehow
3693 1.298.2.5 jdolecek */
3694 1.298.2.5 jdolecek static int
3695 1.298.2.5 jdolecek rf_get_component_caches(RF_Raid_t *raidPtr, int *data)
3696 1.298.2.5 jdolecek {
3697 1.298.2.5 jdolecek int c;
3698 1.298.2.5 jdolecek int error;
3699 1.298.2.5 jdolecek int dkwhole = 0, dkpart;
3700 1.298.2.5 jdolecek
3701 1.298.2.5 jdolecek for (c = 0; c < raidPtr->numCol + raidPtr->numSpare; c++) {
3702 1.298.2.5 jdolecek /*
3703 1.298.2.5 jdolecek * Check any non-dead disk, even when currently being
3704 1.298.2.5 jdolecek * reconstructed.
3705 1.298.2.5 jdolecek */
3706 1.298.2.5 jdolecek if (!RF_DEAD_DISK(raidPtr->Disks[c].status)
3707 1.298.2.5 jdolecek || raidPtr->Disks[c].status == rf_ds_reconstructing) {
3708 1.298.2.5 jdolecek error = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp,
3709 1.298.2.5 jdolecek DIOCGCACHE, &dkpart, FREAD, NOCRED);
3710 1.298.2.5 jdolecek if (error) {
3711 1.298.2.5 jdolecek if (error != ENODEV) {
3712 1.298.2.5 jdolecek printf("raid%d: get cache for component %s failed\n",
3713 1.298.2.5 jdolecek raidPtr->raidid,
3714 1.298.2.5 jdolecek raidPtr->Disks[c].devname);
3715 1.298.2.5 jdolecek }
3716 1.298.2.5 jdolecek
3717 1.298.2.5 jdolecek return error;
3718 1.298.2.5 jdolecek }
3719 1.298.2.5 jdolecek
3720 1.298.2.5 jdolecek if (c == 0)
3721 1.298.2.5 jdolecek dkwhole = dkpart;
3722 1.298.2.5 jdolecek else
3723 1.298.2.5 jdolecek dkwhole = DKCACHE_COMBINE(dkwhole, dkpart);
3724 1.298.2.5 jdolecek }
3725 1.298.2.5 jdolecek }
3726 1.298.2.5 jdolecek
3727 1.298.2.5 jdolecek *data = dkwhole;
3728 1.298.2.5 jdolecek
3729 1.298.2.5 jdolecek return 0;
3730 1.234 oster }
3731 1.252 oster
3732 1.252 oster /*
3733 1.252 oster * Implement forwarding of the DIOCCACHESYNC ioctl to each of the components.
3734 1.252 oster * We end up returning whatever error was returned by the first cache flush
3735 1.252 oster * that fails.
3736 1.252 oster */
3737 1.252 oster
3738 1.269 jld int
3739 1.252 oster rf_sync_component_caches(RF_Raid_t *raidPtr)
3740 1.252 oster {
3741 1.252 oster int c, sparecol;
3742 1.252 oster int e,error;
3743 1.252 oster int force = 1;
3744 1.252 oster
3745 1.252 oster error = 0;
3746 1.252 oster for (c = 0; c < raidPtr->numCol; c++) {
3747 1.252 oster if (raidPtr->Disks[c].status == rf_ds_optimal) {
3748 1.252 oster e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp, DIOCCACHESYNC,
3749 1.252 oster &force, FWRITE, NOCRED);
3750 1.252 oster if (e) {
3751 1.255 oster if (e != ENODEV)
3752 1.255 oster printf("raid%d: cache flush to component %s failed.\n",
3753 1.255 oster raidPtr->raidid, raidPtr->Disks[c].devname);
3754 1.252 oster if (error == 0) {
3755 1.252 oster error = e;
3756 1.252 oster }
3757 1.252 oster }
3758 1.252 oster }
3759 1.252 oster }
3760 1.252 oster
3761 1.252 oster for( c = 0; c < raidPtr->numSpare ; c++) {
3762 1.252 oster sparecol = raidPtr->numCol + c;
3763 1.252 oster /* Need to ensure that the reconstruct actually completed! */
3764 1.252 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3765 1.252 oster e = VOP_IOCTL(raidPtr->raid_cinfo[sparecol].ci_vp,
3766 1.252 oster DIOCCACHESYNC, &force, FWRITE, NOCRED);
3767 1.252 oster if (e) {
3768 1.255 oster if (e != ENODEV)
3769 1.255 oster printf("raid%d: cache flush to component %s failed.\n",
3770 1.255 oster raidPtr->raidid, raidPtr->Disks[sparecol].devname);
3771 1.252 oster if (error == 0) {
3772 1.252 oster error = e;
3773 1.252 oster }
3774 1.252 oster }
3775 1.252 oster }
3776 1.252 oster }
3777 1.252 oster return error;
3778 1.252 oster }
3779 1.298.2.1 tls
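/*
 * minphys hook: clamp a buffer's transfer size to
 * numDataCol * MAXPHYS, i.e. at most MAXPHYS per data column.
 */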
3780 1.298.2.1 tls static void
3781 1.298.2.1 tls raidminphys(struct buf *bp)
3782 1.298.2.1 tls {
3783 1.298.2.1 tls dev_t dev;
3784 1.298.2.1 tls int unit;
3785 1.298.2.1 tls struct raid_softc *rs;
3786 1.298.2.1 tls RF_Raid_t *raidPtr;
3787 1.298.2.1 tls long xmax;
3788 1.298.2.1 tls
3789 1.298.2.1 tls dev = bp->b_dev;
3790 1.298.2.1 tls unit = raidunit(dev);
3791 1.298.2.5 jdolecek rs = raidget(unit, false);
3792 1.298.2.3 tls raidPtr = &(rs->sc_r);
3793 1.298.2.1 tls
3794 1.298.2.1 tls xmax = raidPtr->Layout.numDataCol * MAXPHYS;
3795 1.298.2.1 tls
3796 1.298.2.1 tls if (bp->b_bcount > xmax) {
3797 1.298.2.1 tls bp->b_bcount = xmax;
3798 1.298.2.1 tls }
3799 1.298.2.1 tls }
3800 1.298.2.5 jdolecek
3801 1.298.2.5 jdolecek /*
3802 1.298.2.5 jdolecek * Module interface
3803 1.298.2.5 jdolecek */
3804 1.298.2.5 jdolecek
3805 1.298.2.5 jdolecek MODULE(MODULE_CLASS_DRIVER, raid, "dk_subr");
3806 1.298.2.5 jdolecek
3807 1.298.2.5 jdolecek #ifdef _MODULE
3808 1.298.2.5 jdolecek CFDRIVER_DECL(raid, DV_DISK, NULL);
3809 1.298.2.5 jdolecek #endif
3810 1.298.2.5 jdolecek
3811 1.298.2.5 jdolecek static int raid_modcmd(modcmd_t, void *);
3812 1.298.2.5 jdolecek static int raid_modcmd_init(void);
3813 1.298.2.5 jdolecek static int raid_modcmd_fini(void);
3814 1.298.2.5 jdolecek
3815 1.298.2.5 jdolecek static int
3816 1.298.2.5 jdolecek raid_modcmd(modcmd_t cmd, void *data)
3817 1.298.2.5 jdolecek {
3818 1.298.2.5 jdolecek int error;
3819 1.298.2.5 jdolecek
3820 1.298.2.5 jdolecek error = 0;
3821 1.298.2.5 jdolecek switch (cmd) {
3822 1.298.2.5 jdolecek case MODULE_CMD_INIT:
3823 1.298.2.5 jdolecek error = raid_modcmd_init();
3824 1.298.2.5 jdolecek break;
3825 1.298.2.5 jdolecek case MODULE_CMD_FINI:
3826 1.298.2.5 jdolecek error = raid_modcmd_fini();
3827 1.298.2.5 jdolecek break;
3828 1.298.2.5 jdolecek default:
3829 1.298.2.5 jdolecek error = ENOTTY;
3830 1.298.2.5 jdolecek break;
3831 1.298.2.5 jdolecek }
3832 1.298.2.5 jdolecek return error;
3833 1.298.2.5 jdolecek }
3834 1.298.2.5 jdolecek
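/*
 * Module initialization: attach the block/character devsw and autoconf
 * glue, boot the RAIDframe core, and register a finalizer that will
 * auto-configure RAID sets once all real hardware has been found.
 */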
3835 1.298.2.5 jdolecek static int
3836 1.298.2.5 jdolecek raid_modcmd_init(void)
3837 1.298.2.5 jdolecek {
3838 1.298.2.5 jdolecek int error;
3839 1.298.2.5 jdolecek int bmajor, cmajor;
3840 1.298.2.5 jdolecek
3841 1.298.2.5 jdolecek mutex_init(&raid_lock, MUTEX_DEFAULT, IPL_NONE);
3842 1.298.2.5 jdolecek mutex_enter(&raid_lock);
3843 1.298.2.5 jdolecek #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
3844 1.298.2.5 jdolecek rf_init_mutex2(rf_sparet_wait_mutex, IPL_VM);
3845 1.298.2.5 jdolecek rf_init_cond2(rf_sparet_wait_cv, "sparetw");
3846 1.298.2.5 jdolecek rf_init_cond2(rf_sparet_resp_cv, "rfgst");
3847 1.298.2.5 jdolecek
3848 1.298.2.5 jdolecek rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
3849 1.298.2.5 jdolecek #endif
3850 1.298.2.5 jdolecek
3851 1.298.2.5 jdolecek bmajor = cmajor = -1;
3852 1.298.2.5 jdolecek error = devsw_attach("raid", &raid_bdevsw, &bmajor,
3853 1.298.2.5 jdolecek &raid_cdevsw, &cmajor);
3854 1.298.2.5 jdolecek if (error != 0 && error != EEXIST) {
3855 1.298.2.5 jdolecek aprint_error("%s: devsw_attach failed %d\n", __func__, error);
3856 1.298.2.5 jdolecek mutex_exit(&raid_lock);
3857 1.298.2.5 jdolecek return error;
3858 1.298.2.5 jdolecek }
3859 1.298.2.5 jdolecek #ifdef _MODULE
3860 1.298.2.5 jdolecek error = config_cfdriver_attach(&raid_cd);
3861 1.298.2.5 jdolecek if (error != 0) {
3862 1.298.2.5 jdolecek aprint_error("%s: config_cfdriver_attach failed %d\n",
3863 1.298.2.5 jdolecek __func__, error);
3864 1.298.2.5 jdolecek devsw_detach(&raid_bdevsw, &raid_cdevsw);
3865 1.298.2.5 jdolecek mutex_exit(&raid_lock);
3866 1.298.2.5 jdolecek return error;
3867 1.298.2.5 jdolecek }
3868 1.298.2.5 jdolecek #endif
3869 1.298.2.5 jdolecek error = config_cfattach_attach(raid_cd.cd_name, &raid_ca);
3870 1.298.2.5 jdolecek if (error != 0) {
3871 1.298.2.5 jdolecek aprint_error("%s: config_cfattach_attach failed %d\n",
3872 1.298.2.5 jdolecek __func__, error);
3873 1.298.2.5 jdolecek #ifdef _MODULE
3874 1.298.2.5 jdolecek config_cfdriver_detach(&raid_cd);
3875 1.298.2.5 jdolecek #endif
3876 1.298.2.5 jdolecek devsw_detach(&raid_bdevsw, &raid_cdevsw);
3877 1.298.2.5 jdolecek mutex_exit(&raid_lock);
3878 1.298.2.5 jdolecek return error;
3879 1.298.2.5 jdolecek }
3880 1.298.2.5 jdolecek
3881 1.298.2.5 jdolecek raidautoconfigdone = false;
3882 1.298.2.5 jdolecek
3883 1.298.2.5 jdolecek mutex_exit(&raid_lock);
3884 1.298.2.5 jdolecek
3885 1.298.2.5 jdolecek if (error == 0) {
3886 1.298.2.5 jdolecek if (rf_BootRaidframe(true) == 0)
3887 1.298.2.5 jdolecek aprint_verbose("Kernelized RAIDframe activated\n");
3888 1.298.2.5 jdolecek else
3889 1.298.2.5 jdolecek panic("Serious error activating RAID!!");
3890 1.298.2.5 jdolecek }
3891 1.298.2.5 jdolecek
3892 1.298.2.5 jdolecek /*
3893 1.298.2.5 jdolecek * Register a finalizer which will be used to auto-config RAID
3894 1.298.2.5 jdolecek * sets once all real hardware devices have been found.
3895 1.298.2.5 jdolecek */
3896 1.298.2.5 jdolecek error = config_finalize_register(NULL, rf_autoconfig);
3897 1.298.2.5 jdolecek if (error != 0) {
3898 1.298.2.5 jdolecek aprint_error("WARNING: unable to register RAIDframe "
3899 1.298.2.5 jdolecek "finalizer\n");
3900 1.298.2.5 jdolecek error = 0;
3901 1.298.2.5 jdolecek }
3902 1.298.2.5 jdolecek
3903 1.298.2.5 jdolecek return error;
3904 1.298.2.5 jdolecek }
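/*
 * Module teardown: refuse to unload while any raid unit exists, then
 * detach the autoconf glue and devsw and shut down the RAIDframe core.
 */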
3905 1.298.2.5 jdolecek
3906 1.298.2.5 jdolecek static int
3907 1.298.2.5 jdolecek raid_modcmd_fini(void)
3908 1.298.2.5 jdolecek {
3909 1.298.2.5 jdolecek int error;
3910 1.298.2.5 jdolecek
3911 1.298.2.5 jdolecek mutex_enter(&raid_lock);
3912 1.298.2.5 jdolecek
3913 1.298.2.5 jdolecek /* Don't allow unload if raid device(s) exist. */
3914 1.298.2.5 jdolecek if (!LIST_EMPTY(&raids)) {
3915 1.298.2.5 jdolecek mutex_exit(&raid_lock);
3916 1.298.2.5 jdolecek return EBUSY;
3917 1.298.2.5 jdolecek }
3918 1.298.2.5 jdolecek
3919 1.298.2.5 jdolecek error = config_cfattach_detach(raid_cd.cd_name, &raid_ca);
3920 1.298.2.5 jdolecek if (error != 0) {
3921 1.298.2.5 jdolecek aprint_error("%s: cannot detach cfattach\n",__func__);
3922 1.298.2.5 jdolecek mutex_exit(&raid_lock);
3923 1.298.2.5 jdolecek return error;
3924 1.298.2.5 jdolecek }
3925 1.298.2.5 jdolecek #ifdef _MODULE
3926 1.298.2.5 jdolecek error = config_cfdriver_detach(&raid_cd);
3927 1.298.2.5 jdolecek if (error != 0) {
3928 1.298.2.5 jdolecek aprint_error("%s: cannot detach cfdriver\n",__func__);
3929 1.298.2.5 jdolecek config_cfattach_attach(raid_cd.cd_name, &raid_ca);
3930 1.298.2.5 jdolecek mutex_exit(&raid_lock);
3931 1.298.2.5 jdolecek return error;
3932 1.298.2.5 jdolecek }
3933 1.298.2.5 jdolecek #endif
3934 1.298.2.5 jdolecek error = devsw_detach(&raid_bdevsw, &raid_cdevsw);
3935 1.298.2.5 jdolecek if (error != 0) {
3936 1.298.2.5 jdolecek aprint_error("%s: cannot detach devsw\n",__func__);
3937 1.298.2.5 jdolecek #ifdef _MODULE
3938 1.298.2.5 jdolecek config_cfdriver_attach(&raid_cd);
3939 1.298.2.5 jdolecek #endif
3940 1.298.2.5 jdolecek config_cfattach_attach(raid_cd.cd_name, &raid_ca);
3941 1.298.2.5 jdolecek mutex_exit(&raid_lock);
3942 1.298.2.5 jdolecek return error;
3943 1.298.2.5 jdolecek }
3944 1.298.2.5 jdolecek rf_BootRaidframe(false);
3945 1.298.2.5 jdolecek #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
3946 1.298.2.5 jdolecek rf_destroy_mutex2(rf_sparet_wait_mutex);
3947 1.298.2.5 jdolecek rf_destroy_cond2(rf_sparet_wait_cv);
3948 1.298.2.5 jdolecek rf_destroy_cond2(rf_sparet_resp_cv);
3949 1.298.2.5 jdolecek #endif
3950 1.298.2.5 jdolecek mutex_exit(&raid_lock);
3951 1.298.2.5 jdolecek mutex_destroy(&raid_lock);
3952 1.298.2.5 jdolecek
3953 1.298.2.5 jdolecek return error;
3954 1.298.2.5 jdolecek }
3955