 1 1.316.2.7 skrll /*	$NetBSD: rf_netbsdkintf.c,v 1.316.2.7 2016/10/05 20:55:56 skrll Exp $	*/
2 1.281 rmind
3 1.1 oster /*-
4 1.295 erh * Copyright (c) 1996, 1997, 1998, 2008-2011 The NetBSD Foundation, Inc.
5 1.1 oster * All rights reserved.
6 1.1 oster *
7 1.1 oster * This code is derived from software contributed to The NetBSD Foundation
8 1.1 oster * by Greg Oster; Jason R. Thorpe.
9 1.1 oster *
10 1.1 oster * Redistribution and use in source and binary forms, with or without
11 1.1 oster * modification, are permitted provided that the following conditions
12 1.1 oster * are met:
13 1.1 oster * 1. Redistributions of source code must retain the above copyright
14 1.1 oster * notice, this list of conditions and the following disclaimer.
15 1.1 oster * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 oster * notice, this list of conditions and the following disclaimer in the
17 1.1 oster * documentation and/or other materials provided with the distribution.
18 1.1 oster *
19 1.1 oster * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 oster * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 oster * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 oster * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 oster * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 oster * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 oster * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 oster * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 oster * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 oster * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 oster * POSSIBILITY OF SUCH DAMAGE.
30 1.1 oster */
31 1.1 oster
32 1.1 oster /*
33 1.281 rmind * Copyright (c) 1988 University of Utah.
34 1.1 oster * Copyright (c) 1990, 1993
35 1.1 oster * The Regents of the University of California. All rights reserved.
36 1.1 oster *
37 1.1 oster * This code is derived from software contributed to Berkeley by
38 1.1 oster * the Systems Programming Group of the University of Utah Computer
39 1.1 oster * Science Department.
40 1.1 oster *
41 1.1 oster * Redistribution and use in source and binary forms, with or without
42 1.1 oster * modification, are permitted provided that the following conditions
43 1.1 oster * are met:
44 1.1 oster * 1. Redistributions of source code must retain the above copyright
45 1.1 oster * notice, this list of conditions and the following disclaimer.
46 1.1 oster * 2. Redistributions in binary form must reproduce the above copyright
47 1.1 oster * notice, this list of conditions and the following disclaimer in the
48 1.1 oster * documentation and/or other materials provided with the distribution.
49 1.162 agc * 3. Neither the name of the University nor the names of its contributors
50 1.162 agc * may be used to endorse or promote products derived from this software
51 1.162 agc * without specific prior written permission.
52 1.162 agc *
53 1.162 agc * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 1.162 agc * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 1.162 agc * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 1.162 agc * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 1.162 agc * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 1.162 agc * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 1.162 agc * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 1.162 agc * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 1.162 agc * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 1.162 agc * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 1.162 agc * SUCH DAMAGE.
64 1.162 agc *
65 1.162 agc * from: Utah $Hdr: cd.c 1.6 90/11/28$
66 1.162 agc *
67 1.162 agc * @(#)cd.c 8.2 (Berkeley) 11/16/93
68 1.162 agc */
69 1.162 agc
70 1.162 agc /*
71 1.1 oster * Copyright (c) 1995 Carnegie-Mellon University.
72 1.1 oster * All rights reserved.
73 1.1 oster *
74 1.1 oster * Authors: Mark Holland, Jim Zelenka
75 1.1 oster *
76 1.1 oster * Permission to use, copy, modify and distribute this software and
77 1.1 oster * its documentation is hereby granted, provided that both the copyright
78 1.1 oster * notice and this permission notice appear in all copies of the
79 1.1 oster * software, derivative works or modified versions, and any portions
80 1.1 oster * thereof, and that both notices appear in supporting documentation.
81 1.1 oster *
82 1.1 oster * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
83 1.1 oster * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
84 1.1 oster * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
85 1.1 oster *
86 1.1 oster * Carnegie Mellon requests users of this software to return to
87 1.1 oster *
88 1.1 oster * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
89 1.1 oster * School of Computer Science
90 1.1 oster * Carnegie Mellon University
91 1.1 oster * Pittsburgh PA 15213-3890
92 1.1 oster *
93 1.1 oster * any improvements or extensions that they make and grant Carnegie the
94 1.1 oster * rights to redistribute these changes.
95 1.1 oster */
96 1.1 oster
97 1.1 oster /***********************************************************
98 1.1 oster *
99 1.1 oster * rf_kintf.c -- the kernel interface routines for RAIDframe
100 1.1 oster *
101 1.1 oster ***********************************************************/
102 1.112 lukem
103 1.112 lukem #include <sys/cdefs.h>
104 1.316.2.7 skrll __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.316.2.7 2016/10/05 20:55:56 skrll Exp $");
105 1.251 ad
106 1.251 ad #ifdef _KERNEL_OPT
107 1.254 christos #include "opt_compat_netbsd.h"
108 1.251 ad #include "opt_raid_autoconfig.h"
109 1.251 ad #endif
110 1.1 oster
111 1.113 lukem #include <sys/param.h>
112 1.1 oster #include <sys/errno.h>
113 1.1 oster #include <sys/pool.h>
114 1.152 thorpej #include <sys/proc.h>
115 1.1 oster #include <sys/queue.h>
116 1.1 oster #include <sys/disk.h>
117 1.1 oster #include <sys/device.h>
118 1.1 oster #include <sys/stat.h>
119 1.1 oster #include <sys/ioctl.h>
120 1.1 oster #include <sys/fcntl.h>
121 1.1 oster #include <sys/systm.h>
122 1.1 oster #include <sys/vnode.h>
123 1.1 oster #include <sys/disklabel.h>
124 1.1 oster #include <sys/conf.h>
125 1.1 oster #include <sys/buf.h>
126 1.182 yamt #include <sys/bufq.h>
127 1.65 oster #include <sys/reboot.h>
128 1.208 elad #include <sys/kauth.h>
129 1.316.2.4 skrll #include <sys/module.h>
130 1.8 oster
131 1.234 oster #include <prop/proplib.h>
132 1.234 oster
133 1.110 oster #include <dev/raidframe/raidframevar.h>
134 1.110 oster #include <dev/raidframe/raidframeio.h>
135 1.269 jld #include <dev/raidframe/rf_paritymap.h>
136 1.251 ad
137 1.1 oster #include "rf_raid.h"
138 1.44 oster #include "rf_copyback.h"
139 1.1 oster #include "rf_dag.h"
140 1.1 oster #include "rf_dagflags.h"
141 1.99 oster #include "rf_desc.h"
142 1.1 oster #include "rf_diskqueue.h"
143 1.1 oster #include "rf_etimer.h"
144 1.1 oster #include "rf_general.h"
145 1.1 oster #include "rf_kintf.h"
146 1.1 oster #include "rf_options.h"
147 1.1 oster #include "rf_driver.h"
148 1.1 oster #include "rf_parityscan.h"
149 1.1 oster #include "rf_threadstuff.h"
150 1.1 oster
151 1.254 christos #ifdef COMPAT_50
152 1.254 christos #include "rf_compat50.h"
153 1.254 christos #endif
154 1.254 christos
155 1.316.2.3 skrll #include "ioconf.h"
156 1.316.2.3 skrll
157 1.133 oster #ifdef DEBUG
158 1.9 oster int rf_kdebug_level = 0;
159 1.1 oster #define db1_printf(a) if (rf_kdebug_level > 0) printf a
160 1.9 oster #else /* DEBUG */
161 1.1 oster #define db1_printf(a) { }
162 1.9 oster #endif /* DEBUG */
163 1.1 oster
164 1.316.2.6 skrll #ifdef DEBUG_ROOT
165 1.316.2.6 skrll #define DPRINTF(a, ...) printf(a, __VA_ARGS__)
166 1.316.2.6 skrll #else
167 1.316.2.6 skrll #define DPRINTF(a, ...)
168 1.316.2.6 skrll #endif
169 1.316.2.6 skrll
170 1.249 oster #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
171 1.289 mrg static rf_declare_mutex2(rf_sparet_wait_mutex);
172 1.287 mrg static rf_declare_cond2(rf_sparet_wait_cv);
173 1.287 mrg static rf_declare_cond2(rf_sparet_resp_cv);
174 1.1 oster
175 1.10 oster static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a
176 1.10 oster * spare table */
177 1.10 oster static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from
178 1.10 oster * installation process */
179 1.249 oster #endif
180 1.153 thorpej
181 1.153 thorpej MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
182 1.10 oster
183 1.1 oster /* prototypes */
184 1.187 christos static void KernelWakeupFunc(struct buf *);
185 1.187 christos static void InitBP(struct buf *, struct vnode *, unsigned,
186 1.225 christos dev_t, RF_SectorNum_t, RF_SectorCount_t, void *, void (*) (struct buf *),
187 1.187 christos void *, int, struct proc *);
188 1.300 christos struct raid_softc;
189 1.300 christos static void raidinit(struct raid_softc *);
190 1.316.2.5 skrll static int raiddoaccess(RF_Raid_t *raidPtr, struct buf *bp);
191 1.1 oster
192 1.261 dyoung static int raid_match(device_t, cfdata_t, void *);
193 1.261 dyoung static void raid_attach(device_t, device_t, void *);
194 1.261 dyoung static int raid_detach(device_t, int);
195 1.130 gehenna
196 1.316.2.2 skrll static int raidread_component_area(dev_t, struct vnode *, void *, size_t,
197 1.269 jld daddr_t, daddr_t);
198 1.269 jld static int raidwrite_component_area(dev_t, struct vnode *, void *, size_t,
199 1.269 jld daddr_t, daddr_t, int);
200 1.269 jld
201 1.276 mrg static int raidwrite_component_label(unsigned,
202 1.276 mrg dev_t, struct vnode *, RF_ComponentLabel_t *);
203 1.276 mrg static int raidread_component_label(unsigned,
204 1.276 mrg dev_t, struct vnode *, RF_ComponentLabel_t *);
205 1.269 jld
206 1.316.2.5 skrll static int raid_diskstart(device_t, struct buf *bp);
207 1.316.2.5 skrll static int raid_dumpblocks(device_t, void *, daddr_t, int);
208 1.316.2.5 skrll static int raid_lastclose(device_t);
209 1.269 jld
210 1.316.2.3 skrll static dev_type_open(raidopen);
211 1.316.2.3 skrll static dev_type_close(raidclose);
212 1.316.2.3 skrll static dev_type_read(raidread);
213 1.316.2.3 skrll static dev_type_write(raidwrite);
214 1.316.2.3 skrll static dev_type_ioctl(raidioctl);
215 1.316.2.3 skrll static dev_type_strategy(raidstrategy);
216 1.316.2.3 skrll static dev_type_dump(raiddump);
217 1.316.2.3 skrll static dev_type_size(raidsize);
218 1.130 gehenna
219 1.130 gehenna const struct bdevsw raid_bdevsw = {
220 1.305 dholland .d_open = raidopen,
221 1.305 dholland .d_close = raidclose,
222 1.305 dholland .d_strategy = raidstrategy,
223 1.305 dholland .d_ioctl = raidioctl,
224 1.305 dholland .d_dump = raiddump,
225 1.305 dholland .d_psize = raidsize,
226 1.311 dholland .d_discard = nodiscard,
227 1.305 dholland .d_flag = D_DISK
228 1.130 gehenna };
229 1.130 gehenna
230 1.130 gehenna const struct cdevsw raid_cdevsw = {
231 1.305 dholland .d_open = raidopen,
232 1.305 dholland .d_close = raidclose,
233 1.305 dholland .d_read = raidread,
234 1.305 dholland .d_write = raidwrite,
235 1.305 dholland .d_ioctl = raidioctl,
236 1.305 dholland .d_stop = nostop,
237 1.305 dholland .d_tty = notty,
238 1.305 dholland .d_poll = nopoll,
239 1.305 dholland .d_mmap = nommap,
240 1.305 dholland .d_kqfilter = nokqfilter,
241 1.312 dholland .d_discard = nodiscard,
242 1.305 dholland .d_flag = D_DISK
243 1.130 gehenna };
244 1.1 oster
245 1.316.2.2 skrll static struct dkdriver rf_dkdriver = {
246 1.316.2.5 skrll .d_open = raidopen,
247 1.316.2.5 skrll .d_close = raidclose,
248 1.316.2.2 skrll .d_strategy = raidstrategy,
249 1.316.2.5 skrll .d_diskstart = raid_diskstart,
250 1.316.2.5 skrll .d_dumpblocks = raid_dumpblocks,
251 1.316.2.5 skrll .d_lastclose = raid_lastclose,
252 1.316.2.2 skrll .d_minphys = minphys
253 1.316.2.2 skrll };
254 1.235 oster
255 1.10 oster struct raid_softc {
256 1.316.2.5 skrll struct dk_softc sc_dksc;
257 1.300 christos int sc_unit;
258 1.10 oster int sc_flags; /* flags */
259 1.10 oster int sc_cflags; /* configuration flags */
260 1.316.2.4 skrll kmutex_t sc_mutex; /* interlock mutex */
261 1.316.2.4 skrll kcondvar_t sc_cv; /* and the condvar */
262 1.212 oster uint64_t sc_size; /* size of the raid device */
263 1.10 oster char sc_xname[20]; /* XXX external name */
264 1.300 christos RF_Raid_t sc_r;
265 1.300 christos LIST_ENTRY(raid_softc) sc_link;
266 1.10 oster };
267 1.1 oster /* sc_flags */
268 1.316.2.5 skrll #define RAIDF_INITED 0x01 /* unit has been initialized */
269 1.316.2.5 skrll #define RAIDF_SHUTDOWN 0x02 /* unit is being shutdown */
270 1.316.2.5 skrll #define RAIDF_DETACH 0x04 /* detach after final close */
271 1.316.2.5 skrll #define RAIDF_WANTED 0x08 /* someone waiting to obtain a lock */
272 1.316.2.5 skrll #define RAIDF_LOCKED 0x10 /* unit is locked */
273 1.316.2.5 skrll #define RAIDF_UNIT_CHANGED 0x20 /* unit is being changed */
274 1.1 oster
275 1.1 oster #define raidunit(x) DISKUNIT(x)
276 1.316.2.5 skrll #define raidsoftc(dev) (((struct raid_softc *)device_private(dev))->sc_r.softc)
277 1.1 oster
278 1.202 oster extern struct cfdriver raid_cd;
279 1.266 dyoung CFATTACH_DECL3_NEW(raid, sizeof(struct raid_softc),
280 1.266 dyoung raid_match, raid_attach, raid_detach, NULL, NULL, NULL,
281 1.266 dyoung DVF_DETACH_SHUTDOWN);
282 1.202 oster
283 1.186 perry /*
284 1.186 perry * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
285 1.186 perry * Be aware that large numbers can allow the driver to consume a lot of
286 1.28 oster * kernel memory, especially on writes, and in degraded mode reads.
287 1.186 perry *
288 1.186 perry * For example: with a stripe width of 64 blocks (32k) and 5 disks,
289 1.186 perry * a single 64K write will typically require 64K for the old data,
290 1.186 perry * 64K for the old parity, and 64K for the new parity, for a total
291 1.28 oster * of 192K (if the parity buffer is not re-used immediately).
 292 1.110 oster * Even if it is used immediately, that's still 128K, which when multiplied
293 1.28 oster * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
294 1.186 perry *
295 1.28 oster * Now in degraded mode, for example, a 64K read on the above setup may
296 1.186 perry * require data reconstruction, which will require *all* of the 4 remaining
297 1.28 oster * disks to participate -- 4 * 32K/disk == 128K again.
298 1.20 oster */
299 1.20 oster
300 1.20 oster #ifndef RAIDOUTSTANDING
301 1.28 oster #define RAIDOUTSTANDING 6
302 1.20 oster #endif
303 1.20 oster
304 1.1 oster #define RAIDLABELDEV(dev) \
305 1.1 oster (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
306 1.1 oster
307 1.1 oster /* declared here, and made public, for the benefit of KVM stuff.. */
308 1.9 oster
309 1.104 oster static int raidlock(struct raid_softc *);
310 1.104 oster static void raidunlock(struct raid_softc *);
311 1.1 oster
312 1.266 dyoung static int raid_detach_unlocked(struct raid_softc *);
313 1.266 dyoung
314 1.104 oster static void rf_markalldirty(RF_Raid_t *);
315 1.304 christos static void rf_set_geometry(struct raid_softc *, RF_Raid_t *);
316 1.48 oster
317 1.104 oster void rf_ReconThread(struct rf_recon_req *);
318 1.104 oster void rf_RewriteParityThread(RF_Raid_t *raidPtr);
319 1.104 oster void rf_CopybackThread(RF_Raid_t *raidPtr);
320 1.104 oster void rf_ReconstructInPlaceThread(struct rf_recon_req *);
321 1.261 dyoung int rf_autoconfig(device_t);
322 1.142 thorpej void rf_buildroothack(RF_ConfigSet_t *);
323 1.104 oster
324 1.104 oster RF_AutoConfig_t *rf_find_raid_components(void);
325 1.104 oster RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
326 1.104 oster static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
327 1.292 oster int rf_reasonable_label(RF_ComponentLabel_t *, uint64_t);
328 1.104 oster void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
329 1.104 oster int rf_set_autoconfig(RF_Raid_t *, int);
330 1.104 oster int rf_set_rootpartition(RF_Raid_t *, int);
331 1.104 oster void rf_release_all_vps(RF_ConfigSet_t *);
332 1.104 oster void rf_cleanup_config_set(RF_ConfigSet_t *);
333 1.104 oster int rf_have_enough_components(RF_ConfigSet_t *);
334 1.300 christos struct raid_softc *rf_auto_config_set(RF_ConfigSet_t *);
335 1.278 mrg static void rf_fix_old_label_size(RF_ComponentLabel_t *, uint64_t);
336 1.48 oster
337 1.295 erh /*
338 1.295 erh * Debugging, mostly. Set to 0 to not allow autoconfig to take place.
339 1.295 erh * Note that this is overridden by having RAID_AUTOCONFIG as an option
340 1.295 erh * in the kernel config file.
341 1.295 erh */
342 1.295 erh #ifdef RAID_AUTOCONFIG
343 1.295 erh int raidautoconfig = 1;
344 1.295 erh #else
345 1.295 erh int raidautoconfig = 0;
346 1.295 erh #endif
347 1.295 erh static bool raidautoconfigdone = false;
348 1.37 oster
349 1.177 oster struct RF_Pools_s rf_pools;
350 1.177 oster
351 1.300 christos static LIST_HEAD(, raid_softc) raids = LIST_HEAD_INITIALIZER(raids);
352 1.300 christos static kmutex_t raid_lock;
353 1.1 oster
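/*
 * raidcreate: allocate and initialise the softc (unit number, mutex,
 * condition variable) for RAID unit `unit'.
 */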
354 1.300 christos static struct raid_softc *
355 1.300 christos raidcreate(int unit) {
356 1.300 christos struct raid_softc *sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
357 1.300 christos if (sc == NULL) {
358 1.1 oster #ifdef DIAGNOSTIC
359 1.300 christos printf("%s: out of memory\n", __func__);
360 1.1 oster #endif
361 1.300 christos return NULL;
362 1.1 oster }
363 1.300 christos sc->sc_unit = unit;
364 1.316.2.4 skrll cv_init(&sc->sc_cv, "raidunit");
365 1.316.2.4 skrll mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_NONE);
366 1.300 christos return sc;
367 1.300 christos }
368 1.1 oster
369 1.300 christos static void
370 1.300 christos raiddestroy(struct raid_softc *sc) {
371 1.316.2.4 skrll cv_destroy(&sc->sc_cv);
372 1.316.2.4 skrll mutex_destroy(&sc->sc_mutex);
373 1.300 christos kmem_free(sc, sizeof(*sc));
374 1.300 christos }
375 1.50 oster
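/*
 * raidget: look up the softc for `unit' on the global list.  If none
 * exists and `create' is true, allocate one and insert it; otherwise
 * return NULL.
 */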
376 1.300 christos static struct raid_softc *
377 1.316.2.4 skrll raidget(int unit, bool create) {
378 1.300 christos struct raid_softc *sc;
379 1.300 christos if (unit < 0) {
380 1.300 christos #ifdef DIAGNOSTIC
381 1.300 christos panic("%s: unit %d!", __func__, unit);
382 1.300 christos #endif
383 1.300 christos return NULL;
384 1.300 christos }
385 1.300 christos mutex_enter(&raid_lock);
386 1.300 christos LIST_FOREACH(sc, &raids, sc_link) {
387 1.300 christos if (sc->sc_unit == unit) {
388 1.300 christos mutex_exit(&raid_lock);
389 1.300 christos return sc;
390 1.300 christos }
391 1.300 christos }
392 1.300 christos mutex_exit(&raid_lock);
393 1.316.2.4 skrll if (!create)
394 1.316.2.4 skrll return NULL;
395 1.300 christos if ((sc = raidcreate(unit)) == NULL)
396 1.300 christos return NULL;
397 1.300 christos mutex_enter(&raid_lock);
398 1.300 christos LIST_INSERT_HEAD(&raids, sc, sc_link);
399 1.300 christos mutex_exit(&raid_lock);
400 1.300 christos return sc;
401 1.300 christos }
402 1.300 christos
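/*
 * raidput: unlink the softc from the global list and destroy it.
 */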
403 1.316.2.2 skrll static void
404 1.300 christos raidput(struct raid_softc *sc) {
405 1.300 christos mutex_enter(&raid_lock);
406 1.300 christos LIST_REMOVE(sc, sc_link);
407 1.300 christos mutex_exit(&raid_lock);
408 1.300 christos raiddestroy(sc);
409 1.300 christos }
410 1.1 oster
411 1.300 christos void
412 1.300 christos raidattach(int num)
413 1.300 christos {
414 1.62 oster
415 1.142 thorpej /*
 416 1.316.2.4 skrll 	 * Device attachment and associated initialization now occur
417 1.316.2.4 skrll * as part of the module initialization.
418 1.142 thorpej */
419 1.142 thorpej }
420 1.142 thorpej
421 1.142 thorpej int
422 1.261 dyoung rf_autoconfig(device_t self)
423 1.142 thorpej {
424 1.142 thorpej RF_AutoConfig_t *ac_list;
425 1.142 thorpej RF_ConfigSet_t *config_sets;
426 1.142 thorpej
427 1.295 erh if (!raidautoconfig || raidautoconfigdone == true)
428 1.142 thorpej return (0);
429 1.142 thorpej
430 1.142 thorpej /* XXX This code can only be run once. */
431 1.295 erh raidautoconfigdone = true;
432 1.142 thorpej
433 1.307 christos #ifdef __HAVE_CPU_BOOTCONF
434 1.307 christos /*
 435 1.307 christos 	 * 0. Find the boot device first, if needed, so we can use it later.
 436 1.307 christos 	 *    This needs to be done before we autoconfigure any RAID sets,
 437 1.307 christos 	 *    because if we use wedges we will not be able to open the
 438 1.307 christos 	 *    boot device later.
439 1.307 christos */
440 1.307 christos if (booted_device == NULL)
441 1.307 christos cpu_bootconf();
442 1.307 christos #endif
443 1.48 oster /* 1. locate all RAID components on the system */
444 1.258 ad aprint_debug("Searching for RAID components...\n");
445 1.48 oster ac_list = rf_find_raid_components();
446 1.48 oster
447 1.142 thorpej /* 2. Sort them into their respective sets. */
448 1.48 oster config_sets = rf_create_auto_sets(ac_list);
449 1.48 oster
450 1.142 thorpej /*
451 1.299 oster * 3. Evaluate each set and configure the valid ones.
452 1.142 thorpej * This gets done in rf_buildroothack().
453 1.142 thorpej */
454 1.142 thorpej rf_buildroothack(config_sets);
455 1.48 oster
456 1.213 christos return 1;
457 1.48 oster }
458 1.48 oster
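/*
 * rf_containsboot: return non-zero if any component of the RAID set
 * lives on the boot device `bdv'.  Components that are dk(4) wedges
 * are resolved to their parent device before the names are compared.
 */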
459 1.306 christos static int
460 1.307 christos rf_containsboot(RF_Raid_t *r, device_t bdv) {
461 1.307 christos const char *bootname = device_xname(bdv);
462 1.306 christos size_t len = strlen(bootname);
463 1.306 christos
464 1.306 christos for (int col = 0; col < r->numCol; col++) {
465 1.307 christos const char *devname = r->Disks[col].devname;
466 1.306 christos devname += sizeof("/dev/") - 1;
467 1.307 christos if (strncmp(devname, "dk", 2) == 0) {
468 1.307 christos const char *parent =
469 1.307 christos dkwedge_get_parent_name(r->Disks[col].dev);
470 1.307 christos if (parent != NULL)
471 1.307 christos devname = parent;
472 1.307 christos }
473 1.306 christos if (strncmp(devname, bootname, len) == 0) {
474 1.306 christos struct raid_softc *sc = r->softc;
475 1.306 christos aprint_debug("raid%d includes boot device %s\n",
476 1.306 christos sc->sc_unit, devname);
477 1.306 christos return 1;
478 1.306 christos }
479 1.306 christos }
480 1.306 christos return 0;
481 1.306 christos }
482 1.306 christos
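/*
 * rf_buildroothack: configure each eligible set.  If the user has not
 * hardwired a root device and exactly one configured set is rootable,
 * point booted_device at that set, preferring its 'a' wedge (or first
 * dk child) when wedges are in use.
 */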
483 1.48 oster void
484 1.142 thorpej rf_buildroothack(RF_ConfigSet_t *config_sets)
485 1.48 oster {
486 1.48 oster RF_ConfigSet_t *cset;
487 1.48 oster RF_ConfigSet_t *next_cset;
488 1.51 oster int num_root;
489 1.300 christos struct raid_softc *sc, *rsc;
490 1.316.2.5 skrll struct dk_softc *dksc;
491 1.48 oster
492 1.300 christos sc = rsc = NULL;
493 1.51 oster num_root = 0;
494 1.48 oster cset = config_sets;
495 1.271 dyoung while (cset != NULL) {
496 1.48 oster next_cset = cset->next;
497 1.186 perry if (rf_have_enough_components(cset) &&
498 1.300 christos cset->ac->clabel->autoconfigure == 1) {
499 1.300 christos sc = rf_auto_config_set(cset);
500 1.300 christos if (sc != NULL) {
501 1.300 christos aprint_debug("raid%d: configured ok\n",
502 1.300 christos sc->sc_unit);
503 1.51 oster if (cset->rootable) {
504 1.300 christos rsc = sc;
505 1.51 oster num_root++;
506 1.51 oster }
507 1.51 oster } else {
508 1.51 oster /* The autoconfig didn't work :( */
509 1.300 christos aprint_debug("Autoconfig failed\n");
510 1.51 oster rf_release_all_vps(cset);
511 1.48 oster }
512 1.48 oster } else {
513 1.186 perry /* we're not autoconfiguring this set...
514 1.48 oster release the associated resources */
515 1.49 oster rf_release_all_vps(cset);
516 1.48 oster }
517 1.48 oster /* cleanup */
518 1.49 oster rf_cleanup_config_set(cset);
519 1.48 oster cset = next_cset;
520 1.48 oster }
521 1.316.2.5 skrll dksc = &rsc->sc_dksc;
522 1.122 oster
523 1.223 oster /* if the user has specified what the root device should be
524 1.223 oster then we don't touch booted_device or boothowto... */
525 1.223 oster
526 1.223 oster if (rootspec != NULL)
527 1.223 oster return;
528 1.223 oster
529 1.122 oster /* we found something bootable... */
530 1.122 oster
531 1.310 christos /*
532 1.310 christos * XXX: The following code assumes that the root raid
533 1.310 christos * is the first ('a') partition. This is about the best
534 1.310 christos * we can do with a BSD disklabel, but we might be able
535 1.310 christos * to do better with a GPT label, by setting a specified
536 1.310 christos * attribute to indicate the root partition. We can then
537 1.310 christos * stash the partition number in the r->root_partition
538 1.310 christos * high bits (the bottom 2 bits are already used). For
539 1.310 christos * now we just set booted_partition to 0 when we override
540 1.310 christos * root.
541 1.310 christos */
542 1.122 oster if (num_root == 1) {
543 1.306 christos device_t candidate_root;
544 1.316.2.5 skrll if (dksc->sc_dkdev.dk_nwedges != 0) {
545 1.297 christos char cname[sizeof(cset->ac->devname)];
546 1.316.2.6 skrll /* XXX: assume partition 'a' first */
547 1.297 christos snprintf(cname, sizeof(cname), "%s%c",
548 1.316.2.5 skrll device_xname(dksc->sc_dev), 'a');
549 1.306 christos candidate_root = dkwedge_find_by_wname(cname);
550 1.316.2.6 skrll DPRINTF("%s: candidate wedge root=%s\n", __func__,
551 1.316.2.6 skrll cname);
552 1.316.2.6 skrll if (candidate_root == NULL) {
553 1.316.2.6 skrll /*
 554 1.316.2.6 skrll 			 * If that is not found (because we don't use a
 555 1.316.2.6 skrll 			 * disklabel), return the first dk child.
556 1.316.2.6 skrll * XXX: we can skip the 'a' check above
557 1.316.2.6 skrll * and always do this...
558 1.316.2.6 skrll */
559 1.316.2.6 skrll size_t i = 0;
560 1.316.2.6 skrll candidate_root = dkwedge_find_by_parent(
561 1.316.2.6 skrll device_xname(dksc->sc_dev), &i);
562 1.316.2.6 skrll }
563 1.316.2.6 skrll DPRINTF("%s: candidate wedge root=%p\n", __func__,
564 1.316.2.6 skrll candidate_root);
565 1.297 christos } else
566 1.316.2.5 skrll candidate_root = dksc->sc_dev;
567 1.316.2.6 skrll DPRINTF("%s: candidate root=%p\n", __func__, candidate_root);
568 1.316.2.6 skrll DPRINTF("%s: booted_device=%p root_partition=%d "
569 1.316.2.6 skrll "contains_boot=%d\n", __func__, booted_device,
570 1.316.2.6 skrll rsc->sc_r.root_partition,
571 1.316.2.6 skrll rf_containsboot(&rsc->sc_r, booted_device));
572 1.308 christos if (booted_device == NULL ||
573 1.308 christos rsc->sc_r.root_partition == 1 ||
574 1.310 christos rf_containsboot(&rsc->sc_r, booted_device)) {
575 1.308 christos booted_device = candidate_root;
576 1.310 christos booted_partition = 0; /* XXX assume 'a' */
577 1.310 christos }
578 1.122 oster } else if (num_root > 1) {
579 1.316.2.6 skrll DPRINTF("%s: many roots=%d, %p\n", __func__, num_root,
580 1.316.2.6 skrll booted_device);
581 1.226 oster
582 1.316.2.2 skrll /*
583 1.226 oster * Maybe the MD code can help. If it cannot, then
584 1.226 oster * setroot() will discover that we have no
585 1.226 oster * booted_device and will ask the user if nothing was
586 1.316.2.2 skrll * hardwired in the kernel config file
587 1.226 oster */
588 1.316.2.2 skrll if (booted_device == NULL)
589 1.226 oster return;
590 1.226 oster
591 1.226 oster num_root = 0;
592 1.300 christos mutex_enter(&raid_lock);
593 1.300 christos LIST_FOREACH(sc, &raids, sc_link) {
594 1.300 christos RF_Raid_t *r = &sc->sc_r;
595 1.300 christos if (r->valid == 0)
596 1.226 oster continue;
597 1.226 oster
598 1.300 christos if (r->root_partition == 0)
599 1.226 oster continue;
600 1.226 oster
601 1.306 christos if (rf_containsboot(r, booted_device)) {
602 1.226 oster num_root++;
603 1.300 christos rsc = sc;
604 1.316.2.5 skrll dksc = &rsc->sc_dksc;
605 1.226 oster }
606 1.226 oster }
607 1.300 christos mutex_exit(&raid_lock);
608 1.295 erh
609 1.226 oster if (num_root == 1) {
610 1.316.2.5 skrll booted_device = dksc->sc_dev;
611 1.310 christos booted_partition = 0; /* XXX assume 'a' */
612 1.226 oster } else {
613 1.226 oster /* we can't guess.. require the user to answer... */
614 1.226 oster boothowto |= RB_ASKNAME;
615 1.226 oster }
616 1.51 oster }
617 1.1 oster }
618 1.1 oster
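/*
 * raidsize: d_psize entry point.  Defer to dk_size() once the unit
 * has been configured; return -1 otherwise.
 */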
619 1.316.2.3 skrll static int
620 1.169 oster raidsize(dev_t dev)
621 1.1 oster {
622 1.1 oster struct raid_softc *rs;
623 1.316.2.5 skrll struct dk_softc *dksc;
624 1.316.2.5 skrll unsigned int unit;
625 1.1 oster
626 1.1 oster unit = raidunit(dev);
627 1.316.2.4 skrll if ((rs = raidget(unit, false)) == NULL)
628 1.300 christos return -1;
629 1.316.2.5 skrll dksc = &rs->sc_dksc;
630 1.316.2.5 skrll
631 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
632 1.316.2.5 skrll return -1;
633 1.1 oster
634 1.316.2.5 skrll return dk_size(dksc, dev);
635 1.316.2.5 skrll }
636 1.1 oster
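/*
 * raiddump: crash-dump entry point.  Offset the partition-relative
 * block number by RF_PROTECTED_SECTORS and hand the request to
 * dk_dump().
 */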
637 1.316.2.5 skrll static int
638 1.316.2.5 skrll raiddump(dev_t dev, daddr_t blkno, void *va, size_t size)
639 1.316.2.5 skrll {
640 1.316.2.5 skrll unsigned int unit;
641 1.316.2.5 skrll struct raid_softc *rs;
642 1.316.2.5 skrll struct dk_softc *dksc;
643 1.1 oster
644 1.316.2.5 skrll unit = raidunit(dev);
645 1.316.2.5 skrll if ((rs = raidget(unit, false)) == NULL)
646 1.316.2.5 skrll return ENXIO;
647 1.316.2.5 skrll dksc = &rs->sc_dksc;
648 1.1 oster
649 1.316.2.5 skrll if ((rs->sc_flags & RAIDF_INITED) == 0)
650 1.316.2.5 skrll return ENODEV;
651 1.1 oster
652 1.316.2.5 skrll /*
653 1.316.2.5 skrll Note that blkno is relative to this particular partition.
 654 1.316.2.5 skrll 	   By adding RF_PROTECTED_SECTORS, we get a value that
655 1.316.2.5 skrll is relative to the partition used for the underlying component.
656 1.316.2.5 skrll */
657 1.316.2.5 skrll blkno += RF_PROTECTED_SECTORS;
658 1.1 oster
659 1.316.2.5 skrll return dk_dump(dksc, dev, blkno, va, size);
660 1.1 oster }
661 1.1 oster
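/*
 * raid_dumpblocks: write `nblk' blocks starting at `blkno' to a
 * surviving component.  Only RAID 1 sets (one data column, one parity
 * column) are supported for dumping.
 */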
662 1.316.2.3 skrll static int
663 1.316.2.5 skrll raid_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
664 1.1 oster {
665 1.316.2.5 skrll struct raid_softc *rs = raidsoftc(dev);
666 1.231 oster const struct bdevsw *bdev;
667 1.231 oster RF_Raid_t *raidPtr;
668 1.316.2.5 skrll int c, sparecol, j, scol, dumpto;
669 1.231 oster int error = 0;
670 1.231 oster
671 1.300 christos raidPtr = &rs->sc_r;
672 1.231 oster
673 1.231 oster /* we only support dumping to RAID 1 sets */
674 1.316.2.2 skrll if (raidPtr->Layout.numDataCol != 1 ||
675 1.231 oster raidPtr->Layout.numParityCol != 1)
676 1.231 oster return EINVAL;
677 1.231 oster
678 1.231 oster if ((error = raidlock(rs)) != 0)
679 1.231 oster return error;
680 1.231 oster
681 1.231 oster /* figure out what device is alive.. */
682 1.231 oster
683 1.316.2.2 skrll /*
684 1.231 oster Look for a component to dump to. The preference for the
685 1.231 oster component to dump to is as follows:
686 1.231 oster 1) the master
687 1.231 oster 2) a used_spare of the master
688 1.231 oster 3) the slave
689 1.231 oster 4) a used_spare of the slave
690 1.231 oster */
691 1.231 oster
692 1.231 oster dumpto = -1;
693 1.231 oster for (c = 0; c < raidPtr->numCol; c++) {
694 1.231 oster if (raidPtr->Disks[c].status == rf_ds_optimal) {
695 1.231 oster /* this might be the one */
696 1.231 oster dumpto = c;
697 1.231 oster break;
698 1.231 oster }
699 1.231 oster }
700 1.316.2.2 skrll
701 1.316.2.2 skrll /*
702 1.231 oster At this point we have possibly selected a live master or a
703 1.231 oster live slave. We now check to see if there is a spared
704 1.231 oster master (or a spared slave), if we didn't find a live master
705 1.316.2.2 skrll or a live slave.
706 1.231 oster */
707 1.231 oster
708 1.231 oster for (c = 0; c < raidPtr->numSpare; c++) {
709 1.231 oster sparecol = raidPtr->numCol + c;
710 1.231 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
711 1.231 oster /* How about this one? */
712 1.231 oster scol = -1;
713 1.231 oster for(j=0;j<raidPtr->numCol;j++) {
714 1.231 oster if (raidPtr->Disks[j].spareCol == sparecol) {
715 1.231 oster scol = j;
716 1.231 oster break;
717 1.231 oster }
718 1.231 oster }
719 1.231 oster if (scol == 0) {
720 1.316.2.2 skrll /*
721 1.231 oster We must have found a spared master!
722 1.231 oster We'll take that over anything else
723 1.231 oster found so far. (We couldn't have
724 1.231 oster found a real master before, since
725 1.231 oster this is a used spare, and it's
726 1.231 oster saying that it's replacing the
727 1.231 oster master.) On reboot (with
728 1.231 oster autoconfiguration turned on)
729 1.231 oster sparecol will become the 1st
730 1.316.2.2 skrll component (component0) of this set.
731 1.231 oster */
732 1.231 oster dumpto = sparecol;
733 1.231 oster break;
734 1.231 oster } else if (scol != -1) {
735 1.316.2.2 skrll /*
736 1.231 oster Must be a spared slave. We'll dump
 737 1.231 oster 				   to that if we haven't found anything
738 1.316.2.2 skrll else so far.
739 1.231 oster */
740 1.231 oster if (dumpto == -1)
741 1.231 oster dumpto = sparecol;
742 1.231 oster }
743 1.231 oster }
744 1.231 oster }
745 1.316.2.2 skrll
746 1.231 oster if (dumpto == -1) {
747 1.231 oster /* we couldn't find any live components to dump to!?!?
748 1.231 oster */
749 1.231 oster error = EINVAL;
750 1.231 oster goto out;
751 1.231 oster }
752 1.231 oster
753 1.231 oster bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);
754 1.316.2.5 skrll if (bdev == NULL) {
755 1.316.2.5 skrll error = ENXIO;
756 1.316.2.5 skrll goto out;
757 1.316.2.5 skrll }
758 1.231 oster
759 1.316.2.7 skrll error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
760 1.316.2.5 skrll blkno, va, nblk * raidPtr->bytesPerSector);
761 1.316.2.7 skrll
762 1.231 oster out:
763 1.231 oster raidunlock(rs);
764 1.316.2.2 skrll
765 1.231 oster return error;
766 1.1 oster }
767 1.316.2.3 skrll
768 1.1 oster /* ARGSUSED */
769 1.316.2.3 skrll static int
770 1.222 christos raidopen(dev_t dev, int flags, int fmt,
771 1.222 christos struct lwp *l)
772 1.1 oster {
773 1.9 oster int unit = raidunit(dev);
774 1.1 oster struct raid_softc *rs;
775 1.316.2.5 skrll struct dk_softc *dksc;
776 1.9 oster int error = 0;
777 1.316.2.5 skrll int part, pmask;
778 1.9 oster
779 1.316.2.4 skrll if ((rs = raidget(unit, true)) == NULL)
780 1.300 christos return ENXIO;
781 1.1 oster if ((error = raidlock(rs)) != 0)
782 1.9 oster return (error);
783 1.266 dyoung
784 1.266 dyoung if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0) {
785 1.266 dyoung error = EBUSY;
786 1.266 dyoung goto bad;
787 1.266 dyoung }
788 1.266 dyoung
789 1.316.2.5 skrll dksc = &rs->sc_dksc;
790 1.1 oster
791 1.1 oster part = DISKPART(dev);
792 1.1 oster pmask = (1 << part);
793 1.1 oster
794 1.316.2.5 skrll if (!DK_BUSY(dksc, pmask) &&
795 1.13 oster ((rs->sc_flags & RAIDF_INITED) != 0)) {
796 1.13 oster /* First one... mark things as dirty... Note that we *MUST*
797 1.13 oster have done a configure before this. I DO NOT WANT TO BE
798 1.13 oster SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
799 1.13 oster THAT THEY BELONG TOGETHER!!!!! */
800 1.13 oster /* XXX should check to see if we're only open for reading
801 1.13 oster here... If so, we needn't do this, but then need some
802 1.13 oster other way of keeping track of what's happened.. */
803 1.13 oster
804 1.300 christos rf_markalldirty(&rs->sc_r);
805 1.13 oster }
806 1.13 oster
807 1.316.2.5 skrll if ((rs->sc_flags & RAIDF_INITED) != 0)
808 1.316.2.5 skrll error = dk_open(dksc, dev, flags, fmt, l);
809 1.1 oster
810 1.213 christos bad:
811 1.1 oster raidunlock(rs);
812 1.1 oster
813 1.9 oster return (error);
814 1.1 oster
815 1.1 oster
816 1.1 oster }
817 1.316.2.3 skrll
818 1.316.2.5 skrll static int
819 1.316.2.5 skrll raid_lastclose(device_t self)
820 1.316.2.5 skrll {
821 1.316.2.5 skrll struct raid_softc *rs = raidsoftc(self);
822 1.316.2.5 skrll
 823 1.316.2.5 skrll 	/* Last one... the device is not unconfigured yet.
 824 1.316.2.5 skrll 	   If RAIDF_INITED is not set, device shutdown has already
 825 1.316.2.5 skrll 	   taken care of setting the clean bits; mark things as
 826 1.316.2.5 skrll 	   clean here... */
827 1.316.2.5 skrll
828 1.316.2.5 skrll rf_update_component_labels(&rs->sc_r,
829 1.316.2.5 skrll RF_FINAL_COMPONENT_UPDATE);
830 1.316.2.5 skrll
831 1.316.2.5 skrll /* pass to unlocked code */
832 1.316.2.5 skrll if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0)
833 1.316.2.5 skrll rs->sc_flags |= RAIDF_DETACH;
834 1.316.2.5 skrll
835 1.316.2.5 skrll return 0;
836 1.316.2.5 skrll }
837 1.316.2.5 skrll
838 1.1 oster /* ARGSUSED */
839 1.316.2.3 skrll static int
840 1.222 christos raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
841 1.1 oster {
842 1.9 oster int unit = raidunit(dev);
843 1.1 oster struct raid_softc *rs;
844 1.316.2.5 skrll struct dk_softc *dksc;
845 1.316.2.5 skrll cfdata_t cf;
846 1.316.2.5 skrll int error = 0, do_detach = 0, do_put = 0;
847 1.1 oster
848 1.316.2.4 skrll if ((rs = raidget(unit, false)) == NULL)
849 1.300 christos return ENXIO;
850 1.316.2.5 skrll dksc = &rs->sc_dksc;
851 1.1 oster
852 1.1 oster if ((error = raidlock(rs)) != 0)
853 1.1 oster return (error);
854 1.1 oster
855 1.316.2.5 skrll if ((rs->sc_flags & RAIDF_INITED) != 0) {
856 1.316.2.5 skrll error = dk_close(dksc, dev, flags, fmt, l);
857 1.316.2.5 skrll if ((rs->sc_flags & RAIDF_DETACH) != 0)
858 1.316.2.5 skrll do_detach = 1;
859 1.316.2.5 skrll } else if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0)
860 1.316.2.5 skrll do_put = 1;
861 1.186 perry
862 1.316.2.5 skrll raidunlock(rs);
863 1.147 oster
864 1.316.2.5 skrll if (do_detach) {
865 1.316.2.5 skrll /* free the pseudo device attach bits */
866 1.316.2.5 skrll cf = device_cfdata(dksc->sc_dev);
867 1.316.2.5 skrll error = config_detach(dksc->sc_dev, 0);
868 1.316.2.7 skrll if (error == 0)
869 1.316.2.5 skrll free(cf, M_RAIDFRAME);
870 1.316.2.5 skrll } else if (do_put) {
871 1.316.2.5 skrll raidput(rs);
872 1.316.2.4 skrll }
873 1.316.2.4 skrll
874 1.316.2.5 skrll return (error);
875 1.316.2.4 skrll
876 1.316.2.5 skrll }
877 1.1 oster
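/*
 * raid_wakeup: signal raidPtr->iodone_cv (under iodone_lock) so the
 * thread servicing this set picks up newly queued or completed I/O.
 */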
878 1.316.2.5 skrll static void
879 1.316.2.5 skrll raid_wakeup(RF_Raid_t *raidPtr)
880 1.316.2.5 skrll {
881 1.316.2.5 skrll rf_lock_mutex2(raidPtr->iodone_lock);
882 1.316.2.5 skrll rf_signal_cond2(raidPtr->iodone_cv);
883 1.316.2.5 skrll rf_unlock_mutex2(raidPtr->iodone_lock);
884 1.1 oster }
885 1.1 oster
886 1.316.2.3 skrll static void
887 1.169 oster raidstrategy(struct buf *bp)
888 1.1 oster {
889 1.316.2.5 skrll unsigned int unit;
890 1.300 christos struct raid_softc *rs;
891 1.316.2.5 skrll struct dk_softc *dksc;
892 1.316.2.5 skrll RF_Raid_t *raidPtr;
893 1.1 oster
894 1.316.2.5 skrll unit = raidunit(bp->b_dev);
895 1.316.2.4 skrll if ((rs = raidget(unit, false)) == NULL) {
896 1.30 oster bp->b_error = ENXIO;
897 1.316.2.5 skrll goto fail;
898 1.30 oster }
899 1.300 christos if ((rs->sc_flags & RAIDF_INITED) == 0) {
900 1.300 christos bp->b_error = ENXIO;
901 1.316.2.5 skrll goto fail;
902 1.1 oster }
903 1.316.2.5 skrll dksc = &rs->sc_dksc;
904 1.300 christos raidPtr = &rs->sc_r;
905 1.316.2.5 skrll
906 1.316.2.5 skrll /* Queue IO only */
907 1.316.2.5 skrll if (dk_strategy_defer(dksc, bp))
908 1.196 yamt goto done;
909 1.1 oster
910 1.316.2.5 skrll /* schedule the IO to happen at the next convenient time */
911 1.316.2.5 skrll raid_wakeup(raidPtr);
912 1.1 oster
913 1.316.2.5 skrll done:
914 1.316.2.5 skrll return;
915 1.285 mrg
916 1.316.2.5 skrll fail:
917 1.316.2.5 skrll bp->b_resid = bp->b_bcount;
918 1.316.2.5 skrll biodone(bp);
919 1.316.2.5 skrll }
920 1.316.2.5 skrll
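/*
 * raid_diskstart: dk(4) start routine.  Submit a single buffer to
 * RAIDframe via raiddoaccess(); fails with ENODEV if the set is not
 * currently valid.
 */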
921 1.316.2.5 skrll static int
922 1.316.2.5 skrll raid_diskstart(device_t dev, struct buf *bp)
923 1.316.2.5 skrll {
924 1.316.2.5 skrll struct raid_softc *rs = raidsoftc(dev);
925 1.316.2.5 skrll RF_Raid_t *raidPtr;
926 1.316.2.5 skrll
927 1.316.2.5 skrll raidPtr = &rs->sc_r;
928 1.316.2.5 skrll if (!raidPtr->valid) {
929 1.316.2.5 skrll db1_printf(("raid is not valid..\n"));
930 1.316.2.5 skrll return ENODEV;
931 1.316.2.5 skrll }
932 1.1 oster
933 1.316.2.5 skrll /* XXX */
934 1.1 oster bp->b_resid = 0;
935 1.34 oster
936 1.316.2.5 skrll return raiddoaccess(raidPtr, bp);
937 1.316.2.5 skrll }
938 1.34 oster
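/*
 * raiddone: I/O completion.  Hand the buffer back to the dk(4) layer,
 * return the opening to the RAID set and wake the engine so more work
 * can be scheduled.
 */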
939 1.316.2.5 skrll void
940 1.316.2.5 skrll raiddone(RF_Raid_t *raidPtr, struct buf *bp)
941 1.316.2.5 skrll {
942 1.316.2.5 skrll struct raid_softc *rs;
943 1.316.2.5 skrll struct dk_softc *dksc;
944 1.34 oster
945 1.316.2.5 skrll rs = raidPtr->softc;
946 1.316.2.5 skrll dksc = &rs->sc_dksc;
947 1.196 yamt
948 1.316.2.5 skrll dk_done(dksc, bp);
949 1.316.2.5 skrll
950 1.316.2.5 skrll rf_lock_mutex2(raidPtr->mutex);
951 1.316.2.5 skrll raidPtr->openings++;
952 1.316.2.5 skrll rf_unlock_mutex2(raidPtr->mutex);
953 1.316.2.5 skrll
954 1.316.2.5 skrll /* schedule more IO */
955 1.316.2.5 skrll raid_wakeup(raidPtr);
956 1.1 oster }
957 1.316.2.3 skrll
958 1.1 oster /* ARGSUSED */
959 1.316.2.3 skrll static int
960 1.222 christos raidread(dev_t dev, struct uio *uio, int flags)
961 1.1 oster {
962 1.9 oster int unit = raidunit(dev);
963 1.1 oster struct raid_softc *rs;
964 1.1 oster
965 1.316.2.4 skrll if ((rs = raidget(unit, false)) == NULL)
966 1.300 christos return ENXIO;
967 1.1 oster
968 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
969 1.1 oster return (ENXIO);
970 1.1 oster
971 1.1 oster return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
972 1.1 oster
973 1.1 oster }
974 1.316.2.3 skrll
975 1.1 oster /* ARGSUSED */
976 1.316.2.3 skrll static int
977 1.222 christos raidwrite(dev_t dev, struct uio *uio, int flags)
978 1.1 oster {
979 1.9 oster int unit = raidunit(dev);
980 1.1 oster struct raid_softc *rs;
981 1.1 oster
982 1.316.2.4 skrll if ((rs = raidget(unit, false)) == NULL)
983 1.300 christos return ENXIO;
984 1.1 oster
985 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
986 1.1 oster return (ENXIO);
987 1.147 oster
988 1.1 oster return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
989 1.1 oster
990 1.1 oster }
991 1.1 oster
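/*
 * raid_detach_unlocked: tear down a configured set.  Fails with EBUSY
 * while the unit is open or a reconstruction, parity rewrite or
 * copyback is in progress; otherwise shuts down RAIDframe, drains the
 * buffer queue and detaches the disk.  Expects the caller to hold the
 * unit lock.
 */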
992 1.266 dyoung static int
993 1.266 dyoung raid_detach_unlocked(struct raid_softc *rs)
994 1.266 dyoung {
995 1.316.2.5 skrll struct dk_softc *dksc = &rs->sc_dksc;
996 1.266 dyoung RF_Raid_t *raidPtr;
997 1.316.2.5 skrll int error;
998 1.266 dyoung
999 1.300 christos raidPtr = &rs->sc_r;
1000 1.266 dyoung
1001 1.316.2.5 skrll if (DK_BUSY(dksc, 0) ||
1002 1.316.2.5 skrll raidPtr->recon_in_progress != 0 ||
1003 1.316.2.5 skrll raidPtr->parity_rewrite_in_progress != 0 ||
1004 1.316.2.5 skrll raidPtr->copyback_in_progress != 0)
1005 1.266 dyoung return EBUSY;
1006 1.266 dyoung
1007 1.266 dyoung if ((rs->sc_flags & RAIDF_INITED) == 0)
1008 1.316.2.5 skrll return 0;
1009 1.316.2.5 skrll
1010 1.316.2.5 skrll rs->sc_flags &= ~RAIDF_SHUTDOWN;
1011 1.316.2.5 skrll
1012 1.316.2.5 skrll if ((error = rf_Shutdown(raidPtr)) != 0)
1013 1.266 dyoung return error;
1014 1.266 dyoung
1015 1.316.2.5 skrll rs->sc_flags &= ~RAIDF_INITED;
1016 1.266 dyoung
1017 1.316.2.5 skrll /* Kill off any queued buffers */
1018 1.316.2.5 skrll dk_drain(dksc);
1019 1.316.2.5 skrll bufq_free(dksc->sc_bufq);
1020 1.316.2.4 skrll
1021 1.316.2.5 skrll /* Detach the disk. */
1022 1.316.2.5 skrll dkwedge_delall(&dksc->sc_dkdev);
1023 1.316.2.5 skrll disk_detach(&dksc->sc_dkdev);
1024 1.316.2.5 skrll disk_destroy(&dksc->sc_dkdev);
1025 1.316.2.5 skrll dk_detach(dksc);
1026 1.290 mrg
1027 1.266 dyoung return 0;
1028 1.266 dyoung }
1029 1.266 dyoung
1030 1.316.2.3 skrll static int
1031 1.225 christos raidioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1032 1.1 oster {
1033 1.9 oster int unit = raidunit(dev);
1034 1.9 oster int error = 0;
1035 1.316.2.5 skrll int part, pmask;
1036 1.1 oster struct raid_softc *rs;
1037 1.316.2.5 skrll struct dk_softc *dksc;
1038 1.1 oster RF_Config_t *k_cfg, *u_cfg;
1039 1.42 oster RF_Raid_t *raidPtr;
1040 1.48 oster RF_RaidDisk_t *diskPtr;
1041 1.41 oster RF_AccTotals_t *totals;
1042 1.41 oster RF_DeviceConfig_t *d_cfg, **ucfgp;
1043 1.1 oster u_char *specific_buf;
1044 1.11 oster int retcode = 0;
1045 1.11 oster int column;
1046 1.269 jld /* int raidid; */
1047 1.1 oster struct rf_recon_req *rrcopy, *rr;
1048 1.48 oster RF_ComponentLabel_t *clabel;
1049 1.209 oster RF_ComponentLabel_t *ci_label;
1050 1.48 oster RF_ComponentLabel_t **clabel_ptr;
1051 1.12 oster RF_SingleComponent_t *sparePtr,*componentPtr;
1052 1.12 oster RF_SingleComponent_t component;
1053 1.83 oster RF_ProgressInfo_t progressInfo, **progressInfoPtr;
1054 1.41 oster int i, j, d;
1055 1.1 oster
1056 1.316.2.4 skrll if ((rs = raidget(unit, false)) == NULL)
1057 1.300 christos return ENXIO;
1058 1.316.2.5 skrll dksc = &rs->sc_dksc;
1059 1.300 christos raidPtr = &rs->sc_r;
1060 1.1 oster
1061 1.276 mrg db1_printf(("raidioctl: %d %d %d %lu\n", (int) dev,
1062 1.276 mrg (int) DISKPART(dev), (int) unit, cmd));
1063 1.1 oster
1064 1.1 oster /* Must be initialized for these... */
1065 1.1 oster switch (cmd) {
1066 1.1 oster case RAIDFRAME_REWRITEPARITY:
1067 1.1 oster case RAIDFRAME_GET_INFO:
1068 1.1 oster case RAIDFRAME_RESET_ACCTOTALS:
1069 1.1 oster case RAIDFRAME_GET_ACCTOTALS:
1070 1.1 oster case RAIDFRAME_KEEP_ACCTOTALS:
1071 1.1 oster case RAIDFRAME_GET_SIZE:
1072 1.1 oster case RAIDFRAME_FAIL_DISK:
1073 1.1 oster case RAIDFRAME_COPYBACK:
1074 1.37 oster case RAIDFRAME_CHECK_RECON_STATUS:
1075 1.83 oster case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1076 1.11 oster case RAIDFRAME_GET_COMPONENT_LABEL:
1077 1.11 oster case RAIDFRAME_SET_COMPONENT_LABEL:
1078 1.11 oster case RAIDFRAME_ADD_HOT_SPARE:
1079 1.11 oster case RAIDFRAME_REMOVE_HOT_SPARE:
1080 1.11 oster case RAIDFRAME_INIT_LABELS:
1081 1.12 oster case RAIDFRAME_REBUILD_IN_PLACE:
1082 1.23 oster case RAIDFRAME_CHECK_PARITY:
1083 1.37 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1084 1.83 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1085 1.37 oster case RAIDFRAME_CHECK_COPYBACK_STATUS:
1086 1.83 oster case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1087 1.48 oster case RAIDFRAME_SET_AUTOCONFIG:
1088 1.48 oster case RAIDFRAME_SET_ROOT:
1089 1.73 oster case RAIDFRAME_DELETE_COMPONENT:
1090 1.73 oster case RAIDFRAME_INCORPORATE_HOT_SPARE:
1091 1.269 jld case RAIDFRAME_PARITYMAP_STATUS:
1092 1.269 jld case RAIDFRAME_PARITYMAP_GET_DISABLE:
1093 1.269 jld case RAIDFRAME_PARITYMAP_SET_DISABLE:
1094 1.269 jld case RAIDFRAME_PARITYMAP_SET_PARAMS:
1095 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
1096 1.1 oster return (ENXIO);
1097 1.1 oster }
1098 1.9 oster
1099 1.1 oster switch (cmd) {
1100 1.254 christos #ifdef COMPAT_50
1101 1.254 christos case RAIDFRAME_GET_INFO50:
1102 1.254 christos return rf_get_info50(raidPtr, data);
1103 1.254 christos
1104 1.254 christos case RAIDFRAME_CONFIGURE50:
1105 1.254 christos if ((retcode = rf_config50(raidPtr, unit, data, &k_cfg)) != 0)
1106 1.254 christos return retcode;
1107 1.254 christos goto config;
1108 1.254 christos #endif
1109 1.1 oster /* configure the system */
1110 1.1 oster case RAIDFRAME_CONFIGURE:
1111 1.48 oster
1112 1.48 oster if (raidPtr->valid) {
1113 1.48 oster /* There is a valid RAID set running on this unit! */
1114 1.48 oster printf("raid%d: Device already configured!\n",unit);
1115 1.66 oster return(EINVAL);
1116 1.48 oster }
1117 1.48 oster
1118 1.1 oster /* copy-in the configuration information */
1119 1.1 oster /* data points to a pointer to the configuration structure */
1120 1.43 oster
1121 1.9 oster u_cfg = *((RF_Config_t **) data);
1122 1.9 oster RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
1123 1.1 oster if (k_cfg == NULL) {
1124 1.9 oster return (ENOMEM);
1125 1.1 oster }
1126 1.156 dsl retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
1127 1.1 oster if (retcode) {
1128 1.33 oster RF_Free(k_cfg, sizeof(RF_Config_t));
1129 1.46 oster db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
1130 1.9 oster retcode));
1131 1.316.2.4 skrll goto no_config;
1132 1.1 oster }
1133 1.254 christos goto config;
1134 1.254 christos config:
1135 1.316.2.4 skrll rs->sc_flags &= ~RAIDF_SHUTDOWN;
1136 1.316.2.4 skrll
1137 1.9 oster /* allocate a buffer for the layout-specific data, and copy it
1138 1.9 oster * in */
1139 1.1 oster if (k_cfg->layoutSpecificSize) {
1140 1.9 oster if (k_cfg->layoutSpecificSize > 10000) {
1141 1.1 oster /* sanity check */
1142 1.33 oster RF_Free(k_cfg, sizeof(RF_Config_t));
1143 1.316.2.4 skrll retcode = EINVAL;
1144 1.316.2.4 skrll goto no_config;
1145 1.1 oster }
1146 1.9 oster RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
1147 1.9 oster (u_char *));
1148 1.1 oster if (specific_buf == NULL) {
1149 1.9 oster RF_Free(k_cfg, sizeof(RF_Config_t));
1150 1.316.2.4 skrll retcode = ENOMEM;
1151 1.316.2.4 skrll goto no_config;
1152 1.1 oster }
1153 1.156 dsl retcode = copyin(k_cfg->layoutSpecific, specific_buf,
1154 1.9 oster k_cfg->layoutSpecificSize);
1155 1.1 oster if (retcode) {
1156 1.33 oster RF_Free(k_cfg, sizeof(RF_Config_t));
1157 1.186 perry RF_Free(specific_buf,
1158 1.42 oster k_cfg->layoutSpecificSize);
1159 1.46 oster db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
1160 1.9 oster retcode));
1161 1.316.2.4 skrll goto no_config;
1162 1.1 oster }
1163 1.9 oster } else
1164 1.9 oster specific_buf = NULL;
1165 1.1 oster k_cfg->layoutSpecific = specific_buf;
1166 1.9 oster
1167 1.9 oster /* should do some kind of sanity check on the configuration.
1168 1.9 oster * Store the sum of all the bytes in the last byte? */
1169 1.1 oster
1170 1.1 oster /* configure the system */
1171 1.1 oster
1172 1.48 oster /*
1173 1.48 oster * Clear the entire RAID descriptor, just to make sure
1174 1.186 perry * there is no stale data left in the case of a
1175 1.186 perry * reconfiguration
1176 1.48 oster */
1177 1.277 christos memset(raidPtr, 0, sizeof(*raidPtr));
1178 1.302 christos raidPtr->softc = rs;
1179 1.42 oster raidPtr->raidid = unit;
1180 1.20 oster
1181 1.48 oster retcode = rf_Configure(raidPtr, k_cfg, NULL);
1182 1.1 oster
1183 1.40 oster if (retcode == 0) {
1184 1.37 oster
1185 1.186 perry /* allow this many simultaneous IO's to
1186 1.40 oster this RAID device */
1187 1.42 oster raidPtr->openings = RAIDOUTSTANDING;
1188 1.186 perry
1189 1.300 christos raidinit(rs);
1190 1.316.2.5 skrll raid_wakeup(raidPtr);
1191 1.59 oster rf_markalldirty(raidPtr);
1192 1.9 oster }
1193 1.1 oster /* free the buffers. No return code here. */
1194 1.1 oster if (k_cfg->layoutSpecificSize) {
1195 1.9 oster RF_Free(specific_buf, k_cfg->layoutSpecificSize);
1196 1.1 oster }
1197 1.9 oster RF_Free(k_cfg, sizeof(RF_Config_t));
1198 1.9 oster
1199 1.316.2.4 skrll no_config:
1200 1.316.2.4 skrll /*
1201 1.316.2.4 skrll * If configuration failed, set sc_flags so that we
1202 1.316.2.4 skrll * will detach the device when we close it.
1203 1.316.2.4 skrll */
1204 1.316.2.4 skrll if (retcode != 0)
1205 1.316.2.4 skrll rs->sc_flags |= RAIDF_SHUTDOWN;
1206 1.9 oster return (retcode);
1207 1.9 oster
1208 1.9 oster /* shutdown the system */
1209 1.1 oster case RAIDFRAME_SHUTDOWN:
1210 1.9 oster
1211 1.266 dyoung part = DISKPART(dev);
1212 1.266 dyoung pmask = (1 << part);
1213 1.266 dyoung
1214 1.9 oster if ((error = raidlock(rs)) != 0)
1215 1.9 oster return (error);
1216 1.1 oster
1217 1.316.2.5 skrll if (DK_BUSY(dksc, pmask) ||
1218 1.316.2.5 skrll raidPtr->recon_in_progress != 0 ||
1219 1.316.2.5 skrll raidPtr->parity_rewrite_in_progress != 0 ||
1220 1.316.2.5 skrll raidPtr->copyback_in_progress != 0)
1221 1.266 dyoung retcode = EBUSY;
1222 1.266 dyoung else {
1223 1.316.2.5 skrll /* detach and free on close */
1224 1.266 dyoung rs->sc_flags |= RAIDF_SHUTDOWN;
1225 1.266 dyoung retcode = 0;
1226 1.9 oster }
1227 1.11 oster
1228 1.266 dyoung raidunlock(rs);
1229 1.1 oster
1230 1.9 oster return (retcode);
1231 1.11 oster case RAIDFRAME_GET_COMPONENT_LABEL:
1232 1.48 oster clabel_ptr = (RF_ComponentLabel_t **) data;
1233 1.11 oster /* need to read the component label for the disk indicated
1234 1.48 oster by row,column in clabel */
1235 1.11 oster
1236 1.269 jld /*
1237 1.269 jld * Perhaps there should be an option to skip the in-core
1238 1.269 jld * copy and hit the disk, as with disklabel(8).
1239 1.269 jld */
1240 1.269 jld RF_Malloc(clabel, sizeof(*clabel), (RF_ComponentLabel_t *));
1241 1.11 oster
1242 1.277 christos retcode = copyin(*clabel_ptr, clabel, sizeof(*clabel));
1243 1.11 oster
1244 1.11 oster if (retcode) {
1245 1.277 christos RF_Free(clabel, sizeof(*clabel));
1246 1.277 christos return retcode;
1247 1.11 oster }
1248 1.11 oster
1249 1.166 oster clabel->row = 0; /* Don't allow looking at anything else.*/
1250 1.166 oster
1251 1.48 oster column = clabel->column;
1252 1.26 oster
1253 1.166 oster if ((column < 0) || (column >= raidPtr->numCol +
1254 1.277 christos raidPtr->numSpare)) {
1255 1.277 christos RF_Free(clabel, sizeof(*clabel));
1256 1.277 christos return EINVAL;
1257 1.11 oster }
1258 1.11 oster
1259 1.269 jld RF_Free(clabel, sizeof(*clabel));
1260 1.269 jld
1261 1.269 jld clabel = raidget_component_label(raidPtr, column);
1262 1.11 oster
1263 1.277 christos return copyout(clabel, *clabel_ptr, sizeof(**clabel_ptr));
1264 1.11 oster
1265 1.269 jld #if 0
1266 1.11 oster case RAIDFRAME_SET_COMPONENT_LABEL:
1267 1.48 oster clabel = (RF_ComponentLabel_t *) data;
1268 1.11 oster
1269 1.11 oster /* XXX check the label for valid stuff... */
1270 1.11 oster /* Note that some things *should not* get modified --
1271 1.186 perry the user should be re-initing the labels instead of
1272 1.11 oster trying to patch things.
1273 1.11 oster */
1274 1.11 oster
1275 1.123 oster raidid = raidPtr->raidid;
1276 1.224 oster #ifdef DEBUG
1277 1.123 oster printf("raid%d: Got component label:\n", raidid);
1278 1.123 oster printf("raid%d: Version: %d\n", raidid, clabel->version);
1279 1.123 oster printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1280 1.123 oster printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1281 1.123 oster printf("raid%d: Column: %d\n", raidid, clabel->column);
1282 1.123 oster printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1283 1.123 oster printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1284 1.123 oster printf("raid%d: Status: %d\n", raidid, clabel->status);
1285 1.174 oster #endif
1286 1.166 oster clabel->row = 0;
1287 1.48 oster column = clabel->column;
1288 1.12 oster
1289 1.166 oster if ((column < 0) || (column >= raidPtr->numCol)) {
1290 1.12 oster return(EINVAL);
1291 1.11 oster }
1292 1.12 oster
1293 1.12 oster /* XXX this isn't allowed to do anything for now :-) */
1294 1.48 oster
1295 1.48 oster /* XXX and before it is, we need to fill in the rest
1296 1.48 oster of the fields!?!?!?! */
1297 1.269 jld memcpy(raidget_component_label(raidPtr, column),
1298 1.269 jld clabel, sizeof(*clabel));
1299 1.269 jld raidflush_component_label(raidPtr, column);
1300 1.269 jld return (0);
1301 1.12 oster #endif
1302 1.11 oster
1303 1.186 perry case RAIDFRAME_INIT_LABELS:
1304 1.48 oster clabel = (RF_ComponentLabel_t *) data;
1305 1.186 perry /*
1306 1.11 oster we only want the serial number from
1307 1.11 oster the above. We get all the rest of the information
1308 1.11 oster from the config that was used to create this RAID
1309 1.186 perry set.
1310 1.11 oster */
1311 1.12 oster
1312 1.48 oster raidPtr->serial_number = clabel->serial_number;
1313 1.186 perry
1314 1.166 oster for(column=0;column<raidPtr->numCol;column++) {
1315 1.166 oster diskPtr = &raidPtr->Disks[column];
1316 1.166 oster if (!RF_DEAD_DISK(diskPtr->status)) {
1317 1.269 jld ci_label = raidget_component_label(raidPtr,
1318 1.269 jld column);
1319 1.269 jld /* Zeroing this is important. */
1320 1.269 jld memset(ci_label, 0, sizeof(*ci_label));
1321 1.269 jld raid_init_component_label(raidPtr, ci_label);
1322 1.316.2.2 skrll ci_label->serial_number =
1323 1.269 jld raidPtr->serial_number;
 1324 1.269 jld 			ci_label->row = 0; /* we don't pretend to support more */
1325 1.282 enami rf_component_label_set_partitionsize(ci_label,
1326 1.282 enami diskPtr->partitionSize);
1327 1.209 oster ci_label->column = column;
1328 1.269 jld raidflush_component_label(raidPtr, column);
1329 1.11 oster }
1330 1.269 jld /* XXXjld what about the spares? */
1331 1.11 oster }
1332 1.316.2.2 skrll
1333 1.11 oster return (retcode);
1334 1.48 oster case RAIDFRAME_SET_AUTOCONFIG:
1335 1.78 minoura d = rf_set_autoconfig(raidPtr, *(int *) data);
1336 1.186 perry printf("raid%d: New autoconfig value is: %d\n",
1337 1.123 oster raidPtr->raidid, d);
1338 1.78 minoura *(int *) data = d;
1339 1.48 oster return (retcode);
1340 1.48 oster
1341 1.48 oster case RAIDFRAME_SET_ROOT:
1342 1.78 minoura d = rf_set_rootpartition(raidPtr, *(int *) data);
1343 1.186 perry printf("raid%d: New rootpartition value is: %d\n",
1344 1.123 oster raidPtr->raidid, d);
1345 1.78 minoura *(int *) data = d;
1346 1.48 oster return (retcode);
1347 1.9 oster
1348 1.1 oster /* initialize all parity */
1349 1.1 oster case RAIDFRAME_REWRITEPARITY:
1350 1.1 oster
1351 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1352 1.17 oster /* Parity for RAID 0 is trivially correct */
1353 1.42 oster raidPtr->parity_good = RF_RAID_CLEAN;
1354 1.17 oster return(0);
1355 1.17 oster }
1356 1.186 perry
1357 1.42 oster if (raidPtr->parity_rewrite_in_progress == 1) {
1358 1.37 oster /* Re-write is already in progress! */
1359 1.37 oster return(EINVAL);
1360 1.37 oster }
1361 1.27 oster
1362 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1363 1.37 oster rf_RewriteParityThread,
1364 1.42 oster raidPtr,"raid_parity");
1365 1.9 oster return (retcode);
1366 1.9 oster
1367 1.11 oster
1368 1.11 oster case RAIDFRAME_ADD_HOT_SPARE:
1369 1.12 oster sparePtr = (RF_SingleComponent_t *) data;
1370 1.209 oster memcpy( &component, sparePtr, sizeof(RF_SingleComponent_t));
1371 1.209 oster retcode = rf_add_hot_spare(raidPtr, &component);
1372 1.11 oster return(retcode);
1373 1.11 oster
1374 1.11 oster case RAIDFRAME_REMOVE_HOT_SPARE:
1375 1.73 oster return(retcode);
1376 1.73 oster
1377 1.73 oster case RAIDFRAME_DELETE_COMPONENT:
1378 1.73 oster componentPtr = (RF_SingleComponent_t *)data;
1379 1.186 perry memcpy( &component, componentPtr,
1380 1.73 oster sizeof(RF_SingleComponent_t));
1381 1.73 oster retcode = rf_delete_component(raidPtr, &component);
1382 1.73 oster return(retcode);
1383 1.73 oster
1384 1.73 oster case RAIDFRAME_INCORPORATE_HOT_SPARE:
1385 1.73 oster componentPtr = (RF_SingleComponent_t *)data;
1386 1.186 perry memcpy( &component, componentPtr,
1387 1.73 oster sizeof(RF_SingleComponent_t));
1388 1.73 oster retcode = rf_incorporate_hot_spare(raidPtr, &component);
1389 1.11 oster return(retcode);
1390 1.11 oster
1391 1.12 oster case RAIDFRAME_REBUILD_IN_PLACE:
1392 1.24 oster
1393 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1394 1.24 oster /* Can't do this on a RAID 0!! */
1395 1.24 oster return(EINVAL);
1396 1.24 oster }
1397 1.24 oster
1398 1.42 oster if (raidPtr->recon_in_progress == 1) {
1399 1.37 oster /* a reconstruct is already in progress! */
1400 1.37 oster return(EINVAL);
1401 1.37 oster }
1402 1.37 oster
1403 1.12 oster componentPtr = (RF_SingleComponent_t *) data;
1404 1.186 perry memcpy( &component, componentPtr,
1405 1.12 oster sizeof(RF_SingleComponent_t));
1406 1.166 oster component.row = 0; /* we don't support any more */
1407 1.12 oster column = component.column;
1408 1.147 oster
1409 1.166 oster if ((column < 0) || (column >= raidPtr->numCol)) {
1410 1.12 oster return(EINVAL);
1411 1.12 oster }
1412 1.37 oster
1413 1.291 mrg rf_lock_mutex2(raidPtr->mutex);
1414 1.166 oster if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
1415 1.186 perry (raidPtr->numFailures > 0)) {
1416 1.149 oster /* XXX 0 above shouldn't be constant!!! */
1417 1.149 oster /* some component other than this has failed.
1418 1.149 oster Let's not make things worse than they already
1419 1.149 oster are... */
1420 1.149 oster printf("raid%d: Unable to reconstruct to disk at:\n",
1421 1.149 oster raidPtr->raidid);
1422 1.166 oster printf("raid%d: Col: %d Too many failures.\n",
1423 1.166 oster raidPtr->raidid, column);
1424 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1425 1.149 oster return (EINVAL);
1426 1.149 oster }
1427 1.186 perry if (raidPtr->Disks[column].status ==
1428 1.149 oster rf_ds_reconstructing) {
1429 1.149 oster printf("raid%d: Unable to reconstruct to disk at:\n",
1430 1.149 oster raidPtr->raidid);
1431 1.299 oster printf("raid%d: Col: %d Reconstruction already occurring!\n", raidPtr->raidid, column);
1432 1.186 perry
1433 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1434 1.149 oster return (EINVAL);
1435 1.149 oster }
1436 1.166 oster if (raidPtr->Disks[column].status == rf_ds_spared) {
1437 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1438 1.149 oster return (EINVAL);
1439 1.149 oster }
1440 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1441 1.149 oster
1442 1.37 oster RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1443 1.38 oster if (rrcopy == NULL)
1444 1.38 oster return(ENOMEM);
1445 1.37 oster
1446 1.42 oster rrcopy->raidPtr = (void *) raidPtr;
1447 1.37 oster rrcopy->col = column;
1448 1.37 oster
1449 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1450 1.37 oster rf_ReconstructInPlaceThread,
1451 1.37 oster rrcopy,"raid_reconip");
1452 1.12 oster return(retcode);
1453 1.12 oster
1454 1.1 oster case RAIDFRAME_GET_INFO:
1455 1.42 oster if (!raidPtr->valid)
1456 1.41 oster return (ENODEV);
1457 1.41 oster ucfgp = (RF_DeviceConfig_t **) data;
1458 1.41 oster RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1459 1.41 oster (RF_DeviceConfig_t *));
1460 1.41 oster if (d_cfg == NULL)
1461 1.41 oster return (ENOMEM);
1462 1.166 oster d_cfg->rows = 1; /* there is only 1 row now */
1463 1.42 oster d_cfg->cols = raidPtr->numCol;
1464 1.166 oster d_cfg->ndevs = raidPtr->numCol;
1465 1.41 oster if (d_cfg->ndevs >= RF_MAX_DISKS) {
1466 1.41 oster RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1467 1.41 oster return (ENOMEM);
1468 1.41 oster }
1469 1.42 oster d_cfg->nspares = raidPtr->numSpare;
1470 1.41 oster if (d_cfg->nspares >= RF_MAX_DISKS) {
1471 1.41 oster RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1472 1.41 oster return (ENOMEM);
1473 1.41 oster }
1474 1.42 oster d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1475 1.41 oster d = 0;
1476 1.166 oster for (j = 0; j < d_cfg->cols; j++) {
1477 1.166 oster d_cfg->devs[d] = raidPtr->Disks[j];
1478 1.166 oster d++;
1479 1.41 oster }
1480 1.41 oster for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1481 1.166 oster d_cfg->spares[i] = raidPtr->Disks[j];
1482 1.316 oster if (d_cfg->spares[i].status == rf_ds_rebuilding_spare) {
1483 1.316 oster /* XXX: raidctl(8) expects to see this as a used spare */
1484 1.316 oster d_cfg->spares[i].status = rf_ds_used_spare;
1485 1.316 oster }
1486 1.41 oster }
1487 1.156 dsl retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
1488 1.41 oster RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1489 1.41 oster
1490 1.41 oster return (retcode);
1491 1.9 oster
1492 1.22 oster case RAIDFRAME_CHECK_PARITY:
1493 1.42 oster *(int *) data = raidPtr->parity_good;
1494 1.22 oster return (0);
1495 1.41 oster
1496 1.269 jld case RAIDFRAME_PARITYMAP_STATUS:
1497 1.273 jld if (rf_paritymap_ineligible(raidPtr))
1498 1.273 jld return EINVAL;
1499 1.269 jld rf_paritymap_status(raidPtr->parity_map,
1500 1.269 jld (struct rf_pmstat *)data);
1501 1.269 jld return 0;
1502 1.269 jld
1503 1.269 jld case RAIDFRAME_PARITYMAP_SET_PARAMS:
1504 1.273 jld if (rf_paritymap_ineligible(raidPtr))
1505 1.273 jld return EINVAL;
1506 1.269 jld if (raidPtr->parity_map == NULL)
1507 1.269 jld return ENOENT; /* ??? */
1508 1.316.2.2 skrll if (0 != rf_paritymap_set_params(raidPtr->parity_map,
1509 1.269 jld (struct rf_pmparams *)data, 1))
1510 1.269 jld return EINVAL;
1511 1.269 jld return 0;
1512 1.269 jld
1513 1.269 jld case RAIDFRAME_PARITYMAP_GET_DISABLE:
1514 1.273 jld if (rf_paritymap_ineligible(raidPtr))
1515 1.273 jld return EINVAL;
1516 1.269 jld *(int *) data = rf_paritymap_get_disable(raidPtr);
1517 1.269 jld return 0;
1518 1.269 jld
1519 1.269 jld case RAIDFRAME_PARITYMAP_SET_DISABLE:
1520 1.273 jld if (rf_paritymap_ineligible(raidPtr))
1521 1.273 jld return EINVAL;
1522 1.269 jld rf_paritymap_set_disable(raidPtr, *(int *)data);
1523 1.269 jld /* XXX should errors be passed up? */
1524 1.269 jld return 0;
1525 1.269 jld
1526 1.1 oster case RAIDFRAME_RESET_ACCTOTALS:
1527 1.108 thorpej memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1528 1.41 oster return (0);
1529 1.9 oster
1530 1.1 oster case RAIDFRAME_GET_ACCTOTALS:
1531 1.41 oster totals = (RF_AccTotals_t *) data;
1532 1.42 oster *totals = raidPtr->acc_totals;
1533 1.41 oster return (0);
1534 1.9 oster
1535 1.1 oster case RAIDFRAME_KEEP_ACCTOTALS:
1536 1.42 oster raidPtr->keep_acc_totals = *(int *)data;
1537 1.41 oster return (0);
1538 1.9 oster
1539 1.1 oster case RAIDFRAME_GET_SIZE:
1540 1.42 oster *(int *) data = raidPtr->totalSectors;
1541 1.9 oster return (0);
1542 1.1 oster
1543 1.1 oster /* fail a disk & optionally start reconstruction */
1544 1.1 oster case RAIDFRAME_FAIL_DISK:
1545 1.24 oster
1546 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1547 1.24 oster /* Can't do this on a RAID 0!! */
1548 1.24 oster return(EINVAL);
1549 1.24 oster }
1550 1.24 oster
1551 1.1 oster rr = (struct rf_recon_req *) data;
1552 1.166 oster rr->row = 0;
1553 1.166 oster if (rr->col < 0 || rr->col >= raidPtr->numCol)
1554 1.9 oster return (EINVAL);
1555 1.149 oster
1556 1.149 oster
1557 1.291 mrg rf_lock_mutex2(raidPtr->mutex);
1558 1.185 oster if (raidPtr->status == rf_rs_reconstructing) {
1559 1.185 oster /* you can't fail a disk while we're reconstructing! */
1560 1.185 oster /* XXX wrong for RAID6 */
1561 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1562 1.185 oster return (EINVAL);
1563 1.185 oster }
1564 1.186 perry if ((raidPtr->Disks[rr->col].status ==
1565 1.186 perry rf_ds_optimal) && (raidPtr->numFailures > 0)) {
1566 1.149 oster /* some other component has failed. Let's not make
1567 1.149 oster things worse. XXX wrong for RAID6 */
1568 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1569 1.149 oster return (EINVAL);
1570 1.149 oster }
1571 1.166 oster if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
1572 1.149 oster /* Can't fail a spared disk! */
1573 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1574 1.149 oster return (EINVAL);
1575 1.149 oster }
1576 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1577 1.1 oster
1578 1.9 oster /* make a copy of the recon request so that we don't rely on
1579 1.9 oster * the user's buffer */
1580 1.1 oster RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1581 1.38 oster if (rrcopy == NULL)
1582 1.38 oster return(ENOMEM);
1583 1.118 wiz memcpy(rrcopy, rr, sizeof(*rr));
1584 1.42 oster rrcopy->raidPtr = (void *) raidPtr;
1585 1.1 oster
1586 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1587 1.37 oster rf_ReconThread,
1588 1.37 oster rrcopy,"raid_recon");
1589 1.9 oster return (0);
1590 1.9 oster
1591 1.9 oster /* invoke a copyback operation after recon on whatever disk
1592 1.9 oster * needs it, if any */
1593 1.9 oster case RAIDFRAME_COPYBACK:
1594 1.24 oster
1595 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1596 1.24 oster /* This makes no sense on a RAID 0!! */
1597 1.24 oster return(EINVAL);
1598 1.24 oster }
1599 1.24 oster
1600 1.42 oster if (raidPtr->copyback_in_progress == 1) {
1601 1.37 oster /* Copyback is already in progress! */
1602 1.37 oster return(EINVAL);
1603 1.37 oster }
1604 1.27 oster
1605 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1606 1.37 oster rf_CopybackThread,
1607 1.42 oster raidPtr,"raid_copyback");
1608 1.37 oster return (retcode);
1609 1.9 oster
1610 1.1 oster /* return the percentage completion of reconstruction */
1611 1.37 oster case RAIDFRAME_CHECK_RECON_STATUS:
1612 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1613 1.71 oster /* This makes no sense on a RAID 0, so tell the
1614 1.71 oster user it's done. */
1615 1.71 oster *(int *) data = 100;
1616 1.71 oster return(0);
1617 1.24 oster }
1618 1.166 oster if (raidPtr->status != rf_rs_reconstructing)
1619 1.1 oster *(int *) data = 100;
1620 1.171 oster else {
1621 1.171 oster if (raidPtr->reconControl->numRUsTotal > 0) {
1622 1.171 oster *(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
1623 1.171 oster } else {
1624 1.171 oster *(int *) data = 0;
1625 1.171 oster }
1626 1.171 oster }
1627 1.9 oster return (0);
1628 1.83 oster case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1629 1.83 oster progressInfoPtr = (RF_ProgressInfo_t **) data;
1630 1.166 oster if (raidPtr->status != rf_rs_reconstructing) {
1631 1.83 oster progressInfo.remaining = 0;
1632 1.83 oster progressInfo.completed = 100;
1633 1.83 oster progressInfo.total = 100;
1634 1.83 oster } else {
1635 1.186 perry progressInfo.total =
1636 1.166 oster raidPtr->reconControl->numRUsTotal;
1637 1.186 perry progressInfo.completed =
1638 1.166 oster raidPtr->reconControl->numRUsComplete;
1639 1.83 oster progressInfo.remaining = progressInfo.total -
1640 1.83 oster progressInfo.completed;
1641 1.83 oster }
1642 1.156 dsl retcode = copyout(&progressInfo, *progressInfoPtr,
1643 1.83 oster sizeof(RF_ProgressInfo_t));
1644 1.83 oster return (retcode);
1645 1.9 oster
1646 1.37 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1647 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1648 1.80 oster /* This makes no sense on a RAID 0, so tell the
1649 1.80 oster user it's done. */
1650 1.80 oster *(int *) data = 100;
1651 1.80 oster return(0);
1652 1.37 oster }
1653 1.42 oster if (raidPtr->parity_rewrite_in_progress == 1) {
1654 1.186 perry *(int *) data = 100 *
1655 1.186 perry raidPtr->parity_rewrite_stripes_done /
1656 1.83 oster raidPtr->Layout.numStripe;
1657 1.37 oster } else {
1658 1.37 oster *(int *) data = 100;
1659 1.37 oster }
1660 1.37 oster return (0);
1661 1.37 oster
1662 1.83 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1663 1.83 oster progressInfoPtr = (RF_ProgressInfo_t **) data;
1664 1.83 oster if (raidPtr->parity_rewrite_in_progress == 1) {
1665 1.83 oster progressInfo.total = raidPtr->Layout.numStripe;
1666 1.186 perry progressInfo.completed =
1667 1.83 oster raidPtr->parity_rewrite_stripes_done;
1668 1.83 oster progressInfo.remaining = progressInfo.total -
1669 1.83 oster progressInfo.completed;
1670 1.83 oster } else {
1671 1.83 oster progressInfo.remaining = 0;
1672 1.83 oster progressInfo.completed = 100;
1673 1.83 oster progressInfo.total = 100;
1674 1.83 oster }
1675 1.156 dsl retcode = copyout(&progressInfo, *progressInfoPtr,
1676 1.83 oster sizeof(RF_ProgressInfo_t));
1677 1.83 oster return (retcode);
1678 1.83 oster
1679 1.37 oster case RAIDFRAME_CHECK_COPYBACK_STATUS:
1680 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1681 1.37 oster /* This makes no sense on a RAID 0 */
1682 1.83 oster *(int *) data = 100;
1683 1.83 oster return(0);
1684 1.37 oster }
1685 1.42 oster if (raidPtr->copyback_in_progress == 1) {
1686 1.42 oster *(int *) data = 100 * raidPtr->copyback_stripes_done /
1687 1.42 oster raidPtr->Layout.numStripe;
1688 1.37 oster } else {
1689 1.37 oster *(int *) data = 100;
1690 1.37 oster }
1691 1.37 oster return (0);
1692 1.37 oster
1693 1.83 oster case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1694 1.93 oster progressInfoPtr = (RF_ProgressInfo_t **) data;
1695 1.83 oster if (raidPtr->copyback_in_progress == 1) {
1696 1.83 oster progressInfo.total = raidPtr->Layout.numStripe;
1697 1.186 perry progressInfo.completed =
1698 1.93 oster raidPtr->copyback_stripes_done;
1699 1.83 oster progressInfo.remaining = progressInfo.total -
1700 1.83 oster progressInfo.completed;
1701 1.83 oster } else {
1702 1.83 oster progressInfo.remaining = 0;
1703 1.83 oster progressInfo.completed = 100;
1704 1.83 oster progressInfo.total = 100;
1705 1.83 oster }
1706 1.156 dsl retcode = copyout(&progressInfo, *progressInfoPtr,
1707 1.83 oster sizeof(RF_ProgressInfo_t));
1708 1.83 oster return (retcode);
1709 1.37 oster
1710 1.316.2.5 skrll case RAIDFRAME_SET_LAST_UNIT:
1711 1.316.2.5 skrll for (column = 0; column < raidPtr->numCol; column++)
1712 1.316.2.5 skrll if (raidPtr->Disks[column].status != rf_ds_optimal)
1713 1.316.2.5 skrll return EBUSY;
1714 1.316.2.5 skrll
1715 1.316.2.5 skrll for (column = 0; column < raidPtr->numCol; column++) {
1716 1.316.2.5 skrll clabel = raidget_component_label(raidPtr, column);
1717 1.316.2.5 skrll clabel->last_unit = *(int *)data;
1718 1.316.2.5 skrll raidflush_component_label(raidPtr, column);
1719 1.316.2.5 skrll }
1720 1.316.2.5 skrll rs->sc_cflags |= RAIDF_UNIT_CHANGED;
1721 1.316.2.5 skrll return 0;
1722 1.316.2.5 skrll
1723 1.9 oster /* the sparetable daemon calls this to wait for the kernel to
1724 1.9 oster * need a spare table. this ioctl does not return until a
1725 1.9 oster * spare table is needed. XXX -- calling mpsleep here in the
1726 1.9 oster * ioctl code is almost certainly wrong and evil. -- XXX XXX
1727 1.9 oster * -- I should either compute the spare table in the kernel,
1728 1.9 oster * or have a different -- XXX XXX -- interface (a different
1729 1.42 oster * character device) for delivering the table -- XXX */
1730 1.250 oster #if 0
1731 1.1 oster case RAIDFRAME_SPARET_WAIT:
1732 1.287 mrg rf_lock_mutex2(rf_sparet_wait_mutex);
1733 1.9 oster while (!rf_sparet_wait_queue)
1734 1.287 mrg rf_wait_cond2(rf_sparet_wait_cv, rf_sparet_wait_mutex);
1735 1.1 oster waitreq = rf_sparet_wait_queue;
1736 1.1 oster rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1737 1.287 mrg rf_unlock_mutex2(rf_sparet_wait_mutex);
1738 1.9 oster
1739 1.42 oster /* structure assignment */
1740 1.186 perry *((RF_SparetWait_t *) data) = *waitreq;
1741 1.9 oster
1742 1.1 oster RF_Free(waitreq, sizeof(*waitreq));
1743 1.9 oster return (0);
1744 1.9 oster
1745 1.9 oster /* wakes up a process waiting on SPARET_WAIT and puts an error
1746 1.9 oster 	 * code in it that will cause the daemon to exit */
1747 1.1 oster case RAIDFRAME_ABORT_SPARET_WAIT:
1748 1.1 oster RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1749 1.1 oster waitreq->fcol = -1;
1750 1.287 mrg rf_lock_mutex2(rf_sparet_wait_mutex);
1751 1.1 oster waitreq->next = rf_sparet_wait_queue;
1752 1.1 oster rf_sparet_wait_queue = waitreq;
1753 1.287 mrg 		rf_broadcast_cond2(rf_sparet_wait_cv);
1754 1.287 mrg rf_unlock_mutex2(rf_sparet_wait_mutex);
1755 1.9 oster return (0);
1756 1.1 oster
1757 1.9 oster /* used by the spare table daemon to deliver a spare table
1758 1.9 oster * into the kernel */
1759 1.1 oster case RAIDFRAME_SEND_SPARET:
1760 1.9 oster
1761 1.1 oster /* install the spare table */
1762 1.42 oster retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1763 1.9 oster
1764 1.9 oster /* respond to the requestor. the return status of the spare
1765 1.9 oster * table installation is passed in the "fcol" field */
1766 1.1 oster RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1767 1.1 oster waitreq->fcol = retcode;
1768 1.287 mrg rf_lock_mutex2(rf_sparet_wait_mutex);
1769 1.1 oster waitreq->next = rf_sparet_resp_queue;
1770 1.1 oster rf_sparet_resp_queue = waitreq;
1771 1.287 mrg rf_broadcast_cond2(rf_sparet_resp_cv);
1772 1.287 mrg rf_unlock_mutex2(rf_sparet_wait_mutex);
1773 1.9 oster
1774 1.9 oster return (retcode);
1775 1.1 oster #endif
1776 1.1 oster
1777 1.9 oster default:
1778 1.36 oster break; /* fall through to the os-specific code below */
1779 1.1 oster
1780 1.1 oster }
1781 1.9 oster
1782 1.42 oster if (!raidPtr->valid)
1783 1.9 oster return (EINVAL);
1784 1.9 oster
1785 1.1 oster /*
1786 1.1 oster * Add support for "regular" device ioctls here.
1787 1.1 oster */
1788 1.9 oster
1789 1.1 oster switch (cmd) {
1790 1.252 oster case DIOCCACHESYNC:
1791 1.316.2.7 skrll retcode = rf_sync_component_caches(raidPtr);
1792 1.316.2.7 skrll break;
1793 1.298 buhrow
1794 1.1 oster default:
1795 1.316.2.7 skrll retcode = dk_ioctl(dksc, dev, cmd, data, flag, l);
1796 1.316.2.7 skrll break;
1797 1.1 oster }
1798 1.316.2.7 skrll
1799 1.9 oster return (retcode);
1800 1.1 oster
1801 1.1 oster }
1802 1.1 oster
1803 1.1 oster
1804 1.9 oster /* raidinit -- complete the rest of the initialization for the
1805 1.1 oster RAIDframe device. */
1806 1.1 oster
1807 1.1 oster
1808 1.59 oster static void
1809 1.300 christos raidinit(struct raid_softc *rs)
1810 1.1 oster {
1811 1.262 cegger cfdata_t cf;
1812 1.316.2.5 skrll unsigned int unit;
1813 1.316.2.5 skrll struct dk_softc *dksc = &rs->sc_dksc;
1814 1.300 christos RF_Raid_t *raidPtr = &rs->sc_r;
1815 1.316.2.5 skrll device_t dev;
1816 1.1 oster
1817 1.59 oster unit = raidPtr->raidid;
1818 1.1 oster
1819 1.179 itojun /* XXX doesn't check bounds. */
1820 1.316.2.5 skrll snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%u", unit);
1821 1.1 oster
1822 1.217 oster /* attach the pseudo device */
1823 1.217 oster cf = malloc(sizeof(*cf), M_RAIDFRAME, M_WAITOK);
1824 1.217 oster cf->cf_name = raid_cd.cd_name;
1825 1.217 oster cf->cf_atname = raid_cd.cd_name;
1826 1.217 oster cf->cf_unit = unit;
1827 1.217 oster cf->cf_fstate = FSTATE_STAR;
1828 1.217 oster
1829 1.316.2.5 skrll dev = config_attach_pseudo(cf);
1830 1.316.2.5 skrll if (dev == NULL) {
1831 1.217 oster printf("raid%d: config_attach_pseudo failed\n",
1832 1.270 christos raidPtr->raidid);
1833 1.265 pooka free(cf, M_RAIDFRAME);
1834 1.265 pooka return;
1835 1.217 oster }
1836 1.217 oster
1837 1.316.2.5 skrll /* provide a backpointer to the real softc */
1838 1.316.2.5 skrll raidsoftc(dev) = rs;
1839 1.316.2.5 skrll
1840 1.1 oster /* disk_attach actually creates space for the CPU disklabel, among
1841 1.9 oster * other things, so it's critical to call this *BEFORE* we try putzing
1842 1.9 oster * with disklabels. */
1843 1.316.2.5 skrll dk_init(dksc, dev, DKTYPE_RAID);
1844 1.316.2.5 skrll disk_init(&dksc->sc_dkdev, rs->sc_xname, &rf_dkdriver);
1845 1.1 oster
1846 1.1 oster /* XXX There may be a weird interaction here between this, and
1847 1.9 oster * protectedSectors, as used in RAIDframe. */
1848 1.11 oster
1849 1.9 oster rs->sc_size = raidPtr->totalSectors;
1850 1.234 oster
1851 1.316.2.5 skrll /* Attach dk and disk subsystems */
1852 1.316.2.5 skrll dk_attach(dksc);
1853 1.316.2.5 skrll disk_attach(&dksc->sc_dkdev);
1854 1.304 christos rf_set_geometry(rs, raidPtr);
1855 1.234 oster
1856 1.316.2.5 skrll bufq_alloc(&dksc->sc_bufq, "fcfs", BUFQ_SORT_RAWBLOCK);
1857 1.316.2.1 skrll
1858 1.316.2.5 skrll 	/* mark unit as usable */
1859 1.316.2.5 skrll rs->sc_flags |= RAIDF_INITED;
1860 1.316.2.5 skrll
1861 1.316.2.5 skrll dkwedge_discover(&dksc->sc_dkdev);
1862 1.1 oster }
1863 1.316.2.5 skrll
1864 1.150 oster #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
1865 1.1 oster /* wake up the daemon & tell it to get us a spare table
1866 1.1 oster * XXX
1867 1.9 oster * the entries in the queues should be tagged with the raidPtr
1868 1.186 perry * so that in the extremely rare case that two recons happen at once,
1869 1.11 oster  * we know for which device we're requesting a spare table
1870 1.1 oster * XXX
1871 1.186 perry *
1872 1.39 oster * XXX This code is not currently used. GO
1873 1.1 oster */
1874 1.186 perry int
1875 1.169 oster rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
1876 1.9 oster {
1877 1.9 oster int retcode;
1878 1.9 oster
1879 1.287 mrg rf_lock_mutex2(rf_sparet_wait_mutex);
1880 1.9 oster req->next = rf_sparet_wait_queue;
1881 1.9 oster rf_sparet_wait_queue = req;
1882 1.289 mrg rf_broadcast_cond2(rf_sparet_wait_cv);
1883 1.9 oster
1884 1.9 oster 	/* rf_wait_cond2() drops the mutex while we sleep */
1885 1.9 oster while (!rf_sparet_resp_queue) {
1886 1.289 mrg rf_wait_cond2(rf_sparet_resp_cv, rf_sparet_wait_mutex);
1887 1.9 oster }
1888 1.9 oster req = rf_sparet_resp_queue;
1889 1.9 oster rf_sparet_resp_queue = req->next;
1890 1.287 mrg rf_unlock_mutex2(rf_sparet_wait_mutex);
1891 1.9 oster
1892 1.9 oster retcode = req->fcol;
1893 1.9 oster RF_Free(req, sizeof(*req)); /* this is not the same req as we
1894 1.9 oster * alloc'd */
1895 1.9 oster return (retcode);
1896 1.1 oster }
1897 1.150 oster #endif
1898 1.39 oster
1899 1.186 perry /* a wrapper around rf_DoAccess that extracts appropriate info from the
1900 1.11 oster * bp & passes it down.
1901 1.1 oster * any calls originating in the kernel must use non-blocking I/O
1902 1.1 oster * do some extra sanity checking to return "appropriate" error values for
1903 1.1 oster * certain conditions (to make some standard utilities work)
1904 1.186 perry *
1905 1.34 oster * Formerly known as: rf_DoAccessKernel
1906 1.1 oster */
1907 1.34 oster void
1908 1.169 oster raidstart(RF_Raid_t *raidPtr)
1909 1.1 oster {
1910 1.1 oster struct raid_softc *rs;
1911 1.316.2.5 skrll struct dk_softc *dksc;
1912 1.1 oster
1913 1.300 christos rs = raidPtr->softc;
1914 1.316.2.5 skrll dksc = &rs->sc_dksc;
1915 1.56 oster /* quick check to see if anything has died recently */
1916 1.291 mrg rf_lock_mutex2(raidPtr->mutex);
1917 1.56 oster if (raidPtr->numNewFailures > 0) {
1918 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1919 1.186 perry rf_update_component_labels(raidPtr,
1920 1.91 oster RF_NORMAL_COMPONENT_UPDATE);
1921 1.291 mrg rf_lock_mutex2(raidPtr->mutex);
1922 1.56 oster raidPtr->numNewFailures--;
1923 1.56 oster }
1924 1.316.2.5 skrll rf_unlock_mutex2(raidPtr->mutex);
1925 1.56 oster
1926 1.316.2.5 skrll if ((rs->sc_flags & RAIDF_INITED) == 0) {
1927 1.316.2.5 skrll printf("raid%d: raidstart not ready\n", raidPtr->raidid);
1928 1.316.2.5 skrll return;
1929 1.316.2.5 skrll }
1930 1.186 perry
1931 1.316.2.5 skrll dk_start(dksc, NULL);
1932 1.316.2.5 skrll }
1933 1.186 perry
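/*
 * raiddoaccess -- translate a struct buf into a RAIDframe access and
 * hand it to rf_DoAccess() as a non-blocking, asynchronous I/O.
 * Returns EAGAIN if the array currently has no openings, and ENOSPC if
 * the request runs past the end of the array or is not a multiple of
 * the sector size.  (Presumably called on behalf of dk_start(); see
 * raidstart() above.)
 */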
1934 1.316.2.5 skrll static int
1935 1.316.2.5 skrll raiddoaccess(RF_Raid_t *raidPtr, struct buf *bp)
1936 1.316.2.5 skrll {
1937 1.316.2.5 skrll RF_SectorCount_t num_blocks, pb, sum;
1938 1.316.2.5 skrll RF_RaidAddr_t raid_addr;
1939 1.316.2.5 skrll daddr_t blocknum;
1940 1.316.2.5 skrll int do_async;
1941 1.316.2.5 skrll int rc;
1942 1.1 oster
1943 1.316.2.5 skrll rf_lock_mutex2(raidPtr->mutex);
1944 1.316.2.5 skrll if (raidPtr->openings == 0) {
1945 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1946 1.316.2.5 skrll return EAGAIN;
1947 1.316.2.5 skrll }
1948 1.316.2.5 skrll rf_unlock_mutex2(raidPtr->mutex);
1949 1.1 oster
1950 1.316.2.5 skrll blocknum = bp->b_rawblkno;
1951 1.186 perry
1952 1.316.2.5 skrll db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
1953 1.316.2.5 skrll (int) blocknum));
1954 1.99 oster
1955 1.316.2.5 skrll db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
1956 1.316.2.5 skrll db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
1957 1.20 oster
1958 1.316.2.5 skrll /* *THIS* is where we adjust what block we're going to...
1959 1.316.2.5 skrll * but DO NOT TOUCH bp->b_blkno!!! */
1960 1.316.2.5 skrll raid_addr = blocknum;
1961 1.316.2.5 skrll
1962 1.316.2.5 skrll num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
1963 1.316.2.5 skrll pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
1964 1.316.2.5 skrll sum = raid_addr + num_blocks + pb;
1965 1.316.2.5 skrll if (1 || rf_debugKernelAccess) {
1966 1.316.2.5 skrll db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
1967 1.316.2.5 skrll (int) raid_addr, (int) sum, (int) num_blocks,
1968 1.316.2.5 skrll (int) pb, (int) bp->b_resid));
1969 1.316.2.5 skrll }
1970 1.316.2.5 skrll if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
1971 1.316.2.5 skrll || (sum < num_blocks) || (sum < pb)) {
1972 1.316.2.5 skrll rc = ENOSPC;
1973 1.316.2.5 skrll goto done;
1974 1.316.2.5 skrll }
1975 1.316.2.5 skrll /*
1976 1.316.2.5 skrll * XXX rf_DoAccess() should do this, not just DoAccessKernel()
1977 1.316.2.5 skrll */
1978 1.20 oster
1979 1.316.2.5 skrll if (bp->b_bcount & raidPtr->sectorMask) {
1980 1.316.2.5 skrll rc = ENOSPC;
1981 1.316.2.5 skrll goto done;
1982 1.20 oster }
1983 1.316.2.5 skrll db1_printf(("Calling DoAccess..\n"));
1984 1.316.2.5 skrll
1985 1.316.2.5 skrll
1986 1.316.2.5 skrll rf_lock_mutex2(raidPtr->mutex);
1987 1.316.2.5 skrll raidPtr->openings--;
1988 1.291 mrg rf_unlock_mutex2(raidPtr->mutex);
1989 1.20 oster
1990 1.316.2.5 skrll /*
1991 1.316.2.5 skrll * Everything is async.
1992 1.316.2.5 skrll */
1993 1.316.2.5 skrll do_async = 1;
1994 1.316.2.5 skrll
1995 1.316.2.5 skrll /* don't ever condition on bp->b_flags & B_WRITE.
1996 1.316.2.5 skrll * always condition on B_READ instead */
1997 1.20 oster
1998 1.316.2.5 skrll rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
1999 1.316.2.5 skrll RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
2000 1.316.2.5 skrll do_async, raid_addr, num_blocks,
2001 1.316.2.5 skrll bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
2002 1.7 explorer
2003 1.316.2.5 skrll done:
2004 1.316.2.5 skrll return rc;
2005 1.316.2.5 skrll }
2006 1.7 explorer
2007 1.1 oster /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
2008 1.1 oster
2009 1.186 perry int
2010 1.169 oster rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
2011 1.1 oster {
2012 1.9 oster int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
2013 1.1 oster struct buf *bp;
2014 1.9 oster
2015 1.1 oster req->queue = queue;
2016 1.1 oster bp = req->bp;
2017 1.1 oster
2018 1.1 oster switch (req->type) {
2019 1.9 oster case RF_IO_TYPE_NOP: /* used primarily to unlock a locked queue */
2020 1.1 oster /* XXX need to do something extra here.. */
2021 1.9 oster /* I'm leaving this in, as I've never actually seen it used,
2022 1.9 oster * and I'd like folks to report it... GO */
2023 1.1 oster 		printf("WAKEUP CALLED\n");
2024 1.1 oster queue->numOutstanding++;
2025 1.1 oster
2026 1.197 oster bp->b_flags = 0;
2027 1.207 simonb bp->b_private = req;
2028 1.1 oster
2029 1.194 oster KernelWakeupFunc(bp);
2030 1.1 oster break;
2031 1.9 oster
2032 1.1 oster case RF_IO_TYPE_READ:
2033 1.1 oster case RF_IO_TYPE_WRITE:
2034 1.175 oster #if RF_ACC_TRACE > 0
2035 1.1 oster if (req->tracerec) {
2036 1.1 oster RF_ETIMER_START(req->tracerec->timer);
2037 1.1 oster }
2038 1.175 oster #endif
2039 1.194 oster InitBP(bp, queue->rf_cinfo->ci_vp,
2040 1.197 oster op, queue->rf_cinfo->ci_dev,
2041 1.9 oster req->sectorOffset, req->numSector,
2042 1.9 oster req->buf, KernelWakeupFunc, (void *) req,
2043 1.9 oster queue->raidPtr->logBytesPerSector, req->b_proc);
2044 1.1 oster
2045 1.1 oster if (rf_debugKernelAccess) {
2046 1.9 oster db1_printf(("dispatch: bp->b_blkno = %ld\n",
2047 1.9 oster (long) bp->b_blkno));
2048 1.1 oster }
2049 1.1 oster queue->numOutstanding++;
2050 1.1 oster queue->last_deq_sector = req->sectorOffset;
2051 1.9 oster /* acc wouldn't have been let in if there were any pending
2052 1.9 oster * reqs at any other priority */
2053 1.1 oster queue->curPriority = req->priority;
2054 1.1 oster
2055 1.166 oster db1_printf(("Going for %c to unit %d col %d\n",
2056 1.186 perry req->type, queue->raidPtr->raidid,
2057 1.166 oster queue->col));
2058 1.1 oster db1_printf(("sector %d count %d (%d bytes) %d\n",
2059 1.9 oster (int) req->sectorOffset, (int) req->numSector,
2060 1.9 oster (int) (req->numSector <<
2061 1.9 oster queue->raidPtr->logBytesPerSector),
2062 1.9 oster (int) queue->raidPtr->logBytesPerSector));
2063 1.256 oster
2064 1.256 oster /*
2065 1.316.2.2 skrll * XXX: drop lock here since this can block at
2066 1.256 oster * least with backing SCSI devices. Retake it
2067 1.256 oster * to minimize fuss with calling interfaces.
2068 1.256 oster */
2069 1.256 oster
2070 1.256 oster RF_UNLOCK_QUEUE_MUTEX(queue, "unusedparam");
2071 1.247 oster bdev_strategy(bp);
2072 1.256 oster RF_LOCK_QUEUE_MUTEX(queue, "unusedparam");
2073 1.1 oster break;
2074 1.9 oster
2075 1.1 oster default:
2076 1.1 oster panic("bad req->type in rf_DispatchKernelIO");
2077 1.1 oster }
2078 1.1 oster db1_printf(("Exiting from DispatchKernelIO\n"));
2079 1.134 oster
2080 1.9 oster return (0);
2081 1.1 oster }
2082 1.9 oster /* this is the callback function associated with an I/O invoked from
2083 1.1 oster kernel code.
2084 1.1 oster */
2085 1.186 perry static void
2086 1.194 oster KernelWakeupFunc(struct buf *bp)
2087 1.9 oster {
2088 1.9 oster RF_DiskQueueData_t *req = NULL;
2089 1.9 oster RF_DiskQueue_t *queue;
2090 1.9 oster
2091 1.9 oster db1_printf(("recovering the request queue:\n"));
2092 1.285 mrg
2093 1.207 simonb req = bp->b_private;
2094 1.1 oster
2095 1.9 oster queue = (RF_DiskQueue_t *) req->queue;
2096 1.1 oster
2097 1.286 mrg rf_lock_mutex2(queue->raidPtr->iodone_lock);
2098 1.285 mrg
2099 1.175 oster #if RF_ACC_TRACE > 0
2100 1.9 oster if (req->tracerec) {
2101 1.9 oster RF_ETIMER_STOP(req->tracerec->timer);
2102 1.9 oster RF_ETIMER_EVAL(req->tracerec->timer);
2103 1.288 mrg rf_lock_mutex2(rf_tracing_mutex);
2104 1.9 oster req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
2105 1.9 oster req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
2106 1.9 oster req->tracerec->num_phys_ios++;
2107 1.288 mrg rf_unlock_mutex2(rf_tracing_mutex);
2108 1.9 oster }
2109 1.175 oster #endif
2110 1.1 oster
2111 1.230 ad /* XXX Ok, let's get aggressive... If b_error is set, let's go
2112 1.9 oster * ballistic, and mark the component as hosed... */
2113 1.36 oster
2114 1.230 ad if (bp->b_error != 0) {
2115 1.9 oster /* Mark the disk as dead */
2116 1.9 oster /* but only mark it once... */
2117 1.186 perry /* and only if it wouldn't leave this RAID set
2118 1.183 oster completely broken */
2119 1.193 oster if (((queue->raidPtr->Disks[queue->col].status ==
2120 1.193 oster rf_ds_optimal) ||
2121 1.193 oster (queue->raidPtr->Disks[queue->col].status ==
2122 1.316.2.2 skrll rf_ds_used_spare)) &&
2123 1.193 oster (queue->raidPtr->numFailures <
2124 1.204 simonb queue->raidPtr->Layout.map->faultsTolerated)) {
2125 1.316.2.1 skrll printf("raid%d: IO Error (%d). Marking %s as failed.\n",
2126 1.136 oster queue->raidPtr->raidid,
2127 1.316.2.1 skrll bp->b_error,
2128 1.166 oster queue->raidPtr->Disks[queue->col].devname);
2129 1.166 oster queue->raidPtr->Disks[queue->col].status =
2130 1.9 oster rf_ds_failed;
2131 1.166 oster queue->raidPtr->status = rf_rs_degraded;
2132 1.9 oster queue->raidPtr->numFailures++;
2133 1.56 oster queue->raidPtr->numNewFailures++;
2134 1.9 oster } else { /* Disk is already dead... */
2135 1.9 oster /* printf("Disk already marked as dead!\n"); */
2136 1.9 oster }
2137 1.4 oster
2138 1.9 oster }
2139 1.4 oster
2140 1.143 oster /* Fill in the error value */
2141 1.230 ad req->error = bp->b_error;
2142 1.143 oster
2143 1.143 oster /* Drop this one on the "finished" queue... */
2144 1.143 oster TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
2145 1.143 oster
2146 1.143 oster /* Let the raidio thread know there is work to be done. */
2147 1.286 mrg rf_signal_cond2(queue->raidPtr->iodone_cv);
2148 1.143 oster
2149 1.286 mrg rf_unlock_mutex2(queue->raidPtr->iodone_lock);
2150 1.1 oster }
2151 1.1 oster
2152 1.1 oster
2153 1.1 oster /*
2154 1.1 oster * initialize a buf structure for doing an I/O in the kernel.
2155 1.1 oster */
2156 1.186 perry static void
2157 1.169 oster InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
2158 1.225 christos RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
2159 1.169 oster void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
2160 1.169 oster struct proc *b_proc)
2161 1.9 oster {
2162 1.9 oster /* bp->b_flags = B_PHYS | rw_flag; */
2163 1.242 ad bp->b_flags = rw_flag; /* XXX need B_PHYS here too??? */
2164 1.242 ad bp->b_oflags = 0;
2165 1.242 ad bp->b_cflags = 0;
2166 1.9 oster bp->b_bcount = numSect << logBytesPerSector;
2167 1.9 oster bp->b_bufsize = bp->b_bcount;
2168 1.9 oster bp->b_error = 0;
2169 1.9 oster bp->b_dev = dev;
2170 1.187 christos bp->b_data = bf;
2171 1.275 mrg bp->b_blkno = startSect << logBytesPerSector >> DEV_BSHIFT;
2172 1.9 oster bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
2173 1.1 oster if (bp->b_bcount == 0) {
2174 1.141 provos panic("bp->b_bcount is zero in InitBP!!");
2175 1.1 oster }
2176 1.161 fvdl bp->b_proc = b_proc;
2177 1.9 oster bp->b_iodone = cbFunc;
2178 1.207 simonb bp->b_private = cbArg;
2179 1.1 oster }
2180 1.1 oster
2181 1.1 oster /*
2182 1.1 oster * Wait interruptibly for an exclusive lock.
2183 1.1 oster *
2184 1.1 oster * XXX
2185 1.1 oster * Several drivers do this; it should be abstracted and made MP-safe.
2186 1.1 oster * (Hmm... where have we seen this warning before :-> GO )
2187 1.1 oster */
2188 1.1 oster static int
2189 1.169 oster raidlock(struct raid_softc *rs)
2190 1.1 oster {
2191 1.9 oster int error;
2192 1.1 oster
2193 1.316.2.5 skrll error = 0;
2194 1.316.2.4 skrll mutex_enter(&rs->sc_mutex);
2195 1.1 oster while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2196 1.1 oster rs->sc_flags |= RAIDF_WANTED;
2197 1.316.2.4 skrll error = cv_wait_sig(&rs->sc_cv, &rs->sc_mutex);
2198 1.316.2.4 skrll if (error != 0)
2199 1.316.2.5 skrll goto done;
2200 1.1 oster }
2201 1.1 oster rs->sc_flags |= RAIDF_LOCKED;
2202 1.316.2.5 skrll done:
2203 1.316.2.4 skrll mutex_exit(&rs->sc_mutex);
2204 1.316.2.5 skrll return (error);
2205 1.1 oster }
2206 1.1 oster /*
2207 1.1 oster * Unlock and wake up any waiters.
2208 1.1 oster */
2209 1.1 oster static void
2210 1.169 oster raidunlock(struct raid_softc *rs)
2211 1.1 oster {
2212 1.1 oster
2213 1.316.2.4 skrll mutex_enter(&rs->sc_mutex);
2214 1.1 oster rs->sc_flags &= ~RAIDF_LOCKED;
2215 1.1 oster if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2216 1.1 oster rs->sc_flags &= ~RAIDF_WANTED;
2217 1.316.2.4 skrll cv_broadcast(&rs->sc_cv);
2218 1.1 oster }
2219 1.316.2.4 skrll mutex_exit(&rs->sc_mutex);
2220 1.11 oster }
2221 1.186 perry
2222 1.11 oster
2223 1.11 oster #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2224 1.11 oster #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
2225 1.269 jld #define RF_PARITY_MAP_SIZE RF_PARITYMAP_NBYTE
2226 1.11 oster
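/*
 * A sketch of the per-component metadata layout implied by the helpers
 * below (the helpers themselves are authoritative):
 *
 *	byte 16384                        component label, padded to
 *	                                  max(sector size, 1024) bytes
 *	byte 16384 + max(secsize, 1024)   parity map, padded to
 *	                                  max(sector size, RF_PARITYMAP_NBYTE)
 */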
2227 1.276 mrg static daddr_t
2228 1.276 mrg rf_component_info_offset(void)
2229 1.276 mrg {
2230 1.276 mrg
2231 1.276 mrg return RF_COMPONENT_INFO_OFFSET;
2232 1.276 mrg }
2233 1.276 mrg
2234 1.276 mrg static daddr_t
2235 1.276 mrg rf_component_info_size(unsigned secsize)
2236 1.276 mrg {
2237 1.276 mrg daddr_t info_size;
2238 1.276 mrg
2239 1.276 mrg KASSERT(secsize);
2240 1.276 mrg if (secsize > RF_COMPONENT_INFO_SIZE)
2241 1.276 mrg info_size = secsize;
2242 1.276 mrg else
2243 1.276 mrg info_size = RF_COMPONENT_INFO_SIZE;
2244 1.276 mrg
2245 1.276 mrg return info_size;
2246 1.276 mrg }
2247 1.276 mrg
2248 1.276 mrg static daddr_t
2249 1.276 mrg rf_parity_map_offset(RF_Raid_t *raidPtr)
2250 1.276 mrg {
2251 1.276 mrg daddr_t map_offset;
2252 1.276 mrg
2253 1.276 mrg KASSERT(raidPtr->bytesPerSector);
2254 1.276 mrg if (raidPtr->bytesPerSector > RF_COMPONENT_INFO_SIZE)
2255 1.276 mrg map_offset = raidPtr->bytesPerSector;
2256 1.276 mrg else
2257 1.276 mrg map_offset = RF_COMPONENT_INFO_SIZE;
2258 1.276 mrg map_offset += rf_component_info_offset();
2259 1.276 mrg
2260 1.276 mrg return map_offset;
2261 1.276 mrg }
2262 1.276 mrg
2263 1.276 mrg static daddr_t
2264 1.276 mrg rf_parity_map_size(RF_Raid_t *raidPtr)
2265 1.276 mrg {
2266 1.276 mrg daddr_t map_size;
2267 1.276 mrg
2268 1.276 mrg if (raidPtr->bytesPerSector > RF_PARITY_MAP_SIZE)
2269 1.276 mrg map_size = raidPtr->bytesPerSector;
2270 1.276 mrg else
2271 1.276 mrg map_size = RF_PARITY_MAP_SIZE;
2272 1.276 mrg
2273 1.276 mrg return map_size;
2274 1.276 mrg }
2275 1.276 mrg
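/*
 * raidmarkclean/raidmarkdirty -- set the clean flag in the in-core
 * component label for the given column and flush that label to disk.
 */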
2276 1.186 perry int
2277 1.269 jld raidmarkclean(RF_Raid_t *raidPtr, RF_RowCol_t col)
2278 1.12 oster {
2279 1.269 jld RF_ComponentLabel_t *clabel;
2280 1.269 jld
2281 1.269 jld clabel = raidget_component_label(raidPtr, col);
2282 1.269 jld clabel->clean = RF_RAID_CLEAN;
2283 1.269 jld raidflush_component_label(raidPtr, col);
2284 1.12 oster return(0);
2285 1.12 oster }
2286 1.12 oster
2287 1.12 oster
2288 1.186 perry int
2289 1.269 jld raidmarkdirty(RF_Raid_t *raidPtr, RF_RowCol_t col)
2290 1.11 oster {
2291 1.269 jld RF_ComponentLabel_t *clabel;
2292 1.269 jld
2293 1.269 jld clabel = raidget_component_label(raidPtr, col);
2294 1.269 jld clabel->clean = RF_RAID_DIRTY;
2295 1.269 jld raidflush_component_label(raidPtr, col);
2296 1.11 oster return(0);
2297 1.11 oster }
2298 1.11 oster
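/*
 * Component label accessors: raidfetch_component_label() reads the
 * on-disk label into the in-core copy, raidget_component_label()
 * returns a pointer to that in-core copy, and
 * raidflush_component_label() stamps it with the current mod counter
 * and writes it back out.
 */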
2299 1.11 oster int
2300 1.269 jld raidfetch_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2301 1.269 jld {
2302 1.276 mrg KASSERT(raidPtr->bytesPerSector);
2303 1.276 mrg return raidread_component_label(raidPtr->bytesPerSector,
2304 1.276 mrg raidPtr->Disks[col].dev,
2305 1.316.2.2 skrll raidPtr->raid_cinfo[col].ci_vp,
2306 1.269 jld &raidPtr->raid_cinfo[col].ci_label);
2307 1.269 jld }
2308 1.269 jld
2309 1.269 jld RF_ComponentLabel_t *
2310 1.269 jld raidget_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2311 1.269 jld {
2312 1.269 jld return &raidPtr->raid_cinfo[col].ci_label;
2313 1.269 jld }
2314 1.269 jld
2315 1.269 jld int
2316 1.269 jld raidflush_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2317 1.269 jld {
2318 1.269 jld RF_ComponentLabel_t *label;
2319 1.269 jld
2320 1.269 jld label = &raidPtr->raid_cinfo[col].ci_label;
2321 1.269 jld label->mod_counter = raidPtr->mod_counter;
2322 1.269 jld #ifndef RF_NO_PARITY_MAP
2323 1.269 jld label->parity_map_modcount = label->mod_counter;
2324 1.269 jld #endif
2325 1.276 mrg return raidwrite_component_label(raidPtr->bytesPerSector,
2326 1.276 mrg raidPtr->Disks[col].dev,
2327 1.269 jld raidPtr->raid_cinfo[col].ci_vp, label);
2328 1.269 jld }
2329 1.269 jld
2330 1.269 jld
2331 1.269 jld static int
2332 1.276 mrg raidread_component_label(unsigned secsize, dev_t dev, struct vnode *b_vp,
2333 1.269 jld RF_ComponentLabel_t *clabel)
2334 1.269 jld {
2335 1.316.2.2 skrll return raidread_component_area(dev, b_vp, clabel,
2336 1.269 jld sizeof(RF_ComponentLabel_t),
2337 1.276 mrg rf_component_info_offset(),
2338 1.276 mrg rf_component_info_size(secsize));
2339 1.269 jld }
2340 1.269 jld
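/*
 * raidread_component_area -- read msize bytes of component metadata
 * into 'data' from the given byte offset on the component, using a
 * scratch buffer of dsize bytes (the size of the on-disk area).
 */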
2341 1.269 jld /* ARGSUSED */
2342 1.269 jld static int
2343 1.269 jld raidread_component_area(dev_t dev, struct vnode *b_vp, void *data,
2344 1.269 jld size_t msize, daddr_t offset, daddr_t dsize)
2345 1.11 oster {
2346 1.11 oster struct buf *bp;
2347 1.11 oster int error;
2348 1.186 perry
2349 1.11 oster /* XXX should probably ensure that we don't try to do this if
2350 1.186 perry someone has changed rf_protected_sectors. */
2351 1.11 oster
2352 1.98 oster if (b_vp == NULL) {
2353 1.98 oster /* For whatever reason, this component is not valid.
2354 1.98 oster Don't try to read a component label from it. */
2355 1.98 oster return(EINVAL);
2356 1.98 oster }
2357 1.98 oster
2358 1.11 oster /* get a block of the appropriate size... */
2359 1.269 jld bp = geteblk((int)dsize);
2360 1.11 oster bp->b_dev = dev;
2361 1.11 oster
2362 1.11 oster /* get our ducks in a row for the read */
2363 1.269 jld bp->b_blkno = offset / DEV_BSIZE;
2364 1.269 jld bp->b_bcount = dsize;
2365 1.100 chs bp->b_flags |= B_READ;
2366 1.269 jld bp->b_resid = dsize;
2367 1.11 oster
2368 1.316.2.5 skrll bdev_strategy(bp);
2369 1.186 perry error = biowait(bp);
2370 1.11 oster
2371 1.11 oster if (!error) {
2372 1.269 jld memcpy(data, bp->b_data, msize);
2373 1.204 simonb }
2374 1.11 oster
2375 1.233 ad brelse(bp, 0);
2376 1.11 oster return(error);
2377 1.11 oster }
2378 1.269 jld
2379 1.269 jld
2380 1.269 jld static int
2381 1.276 mrg raidwrite_component_label(unsigned secsize, dev_t dev, struct vnode *b_vp,
2382 1.276 mrg RF_ComponentLabel_t *clabel)
2383 1.269 jld {
2384 1.269 jld return raidwrite_component_area(dev, b_vp, clabel,
2385 1.269 jld sizeof(RF_ComponentLabel_t),
2386 1.276 mrg rf_component_info_offset(),
2387 1.276 mrg rf_component_info_size(secsize), 0);
2388 1.269 jld }
2389 1.269 jld
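/*
 * raidwrite_component_area -- write msize bytes of component metadata
 * from 'data' at the given byte offset, zero-padding the remainder of
 * the dsize-byte on-disk area.  If asyncp is set, the write is issued
 * asynchronously and any error goes unreported.
 */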
2390 1.11 oster /* ARGSUSED */
2391 1.269 jld static int
2392 1.316.2.2 skrll raidwrite_component_area(dev_t dev, struct vnode *b_vp, void *data,
2393 1.269 jld size_t msize, daddr_t offset, daddr_t dsize, int asyncp)
2394 1.11 oster {
2395 1.11 oster struct buf *bp;
2396 1.11 oster int error;
2397 1.11 oster
2398 1.11 oster /* get a block of the appropriate size... */
2399 1.269 jld bp = geteblk((int)dsize);
2400 1.11 oster bp->b_dev = dev;
2401 1.11 oster
2402 1.11 oster /* get our ducks in a row for the write */
2403 1.269 jld bp->b_blkno = offset / DEV_BSIZE;
2404 1.269 jld bp->b_bcount = dsize;
2405 1.269 jld bp->b_flags |= B_WRITE | (asyncp ? B_ASYNC : 0);
2406 1.269 jld bp->b_resid = dsize;
2407 1.11 oster
2408 1.269 jld memset(bp->b_data, 0, dsize);
2409 1.269 jld memcpy(bp->b_data, data, msize);
2410 1.11 oster
2411 1.316.2.5 skrll bdev_strategy(bp);
2412 1.269 jld if (asyncp)
2413 1.269 jld return 0;
2414 1.186 perry error = biowait(bp);
2415 1.233 ad brelse(bp, 0);
2416 1.11 oster if (error) {
2417 1.48 oster #if 1
2418 1.11 oster printf("Failed to write RAID component info!\n");
2419 1.48 oster #endif
2420 1.11 oster }
2421 1.11 oster
2422 1.11 oster return(error);
2423 1.1 oster }
2424 1.12 oster
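/*
 * rf_paritymap_kern_write/rf_paritymap_kern_read -- write the in-core
 * parity map to the parity map area of every live component, or read
 * it back, merging the copies from the individual components into a
 * single map.
 */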
2425 1.186 perry void
2426 1.269 jld rf_paritymap_kern_write(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
2427 1.269 jld {
2428 1.269 jld int c;
2429 1.269 jld
2430 1.269 jld for (c = 0; c < raidPtr->numCol; c++) {
2431 1.269 jld /* Skip dead disks. */
2432 1.269 jld if (RF_DEAD_DISK(raidPtr->Disks[c].status))
2433 1.269 jld continue;
2434 1.269 jld /* XXXjld: what if an error occurs here? */
2435 1.269 jld raidwrite_component_area(raidPtr->Disks[c].dev,
2436 1.269 jld raidPtr->raid_cinfo[c].ci_vp, map,
2437 1.269 jld RF_PARITYMAP_NBYTE,
2438 1.276 mrg rf_parity_map_offset(raidPtr),
2439 1.276 mrg rf_parity_map_size(raidPtr), 0);
2440 1.269 jld }
2441 1.269 jld }
2442 1.269 jld
2443 1.269 jld void
2444 1.269 jld rf_paritymap_kern_read(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
2445 1.269 jld {
2446 1.269 jld struct rf_paritymap_ondisk tmp;
2447 1.272 oster int c,first;
2448 1.269 jld
2449 1.272 oster first=1;
2450 1.269 jld for (c = 0; c < raidPtr->numCol; c++) {
2451 1.269 jld /* Skip dead disks. */
2452 1.269 jld if (RF_DEAD_DISK(raidPtr->Disks[c].status))
2453 1.269 jld continue;
2454 1.269 jld raidread_component_area(raidPtr->Disks[c].dev,
2455 1.269 jld raidPtr->raid_cinfo[c].ci_vp, &tmp,
2456 1.269 jld RF_PARITYMAP_NBYTE,
2457 1.276 mrg rf_parity_map_offset(raidPtr),
2458 1.276 mrg rf_parity_map_size(raidPtr));
2459 1.272 oster if (first) {
2460 1.269 jld memcpy(map, &tmp, sizeof(*map));
2461 1.272 oster first = 0;
2462 1.269 jld } else {
2463 1.269 jld rf_paritymap_merge(map, &tmp);
2464 1.269 jld }
2465 1.269 jld }
2466 1.269 jld }
2467 1.269 jld
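/*
 * rf_markalldirty -- bump the mod counter and mark each non-failed,
 * non-spared component and each in-use spare as dirty, so that a label
 * still marked dirty at configuration time indicates the set was not
 * shut down cleanly.
 */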
2468 1.269 jld void
2469 1.169 oster rf_markalldirty(RF_Raid_t *raidPtr)
2470 1.12 oster {
2471 1.269 jld RF_ComponentLabel_t *clabel;
2472 1.146 oster int sparecol;
2473 1.166 oster int c;
2474 1.166 oster int j;
2475 1.166 oster int scol = -1;
2476 1.12 oster
2477 1.12 oster raidPtr->mod_counter++;
2478 1.166 oster for (c = 0; c < raidPtr->numCol; c++) {
2479 1.166 oster /* we don't want to touch (at all) a disk that has
2480 1.166 oster failed */
2481 1.166 oster if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
2482 1.269 jld clabel = raidget_component_label(raidPtr, c);
2483 1.269 jld if (clabel->status == rf_ds_spared) {
2484 1.186 perry /* XXX do something special...
2485 1.186 perry but whatever you do, don't
2486 1.166 oster try to access it!! */
2487 1.166 oster } else {
2488 1.269 jld raidmarkdirty(raidPtr, c);
2489 1.12 oster }
2490 1.166 oster }
2491 1.186 perry }
2492 1.146 oster
2493 1.12 oster for( c = 0; c < raidPtr->numSpare ; c++) {
2494 1.12 oster sparecol = raidPtr->numCol + c;
2495 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2496 1.186 perry /*
2497 1.186 perry
2498 1.186 perry we claim this disk is "optimal" if it's
2499 1.186 perry rf_ds_used_spare, as that means it should be
2500 1.186 perry directly substitutable for the disk it replaced.
2501 1.12 oster We note that too...
2502 1.12 oster
2503 1.12 oster */
2504 1.12 oster
2505 1.166 oster for(j=0;j<raidPtr->numCol;j++) {
2506 1.166 oster if (raidPtr->Disks[j].spareCol == sparecol) {
2507 1.166 oster scol = j;
2508 1.166 oster break;
2509 1.12 oster }
2510 1.12 oster }
2511 1.186 perry
2512 1.269 jld clabel = raidget_component_label(raidPtr, sparecol);
2513 1.12 oster /* make sure status is noted */
2514 1.146 oster
2515 1.269 jld raid_init_component_label(raidPtr, clabel);
2516 1.146 oster
2517 1.269 jld clabel->row = 0;
2518 1.269 jld clabel->column = scol;
2519 1.146 oster /* Note: we *don't* change status from rf_ds_used_spare
2520 1.146 oster to rf_ds_optimal */
2521 1.146 oster /* clabel.status = rf_ds_optimal; */
2522 1.186 perry
2523 1.269 jld raidmarkdirty(raidPtr, sparecol);
2524 1.12 oster }
2525 1.12 oster }
2526 1.12 oster }
2527 1.12 oster
2528 1.13 oster
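/*
 * rf_update_component_labels -- refresh the labels of all optimal
 * components and in-use spares; on a final update
 * (RF_FINAL_COMPONENT_UPDATE) with good parity, also mark them clean.
 */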
2529 1.13 oster void
2530 1.169 oster rf_update_component_labels(RF_Raid_t *raidPtr, int final)
2531 1.13 oster {
2532 1.269 jld RF_ComponentLabel_t *clabel;
2533 1.13 oster int sparecol;
2534 1.166 oster int c;
2535 1.166 oster int j;
2536 1.166 oster int scol;
2537 1.316.2.5 skrll struct raid_softc *rs = raidPtr->softc;
2538 1.13 oster
2539 1.13 oster scol = -1;
2540 1.13 oster
2541 1.186 perry /* XXX should do extra checks to make sure things really are clean,
2542 1.13 oster rather than blindly setting the clean bit... */
2543 1.13 oster
2544 1.13 oster raidPtr->mod_counter++;
2545 1.13 oster
2546 1.166 oster for (c = 0; c < raidPtr->numCol; c++) {
2547 1.166 oster if (raidPtr->Disks[c].status == rf_ds_optimal) {
2548 1.269 jld clabel = raidget_component_label(raidPtr, c);
2549 1.201 oster /* make sure status is noted */
2550 1.269 jld clabel->status = rf_ds_optimal;
2551 1.316.2.2 skrll
2552 1.214 oster /* note what unit we are configured as */
2553 1.316.2.5 skrll if ((rs->sc_cflags & RAIDF_UNIT_CHANGED) == 0)
2554 1.316.2.5 skrll clabel->last_unit = raidPtr->raidid;
2555 1.214 oster
2556 1.269 jld raidflush_component_label(raidPtr, c);
2557 1.166 oster if (final == RF_FINAL_COMPONENT_UPDATE) {
2558 1.166 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2559 1.269 jld raidmarkclean(raidPtr, c);
2560 1.91 oster }
2561 1.166 oster }
2562 1.186 perry }
2563 1.166 oster /* else we don't touch it.. */
2564 1.186 perry }
2565 1.63 oster
2566 1.63 oster for( c = 0; c < raidPtr->numSpare ; c++) {
2567 1.63 oster sparecol = raidPtr->numCol + c;
2568 1.110 oster /* Need to ensure that the reconstruct actually completed! */
2569 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2570 1.186 perry /*
2571 1.186 perry
2572 1.186 perry we claim this disk is "optimal" if it's
2573 1.186 perry rf_ds_used_spare, as that means it should be
2574 1.186 perry directly substitutable for the disk it replaced.
2575 1.63 oster We note that too...
2576 1.63 oster
2577 1.63 oster */
2578 1.63 oster
2579 1.166 oster for(j=0;j<raidPtr->numCol;j++) {
2580 1.166 oster if (raidPtr->Disks[j].spareCol == sparecol) {
2581 1.166 oster scol = j;
2582 1.166 oster break;
2583 1.63 oster }
2584 1.63 oster }
2585 1.186 perry
2586 1.63 oster /* XXX shouldn't *really* need this... */
2587 1.269 jld clabel = raidget_component_label(raidPtr, sparecol);
2588 1.63 oster /* make sure status is noted */
2589 1.63 oster
2590 1.269 jld raid_init_component_label(raidPtr, clabel);
2591 1.269 jld
2592 1.269 jld clabel->column = scol;
2593 1.269 jld clabel->status = rf_ds_optimal;
2594 1.316.2.5 skrll if ((rs->sc_cflags & RAIDF_UNIT_CHANGED) == 0)
2595 1.316.2.5 skrll clabel->last_unit = raidPtr->raidid;
2596 1.63 oster
2597 1.269 jld raidflush_component_label(raidPtr, sparecol);
2598 1.91 oster if (final == RF_FINAL_COMPONENT_UPDATE) {
2599 1.13 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2600 1.269 jld raidmarkclean(raidPtr, sparecol);
2601 1.13 oster }
2602 1.13 oster }
2603 1.13 oster }
2604 1.13 oster }
2605 1.68 oster }
2606 1.68 oster
2607 1.68 oster void
2608 1.169 oster rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
2609 1.69 oster {
2610 1.69 oster
2611 1.69 oster if (vp != NULL) {
2612 1.69 oster if (auto_configured == 1) {
2613 1.96 oster vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2614 1.238 pooka VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2615 1.69 oster vput(vp);
2616 1.186 perry
2617 1.186 perry } else {
2618 1.244 ad (void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred);
2619 1.69 oster }
2620 1.186 perry }
2621 1.69 oster }
2622 1.69 oster
2623 1.69 oster
2624 1.69 oster void
2625 1.169 oster rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
2626 1.68 oster {
2627 1.186 perry int r,c;
2628 1.69 oster struct vnode *vp;
2629 1.69 oster int acd;
2630 1.68 oster
2631 1.68 oster
2632 1.68 oster /* We take this opportunity to close the vnodes like we should.. */
2633 1.68 oster
2634 1.166 oster for (c = 0; c < raidPtr->numCol; c++) {
2635 1.166 oster vp = raidPtr->raid_cinfo[c].ci_vp;
2636 1.166 oster acd = raidPtr->Disks[c].auto_configured;
2637 1.166 oster rf_close_component(raidPtr, vp, acd);
2638 1.166 oster raidPtr->raid_cinfo[c].ci_vp = NULL;
2639 1.166 oster raidPtr->Disks[c].auto_configured = 0;
2640 1.68 oster }
2641 1.166 oster
2642 1.68 oster for (r = 0; r < raidPtr->numSpare; r++) {
2643 1.166 oster vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
2644 1.166 oster acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
2645 1.69 oster rf_close_component(raidPtr, vp, acd);
2646 1.166 oster raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
2647 1.166 oster raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
2648 1.68 oster }
2649 1.37 oster }
2650 1.63 oster
2651 1.37 oster
2652 1.186 perry void
2653 1.169 oster rf_ReconThread(struct rf_recon_req *req)
2654 1.37 oster {
2655 1.37 oster int s;
2656 1.37 oster RF_Raid_t *raidPtr;
2657 1.37 oster
2658 1.37 oster s = splbio();
2659 1.37 oster raidPtr = (RF_Raid_t *) req->raidPtr;
2660 1.37 oster raidPtr->recon_in_progress = 1;
2661 1.37 oster
2662 1.166 oster rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
2663 1.37 oster ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2664 1.37 oster
2665 1.37 oster RF_Free(req, sizeof(*req));
2666 1.37 oster
2667 1.37 oster raidPtr->recon_in_progress = 0;
2668 1.37 oster splx(s);
2669 1.37 oster
2670 1.37 oster /* That's all... */
2671 1.204 simonb kthread_exit(0); /* does not return */
2672 1.37 oster }
2673 1.37 oster
2674 1.37 oster void
2675 1.169 oster rf_RewriteParityThread(RF_Raid_t *raidPtr)
2676 1.37 oster {
2677 1.37 oster int retcode;
2678 1.37 oster int s;
2679 1.37 oster
2680 1.184 oster raidPtr->parity_rewrite_stripes_done = 0;
2681 1.37 oster raidPtr->parity_rewrite_in_progress = 1;
2682 1.37 oster s = splbio();
2683 1.37 oster retcode = rf_RewriteParity(raidPtr);
2684 1.37 oster splx(s);
2685 1.37 oster if (retcode) {
2686 1.279 christos printf("raid%d: Error re-writing parity (%d)!\n",
2687 1.279 christos raidPtr->raidid, retcode);
2688 1.37 oster } else {
2689 1.37 oster /* set the clean bit! If we shutdown correctly,
2690 1.37 oster the clean bit on each component label will get
2691 1.37 oster set */
2692 1.37 oster raidPtr->parity_good = RF_RAID_CLEAN;
2693 1.37 oster }
2694 1.37 oster raidPtr->parity_rewrite_in_progress = 0;
2695 1.85 oster
2696 1.85 oster /* Anyone waiting for us to stop? If so, inform them... */
2697 1.85 oster if (raidPtr->waitShutdown) {
2698 1.85 oster wakeup(&raidPtr->parity_rewrite_in_progress);
2699 1.85 oster }
2700 1.37 oster
2701 1.37 oster /* That's all... */
2702 1.204 simonb kthread_exit(0); /* does not return */
2703 1.37 oster }
2704 1.37 oster
2705 1.37 oster
2706 1.37 oster void
2707 1.169 oster rf_CopybackThread(RF_Raid_t *raidPtr)
2708 1.37 oster {
2709 1.37 oster int s;
2710 1.37 oster
2711 1.37 oster raidPtr->copyback_in_progress = 1;
2712 1.37 oster s = splbio();
2713 1.37 oster rf_CopybackReconstructedData(raidPtr);
2714 1.37 oster splx(s);
2715 1.37 oster raidPtr->copyback_in_progress = 0;
2716 1.37 oster
2717 1.37 oster /* That's all... */
2718 1.204 simonb kthread_exit(0); /* does not return */
2719 1.37 oster }
2720 1.37 oster
2721 1.37 oster
2722 1.37 oster void
2723 1.169 oster rf_ReconstructInPlaceThread(struct rf_recon_req *req)
2724 1.37 oster {
2725 1.37 oster int s;
2726 1.37 oster RF_Raid_t *raidPtr;
2727 1.186 perry
2728 1.37 oster s = splbio();
2729 1.37 oster raidPtr = req->raidPtr;
2730 1.37 oster raidPtr->recon_in_progress = 1;
2731 1.166 oster rf_ReconstructInPlace(raidPtr, req->col);
2732 1.37 oster RF_Free(req, sizeof(*req));
2733 1.37 oster raidPtr->recon_in_progress = 0;
2734 1.37 oster splx(s);
2735 1.37 oster
2736 1.37 oster /* That's all... */
2737 1.204 simonb kthread_exit(0); /* does not return */
2738 1.48 oster }
2739 1.48 oster
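/*
 * rf_get_component -- read the component label from the given device
 * and, if it looks like a plausible RAIDframe component, prepend an
 * entry for it to the autoconfig list; otherwise close the vnode and
 * return the list unchanged.
 */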
2740 1.213 christos static RF_AutoConfig_t *
2741 1.213 christos rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
2742 1.276 mrg const char *cname, RF_SectorCount_t size, uint64_t numsecs,
2743 1.276 mrg unsigned secsize)
2744 1.213 christos {
2745 1.213 christos int good_one = 0;
2746 1.316.2.2 skrll RF_ComponentLabel_t *clabel;
2747 1.213 christos RF_AutoConfig_t *ac;
2748 1.213 christos
2749 1.213 christos clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_NOWAIT);
2750 1.213 christos if (clabel == NULL) {
2751 1.213 christos oomem:
2752 1.213 christos while(ac_list) {
2753 1.213 christos ac = ac_list;
2754 1.213 christos if (ac->clabel)
2755 1.213 christos free(ac->clabel, M_RAIDFRAME);
2756 1.213 christos ac_list = ac_list->next;
2757 1.213 christos free(ac, M_RAIDFRAME);
2758 1.213 christos }
2759 1.213 christos printf("RAID auto config: out of memory!\n");
2760 1.213 christos return NULL; /* XXX probably should panic? */
2761 1.213 christos }
2762 1.213 christos
2763 1.276 mrg if (!raidread_component_label(secsize, dev, vp, clabel)) {
2764 1.276 mrg /* Got the label. Does it look reasonable? */
2765 1.316.2.2 skrll if (rf_reasonable_label(clabel, numsecs) &&
2766 1.282 enami (rf_component_label_partitionsize(clabel) <= size)) {
2767 1.224 oster #ifdef DEBUG
2768 1.276 mrg printf("Component on: %s: %llu\n",
2769 1.213 christos cname, (unsigned long long)size);
2770 1.276 mrg rf_print_component_label(clabel);
2771 1.213 christos #endif
2772 1.276 mrg /* if it's reasonable, add it, else ignore it. */
2773 1.276 mrg ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
2774 1.213 christos M_NOWAIT);
2775 1.276 mrg if (ac == NULL) {
2776 1.276 mrg free(clabel, M_RAIDFRAME);
2777 1.276 mrg goto oomem;
2778 1.276 mrg }
2779 1.276 mrg strlcpy(ac->devname, cname, sizeof(ac->devname));
2780 1.276 mrg ac->dev = dev;
2781 1.276 mrg ac->vp = vp;
2782 1.276 mrg ac->clabel = clabel;
2783 1.276 mrg ac->next = ac_list;
2784 1.276 mrg ac_list = ac;
2785 1.276 mrg good_one = 1;
2786 1.276 mrg }
2787 1.213 christos }
2788 1.213 christos if (!good_one) {
2789 1.213 christos /* cleanup */
2790 1.213 christos free(clabel, M_RAIDFRAME);
2791 1.213 christos vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2792 1.238 pooka VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2793 1.213 christos vput(vp);
2794 1.213 christos }
2795 1.213 christos return ac_list;
2796 1.213 christos }
2797 1.213 christos
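/*
 * rf_find_raid_components -- scan all disk devices in the system for
 * RAIDframe component labels and return them as an RF_AutoConfig_t list.
 * Wedges are examined on the first pass and other disks on the second, so
 * that a wedge covering a whole disk is preferred over the raw partition.
 */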
2798 1.48 oster RF_AutoConfig_t *
2799 1.259 cegger rf_find_raid_components(void)
2800 1.48 oster {
2801 1.48 oster struct vnode *vp;
2802 1.48 oster struct disklabel label;
2803 1.261 dyoung device_t dv;
2804 1.268 dyoung deviter_t di;
2805 1.48 oster dev_t dev;
2806 1.296 buhrow int bmajor, bminor, wedge, rf_part_found;
2807 1.48 oster int error;
2808 1.48 oster int i;
2809 1.48 oster RF_AutoConfig_t *ac_list;
2810 1.276 mrg uint64_t numsecs;
2811 1.276 mrg unsigned secsize;
2812 1.316.2.5 skrll int dowedges;
2813 1.48 oster
2814 1.48 oster /* initialize the AutoConfig list */
2815 1.48 oster ac_list = NULL;
2816 1.48 oster
2817 1.316.2.5 skrll /*
2818 1.316.2.5 skrll 	 * we begin by trolling through *all* the devices on the system *twice*:
2819 1.316.2.5 skrll 	 * first we scan for wedges, second for other devices. This avoids
2820 1.316.2.5 skrll 	 * using a raw partition instead of a wedge that covers the whole disk.
2821 1.316.2.5 skrll */
2822 1.48 oster
2823 1.316.2.5 skrll for (dowedges=1; dowedges>=0; --dowedges) {
2824 1.316.2.5 skrll for (dv = deviter_first(&di, DEVITER_F_ROOT_FIRST); dv != NULL;
2825 1.316.2.5 skrll dv = deviter_next(&di)) {
2826 1.48 oster
2827 1.316.2.5 skrll /* we are only interested in disks... */
2828 1.316.2.5 skrll if (device_class(dv) != DV_DISK)
2829 1.316.2.5 skrll continue;
2830 1.129 oster
2831 1.316.2.5 skrll /* we don't care about floppies... */
2832 1.316.2.5 skrll if (device_is_a(dv, "fd")) {
2833 1.316.2.5 skrll continue;
2834 1.316.2.5 skrll }
2835 1.129 oster
2836 1.316.2.5 skrll /* we don't care about CD's... */
2837 1.316.2.5 skrll if (device_is_a(dv, "cd")) {
2838 1.316.2.5 skrll continue;
2839 1.316.2.5 skrll }
2840 1.248 oster
2841 1.316.2.5 skrll /* we don't care about md's... */
2842 1.316.2.5 skrll if (device_is_a(dv, "md")) {
2843 1.316.2.5 skrll continue;
2844 1.316.2.5 skrll }
2845 1.206 thorpej
2846 1.316.2.5 skrll /* hdfd is the Atari/Hades floppy driver */
2847 1.316.2.5 skrll if (device_is_a(dv, "hdfd")) {
2848 1.316.2.5 skrll continue;
2849 1.316.2.5 skrll }
2850 1.186 perry
2851 1.316.2.5 skrll /* fdisa is the Atari/Milan floppy driver */
2852 1.316.2.5 skrll if (device_is_a(dv, "fdisa")) {
2853 1.316.2.5 skrll continue;
2854 1.316.2.5 skrll }
2855 1.48 oster
2856 1.316.2.5 skrll 			/* are we in the wedges pass? */
2857 1.316.2.5 skrll wedge = device_is_a(dv, "dk");
2858 1.316.2.5 skrll if (wedge != dowedges) {
2859 1.316.2.5 skrll continue;
2860 1.316.2.5 skrll }
2861 1.296 buhrow
2862 1.316.2.5 skrll /* need to find the device_name_to_block_device_major stuff */
2863 1.316.2.5 skrll bmajor = devsw_name2blk(device_xname(dv), NULL, 0);
2864 1.48 oster
2865 1.316.2.5 skrll 			rf_part_found = 0; /* No raid partition as yet */
2866 1.48 oster
2867 1.316.2.5 skrll /* get a vnode for the raw partition of this disk */
2868 1.316.2.5 skrll bminor = minor(device_unit(dv));
2869 1.316.2.5 skrll dev = wedge ? makedev(bmajor, bminor) :
2870 1.316.2.5 skrll MAKEDISKDEV(bmajor, bminor, RAW_PART);
2871 1.316.2.5 skrll if (bdevvp(dev, &vp))
2872 1.316.2.5 skrll panic("RAID can't alloc vnode");
2873 1.48 oster
2874 1.316.2.5 skrll error = VOP_OPEN(vp, FREAD | FSILENT, NOCRED);
2875 1.48 oster
2876 1.213 christos if (error) {
2877 1.316.2.5 skrll /* "Who cares." Continue looking
2878 1.316.2.5 skrll 				   for something that exists */
2879 1.241 oster vput(vp);
2880 1.213 christos continue;
2881 1.213 christos }
2882 1.213 christos
2883 1.316.2.5 skrll error = getdisksize(vp, &numsecs, &secsize);
2884 1.316.2.5 skrll if (error) {
2885 1.316.2.5 skrll /*
2886 1.316.2.5 skrll * Pseudo devices like vnd and cgd can be
2887 1.316.2.5 skrll * opened but may still need some configuration.
2888 1.316.2.5 skrll * Ignore these quietly.
2889 1.316.2.5 skrll */
2890 1.316.2.5 skrll if (error != ENXIO)
2891 1.316.2.5 skrll printf("RAIDframe: can't get disk size"
2892 1.316.2.5 skrll " for dev %s (%d)\n",
2893 1.316.2.5 skrll device_xname(dv), error);
2894 1.241 oster vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2895 1.241 oster VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2896 1.241 oster vput(vp);
2897 1.228 christos continue;
2898 1.241 oster }
2899 1.316.2.5 skrll if (wedge) {
2900 1.316.2.5 skrll struct dkwedge_info dkw;
2901 1.316.2.5 skrll error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
2902 1.316.2.5 skrll NOCRED);
2903 1.316.2.5 skrll if (error) {
2904 1.316.2.5 skrll printf("RAIDframe: can't get wedge info for "
2905 1.316.2.5 skrll "dev %s (%d)\n", device_xname(dv), error);
2906 1.316.2.5 skrll vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2907 1.316.2.5 skrll VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2908 1.316.2.5 skrll vput(vp);
2909 1.316.2.5 skrll continue;
2910 1.316.2.5 skrll }
2911 1.316.2.2 skrll
2912 1.316.2.5 skrll if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0) {
2913 1.316.2.5 skrll vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2914 1.316.2.5 skrll VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2915 1.316.2.5 skrll vput(vp);
2916 1.316.2.5 skrll continue;
2917 1.316.2.5 skrll }
2918 1.316.2.7 skrll
2919 1.316.2.5 skrll ac_list = rf_get_component(ac_list, dev, vp,
2920 1.316.2.5 skrll device_xname(dv), dkw.dkw_size, numsecs, secsize);
2921 1.316.2.5 skrll 				rf_part_found = 1; /* There is a raid component on this disk */
2922 1.316.2.5 skrll continue;
2923 1.316.2.5 skrll }
2924 1.48 oster
2925 1.316.2.5 skrll /* Ok, the disk exists. Go get the disklabel. */
2926 1.316.2.5 skrll error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED);
2927 1.316.2.5 skrll if (error) {
2928 1.316.2.5 skrll /*
2929 1.316.2.5 skrll * XXX can't happen - open() would
2930 1.316.2.5 skrll * have errored out (or faked up one)
2931 1.316.2.5 skrll */
2932 1.316.2.5 skrll if (error != ENOTTY)
2933 1.316.2.5 skrll printf("RAIDframe: can't get label for dev "
2934 1.316.2.5 skrll "%s (%d)\n", device_xname(dv), error);
2935 1.316.2.5 skrll }
2936 1.181 thorpej
2937 1.316.2.5 skrll /* don't need this any more. We'll allocate it again
2938 1.316.2.5 skrll a little later if we really do... */
2939 1.316.2.5 skrll vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2940 1.316.2.5 skrll VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2941 1.316.2.5 skrll vput(vp);
2942 1.213 christos
2943 1.316.2.5 skrll if (error)
2944 1.48 oster continue;
2945 1.48 oster
2946 1.316.2.5 skrll 			rf_part_found = 0; /* No raid partitions yet */
2947 1.316.2.5 skrll for (i = 0; i < label.d_npartitions; i++) {
2948 1.316.2.5 skrll char cname[sizeof(ac_list->devname)];
2949 1.316.2.5 skrll
2950 1.316.2.5 skrll /* We only support partitions marked as RAID */
2951 1.316.2.5 skrll if (label.d_partitions[i].p_fstype != FS_RAID)
2952 1.316.2.5 skrll continue;
2953 1.316.2.5 skrll
2954 1.316.2.5 skrll dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
2955 1.316.2.5 skrll if (bdevvp(dev, &vp))
2956 1.316.2.5 skrll panic("RAID can't alloc vnode");
2957 1.316.2.5 skrll
2958 1.316.2.5 skrll error = VOP_OPEN(vp, FREAD, NOCRED);
2959 1.316.2.5 skrll if (error) {
2960 1.316.2.5 skrll /* Whatever... */
2961 1.316.2.5 skrll vput(vp);
2962 1.316.2.5 skrll continue;
2963 1.316.2.5 skrll }
2964 1.316.2.5 skrll snprintf(cname, sizeof(cname), "%s%c",
2965 1.316.2.5 skrll device_xname(dv), 'a' + i);
2966 1.316.2.5 skrll ac_list = rf_get_component(ac_list, dev, vp, cname,
2967 1.316.2.5 skrll label.d_partitions[i].p_size, numsecs, secsize);
2968 1.296 buhrow 				rf_part_found = 1; /* There is at least one raid partition on this disk */
2969 1.316.2.5 skrll }
2970 1.296 buhrow
2971 1.316.2.5 skrll 		/*
2972 1.316.2.5 skrll 		 * If there is no raid component on this disk, either in a
2973 1.316.2.5 skrll 		 * disklabel or inside a wedge, check the raw partition as
2974 1.316.2.5 skrll 		 * well, as it is possible to configure raid components on
2975 1.316.2.5 skrll 		 * raw disk devices.
2976 1.316.2.5 skrll 		 */
2977 1.296 buhrow
2978 1.316.2.5 skrll if (!rf_part_found) {
2979 1.316.2.5 skrll char cname[sizeof(ac_list->devname)];
2980 1.296 buhrow
2981 1.316.2.5 skrll dev = MAKEDISKDEV(bmajor, device_unit(dv), RAW_PART);
2982 1.316.2.5 skrll if (bdevvp(dev, &vp))
2983 1.316.2.5 skrll panic("RAID can't alloc vnode");
2984 1.316.2.5 skrll
2985 1.316.2.5 skrll error = VOP_OPEN(vp, FREAD, NOCRED);
2986 1.316.2.5 skrll if (error) {
2987 1.316.2.5 skrll /* Whatever... */
2988 1.316.2.5 skrll vput(vp);
2989 1.316.2.5 skrll continue;
2990 1.316.2.5 skrll }
2991 1.316.2.5 skrll snprintf(cname, sizeof(cname), "%s%c",
2992 1.316.2.5 skrll device_xname(dv), 'a' + RAW_PART);
2993 1.316.2.5 skrll ac_list = rf_get_component(ac_list, dev, vp, cname,
2994 1.316.2.5 skrll label.d_partitions[RAW_PART].p_size, numsecs, secsize);
2995 1.296 buhrow }
2996 1.48 oster }
2997 1.316.2.5 skrll deviter_release(&di);
2998 1.48 oster }
2999 1.213 christos return ac_list;
3000 1.48 oster }
3001 1.186 perry
3002 1.213 christos
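/*
 * rf_reasonable_label -- sanity-check a component label.  Returns 1 if the
 * label fields look valid (clearing any stale numBlocksHi/partitionSizeHi
 * garbage via rf_fix_old_label_size() when the disk size is known), and 0
 * otherwise.
 */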
3003 1.292 oster int
3004 1.284 mrg rf_reasonable_label(RF_ComponentLabel_t *clabel, uint64_t numsecs)
3005 1.48 oster {
3006 1.186 perry
3007 1.48 oster if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
3008 1.48 oster (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
3009 1.48 oster ((clabel->clean == RF_RAID_CLEAN) ||
3010 1.48 oster (clabel->clean == RF_RAID_DIRTY)) &&
3011 1.186 perry clabel->row >=0 &&
3012 1.186 perry clabel->column >= 0 &&
3013 1.48 oster clabel->num_rows > 0 &&
3014 1.48 oster clabel->num_columns > 0 &&
3015 1.186 perry clabel->row < clabel->num_rows &&
3016 1.48 oster clabel->column < clabel->num_columns &&
3017 1.48 oster clabel->blockSize > 0 &&
3018 1.282 enami /*
3019 1.282 enami * numBlocksHi may contain garbage, but it is ok since
3020 1.282 enami * the type is unsigned. If it is really garbage,
3021 1.282 enami * rf_fix_old_label_size() will fix it.
3022 1.282 enami */
3023 1.282 enami rf_component_label_numblocks(clabel) > 0) {
3024 1.284 mrg /*
3025 1.284 mrg * label looks reasonable enough...
3026 1.284 mrg * let's make sure it has no old garbage.
3027 1.284 mrg */
3028 1.292 oster if (numsecs)
3029 1.292 oster rf_fix_old_label_size(clabel, numsecs);
3030 1.48 oster return(1);
3031 1.48 oster }
3032 1.48 oster return(0);
3033 1.48 oster }
3034 1.48 oster
3035 1.48 oster
3036 1.278 mrg /*
3037 1.278 mrg * For reasons yet unknown, some old component labels have garbage in
3038 1.278 mrg * the newer numBlocksHi region, and this causes lossage. Since those
3039 1.278 mrg * disks will also have numsecs set to less than 32 bits of sectors,
3040 1.299 oster * we can determine when this corruption has occurred, and fix it.
3041 1.284 mrg *
3042 1.284 mrg * The exact same problem, with the same unknown reason, happens to
3043 1.284 mrg * the partitionSizeHi member as well.
3044 1.278 mrg */
3045 1.278 mrg static void
3046 1.278 mrg rf_fix_old_label_size(RF_ComponentLabel_t *clabel, uint64_t numsecs)
3047 1.278 mrg {
3048 1.278 mrg
3049 1.284 mrg if (numsecs < ((uint64_t)1 << 32)) {
3050 1.284 mrg if (clabel->numBlocksHi) {
3051 1.284 mrg printf("WARNING: total sectors < 32 bits, yet "
3052 1.284 mrg "numBlocksHi set\n"
3053 1.284 mrg "WARNING: resetting numBlocksHi to zero.\n");
3054 1.284 mrg clabel->numBlocksHi = 0;
3055 1.284 mrg }
3056 1.284 mrg
3057 1.284 mrg if (clabel->partitionSizeHi) {
3058 1.284 mrg printf("WARNING: total sectors < 32 bits, yet "
3059 1.284 mrg "partitionSizeHi set\n"
3060 1.284 mrg "WARNING: resetting partitionSizeHi to zero.\n");
3061 1.284 mrg clabel->partitionSizeHi = 0;
3062 1.284 mrg }
3063 1.278 mrg }
3064 1.278 mrg }
3065 1.278 mrg
3066 1.278 mrg
3067 1.224 oster #ifdef DEBUG
3068 1.48 oster void
3069 1.169 oster rf_print_component_label(RF_ComponentLabel_t *clabel)
3070 1.48 oster {
3071 1.282 enami uint64_t numBlocks;
3072 1.308 christos static const char *rp[] = {
3073 1.308 christos "No", "Force", "Soft", "*invalid*"
3074 1.308 christos };
3075 1.308 christos
3076 1.275 mrg
3077 1.282 enami numBlocks = rf_component_label_numblocks(clabel);
3078 1.275 mrg
3079 1.48 oster printf(" Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
3080 1.186 perry clabel->row, clabel->column,
3081 1.48 oster clabel->num_rows, clabel->num_columns);
3082 1.48 oster printf(" Version: %d Serial Number: %d Mod Counter: %d\n",
3083 1.48 oster clabel->version, clabel->serial_number,
3084 1.48 oster clabel->mod_counter);
3085 1.48 oster printf(" Clean: %s Status: %d\n",
3086 1.271 dyoung clabel->clean ? "Yes" : "No", clabel->status);
3087 1.48 oster printf(" sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
3088 1.48 oster clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
3089 1.275 mrg printf(" RAID Level: %c blocksize: %d numBlocks: %"PRIu64"\n",
3090 1.275 mrg (char) clabel->parityConfig, clabel->blockSize, numBlocks);
3091 1.271 dyoung printf(" Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No");
3092 1.308 christos printf(" Root partition: %s\n", rp[clabel->root_partition & 3]);
3093 1.271 dyoung printf(" Last configured as: raid%d\n", clabel->last_unit);
3094 1.51 oster #if 0
3095 1.51 oster printf(" Config order: %d\n", clabel->config_order);
3096 1.51 oster #endif
3097 1.186 perry
3098 1.48 oster }
3099 1.133 oster #endif
3100 1.48 oster
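/*
 * rf_create_auto_sets -- sort the discovered components into configuration
 * sets, grouping together components whose labels indicate that they belong
 * to the same RAID set.
 */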
3101 1.48 oster RF_ConfigSet_t *
3102 1.169 oster rf_create_auto_sets(RF_AutoConfig_t *ac_list)
3103 1.48 oster {
3104 1.48 oster RF_AutoConfig_t *ac;
3105 1.48 oster RF_ConfigSet_t *config_sets;
3106 1.48 oster RF_ConfigSet_t *cset;
3107 1.48 oster RF_AutoConfig_t *ac_next;
3108 1.48 oster
3109 1.48 oster
3110 1.48 oster config_sets = NULL;
3111 1.48 oster
3112 1.48 oster /* Go through the AutoConfig list, and figure out which components
3113 1.48 oster belong to what sets. */
3114 1.48 oster ac = ac_list;
3115 1.48 oster while(ac!=NULL) {
3116 1.48 oster /* we're going to putz with ac->next, so save it here
3117 1.48 oster for use at the end of the loop */
3118 1.48 oster ac_next = ac->next;
3119 1.48 oster
3120 1.48 oster if (config_sets == NULL) {
3121 1.48 oster /* will need at least this one... */
3122 1.48 oster config_sets = (RF_ConfigSet_t *)
3123 1.186 perry malloc(sizeof(RF_ConfigSet_t),
3124 1.48 oster M_RAIDFRAME, M_NOWAIT);
3125 1.48 oster if (config_sets == NULL) {
3126 1.141 provos panic("rf_create_auto_sets: No memory!");
3127 1.48 oster }
3128 1.48 oster /* this one is easy :) */
3129 1.48 oster config_sets->ac = ac;
3130 1.48 oster config_sets->next = NULL;
3131 1.51 oster config_sets->rootable = 0;
3132 1.48 oster ac->next = NULL;
3133 1.48 oster } else {
3134 1.48 oster /* which set does this component fit into? */
3135 1.48 oster cset = config_sets;
3136 1.48 oster while(cset!=NULL) {
3137 1.49 oster if (rf_does_it_fit(cset, ac)) {
3138 1.86 oster /* looks like it matches... */
3139 1.86 oster ac->next = cset->ac;
3140 1.86 oster cset->ac = ac;
3141 1.48 oster break;
3142 1.48 oster }
3143 1.48 oster cset = cset->next;
3144 1.48 oster }
3145 1.48 oster if (cset==NULL) {
3146 1.48 oster 				/* didn't find a match above... new set.. */
3147 1.48 oster cset = (RF_ConfigSet_t *)
3148 1.186 perry malloc(sizeof(RF_ConfigSet_t),
3149 1.48 oster M_RAIDFRAME, M_NOWAIT);
3150 1.48 oster if (cset == NULL) {
3151 1.141 provos panic("rf_create_auto_sets: No memory!");
3152 1.48 oster }
3153 1.48 oster cset->ac = ac;
3154 1.48 oster ac->next = NULL;
3155 1.48 oster cset->next = config_sets;
3156 1.51 oster cset->rootable = 0;
3157 1.48 oster config_sets = cset;
3158 1.48 oster }
3159 1.48 oster }
3160 1.48 oster ac = ac_next;
3161 1.48 oster }
3162 1.48 oster
3163 1.48 oster
3164 1.48 oster return(config_sets);
3165 1.48 oster }
3166 1.48 oster
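/*
 * rf_does_it_fit -- return 1 if the component's label matches the first
 * member of the given configuration set closely enough for it to belong to
 * that set, and 0 otherwise.
 */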
3167 1.48 oster static int
3168 1.169 oster rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
3169 1.48 oster {
3170 1.48 oster RF_ComponentLabel_t *clabel1, *clabel2;
3171 1.48 oster
3172 1.48 oster /* If this one matches the *first* one in the set, that's good
3173 1.48 oster enough, since the other members of the set would have been
3174 1.48 oster through here too... */
3175 1.60 oster /* note that we are not checking partitionSize here..
3176 1.60 oster
3177 1.60 oster Note that we are also not checking the mod_counters here.
3178 1.299 oster If everything else matches except the mod_counter, that's
3179 1.60 oster good enough for this test. We will deal with the mod_counters
3180 1.186 perry a little later in the autoconfiguration process.
3181 1.60 oster
3182 1.60 oster (clabel1->mod_counter == clabel2->mod_counter) &&
3183 1.81 oster
3184 1.81 oster The reason we don't check for this is that failed disks
3185 1.81 oster will have lower modification counts. If those disks are
3186 1.81 oster not added to the set they used to belong to, then they will
3187 1.81 oster form their own set, which may result in 2 different sets,
3188 1.81 oster for example, competing to be configured at raid0, and
3189 1.81 oster perhaps competing to be the root filesystem set. If the
3190 1.81 oster wrong ones get configured, or both attempt to become /,
3191 1.81 oster 	   weird behaviour and/or serious lossage will occur. Thus we
3192 1.81 oster need to bring them into the fold here, and kick them out at
3193 1.81 oster a later point.
3194 1.60 oster
3195 1.60 oster */
3196 1.48 oster
3197 1.48 oster clabel1 = cset->ac->clabel;
3198 1.48 oster clabel2 = ac->clabel;
3199 1.48 oster if ((clabel1->version == clabel2->version) &&
3200 1.48 oster (clabel1->serial_number == clabel2->serial_number) &&
3201 1.48 oster (clabel1->num_rows == clabel2->num_rows) &&
3202 1.48 oster (clabel1->num_columns == clabel2->num_columns) &&
3203 1.48 oster (clabel1->sectPerSU == clabel2->sectPerSU) &&
3204 1.48 oster (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
3205 1.48 oster (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
3206 1.48 oster (clabel1->parityConfig == clabel2->parityConfig) &&
3207 1.48 oster (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
3208 1.48 oster (clabel1->blockSize == clabel2->blockSize) &&
3209 1.282 enami rf_component_label_numblocks(clabel1) ==
3210 1.282 enami rf_component_label_numblocks(clabel2) &&
3211 1.48 oster (clabel1->autoconfigure == clabel2->autoconfigure) &&
3212 1.48 oster (clabel1->root_partition == clabel2->root_partition) &&
3213 1.48 oster (clabel1->last_unit == clabel2->last_unit) &&
3214 1.48 oster (clabel1->config_order == clabel2->config_order)) {
3215 1.48 oster 		/* if it gets here, it almost *has* to be a match */
3216 1.48 oster } else {
3217 1.186 perry /* it's not consistent with somebody in the set..
3218 1.48 oster punt */
3219 1.48 oster return(0);
3220 1.48 oster }
3221 1.48 oster /* all was fine.. it must fit... */
3222 1.48 oster return(1);
3223 1.48 oster }
3224 1.48 oster
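/*
 * rf_have_enough_components -- decide whether a configuration set has
 * enough live components (at the newest mod_counter) to be configured.
 * RAID 1 is special-cased so that only losing both halves of a mirror pair
 * is fatal; RAID 0 tolerates no missing components and RAID 4/5 tolerate
 * at most one.
 */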
3225 1.48 oster int
3226 1.169 oster rf_have_enough_components(RF_ConfigSet_t *cset)
3227 1.48 oster {
3228 1.51 oster RF_AutoConfig_t *ac;
3229 1.51 oster RF_AutoConfig_t *auto_config;
3230 1.51 oster RF_ComponentLabel_t *clabel;
3231 1.166 oster int c;
3232 1.51 oster int num_cols;
3233 1.51 oster int num_missing;
3234 1.86 oster int mod_counter;
3235 1.87 oster int mod_counter_found;
3236 1.88 oster int even_pair_failed;
3237 1.88 oster char parity_type;
3238 1.186 perry
3239 1.51 oster
3240 1.48 oster /* check to see that we have enough 'live' components
3241 1.48 oster of this set. If so, we can configure it if necessary */
3242 1.48 oster
3243 1.51 oster num_cols = cset->ac->clabel->num_columns;
3244 1.88 oster parity_type = cset->ac->clabel->parityConfig;
3245 1.51 oster
3246 1.51 oster /* XXX Check for duplicate components!?!?!? */
3247 1.51 oster
3248 1.86 oster /* Determine what the mod_counter is supposed to be for this set. */
3249 1.86 oster
3250 1.87 oster mod_counter_found = 0;
3251 1.101 oster mod_counter = 0;
3252 1.86 oster ac = cset->ac;
3253 1.86 oster while(ac!=NULL) {
3254 1.87 oster if (mod_counter_found==0) {
3255 1.86 oster mod_counter = ac->clabel->mod_counter;
3256 1.87 oster mod_counter_found = 1;
3257 1.87 oster } else {
3258 1.87 oster if (ac->clabel->mod_counter > mod_counter) {
3259 1.87 oster mod_counter = ac->clabel->mod_counter;
3260 1.87 oster }
3261 1.86 oster }
3262 1.86 oster ac = ac->next;
3263 1.86 oster }
3264 1.86 oster
3265 1.51 oster num_missing = 0;
3266 1.51 oster auto_config = cset->ac;
3267 1.51 oster
3268 1.166 oster even_pair_failed = 0;
3269 1.166 oster for(c=0; c<num_cols; c++) {
3270 1.166 oster ac = auto_config;
3271 1.166 oster while(ac!=NULL) {
3272 1.186 perry if ((ac->clabel->column == c) &&
3273 1.166 oster (ac->clabel->mod_counter == mod_counter)) {
3274 1.166 oster /* it's this one... */
3275 1.224 oster #ifdef DEBUG
3276 1.166 oster printf("Found: %s at %d\n",
3277 1.166 oster ac->devname,c);
3278 1.51 oster #endif
3279 1.166 oster break;
3280 1.51 oster }
3281 1.166 oster ac=ac->next;
3282 1.166 oster }
3283 1.166 oster if (ac==NULL) {
3284 1.51 oster /* Didn't find one here! */
3285 1.88 oster /* special case for RAID 1, especially
3286 1.88 oster where there are more than 2
3287 1.88 oster components (where RAIDframe treats
3288 1.88 oster things a little differently :( ) */
3289 1.166 oster if (parity_type == '1') {
3290 1.166 oster if (c%2 == 0) { /* even component */
3291 1.166 oster even_pair_failed = 1;
3292 1.166 oster } else { /* odd component. If
3293 1.166 oster we're failed, and
3294 1.166 oster so is the even
3295 1.166 oster component, it's
3296 1.166 oster "Good Night, Charlie" */
3297 1.166 oster if (even_pair_failed == 1) {
3298 1.166 oster return(0);
3299 1.88 oster }
3300 1.88 oster }
3301 1.166 oster } else {
3302 1.166 oster /* normal accounting */
3303 1.166 oster num_missing++;
3304 1.88 oster }
3305 1.166 oster }
3306 1.166 oster if ((parity_type == '1') && (c%2 == 1)) {
3307 1.88 oster 			/* Just finished checking an even/odd pair, and
3308 1.186 perry 			   we didn't bail.. reset the even_pair_failed
3309 1.88 oster 			   flag, and go on to the next component.... */
3310 1.166 oster even_pair_failed = 0;
3311 1.51 oster }
3312 1.51 oster }
3313 1.51 oster
3314 1.51 oster clabel = cset->ac->clabel;
3315 1.51 oster
3316 1.51 oster if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
3317 1.51 oster ((clabel->parityConfig == '4') && (num_missing > 1)) ||
3318 1.51 oster ((clabel->parityConfig == '5') && (num_missing > 1))) {
3319 1.51 oster /* XXX this needs to be made *much* more general */
3320 1.51 oster /* Too many failures */
3321 1.51 oster return(0);
3322 1.51 oster }
3323 1.51 oster /* otherwise, all is well, and we've got enough to take a kick
3324 1.51 oster at autoconfiguring this set */
3325 1.51 oster return(1);
3326 1.48 oster }
3327 1.48 oster
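/*
 * rf_create_configuration -- build an RF_Config_t for a configuration set
 * from the information stored in its component labels.
 */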
3328 1.48 oster void
3329 1.169 oster rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
3330 1.222 christos RF_Raid_t *raidPtr)
3331 1.48 oster {
3332 1.48 oster RF_ComponentLabel_t *clabel;
3333 1.77 oster int i;
3334 1.48 oster
3335 1.48 oster clabel = ac->clabel;
3336 1.48 oster
3337 1.48 oster /* 1. Fill in the common stuff */
3338 1.166 oster config->numRow = clabel->num_rows = 1;
3339 1.48 oster config->numCol = clabel->num_columns;
3340 1.48 oster config->numSpare = 0; /* XXX should this be set here? */
3341 1.48 oster config->sectPerSU = clabel->sectPerSU;
3342 1.48 oster config->SUsPerPU = clabel->SUsPerPU;
3343 1.48 oster config->SUsPerRU = clabel->SUsPerRU;
3344 1.48 oster config->parityConfig = clabel->parityConfig;
3345 1.48 oster /* XXX... */
3346 1.48 oster strcpy(config->diskQueueType,"fifo");
3347 1.48 oster config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3348 1.48 oster config->layoutSpecificSize = 0; /* XXX ?? */
3349 1.48 oster
3350 1.48 oster while(ac!=NULL) {
3351 1.48 oster /* row/col values will be in range due to the checks
3352 1.48 oster 		   in rf_reasonable_label() */
3353 1.166 oster strcpy(config->devnames[0][ac->clabel->column],
3354 1.48 oster ac->devname);
3355 1.48 oster ac = ac->next;
3356 1.48 oster }
3357 1.48 oster
3358 1.77 oster for(i=0;i<RF_MAXDBGV;i++) {
3359 1.163 fvdl config->debugVars[i][0] = 0;
3360 1.77 oster }
3361 1.48 oster }
3362 1.48 oster
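/*
 * rf_set_autoconfig -- set the autoconfigure flag in the component labels
 * of all optimal components and used spares, and return the new value.
 */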
3363 1.48 oster int
3364 1.169 oster rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
3365 1.48 oster {
3366 1.269 jld RF_ComponentLabel_t *clabel;
3367 1.166 oster int column;
3368 1.148 oster int sparecol;
3369 1.48 oster
3370 1.54 oster raidPtr->autoconfigure = new_value;
3371 1.166 oster
3372 1.166 oster for(column=0; column<raidPtr->numCol; column++) {
3373 1.166 oster if (raidPtr->Disks[column].status == rf_ds_optimal) {
3374 1.269 jld clabel = raidget_component_label(raidPtr, column);
3375 1.269 jld clabel->autoconfigure = new_value;
3376 1.269 jld raidflush_component_label(raidPtr, column);
3377 1.48 oster }
3378 1.48 oster }
3379 1.148 oster for(column = 0; column < raidPtr->numSpare ; column++) {
3380 1.148 oster sparecol = raidPtr->numCol + column;
3381 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3382 1.269 jld clabel = raidget_component_label(raidPtr, sparecol);
3383 1.269 jld clabel->autoconfigure = new_value;
3384 1.269 jld raidflush_component_label(raidPtr, sparecol);
3385 1.148 oster }
3386 1.148 oster }
3387 1.48 oster return(new_value);
3388 1.48 oster }
3389 1.48 oster
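/*
 * rf_set_rootpartition -- set the root_partition field in the component
 * labels of all optimal components and used spares, and return the new
 * value.
 */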
3390 1.48 oster int
3391 1.169 oster rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
3392 1.48 oster {
3393 1.269 jld RF_ComponentLabel_t *clabel;
3394 1.166 oster int column;
3395 1.148 oster int sparecol;
3396 1.48 oster
3397 1.54 oster raidPtr->root_partition = new_value;
3398 1.166 oster for(column=0; column<raidPtr->numCol; column++) {
3399 1.166 oster if (raidPtr->Disks[column].status == rf_ds_optimal) {
3400 1.269 jld clabel = raidget_component_label(raidPtr, column);
3401 1.269 jld clabel->root_partition = new_value;
3402 1.269 jld raidflush_component_label(raidPtr, column);
3403 1.148 oster }
3404 1.148 oster }
3405 1.148 oster for(column = 0; column < raidPtr->numSpare ; column++) {
3406 1.148 oster sparecol = raidPtr->numCol + column;
3407 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3408 1.269 jld clabel = raidget_component_label(raidPtr, sparecol);
3409 1.269 jld clabel->root_partition = new_value;
3410 1.269 jld raidflush_component_label(raidPtr, sparecol);
3411 1.48 oster }
3412 1.48 oster }
3413 1.48 oster return(new_value);
3414 1.48 oster }
3415 1.48 oster
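/*
 * rf_release_all_vps -- close and release the vnodes held by the components
 * of a configuration set.
 */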
3416 1.48 oster void
3417 1.169 oster rf_release_all_vps(RF_ConfigSet_t *cset)
3418 1.48 oster {
3419 1.48 oster RF_AutoConfig_t *ac;
3420 1.186 perry
3421 1.48 oster ac = cset->ac;
3422 1.48 oster while(ac!=NULL) {
3423 1.48 oster /* Close the vp, and give it back */
3424 1.48 oster if (ac->vp) {
3425 1.96 oster vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3426 1.316.2.5 skrll VOP_CLOSE(ac->vp, FREAD | FWRITE, NOCRED);
3427 1.48 oster vput(ac->vp);
3428 1.86 oster ac->vp = NULL;
3429 1.48 oster }
3430 1.48 oster ac = ac->next;
3431 1.48 oster }
3432 1.48 oster }
3433 1.48 oster
3434 1.48 oster
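/*
 * rf_cleanup_config_set -- free the component labels, the autoconfig
 * entries, and the configuration set itself.
 */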
3435 1.48 oster void
3436 1.169 oster rf_cleanup_config_set(RF_ConfigSet_t *cset)
3437 1.48 oster {
3438 1.48 oster RF_AutoConfig_t *ac;
3439 1.48 oster RF_AutoConfig_t *next_ac;
3440 1.186 perry
3441 1.48 oster ac = cset->ac;
3442 1.48 oster while(ac!=NULL) {
3443 1.48 oster next_ac = ac->next;
3444 1.48 oster /* nuke the label */
3445 1.48 oster free(ac->clabel, M_RAIDFRAME);
3446 1.48 oster /* cleanup the config structure */
3447 1.48 oster free(ac, M_RAIDFRAME);
3448 1.48 oster /* "next.." */
3449 1.48 oster ac = next_ac;
3450 1.48 oster }
3451 1.48 oster /* and, finally, nuke the config set */
3452 1.48 oster free(cset, M_RAIDFRAME);
3453 1.48 oster }
3454 1.48 oster
3455 1.48 oster
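/*
 * raid_init_component_label -- initialize a component label from the
 * current state of the RAID set.
 */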
3456 1.48 oster void
3457 1.169 oster raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
3458 1.48 oster {
3459 1.48 oster /* current version number */
3460 1.186 perry clabel->version = RF_COMPONENT_LABEL_VERSION;
3461 1.57 oster clabel->serial_number = raidPtr->serial_number;
3462 1.48 oster clabel->mod_counter = raidPtr->mod_counter;
3463 1.269 jld
3464 1.166 oster clabel->num_rows = 1;
3465 1.48 oster clabel->num_columns = raidPtr->numCol;
3466 1.48 oster clabel->clean = RF_RAID_DIRTY; /* not clean */
3467 1.48 oster clabel->status = rf_ds_optimal; /* "It's good!" */
3468 1.186 perry
3469 1.48 oster clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
3470 1.48 oster clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
3471 1.48 oster clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
3472 1.54 oster
3473 1.54 oster clabel->blockSize = raidPtr->bytesPerSector;
3474 1.282 enami rf_component_label_set_numblocks(clabel, raidPtr->sectorsPerDisk);
3475 1.54 oster
3476 1.48 oster /* XXX not portable */
3477 1.48 oster clabel->parityConfig = raidPtr->Layout.map->parityConfig;
3478 1.54 oster clabel->maxOutstanding = raidPtr->maxOutstanding;
3479 1.54 oster clabel->autoconfigure = raidPtr->autoconfigure;
3480 1.54 oster clabel->root_partition = raidPtr->root_partition;
3481 1.48 oster clabel->last_unit = raidPtr->raidid;
3482 1.54 oster clabel->config_order = raidPtr->config_order;
3483 1.269 jld
3484 1.269 jld #ifndef RF_NO_PARITY_MAP
3485 1.269 jld rf_paritymap_init_label(raidPtr->parity_map, clabel);
3486 1.269 jld #endif
3487 1.51 oster }
3488 1.51 oster
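/*
 * rf_auto_config_set -- configure the RAID set described by a configuration
 * set, attaching it at its last-used unit number if that unit is free.
 * Returns the new softc on success, or NULL on failure.
 */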
3489 1.300 christos struct raid_softc *
3490 1.300 christos rf_auto_config_set(RF_ConfigSet_t *cset)
3491 1.51 oster {
3492 1.51 oster RF_Raid_t *raidPtr;
3493 1.51 oster RF_Config_t *config;
3494 1.51 oster int raidID;
3495 1.300 christos struct raid_softc *sc;
3496 1.51 oster
3497 1.224 oster #ifdef DEBUG
3498 1.72 oster printf("RAID autoconfigure\n");
3499 1.127 oster #endif
3500 1.51 oster
3501 1.51 oster /* 1. Create a config structure */
3502 1.300 christos config = malloc(sizeof(*config), M_RAIDFRAME, M_NOWAIT|M_ZERO);
3503 1.300 christos if (config == NULL) {
3504 1.316.2.4 skrll printf("%s: Out of mem - config!?!?\n", __func__);
3505 1.51 oster /* XXX do something more intelligent here. */
3506 1.300 christos return NULL;
3507 1.51 oster }
3508 1.77 oster
3509 1.186 perry /*
3510 1.186 perry 2. Figure out what RAID ID this one is supposed to live at
3511 1.51 oster See if we can get the same RAID dev that it was configured
3512 1.186 perry on last time..
3513 1.51 oster */
3514 1.51 oster
3515 1.51 oster raidID = cset->ac->clabel->last_unit;
3516 1.316.2.4 skrll for (sc = raidget(raidID, false); sc && sc->sc_r.valid != 0;
3517 1.316.2.4 skrll sc = raidget(++raidID, false))
3518 1.300 christos continue;
3519 1.224 oster #ifdef DEBUG
3520 1.72 oster printf("Configuring raid%d:\n",raidID);
3521 1.127 oster #endif
3522 1.127 oster
3523 1.316.2.4 skrll if (sc == NULL)
3524 1.316.2.4 skrll sc = raidget(raidID, true);
3525 1.316.2.4 skrll if (sc == NULL) {
3526 1.316.2.4 skrll printf("%s: Out of mem - softc!?!?\n", __func__);
3527 1.316.2.4 skrll /* XXX do something more intelligent here. */
3528 1.316.2.4 skrll free(config, M_RAIDFRAME);
3529 1.316.2.4 skrll return NULL;
3530 1.316.2.4 skrll }
3531 1.316.2.4 skrll
3532 1.300 christos raidPtr = &sc->sc_r;
3533 1.51 oster
3534 1.51 oster /* XXX all this stuff should be done SOMEWHERE ELSE! */
3535 1.302 christos raidPtr->softc = sc;
3536 1.51 oster raidPtr->raidid = raidID;
3537 1.51 oster raidPtr->openings = RAIDOUTSTANDING;
3538 1.51 oster
3539 1.51 oster /* 3. Build the configuration structure */
3540 1.51 oster rf_create_configuration(cset->ac, config, raidPtr);
3541 1.51 oster
3542 1.51 oster /* 4. Do the configuration */
3543 1.300 christos if (rf_Configure(raidPtr, config, cset->ac) == 0) {
3544 1.300 christos raidinit(sc);
3545 1.186 perry
3546 1.300 christos rf_markalldirty(raidPtr);
3547 1.300 christos raidPtr->autoconfigure = 1; /* XXX do this here? */
3548 1.308 christos switch (cset->ac->clabel->root_partition) {
3549 1.308 christos case 1: /* Force Root */
3550 1.308 christos case 2: /* Soft Root: root when boot partition part of raid */
3551 1.308 christos /*
3552 1.308 christos * everything configured just fine. Make a note
3553 1.308 christos * that this set is eligible to be root,
3554 1.308 christos * or forced to be root
3555 1.308 christos */
3556 1.308 christos cset->rootable = cset->ac->clabel->root_partition;
3557 1.54 oster /* XXX do this here? */
3558 1.308 christos raidPtr->root_partition = cset->rootable;
3559 1.308 christos break;
3560 1.308 christos default:
3561 1.308 christos break;
3562 1.51 oster }
3563 1.300 christos } else {
3564 1.300 christos raidput(sc);
3565 1.300 christos sc = NULL;
3566 1.51 oster }
3567 1.51 oster
3568 1.51 oster /* 5. Cleanup */
3569 1.51 oster free(config, M_RAIDFRAME);
3570 1.300 christos return sc;
3571 1.99 oster }
3572 1.99 oster
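/*
 * rf_pool_init -- initialize one of the RAIDframe memory pools with high-
 * and low-water marks of xmax and xmin, priming it with xmin items.
 */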
3573 1.99 oster void
3574 1.187 christos rf_pool_init(struct pool *p, size_t size, const char *w_chan,
3575 1.187 christos size_t xmin, size_t xmax)
3576 1.177 oster {
3577 1.227 ad pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
3578 1.187 christos pool_sethiwat(p, xmax);
3579 1.187 christos pool_prime(p, xmin);
3580 1.187 christos pool_setlowat(p, xmin);
3581 1.177 oster }
3582 1.190 oster
3583 1.190 oster /*
3584 1.316.2.5 skrll * rf_buf_queue_check(RF_Raid_t raidPtr) -- looks into the buffer queue
3585 1.316.2.5 skrll * to see if there is IO pending and if that IO could possibly be done
3586 1.316.2.5 skrll * for a given RAID set. Returns 0 if IO is waiting and can be done, 1
3587 1.190 oster * otherwise.
3588 1.190 oster *
3589 1.190 oster */
3590 1.190 oster int
3591 1.300 christos rf_buf_queue_check(RF_Raid_t *raidPtr)
3592 1.190 oster {
3593 1.316.2.5 skrll struct raid_softc *rs;
3594 1.316.2.5 skrll struct dk_softc *dksc;
3595 1.316.2.5 skrll
3596 1.316.2.5 skrll rs = raidPtr->softc;
3597 1.316.2.5 skrll dksc = &rs->sc_dksc;
3598 1.316.2.5 skrll
3599 1.316.2.5 skrll if ((rs->sc_flags & RAIDF_INITED) == 0)
3600 1.316.2.5 skrll return 1;
3601 1.316.2.5 skrll
3602 1.316.2.5 skrll if (dk_strategy_pending(dksc) && raidPtr->openings > 0) {
3603 1.190 oster /* there is work to do */
3604 1.190 oster return 0;
3605 1.316.2.2 skrll }
3606 1.190 oster /* default is nothing to do */
3607 1.190 oster return 1;
3608 1.190 oster }
3609 1.213 christos
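/*
 * rf_getdisksize -- fill in the block size and size of a component from
 * its vnode, reserving rf_protectedSectors for the component label.
 */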
3610 1.213 christos int
3611 1.294 oster rf_getdisksize(struct vnode *vp, RF_RaidDisk_t *diskPtr)
3612 1.213 christos {
3613 1.275 mrg uint64_t numsecs;
3614 1.275 mrg unsigned secsize;
3615 1.213 christos int error;
3616 1.213 christos
3617 1.275 mrg error = getdisksize(vp, &numsecs, &secsize);
3618 1.213 christos if (error == 0) {
3619 1.275 mrg diskPtr->blockSize = secsize;
3620 1.275 mrg diskPtr->numBlocks = numsecs - rf_protectedSectors;
3621 1.275 mrg diskPtr->partitionSize = numsecs;
3622 1.213 christos return 0;
3623 1.213 christos }
3624 1.213 christos return error;
3625 1.213 christos }
3626 1.217 oster
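/*
 * Autoconfiguration glue for the raid(4) pseudo-device: matching always
 * succeeds and attach is a no-op; detach tears the set down via
 * raid_detach_unlocked() and releases the softc.
 */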
3627 1.217 oster static int
3628 1.261 dyoung raid_match(device_t self, cfdata_t cfdata, void *aux)
3629 1.217 oster {
3630 1.217 oster return 1;
3631 1.217 oster }
3632 1.217 oster
3633 1.217 oster static void
3634 1.261 dyoung raid_attach(device_t parent, device_t self, void *aux)
3635 1.217 oster {
3636 1.217 oster }
3637 1.217 oster
3638 1.217 oster
3639 1.217 oster static int
3640 1.261 dyoung raid_detach(device_t self, int flags)
3641 1.217 oster {
3642 1.266 dyoung int error;
3643 1.316.2.5 skrll struct raid_softc *rs = raidsoftc(self);
3644 1.303 christos
3645 1.303 christos if (rs == NULL)
3646 1.303 christos return ENXIO;
3647 1.266 dyoung
3648 1.266 dyoung if ((error = raidlock(rs)) != 0)
3649 1.266 dyoung return (error);
3650 1.217 oster
3651 1.266 dyoung error = raid_detach_unlocked(rs);
3652 1.266 dyoung
3653 1.316.2.5 skrll raidunlock(rs);
3654 1.303 christos
3655 1.316.2.5 skrll /* XXX raid can be referenced here */
3656 1.316.2.5 skrll
3657 1.316.2.5 skrll if (error)
3658 1.316.2.5 skrll return error;
3659 1.316.2.5 skrll
3660 1.316.2.5 skrll /* Free the softc */
3661 1.316.2.5 skrll raidput(rs);
3662 1.316.2.5 skrll
3663 1.316.2.5 skrll return 0;
3664 1.217 oster }
3665 1.217 oster
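/*
 * rf_set_geometry -- synthesize a disk geometry for the RAID set and
 * publish it to the disk(9) layer via disk_set_info().
 */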
3666 1.234 oster static void
3667 1.304 christos rf_set_geometry(struct raid_softc *rs, RF_Raid_t *raidPtr)
3668 1.234 oster {
3669 1.316.2.5 skrll struct dk_softc *dksc = &rs->sc_dksc;
3670 1.316.2.5 skrll struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
3671 1.304 christos
3672 1.304 christos memset(dg, 0, sizeof(*dg));
3673 1.304 christos
3674 1.304 christos dg->dg_secperunit = raidPtr->totalSectors;
3675 1.304 christos dg->dg_secsize = raidPtr->bytesPerSector;
3676 1.304 christos dg->dg_nsectors = raidPtr->Layout.dataSectorsPerStripe;
3677 1.304 christos dg->dg_ntracks = 4 * raidPtr->numCol;
3678 1.304 christos
3679 1.316.2.5 skrll disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
3680 1.234 oster }
3681 1.252 oster
3682 1.316.2.2 skrll /*
3683 1.252 oster * Implement forwarding of the DIOCCACHESYNC ioctl to each of the components.
3684 1.252 oster * We end up returning whatever error was returned by the first cache flush
3685 1.252 oster * that fails.
3686 1.252 oster */
3687 1.252 oster
3688 1.269 jld int
3689 1.252 oster rf_sync_component_caches(RF_Raid_t *raidPtr)
3690 1.252 oster {
3691 1.252 oster int c, sparecol;
3692 1.252 oster int e,error;
3693 1.252 oster int force = 1;
3694 1.316.2.2 skrll
3695 1.252 oster error = 0;
3696 1.252 oster for (c = 0; c < raidPtr->numCol; c++) {
3697 1.252 oster if (raidPtr->Disks[c].status == rf_ds_optimal) {
3698 1.316.2.2 skrll e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp, DIOCCACHESYNC,
3699 1.252 oster &force, FWRITE, NOCRED);
3700 1.252 oster if (e) {
3701 1.255 oster if (e != ENODEV)
3702 1.255 oster printf("raid%d: cache flush to component %s failed.\n",
3703 1.255 oster raidPtr->raidid, raidPtr->Disks[c].devname);
3704 1.252 oster if (error == 0) {
3705 1.252 oster error = e;
3706 1.252 oster }
3707 1.252 oster }
3708 1.252 oster }
3709 1.252 oster }
3710 1.252 oster
3711 1.252 oster for( c = 0; c < raidPtr->numSpare ; c++) {
3712 1.252 oster sparecol = raidPtr->numCol + c;
3713 1.252 oster /* Need to ensure that the reconstruct actually completed! */
3714 1.252 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3715 1.252 oster e = VOP_IOCTL(raidPtr->raid_cinfo[sparecol].ci_vp,
3716 1.252 oster DIOCCACHESYNC, &force, FWRITE, NOCRED);
3717 1.252 oster if (e) {
3718 1.255 oster if (e != ENODEV)
3719 1.255 oster printf("raid%d: cache flush to component %s failed.\n",
3720 1.255 oster raidPtr->raidid, raidPtr->Disks[sparecol].devname);
3721 1.252 oster if (error == 0) {
3722 1.252 oster error = e;
3723 1.252 oster }
3724 1.252 oster }
3725 1.252 oster }
3726 1.252 oster }
3727 1.252 oster return error;
3728 1.252 oster }
3729 1.316.2.4 skrll
3730 1.316.2.4 skrll /*
3731 1.316.2.4 skrll * Module interface
3732 1.316.2.4 skrll */
3733 1.316.2.4 skrll
3734 1.316.2.4 skrll MODULE(MODULE_CLASS_DRIVER, raid, "dk_subr");
3735 1.316.2.4 skrll
3736 1.316.2.4 skrll #ifdef _MODULE
3737 1.316.2.4 skrll CFDRIVER_DECL(raid, DV_DISK, NULL);
3738 1.316.2.4 skrll #endif
3739 1.316.2.4 skrll
3740 1.316.2.4 skrll static int raid_modcmd(modcmd_t, void *);
3741 1.316.2.4 skrll static int raid_modcmd_init(void);
3742 1.316.2.4 skrll static int raid_modcmd_fini(void);
3743 1.316.2.4 skrll
3744 1.316.2.4 skrll static int
3745 1.316.2.4 skrll raid_modcmd(modcmd_t cmd, void *data)
3746 1.316.2.4 skrll {
3747 1.316.2.4 skrll int error;
3748 1.316.2.4 skrll
3749 1.316.2.4 skrll error = 0;
3750 1.316.2.4 skrll switch (cmd) {
3751 1.316.2.4 skrll case MODULE_CMD_INIT:
3752 1.316.2.4 skrll error = raid_modcmd_init();
3753 1.316.2.4 skrll break;
3754 1.316.2.4 skrll case MODULE_CMD_FINI:
3755 1.316.2.4 skrll error = raid_modcmd_fini();
3756 1.316.2.4 skrll break;
3757 1.316.2.4 skrll default:
3758 1.316.2.4 skrll error = ENOTTY;
3759 1.316.2.4 skrll break;
3760 1.316.2.4 skrll }
3761 1.316.2.4 skrll return error;
3762 1.316.2.4 skrll }
3763 1.316.2.4 skrll
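/*
 * raid_modcmd_init -- attach the raid block/character devsw (and, for
 * modular kernels, the cfdriver/cfattach), boot RAIDframe, and register a
 * config finalizer that auto-configures RAID sets once all real hardware
 * has been found.
 */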
3764 1.316.2.4 skrll static int
3765 1.316.2.4 skrll raid_modcmd_init(void)
3766 1.316.2.4 skrll {
3767 1.316.2.4 skrll int error;
3768 1.316.2.4 skrll int bmajor, cmajor;
3769 1.316.2.4 skrll
3770 1.316.2.4 skrll mutex_init(&raid_lock, MUTEX_DEFAULT, IPL_NONE);
3771 1.316.2.4 skrll mutex_enter(&raid_lock);
3772 1.316.2.4 skrll #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
3773 1.316.2.4 skrll rf_init_mutex2(rf_sparet_wait_mutex, IPL_VM);
3774 1.316.2.4 skrll rf_init_cond2(rf_sparet_wait_cv, "sparetw");
3775 1.316.2.4 skrll rf_init_cond2(rf_sparet_resp_cv, "rfgst");
3776 1.316.2.4 skrll
3777 1.316.2.4 skrll rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
3778 1.316.2.4 skrll #endif
3779 1.316.2.4 skrll
3780 1.316.2.4 skrll bmajor = cmajor = -1;
3781 1.316.2.4 skrll error = devsw_attach("raid", &raid_bdevsw, &bmajor,
3782 1.316.2.4 skrll &raid_cdevsw, &cmajor);
3783 1.316.2.4 skrll if (error != 0 && error != EEXIST) {
3784 1.316.2.4 skrll aprint_error("%s: devsw_attach failed %d\n", __func__, error);
3785 1.316.2.4 skrll mutex_exit(&raid_lock);
3786 1.316.2.4 skrll return error;
3787 1.316.2.4 skrll }
3788 1.316.2.4 skrll #ifdef _MODULE
3789 1.316.2.4 skrll error = config_cfdriver_attach(&raid_cd);
3790 1.316.2.4 skrll if (error != 0) {
3791 1.316.2.4 skrll aprint_error("%s: config_cfdriver_attach failed %d\n",
3792 1.316.2.4 skrll __func__, error);
3793 1.316.2.4 skrll devsw_detach(&raid_bdevsw, &raid_cdevsw);
3794 1.316.2.4 skrll mutex_exit(&raid_lock);
3795 1.316.2.4 skrll return error;
3796 1.316.2.4 skrll }
3797 1.316.2.4 skrll #endif
3798 1.316.2.4 skrll error = config_cfattach_attach(raid_cd.cd_name, &raid_ca);
3799 1.316.2.4 skrll if (error != 0) {
3800 1.316.2.4 skrll aprint_error("%s: config_cfattach_attach failed %d\n",
3801 1.316.2.4 skrll __func__, error);
3802 1.316.2.4 skrll #ifdef _MODULE
3803 1.316.2.4 skrll config_cfdriver_detach(&raid_cd);
3804 1.316.2.4 skrll #endif
3805 1.316.2.4 skrll devsw_detach(&raid_bdevsw, &raid_cdevsw);
3806 1.316.2.4 skrll mutex_exit(&raid_lock);
3807 1.316.2.4 skrll return error;
3808 1.316.2.4 skrll }
3809 1.316.2.4 skrll
3810 1.316.2.4 skrll raidautoconfigdone = false;
3811 1.316.2.4 skrll
3812 1.316.2.4 skrll mutex_exit(&raid_lock);
3813 1.316.2.4 skrll
3814 1.316.2.4 skrll if (error == 0) {
3815 1.316.2.4 skrll if (rf_BootRaidframe(true) == 0)
3816 1.316.2.4 skrll aprint_verbose("Kernelized RAIDframe activated\n");
3817 1.316.2.4 skrll else
3818 1.316.2.4 skrll panic("Serious error activating RAID!!");
3819 1.316.2.4 skrll }
3820 1.316.2.4 skrll
3821 1.316.2.4 skrll /*
3822 1.316.2.4 skrll * Register a finalizer which will be used to auto-config RAID
3823 1.316.2.4 skrll * sets once all real hardware devices have been found.
3824 1.316.2.4 skrll */
3825 1.316.2.4 skrll error = config_finalize_register(NULL, rf_autoconfig);
3826 1.316.2.4 skrll if (error != 0) {
3827 1.316.2.4 skrll aprint_error("WARNING: unable to register RAIDframe "
3828 1.316.2.4 skrll "finalizer\n");
3829 1.316.2.5 skrll error = 0;
3830 1.316.2.4 skrll }
3831 1.316.2.4 skrll
3832 1.316.2.4 skrll return error;
3833 1.316.2.4 skrll }
3834 1.316.2.4 skrll
3835 1.316.2.4 skrll static int
3836 1.316.2.4 skrll raid_modcmd_fini(void)
3837 1.316.2.4 skrll {
3838 1.316.2.4 skrll int error;
3839 1.316.2.4 skrll
3840 1.316.2.4 skrll mutex_enter(&raid_lock);
3841 1.316.2.4 skrll
3842 1.316.2.4 skrll /* Don't allow unload if raid device(s) exist. */
3843 1.316.2.4 skrll if (!LIST_EMPTY(&raids)) {
3844 1.316.2.4 skrll mutex_exit(&raid_lock);
3845 1.316.2.4 skrll return EBUSY;
3846 1.316.2.4 skrll }
3847 1.316.2.4 skrll
3848 1.316.2.4 skrll error = config_cfattach_detach(raid_cd.cd_name, &raid_ca);
3849 1.316.2.4 skrll if (error != 0) {
3850 1.316.2.5 skrll aprint_error("%s: cannot detach cfattach\n",__func__);
3851 1.316.2.4 skrll mutex_exit(&raid_lock);
3852 1.316.2.4 skrll return error;
3853 1.316.2.4 skrll }
3854 1.316.2.4 skrll #ifdef _MODULE
3855 1.316.2.4 skrll error = config_cfdriver_detach(&raid_cd);
3856 1.316.2.4 skrll if (error != 0) {
3857 1.316.2.5 skrll aprint_error("%s: cannot detach cfdriver\n",__func__);
3858 1.316.2.4 skrll config_cfattach_attach(raid_cd.cd_name, &raid_ca);
3859 1.316.2.4 skrll mutex_exit(&raid_lock);
3860 1.316.2.4 skrll return error;
3861 1.316.2.4 skrll }
3862 1.316.2.4 skrll #endif
3863 1.316.2.4 skrll error = devsw_detach(&raid_bdevsw, &raid_cdevsw);
3864 1.316.2.4 skrll if (error != 0) {
3865 1.316.2.5 skrll aprint_error("%s: cannot detach devsw\n",__func__);
3866 1.316.2.4 skrll #ifdef _MODULE
3867 1.316.2.4 skrll config_cfdriver_attach(&raid_cd);
3868 1.316.2.4 skrll #endif
3869 1.316.2.4 skrll config_cfattach_attach(raid_cd.cd_name, &raid_ca);
3870 1.316.2.4 skrll mutex_exit(&raid_lock);
3871 1.316.2.4 skrll return error;
3872 1.316.2.4 skrll }
3873 1.316.2.4 skrll rf_BootRaidframe(false);
3874 1.316.2.4 skrll #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
3875 1.316.2.4 skrll rf_destroy_mutex2(rf_sparet_wait_mutex);
3876 1.316.2.4 skrll rf_destroy_cond2(rf_sparet_wait_cv);
3877 1.316.2.4 skrll rf_destroy_cond2(rf_sparet_resp_cv);
3878 1.316.2.4 skrll #endif
3879 1.316.2.4 skrll mutex_exit(&raid_lock);
3880 1.316.2.4 skrll mutex_destroy(&raid_lock);
3881 1.316.2.4 skrll
3882 1.316.2.4 skrll return error;
3883 1.316.2.4 skrll }
3884