/*	$NetBSD: rf_netbsdkintf.c,v 1.245.4.6 2010/03/11 15:04:01 yamt Exp $	*/
/*-
 * Copyright (c) 1996, 1997, 1998, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Greg Oster; Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: cd.c 1.6 90/11/28$
 *
 *	@(#)cd.c	8.2 (Berkeley) 11/16/93
 */

/*
 * Copyright (c) 1988 University of Utah.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: cd.c 1.6 90/11/28$
 *
 *	@(#)cd.c	8.2 (Berkeley) 11/16/93
 */

/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Mark Holland, Jim Zelenka
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/***********************************************************
 *
 * rf_kintf.c -- the kernel interface routines for RAIDframe
 *
 ***********************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.245.4.6 2010/03/11 15:04:01 yamt Exp $");

#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
#include "opt_raid_autoconfig.h"
#include "raid.h"
#endif

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/disk.h>
#include <sys/device.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/reboot.h>
#include <sys/kauth.h>

#include <prop/proplib.h>

#include <dev/raidframe/raidframevar.h>
#include <dev/raidframe/raidframeio.h>
#include <dev/raidframe/rf_paritymap.h>

#include "rf_raid.h"
#include "rf_copyback.h"
#include "rf_dag.h"
#include "rf_dagflags.h"
#include "rf_desc.h"
#include "rf_diskqueue.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_kintf.h"
#include "rf_options.h"
#include "rf_driver.h"
#include "rf_parityscan.h"
#include "rf_threadstuff.h"

#ifdef COMPAT_50
#include "rf_compat50.h"
#endif

#ifdef DEBUG
int     rf_kdebug_level = 0;
#define db1_printf(a) if (rf_kdebug_level > 0) printf a
#else				/* DEBUG */
#define db1_printf(a) { }
#endif				/* DEBUG */

static RF_Raid_t **raidPtrs;	/* global raid device descriptors */

#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)

static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
						 * spare table */
static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
						 * installation process */
#endif

MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");

/* prototypes */
static void KernelWakeupFunc(struct buf *);
static void InitBP(struct buf *, struct vnode *, unsigned,
    dev_t, RF_SectorNum_t, RF_SectorCount_t, void *, void (*) (struct buf *),
    void *, int, struct proc *);
static void raidinit(RF_Raid_t *);

void raidattach(int);
static int raid_match(device_t, cfdata_t, void *);
static void raid_attach(device_t, device_t, void *);
static int raid_detach(device_t, int);

static int raidread_component_area(dev_t, struct vnode *, void *, size_t,
    daddr_t, daddr_t);
static int raidwrite_component_area(dev_t, struct vnode *, void *, size_t,
    daddr_t, daddr_t, int);

static int raidwrite_component_label(dev_t, struct vnode *,
    RF_ComponentLabel_t *);
static int raidread_component_label(dev_t, struct vnode *,
    RF_ComponentLabel_t *);


dev_type_open(raidopen);
dev_type_close(raidclose);
dev_type_read(raidread);
dev_type_write(raidwrite);
dev_type_ioctl(raidioctl);
dev_type_strategy(raidstrategy);
dev_type_dump(raiddump);
dev_type_size(raidsize);

const struct bdevsw raid_bdevsw = {
	raidopen, raidclose, raidstrategy, raidioctl,
	raiddump, raidsize, D_DISK
};

const struct cdevsw raid_cdevsw = {
	raidopen, raidclose, raidread, raidwrite, raidioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

static struct dkdriver rf_dkdriver = { raidstrategy, minphys };

/* XXX Not sure if the following should be replacing the raidPtrs above,
   or if it should be used in conjunction with that...
*/

struct raid_softc {
	device_t sc_dev;
	int     sc_flags;	/* flags */
	int     sc_cflags;	/* configuration flags */
	uint64_t sc_size;	/* size of the raid device */
	char    sc_xname[20];	/* XXX external name */
	struct disk sc_dkdev;	/* generic disk device info */
	struct bufq_state *buf_queue;	/* used for the device queue */
};
/* sc_flags */
#define RAIDF_INITED	0x01	/* unit has been initialized */
#define RAIDF_WLABEL	0x02	/* label area is writable */
#define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
#define RAIDF_SHUTDOWN	0x08	/* unit is being shutdown */
#define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
#define RAIDF_LOCKED	0x80	/* unit is locked */

#define	raidunit(x)	DISKUNIT(x)
int numraid = 0;

extern struct cfdriver raid_cd;
CFATTACH_DECL3_NEW(raid, sizeof(struct raid_softc),
    raid_match, raid_attach, raid_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

/*
 * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
 * Be aware that large numbers can allow the driver to consume a lot of
 * kernel memory, especially on writes, and in degraded mode reads.
 *
 * For example: with a stripe width of 64 blocks (32k) and 5 disks,
 * a single 64K write will typically require 64K for the old data,
 * 64K for the old parity, and 64K for the new parity, for a total
 * of 192K (if the parity buffer is not re-used immediately).
 * Even if it is used immediately, that's still 128K, which when multiplied
 * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
 *
 * Now in degraded mode, for example, a 64K read on the above setup may
 * require data reconstruction, which will require *all* of the 4 remaining
 * disks to participate -- 4 * 32K/disk == 128K again.
 */

#ifndef RAIDOUTSTANDING
#define RAIDOUTSTANDING   6
#endif
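
/*
 * Rough worst case for the default of 6 above, using the example from the
 * comment: 6 outstanding 64K writes need about 6 * 192K = 1152K of buffer
 * space, on top of 6 * 64K = 384K of incoming data -- roughly 1.5MB in all.
 * (Back-of-the-envelope estimate only; actual usage depends on the layout.)
 */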

#define	RAIDLABELDEV(dev)	\
	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
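/*
 * RAIDLABELDEV(dev) gives the raw partition of the same raid unit;
 * disklabel reads and writes presumably go through that device.
 */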

/* declared here, and made public, for the benefit of KVM stuff.. */
struct raid_softc *raid_softc;

static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
				     struct disklabel *);
static void raidgetdisklabel(dev_t);
static void raidmakedisklabel(struct raid_softc *);

static int raidlock(struct raid_softc *);
static void raidunlock(struct raid_softc *);

static int raid_detach_unlocked(struct raid_softc *);

static void rf_markalldirty(RF_Raid_t *);
static void rf_set_properties(struct raid_softc *, RF_Raid_t *);

void rf_ReconThread(struct rf_recon_req *);
void rf_RewriteParityThread(RF_Raid_t *raidPtr);
void rf_CopybackThread(RF_Raid_t *raidPtr);
void rf_ReconstructInPlaceThread(struct rf_recon_req *);
int rf_autoconfig(device_t);
void rf_buildroothack(RF_ConfigSet_t *);

RF_AutoConfig_t *rf_find_raid_components(void);
RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
static int rf_does_it_fit(RF_ConfigSet_t *, RF_AutoConfig_t *);
static int rf_reasonable_label(RF_ComponentLabel_t *);
void rf_create_configuration(RF_AutoConfig_t *, RF_Config_t *, RF_Raid_t *);
int rf_set_autoconfig(RF_Raid_t *, int);
int rf_set_rootpartition(RF_Raid_t *, int);
void rf_release_all_vps(RF_ConfigSet_t *);
void rf_cleanup_config_set(RF_ConfigSet_t *);
int rf_have_enough_components(RF_ConfigSet_t *);
int rf_auto_config_set(RF_ConfigSet_t *, int *);

static int raidautoconfig = 0;	/* Debugging, mostly.  Set to 0 to not
				   allow autoconfig to take place.
				   Note that this is overridden by having
				   RAID_AUTOCONFIG as an option in the
				   kernel config file. */

struct RF_Pools_s rf_pools;

void
raidattach(int num)
{
	int raidID;
	int i, rc;

	aprint_debug("raidattach: Asked for %d units\n", num);

	if (num <= 0) {
#ifdef DIAGNOSTIC
		panic("raidattach: count <= 0");
#endif
		return;
	}
	/* This is where all the initialization stuff gets done. */

	numraid = num;

	/* Make some space for requested number of units... */

	RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
	if (raidPtrs == NULL) {
		panic("raidPtrs is NULL!!");
	}

#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
	rf_mutex_init(&rf_sparet_wait_mutex);

	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
#endif

	for (i = 0; i < num; i++)
		raidPtrs[i] = NULL;
	rc = rf_BootRaidframe();
	if (rc == 0)
		aprint_normal("Kernelized RAIDframe activated\n");
	else
		panic("Serious error booting RAID!!");

	/* put together some datastructures like the CCD device does.. This
	 * lets us lock the device and what-not when it gets opened. */

	raid_softc = (struct raid_softc *)
		malloc(num * sizeof(struct raid_softc),
		       M_RAIDFRAME, M_NOWAIT);
	if (raid_softc == NULL) {
		aprint_error("WARNING: no memory for RAIDframe driver\n");
		return;
	}

	memset(raid_softc, 0, num * sizeof(struct raid_softc));

	for (raidID = 0; raidID < num; raidID++) {
		bufq_alloc(&raid_softc[raidID].buf_queue, "fcfs", 0);

		RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
			  (RF_Raid_t *));
		if (raidPtrs[raidID] == NULL) {
			aprint_error("WARNING: raidPtrs[%d] is NULL\n", raidID);
			numraid = raidID;
			return;
		}
	}

	if (config_cfattach_attach(raid_cd.cd_name, &raid_ca)) {
		aprint_error("raidattach: config_cfattach_attach failed?\n");
	}

#ifdef RAID_AUTOCONFIG
	raidautoconfig = 1;
#endif

	/*
	 * Register a finalizer which will be used to auto-config RAID
	 * sets once all real hardware devices have been found.
	 */
	if (config_finalize_register(NULL, rf_autoconfig) != 0)
		aprint_error("WARNING: unable to register RAIDframe finalizer\n");
}

int
rf_autoconfig(device_t self)
{
	RF_AutoConfig_t *ac_list;
	RF_ConfigSet_t *config_sets;

	if (raidautoconfig == 0)
		return (0);

	/* XXX This code can only be run once. */
	raidautoconfig = 0;

	/* 1. locate all RAID components on the system */
	aprint_debug("Searching for RAID components...\n");
	ac_list = rf_find_raid_components();

	/* 2. Sort them into their respective sets. */
	config_sets = rf_create_auto_sets(ac_list);

	/*
	 * 3. Evaluate each set and configure the valid ones.
	 *    This gets done in rf_buildroothack().
	 */
	rf_buildroothack(config_sets);

	return 1;
}

void
rf_buildroothack(RF_ConfigSet_t *config_sets)
{
	RF_ConfigSet_t *cset;
	RF_ConfigSet_t *next_cset;
	int retcode;
	int raidID;
	int rootID;
	int col;
	int num_root;
	char *devname;

	rootID = 0;
	num_root = 0;
	cset = config_sets;
	while (cset != NULL) {
		next_cset = cset->next;
		if (rf_have_enough_components(cset) &&
		    cset->ac->clabel->autoconfigure == 1) {
			retcode = rf_auto_config_set(cset, &raidID);
			if (!retcode) {
				aprint_debug("raid%d: configured ok\n", raidID);
				if (cset->rootable) {
					rootID = raidID;
					num_root++;
				}
			} else {
				/* The autoconfig didn't work :( */
				aprint_debug("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
				rf_release_all_vps(cset);
			}
		} else {
			/* we're not autoconfiguring this set...
			   release the associated resources */
			rf_release_all_vps(cset);
		}
		/* cleanup */
		rf_cleanup_config_set(cset);
		cset = next_cset;
	}

	/* if the user has specified what the root device should be
	   then we don't touch booted_device or boothowto... */

	if (rootspec != NULL)
		return;

	/* we found something bootable... */

	if (num_root == 1) {
		booted_device = raid_softc[rootID].sc_dev;
	} else if (num_root > 1) {

		/*
		 * Maybe the MD code can help. If it cannot, then
		 * setroot() will discover that we have no
		 * booted_device and will ask the user if nothing was
		 * hardwired in the kernel config file
		 */

		if (booted_device == NULL)
			cpu_rootconf();
		if (booted_device == NULL)
			return;

		num_root = 0;
		for (raidID = 0; raidID < numraid; raidID++) {
			if (raidPtrs[raidID]->valid == 0)
				continue;

			if (raidPtrs[raidID]->root_partition == 0)
				continue;

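			/*
			 * See whether any component of this set lives on the
			 * device we booted from.  Component names are stored
			 * as "/dev/xxNp", so skip the "/dev/" prefix before
			 * comparing against device_xname(booted_device)
			 * (e.g. "wd0").
			 */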
			for (col = 0; col < raidPtrs[raidID]->numCol; col++) {
				devname = raidPtrs[raidID]->Disks[col].devname;
				devname += sizeof("/dev/") - 1;
				if (strncmp(devname, device_xname(booted_device),
					    strlen(device_xname(booted_device))) != 0)
					continue;
				aprint_debug("raid%d includes boot device %s\n",
				    raidID, devname);
				num_root++;
				rootID = raidID;
			}
		}

		if (num_root == 1) {
			booted_device = raid_softc[rootID].sc_dev;
		} else {
			/* we can't guess.. require the user to answer... */
			boothowto |= RB_ASKNAME;
		}
	}
}


int
raidsize(dev_t dev)
{
	struct raid_softc *rs;
	struct disklabel *lp;
	int     part, unit, omask, size;

	unit = raidunit(dev);
	if (unit >= numraid)
		return (-1);
	rs = &raid_softc[unit];

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return (-1);

	part = DISKPART(dev);
	omask = rs->sc_dkdev.dk_openmask & (1 << part);
	lp = rs->sc_dkdev.dk_label;

	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curlwp))
		return (-1);

	if (lp->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = lp->d_partitions[part].p_size *
		    (lp->d_secsize / DEV_BSIZE);

	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curlwp))
		return (-1);

	return (size);

}

int
raiddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	int     unit = raidunit(dev);
	struct raid_softc *rs;
	const struct bdevsw *bdev;
	struct disklabel *lp;
	RF_Raid_t *raidPtr;
	daddr_t offset;
	int     part, c, sparecol, j, scol, dumpto;
	int     error = 0;

	if (unit >= numraid)
		return (ENXIO);

	rs = &raid_softc[unit];
	raidPtr = raidPtrs[unit];

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return ENXIO;

	/* we only support dumping to RAID 1 sets */
	if (raidPtr->Layout.numDataCol != 1 ||
	    raidPtr->Layout.numParityCol != 1)
		return EINVAL;


	if ((error = raidlock(rs)) != 0)
		return error;

	if (size % DEV_BSIZE != 0) {
		error = EINVAL;
		goto out;
	}

	if (blkno + size / DEV_BSIZE > rs->sc_size) {
		printf("%s: blkno (%" PRIu64 ") + size / DEV_BSIZE (%zu) > "
		    "sc->sc_size (%" PRIu64 ")\n", __func__, blkno,
		    size / DEV_BSIZE, rs->sc_size);
		error = EINVAL;
		goto out;
	}

	part = DISKPART(dev);
	lp = rs->sc_dkdev.dk_label;
	offset = lp->d_partitions[part].p_offset + RF_PROTECTED_SECTORS;

	/* figure out what device is alive.. */

	/*
	   Look for a component to dump to.  The preference for the
	   component to dump to is as follows:
	   1) the master
	   2) a used_spare of the master
	   3) the slave
	   4) a used_spare of the slave
	*/

	dumpto = -1;
	for (c = 0; c < raidPtr->numCol; c++) {
		if (raidPtr->Disks[c].status == rf_ds_optimal) {
			/* this might be the one */
			dumpto = c;
			break;
		}
	}

	/*
	   At this point we have possibly selected a live master or a
	   live slave.  We now check to see if there is a spared
	   master (or a spared slave), if we didn't find a live master
	   or a live slave.
	*/

	for (c = 0; c < raidPtr->numSpare; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/* How about this one? */
			scol = -1;
			for (j = 0; j < raidPtr->numCol; j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}
			if (scol == 0) {
				/*
				   We must have found a spared master!
				   We'll take that over anything else
				   found so far.  (We couldn't have
				   found a real master before, since
				   this is a used spare, and it's
				   saying that it's replacing the
				   master.)  On reboot (with
				   autoconfiguration turned on)
				   sparecol will become the 1st
				   component (component0) of this set.
				*/
				dumpto = sparecol;
				break;
			} else if (scol != -1) {
				/*
				   Must be a spared slave.  We'll dump
				   to that if we haven't found anything
				   else so far.
				*/
				if (dumpto == -1)
					dumpto = sparecol;
			}
		}
	}

	if (dumpto == -1) {
		/* we couldn't find any live components to dump to!?!?
		 */
		error = EINVAL;
		goto out;
	}

	bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);

	/*
	   Note that blkno is relative to this particular partition.
	   By adding the offset of this partition in the RAID
	   set, and also adding RF_PROTECTED_SECTORS, we get a
	   value that is relative to the partition used for the
	   underlying component.
	*/

	error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
				blkno + offset, va, size);

out:
	raidunlock(rs);

	return error;
}
/* ARGSUSED */
int
raidopen(dev_t dev, int flags, int fmt,
    struct lwp *l)
{
	int     unit = raidunit(dev);
	struct raid_softc *rs;
	struct disklabel *lp;
	int     part, pmask;
	int     error = 0;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((error = raidlock(rs)) != 0)
		return (error);

	if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0) {
		error = EBUSY;
		goto bad;
	}

	lp = rs->sc_dkdev.dk_label;

	part = DISKPART(dev);

	/*
	 * If there are wedges, and this is not RAW_PART, then we
	 * need to fail.
	 */
	if (rs->sc_dkdev.dk_nwedges != 0 && part != RAW_PART) {
		error = EBUSY;
		goto bad;
	}
	pmask = (1 << part);

	if ((rs->sc_flags & RAIDF_INITED) &&
	    (rs->sc_dkdev.dk_openmask == 0))
		raidgetdisklabel(dev);

	/* make sure that this partition exists */

	if (part != RAW_PART) {
		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
		    ((part >= lp->d_npartitions) ||
			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
			error = ENXIO;
			goto bad;
		}
	}
	/* Prevent this unit from being unconfigured while open. */
	switch (fmt) {
	case S_IFCHR:
		rs->sc_dkdev.dk_copenmask |= pmask;
		break;

	case S_IFBLK:
		rs->sc_dkdev.dk_bopenmask |= pmask;
		break;
	}

	if ((rs->sc_dkdev.dk_openmask == 0) &&
	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
		/* First one... mark things as dirty... Note that we *MUST*
		   have done a configure before this.  I DO NOT WANT TO BE
		   SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
		   THAT THEY BELONG TOGETHER!!!!! */
		/* XXX should check to see if we're only open for reading
		   here... If so, we needn't do this, but then need some
		   other way of keeping track of what's happened.. */

		rf_markalldirty(raidPtrs[unit]);
	}


	rs->sc_dkdev.dk_openmask =
	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;

bad:
	raidunlock(rs);

	return (error);


}
/* ARGSUSED */
int
raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	int     unit = raidunit(dev);
	struct raid_softc *rs;
	int     error = 0;
	int     part;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((error = raidlock(rs)) != 0)
		return (error);

	part = DISKPART(dev);

	/* ...that much closer to allowing unconfiguration... */
	switch (fmt) {
	case S_IFCHR:
		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
		break;

	case S_IFBLK:
		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
		break;
	}
	rs->sc_dkdev.dk_openmask =
	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;

	if ((rs->sc_dkdev.dk_openmask == 0) &&
	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
		/* Last one... device is not unconfigured yet.
		   Device shutdown has taken care of setting the
		   clean bits if RAIDF_INITED is not set;
		   mark things as clean here... */

		rf_update_component_labels(raidPtrs[unit],
		    RF_FINAL_COMPONENT_UPDATE);

		/* If the kernel is shutting down, it will detach
		 * this RAID set soon enough.
		 */
	}

	raidunlock(rs);
	return (0);

}

void
raidstrategy(struct buf *bp)
{
	int s;

	unsigned int raidID = raidunit(bp->b_dev);
	RF_Raid_t *raidPtr;
	struct raid_softc *rs = &raid_softc[raidID];
	int     wlabel;

	if ((rs->sc_flags & RAIDF_INITED) == 0) {
		bp->b_error = ENXIO;
		goto done;
	}
	if (raidID >= numraid || !raidPtrs[raidID]) {
		bp->b_error = ENODEV;
		goto done;
	}
	raidPtr = raidPtrs[raidID];
	if (!raidPtr->valid) {
		bp->b_error = ENODEV;
		goto done;
	}
	if (bp->b_bcount == 0) {
		db1_printf(("b_bcount is zero..\n"));
		goto done;
	}

	/*
	 * Do bounds checking and adjust transfer.  If there's an
	 * error, the bounds check will flag that for us.
	 */

	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
	if (DISKPART(bp->b_dev) == RAW_PART) {
		uint64_t size; /* device size in DEV_BSIZE unit */

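		/*
		 * Convert the array size from raid sectors to DEV_BSIZE
		 * blocks; the raid sector size may be larger or smaller
		 * than DEV_BSIZE, hence the shift in either direction.
		 */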
		if (raidPtr->logBytesPerSector > DEV_BSHIFT) {
			size = raidPtr->totalSectors <<
			    (raidPtr->logBytesPerSector - DEV_BSHIFT);
		} else {
			size = raidPtr->totalSectors >>
			    (DEV_BSHIFT - raidPtr->logBytesPerSector);
		}
		if (bounds_check_with_mediasize(bp, DEV_BSIZE, size) <= 0) {
			goto done;
		}
	} else {
		if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
			db1_printf(("Bounds check failed!!:%d %d\n",
				(int) bp->b_blkno, (int) wlabel));
			goto done;
		}
	}
	s = splbio();

	bp->b_resid = 0;

	/* stuff it onto our queue */
	bufq_put(rs->buf_queue, bp);

	/* schedule the IO to happen at the next convenient time */
	wakeup(&(raidPtrs[raidID]->iodone));

	splx(s);
	return;

done:
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
/* ARGSUSED */
int
raidread(dev_t dev, struct uio *uio, int flags)
{
	int     unit = raidunit(dev);
	struct raid_softc *rs;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return (ENXIO);

	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));

}
/* ARGSUSED */
int
raidwrite(dev_t dev, struct uio *uio, int flags)
{
	int     unit = raidunit(dev);
	struct raid_softc *rs;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return (ENXIO);

	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));

}

static int
raid_detach_unlocked(struct raid_softc *rs)
{
	int error;
	RF_Raid_t *raidPtr;

	raidPtr = raidPtrs[device_unit(rs->sc_dev)];

	/*
	 * If somebody has a partition mounted, we shouldn't
	 * shutdown.
	 */
	if (rs->sc_dkdev.dk_openmask != 0)
		return EBUSY;

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		;	/* not initialized: nothing to do */
	else if ((error = rf_Shutdown(raidPtr)) != 0)
		return error;
	else
		rs->sc_flags &= ~(RAIDF_INITED|RAIDF_SHUTDOWN);

	/* Detach the disk. */
	disk_detach(&rs->sc_dkdev);
	disk_destroy(&rs->sc_dkdev);

	return 0;
}

int
raidioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	int     unit = raidunit(dev);
	int     error = 0;
	int     part, pmask;
	cfdata_t cf;
	struct raid_softc *rs;
	RF_Config_t *k_cfg, *u_cfg;
	RF_Raid_t *raidPtr;
	RF_RaidDisk_t *diskPtr;
	RF_AccTotals_t *totals;
	RF_DeviceConfig_t *d_cfg, **ucfgp;
	u_char *specific_buf;
	int retcode = 0;
	int column;
/*	int raidid; */
	struct rf_recon_req *rrcopy, *rr;
	RF_ComponentLabel_t *clabel;
	RF_ComponentLabel_t *ci_label;
	RF_ComponentLabel_t **clabel_ptr;
	RF_SingleComponent_t *sparePtr, *componentPtr;
	RF_SingleComponent_t component;
	RF_ProgressInfo_t progressInfo, **progressInfoPtr;
	int i, j, d;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif
	struct dkwedge_info *dkw;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];
	raidPtr = raidPtrs[unit];

	db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
		(int) DISKPART(dev), (int) unit, (int) cmd));

	/* Must be open for writes for these commands... */
	switch (cmd) {
#ifdef DIOCGSECTORSIZE
	case DIOCGSECTORSIZE:
		*(u_int *)data = raidPtr->bytesPerSector;
		return 0;
	case DIOCGMEDIASIZE:
		*(off_t *)data =
		    (off_t)raidPtr->totalSectors * raidPtr->bytesPerSector;
		return 0;
#endif
	case DIOCSDINFO:
	case DIOCWDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
	case DIOCWLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
		if ((flag & FWRITE) == 0)
			return (EBADF);
	}

	/* Must be initialized for these... */
	switch (cmd) {
	case DIOCGDINFO:
	case DIOCSDINFO:
	case DIOCWDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
	case ODIOCWDINFO:
	case ODIOCSDINFO:
	case ODIOCGDEFLABEL:
#endif
	case DIOCGPART:
	case DIOCWLABEL:
	case DIOCGDEFLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCLWEDGES:
	case DIOCCACHESYNC:
	case RAIDFRAME_SHUTDOWN:
	case RAIDFRAME_REWRITEPARITY:
	case RAIDFRAME_GET_INFO:
	case RAIDFRAME_RESET_ACCTOTALS:
	case RAIDFRAME_GET_ACCTOTALS:
	case RAIDFRAME_KEEP_ACCTOTALS:
	case RAIDFRAME_GET_SIZE:
	case RAIDFRAME_FAIL_DISK:
	case RAIDFRAME_COPYBACK:
	case RAIDFRAME_CHECK_RECON_STATUS:
	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
	case RAIDFRAME_GET_COMPONENT_LABEL:
	case RAIDFRAME_SET_COMPONENT_LABEL:
	case RAIDFRAME_ADD_HOT_SPARE:
	case RAIDFRAME_REMOVE_HOT_SPARE:
	case RAIDFRAME_INIT_LABELS:
	case RAIDFRAME_REBUILD_IN_PLACE:
	case RAIDFRAME_CHECK_PARITY:
	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
	case RAIDFRAME_CHECK_COPYBACK_STATUS:
	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
	case RAIDFRAME_SET_AUTOCONFIG:
	case RAIDFRAME_SET_ROOT:
	case RAIDFRAME_DELETE_COMPONENT:
	case RAIDFRAME_INCORPORATE_HOT_SPARE:
	case RAIDFRAME_PARITYMAP_STATUS:
	case RAIDFRAME_PARITYMAP_GET_DISABLE:
	case RAIDFRAME_PARITYMAP_SET_DISABLE:
	case RAIDFRAME_PARITYMAP_SET_PARAMS:
		if ((rs->sc_flags & RAIDF_INITED) == 0)
			return (ENXIO);
	}

	switch (cmd) {
#ifdef COMPAT_50
	case RAIDFRAME_GET_INFO50:
		return rf_get_info50(raidPtr, data);

	case RAIDFRAME_CONFIGURE50:
		if ((retcode = rf_config50(raidPtr, unit, data, &k_cfg)) != 0)
			return retcode;
		goto config;
#endif
		/* configure the system */
	case RAIDFRAME_CONFIGURE:

		if (raidPtr->valid) {
			/* There is a valid RAID set running on this unit! */
			printf("raid%d: Device already configured!\n", unit);
			return (EINVAL);
		}

		/* copy-in the configuration information */
		/* data points to a pointer to the configuration structure */

		u_cfg = *((RF_Config_t **) data);
		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
		if (k_cfg == NULL) {
			return (ENOMEM);
		}
		retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
		if (retcode) {
			RF_Free(k_cfg, sizeof(RF_Config_t));
			db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
				retcode));
			return (retcode);
		}
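		/*
		 * The COMPAT_50 RAIDFRAME_CONFIGURE50 case above joins the
		 * common configuration path at the "config" label below once
		 * it has built k_cfg from the old-style argument.
		 */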
		goto config;
	config:
		/* allocate a buffer for the layout-specific data, and copy it
		 * in */
		if (k_cfg->layoutSpecificSize) {
			if (k_cfg->layoutSpecificSize > 10000) {
				/* sanity check */
				RF_Free(k_cfg, sizeof(RF_Config_t));
				return (EINVAL);
			}
			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
			    (u_char *));
			if (specific_buf == NULL) {
				RF_Free(k_cfg, sizeof(RF_Config_t));
				return (ENOMEM);
			}
			retcode = copyin(k_cfg->layoutSpecific, specific_buf,
			    k_cfg->layoutSpecificSize);
			if (retcode) {
				RF_Free(k_cfg, sizeof(RF_Config_t));
				RF_Free(specific_buf,
					k_cfg->layoutSpecificSize);
				db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
					retcode));
				return (retcode);
			}
		} else
			specific_buf = NULL;
		k_cfg->layoutSpecific = specific_buf;

		/* should do some kind of sanity check on the configuration.
		 * Store the sum of all the bytes in the last byte? */

		/* configure the system */

		/*
		 * Clear the entire RAID descriptor, just to make sure
		 * there is no stale data left in the case of a
		 * reconfiguration
		 */
		memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
		raidPtr->raidid = unit;

		retcode = rf_Configure(raidPtr, k_cfg, NULL);

		if (retcode == 0) {

			/* allow this many simultaneous IO's to
			   this RAID device */
			raidPtr->openings = RAIDOUTSTANDING;

			raidinit(raidPtr);
			rf_markalldirty(raidPtr);
		}
		/* free the buffers.  No return code here. */
		if (k_cfg->layoutSpecificSize) {
			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
		}
		RF_Free(k_cfg, sizeof(RF_Config_t));

		return (retcode);

		/* shutdown the system */
	case RAIDFRAME_SHUTDOWN:

		part = DISKPART(dev);
		pmask = (1 << part);

		if ((error = raidlock(rs)) != 0)
			return (error);

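		/*
		 * Refuse the shutdown if any other partition of this unit is
		 * still open, or if the partition used for this ioctl is open
		 * through both the block and the character device.
		 */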
1220 1.9 oster if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
1221 1.9 oster ((rs->sc_dkdev.dk_bopenmask & pmask) &&
1222 1.245.4.5 yamt (rs->sc_dkdev.dk_copenmask & pmask)))
1223 1.245.4.5 yamt retcode = EBUSY;
1224 1.245.4.5 yamt else {
1225 1.245.4.5 yamt rs->sc_flags |= RAIDF_SHUTDOWN;
1226 1.245.4.5 yamt rs->sc_dkdev.dk_copenmask &= ~pmask;
1227 1.245.4.5 yamt rs->sc_dkdev.dk_bopenmask &= ~pmask;
1228 1.245.4.5 yamt rs->sc_dkdev.dk_openmask &= ~pmask;
1229 1.245.4.5 yamt retcode = 0;
1230 1.9 oster }
1231 1.11 oster
1232 1.245.4.5 yamt raidunlock(rs);
1233 1.1 oster
1234 1.245.4.5 yamt if (retcode != 0)
1235 1.245.4.5 yamt return retcode;
1236 1.16 oster
1237 1.217 oster /* free the pseudo device attach bits */
1238 1.217 oster
1239 1.217 oster cf = device_cfdata(rs->sc_dev);
1240 1.245.4.5 yamt if ((retcode = config_detach(rs->sc_dev, DETACH_QUIET)) == 0)
1241 1.245.4.5 yamt free(cf, M_RAIDFRAME);
1242 1.1 oster
1243 1.9 oster return (retcode);
1244 1.11 oster case RAIDFRAME_GET_COMPONENT_LABEL:
1245 1.48 oster clabel_ptr = (RF_ComponentLabel_t **) data;
1246 1.11 oster /* need to read the component label for the disk indicated
1247 1.48 oster by row,column in clabel */
1248 1.11 oster
1249 1.245.4.6 yamt /*
1250 1.245.4.6 yamt * Perhaps there should be an option to skip the in-core
1251 1.245.4.6 yamt * copy and hit the disk, as with disklabel(8).
1252 1.245.4.6 yamt */
1253 1.245.4.6 yamt RF_Malloc(clabel, sizeof(*clabel), (RF_ComponentLabel_t *));
1254 1.11 oster
1255 1.186 perry retcode = copyin( *clabel_ptr, clabel,
1256 1.11 oster sizeof(RF_ComponentLabel_t));
1257 1.11 oster
1258 1.11 oster if (retcode) {
1259 1.11 oster return(retcode);
1260 1.11 oster }
1261 1.11 oster
1262 1.166 oster clabel->row = 0; /* Don't allow looking at anything else.*/
1263 1.166 oster
1264 1.48 oster column = clabel->column;
1265 1.26 oster
1266 1.166 oster if ((column < 0) || (column >= raidPtr->numCol +
1267 1.90 oster raidPtr->numSpare)) {
1268 1.26 oster return(EINVAL);
1269 1.11 oster }
1270 1.11 oster
1271 1.245.4.6 yamt RF_Free(clabel, sizeof(*clabel));
1272 1.245.4.6 yamt
1273 1.245.4.6 yamt clabel = raidget_component_label(raidPtr, column);
1274 1.11 oster
1275 1.195 yamt if (retcode == 0) {
1276 1.195 yamt retcode = copyout(clabel, *clabel_ptr,
1277 1.195 yamt sizeof(RF_ComponentLabel_t));
1278 1.195 yamt }
1279 1.11 oster return (retcode);
1280 1.11 oster
1281 1.245.4.6 yamt #if 0
1282 1.11 oster case RAIDFRAME_SET_COMPONENT_LABEL:
1283 1.48 oster clabel = (RF_ComponentLabel_t *) data;
1284 1.11 oster
1285 1.11 oster /* XXX check the label for valid stuff... */
1286 1.11 oster /* Note that some things *should not* get modified --
1287 1.186 perry the user should be re-initing the labels instead of
1288 1.11 oster trying to patch things.
1289 1.11 oster */
1290 1.11 oster
1291 1.123 oster raidid = raidPtr->raidid;
1292 1.224 oster #ifdef DEBUG
1293 1.123 oster printf("raid%d: Got component label:\n", raidid);
1294 1.123 oster printf("raid%d: Version: %d\n", raidid, clabel->version);
1295 1.123 oster printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1296 1.123 oster printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1297 1.123 oster printf("raid%d: Column: %d\n", raidid, clabel->column);
1298 1.123 oster printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1299 1.123 oster printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1300 1.123 oster printf("raid%d: Status: %d\n", raidid, clabel->status);
1301 1.174 oster #endif
1302 1.166 oster clabel->row = 0;
1303 1.48 oster column = clabel->column;
1304 1.12 oster
1305 1.166 oster if ((column < 0) || (column >= raidPtr->numCol)) {
1306 1.12 oster return(EINVAL);
1307 1.11 oster }
1308 1.12 oster
1309 1.12 oster /* XXX this isn't allowed to do anything for now :-) */
1310 1.48 oster
1311 1.48 oster /* XXX and before it is, we need to fill in the rest
1312 1.48 oster of the fields!?!?!?! */
1313 1.245.4.6 yamt memcpy(raidget_component_label(raidPtr, column),
1314 1.245.4.6 yamt clabel, sizeof(*clabel));
1315 1.245.4.6 yamt raidflush_component_label(raidPtr, column);
1316 1.12 oster return (0);
1317 1.245.4.6 yamt #endif
1318 1.11 oster
1319 1.186 perry case RAIDFRAME_INIT_LABELS:
1320 1.48 oster clabel = (RF_ComponentLabel_t *) data;
1321 1.186 perry /*
1322 1.11 oster we only want the serial number from
1323 1.11 oster the above. We get all the rest of the information
1324 1.11 oster from the config that was used to create this RAID
1325 1.186 perry set.
1326 1.11 oster */
1327 1.12 oster
1328 1.48 oster raidPtr->serial_number = clabel->serial_number;
1329 1.186 perry
1330 1.166 oster for(column=0;column<raidPtr->numCol;column++) {
1331 1.166 oster diskPtr = &raidPtr->Disks[column];
1332 1.166 oster if (!RF_DEAD_DISK(diskPtr->status)) {
1333 1.245.4.6 yamt ci_label = raidget_component_label(raidPtr,
1334 1.245.4.6 yamt column);
1335 1.245.4.6 yamt /* Zeroing this is important. */
1336 1.245.4.6 yamt memset(ci_label, 0, sizeof(*ci_label));
1337 1.245.4.6 yamt raid_init_component_label(raidPtr, ci_label);
1338 1.245.4.6 yamt ci_label->serial_number =
1339 1.245.4.6 yamt raidPtr->serial_number;
1340 1.245.4.6 yamt ci_label->row = 0; /* we dont' pretend to support more */
1341 1.245.4.6 yamt ci_label->partitionSize =
1342 1.245.4.6 yamt diskPtr->partitionSize;
1343 1.209 oster ci_label->column = column;
1344 1.245.4.6 yamt raidflush_component_label(raidPtr, column);
1345 1.11 oster }
1346 1.245.4.6 yamt /* XXXjld what about the spares? */
1347 1.11 oster }
1348 1.209 oster
1349 1.11 oster return (retcode);
1350 1.48 oster case RAIDFRAME_SET_AUTOCONFIG:
1351 1.78 minoura d = rf_set_autoconfig(raidPtr, *(int *) data);
1352 1.186 perry printf("raid%d: New autoconfig value is: %d\n",
1353 1.123 oster raidPtr->raidid, d);
1354 1.78 minoura *(int *) data = d;
1355 1.48 oster return (retcode);
1356 1.48 oster
1357 1.48 oster case RAIDFRAME_SET_ROOT:
1358 1.78 minoura d = rf_set_rootpartition(raidPtr, *(int *) data);
1359 1.186 perry printf("raid%d: New rootpartition value is: %d\n",
1360 1.123 oster raidPtr->raidid, d);
1361 1.78 minoura *(int *) data = d;
1362 1.48 oster return (retcode);
1363 1.9 oster
1364 1.1 oster /* initialize all parity */
1365 1.1 oster case RAIDFRAME_REWRITEPARITY:
1366 1.1 oster
1367 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1368 1.17 oster /* Parity for RAID 0 is trivially correct */
1369 1.42 oster raidPtr->parity_good = RF_RAID_CLEAN;
1370 1.17 oster return(0);
1371 1.17 oster }
1372 1.186 perry
1373 1.42 oster if (raidPtr->parity_rewrite_in_progress == 1) {
1374 1.37 oster /* Re-write is already in progress! */
1375 1.37 oster return(EINVAL);
1376 1.37 oster }
1377 1.27 oster
1378 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1379 1.37 oster rf_RewriteParityThread,
1380 1.42 oster raidPtr,"raid_parity");
1381 1.9 oster return (retcode);
1382 1.9 oster
1383 1.11 oster
1384 1.11 oster case RAIDFRAME_ADD_HOT_SPARE:
1385 1.12 oster sparePtr = (RF_SingleComponent_t *) data;
1386 1.209 oster memcpy( &component, sparePtr, sizeof(RF_SingleComponent_t));
1387 1.209 oster retcode = rf_add_hot_spare(raidPtr, &component);
1388 1.11 oster return(retcode);
1389 1.11 oster
1390 1.11 oster case RAIDFRAME_REMOVE_HOT_SPARE:
1391 1.73 oster return(retcode);
1392 1.73 oster
1393 1.73 oster case RAIDFRAME_DELETE_COMPONENT:
1394 1.73 oster componentPtr = (RF_SingleComponent_t *)data;
1395 1.186 perry memcpy( &component, componentPtr,
1396 1.73 oster sizeof(RF_SingleComponent_t));
1397 1.73 oster retcode = rf_delete_component(raidPtr, &component);
1398 1.73 oster return(retcode);
1399 1.73 oster
1400 1.73 oster case RAIDFRAME_INCORPORATE_HOT_SPARE:
1401 1.73 oster componentPtr = (RF_SingleComponent_t *)data;
1402 1.186 perry memcpy( &component, componentPtr,
1403 1.73 oster sizeof(RF_SingleComponent_t));
1404 1.73 oster retcode = rf_incorporate_hot_spare(raidPtr, &component);
1405 1.11 oster return(retcode);
1406 1.11 oster
1407 1.12 oster case RAIDFRAME_REBUILD_IN_PLACE:
1408 1.24 oster
1409 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1410 1.24 oster /* Can't do this on a RAID 0!! */
1411 1.24 oster return(EINVAL);
1412 1.24 oster }
1413 1.24 oster
1414 1.42 oster if (raidPtr->recon_in_progress == 1) {
1415 1.37 oster /* a reconstruct is already in progress! */
1416 1.37 oster return(EINVAL);
1417 1.37 oster }
1418 1.37 oster
1419 1.12 oster componentPtr = (RF_SingleComponent_t *) data;
1420 1.186 perry memcpy( &component, componentPtr,
1421 1.12 oster sizeof(RF_SingleComponent_t));
1422 1.166 oster component.row = 0; /* we don't support any more */
1423 1.12 oster column = component.column;
1424 1.147 oster
1425 1.166 oster if ((column < 0) || (column >= raidPtr->numCol)) {
1426 1.12 oster return(EINVAL);
1427 1.12 oster }
1428 1.37 oster
1429 1.149 oster RF_LOCK_MUTEX(raidPtr->mutex);
1430 1.166 oster if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
1431 1.186 perry (raidPtr->numFailures > 0)) {
1432 1.149 oster /* XXX 0 above shouldn't be constant!!! */
1433 1.149 oster /* some component other than this has failed.
1434 1.149 oster Let's not make things worse than they already
1435 1.149 oster are... */
1436 1.149 oster printf("raid%d: Unable to reconstruct to disk at:\n",
1437 1.149 oster raidPtr->raidid);
1438 1.166 oster printf("raid%d: Col: %d Too many failures.\n",
1439 1.166 oster raidPtr->raidid, column);
1440 1.149 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
1441 1.149 oster return (EINVAL);
1442 1.149 oster }
1443 1.186 perry if (raidPtr->Disks[column].status ==
1444 1.149 oster rf_ds_reconstructing) {
1445 1.149 oster printf("raid%d: Unable to reconstruct to disk at:\n",
1446 1.149 oster raidPtr->raidid);
1447 1.166 oster 			printf("raid%d: Col: %d   Reconstruction already occurring!\n", raidPtr->raidid, column);
1448 1.186 perry
1449 1.149 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
1450 1.149 oster return (EINVAL);
1451 1.149 oster }
1452 1.166 oster if (raidPtr->Disks[column].status == rf_ds_spared) {
1453 1.149 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
1454 1.149 oster return (EINVAL);
1455 1.149 oster }
1456 1.149 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
1457 1.149 oster
1458 1.37 oster RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1459 1.38 oster if (rrcopy == NULL)
1460 1.38 oster return(ENOMEM);
1461 1.37 oster
1462 1.42 oster rrcopy->raidPtr = (void *) raidPtr;
1463 1.37 oster rrcopy->col = column;
1464 1.37 oster
1465 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1466 1.37 oster rf_ReconstructInPlaceThread,
1467 1.37 oster rrcopy,"raid_reconip");
1468 1.12 oster return(retcode);
1469 1.12 oster
1470 1.1 oster case RAIDFRAME_GET_INFO:
1471 1.42 oster if (!raidPtr->valid)
1472 1.41 oster return (ENODEV);
1473 1.41 oster ucfgp = (RF_DeviceConfig_t **) data;
1474 1.41 oster RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1475 1.41 oster (RF_DeviceConfig_t *));
1476 1.41 oster if (d_cfg == NULL)
1477 1.41 oster return (ENOMEM);
1478 1.166 oster d_cfg->rows = 1; /* there is only 1 row now */
1479 1.42 oster d_cfg->cols = raidPtr->numCol;
1480 1.166 oster d_cfg->ndevs = raidPtr->numCol;
1481 1.41 oster if (d_cfg->ndevs >= RF_MAX_DISKS) {
1482 1.41 oster RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1483 1.41 oster return (ENOMEM);
1484 1.41 oster }
1485 1.42 oster d_cfg->nspares = raidPtr->numSpare;
1486 1.41 oster if (d_cfg->nspares >= RF_MAX_DISKS) {
1487 1.41 oster RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1488 1.41 oster return (ENOMEM);
1489 1.41 oster }
1490 1.42 oster d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1491 1.41 oster d = 0;
1492 1.166 oster for (j = 0; j < d_cfg->cols; j++) {
1493 1.166 oster d_cfg->devs[d] = raidPtr->Disks[j];
1494 1.166 oster d++;
1495 1.41 oster }
1496 1.41 oster for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1497 1.166 oster d_cfg->spares[i] = raidPtr->Disks[j];
1498 1.41 oster }
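		/*
		 * Note: spare disks are stored in raidPtr->Disks[] directly
		 * after the numCol data columns, which is why the loop above
		 * simply keeps indexing from j == d_cfg->cols.
		 */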
1499 1.156 dsl retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
1500 1.41 oster RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1501 1.41 oster
1502 1.41 oster return (retcode);
1503 1.9 oster
1504 1.22 oster case RAIDFRAME_CHECK_PARITY:
1505 1.42 oster *(int *) data = raidPtr->parity_good;
1506 1.22 oster return (0);
1507 1.41 oster
1508 1.245.4.6 yamt case RAIDFRAME_PARITYMAP_STATUS:
1509 1.245.4.6 yamt rf_paritymap_status(raidPtr->parity_map,
1510 1.245.4.6 yamt (struct rf_pmstat *)data);
1511 1.245.4.6 yamt return 0;
1512 1.245.4.6 yamt
1513 1.245.4.6 yamt case RAIDFRAME_PARITYMAP_SET_PARAMS:
1514 1.245.4.6 yamt if (raidPtr->parity_map == NULL)
1515 1.245.4.6 yamt return ENOENT; /* ??? */
1516 1.245.4.6 yamt if (0 != rf_paritymap_set_params(raidPtr->parity_map,
1517 1.245.4.6 yamt (struct rf_pmparams *)data, 1))
1518 1.245.4.6 yamt return EINVAL;
1519 1.245.4.6 yamt return 0;
1520 1.245.4.6 yamt
1521 1.245.4.6 yamt case RAIDFRAME_PARITYMAP_GET_DISABLE:
1522 1.245.4.6 yamt *(int *) data = rf_paritymap_get_disable(raidPtr);
1523 1.245.4.6 yamt return 0;
1524 1.245.4.6 yamt
1525 1.245.4.6 yamt case RAIDFRAME_PARITYMAP_SET_DISABLE:
1526 1.245.4.6 yamt rf_paritymap_set_disable(raidPtr, *(int *)data);
1527 1.245.4.6 yamt /* XXX should errors be passed up? */
1528 1.245.4.6 yamt return 0;
1529 1.245.4.6 yamt
1530 1.1 oster case RAIDFRAME_RESET_ACCTOTALS:
1531 1.108 thorpej memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1532 1.41 oster return (0);
1533 1.9 oster
1534 1.1 oster case RAIDFRAME_GET_ACCTOTALS:
1535 1.41 oster totals = (RF_AccTotals_t *) data;
1536 1.42 oster *totals = raidPtr->acc_totals;
1537 1.41 oster return (0);
1538 1.9 oster
1539 1.1 oster case RAIDFRAME_KEEP_ACCTOTALS:
1540 1.42 oster raidPtr->keep_acc_totals = *(int *)data;
1541 1.41 oster return (0);
1542 1.9 oster
1543 1.1 oster case RAIDFRAME_GET_SIZE:
1544 1.42 oster *(int *) data = raidPtr->totalSectors;
1545 1.9 oster return (0);
1546 1.1 oster
1547 1.1 oster /* fail a disk & optionally start reconstruction */
1548 1.1 oster case RAIDFRAME_FAIL_DISK:
1549 1.24 oster
1550 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1551 1.24 oster /* Can't do this on a RAID 0!! */
1552 1.24 oster return(EINVAL);
1553 1.24 oster }
1554 1.24 oster
1555 1.1 oster rr = (struct rf_recon_req *) data;
1556 1.166 oster rr->row = 0;
1557 1.166 oster if (rr->col < 0 || rr->col >= raidPtr->numCol)
1558 1.9 oster return (EINVAL);
1559 1.149 oster
1560 1.149 oster
1561 1.149 oster RF_LOCK_MUTEX(raidPtr->mutex);
1562 1.185 oster if (raidPtr->status == rf_rs_reconstructing) {
1563 1.185 oster /* you can't fail a disk while we're reconstructing! */
1564 1.185 oster /* XXX wrong for RAID6 */
1565 1.185 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
1566 1.185 oster return (EINVAL);
1567 1.185 oster }
1568 1.186 perry if ((raidPtr->Disks[rr->col].status ==
1569 1.186 perry rf_ds_optimal) && (raidPtr->numFailures > 0)) {
1570 1.149 oster /* some other component has failed. Let's not make
1571 1.149 oster things worse. XXX wrong for RAID6 */
1572 1.149 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
1573 1.149 oster return (EINVAL);
1574 1.149 oster }
1575 1.166 oster if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
1576 1.149 oster /* Can't fail a spared disk! */
1577 1.149 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
1578 1.149 oster return (EINVAL);
1579 1.149 oster }
1580 1.149 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
1581 1.1 oster
1582 1.9 oster /* make a copy of the recon request so that we don't rely on
1583 1.9 oster * the user's buffer */
1584 1.1 oster RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1585 1.38 oster if (rrcopy == NULL)
1586 1.38 oster return(ENOMEM);
1587 1.118 wiz memcpy(rrcopy, rr, sizeof(*rr));
1588 1.42 oster rrcopy->raidPtr = (void *) raidPtr;
1589 1.1 oster
1590 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1591 1.37 oster rf_ReconThread,
1592 1.37 oster rrcopy,"raid_recon");
1593 1.9 oster return (0);
1594 1.9 oster
1595 1.9 oster /* invoke a copyback operation after recon on whatever disk
1596 1.9 oster * needs it, if any */
1597 1.9 oster case RAIDFRAME_COPYBACK:
1598 1.24 oster
1599 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1600 1.24 oster /* This makes no sense on a RAID 0!! */
1601 1.24 oster return(EINVAL);
1602 1.24 oster }
1603 1.24 oster
1604 1.42 oster if (raidPtr->copyback_in_progress == 1) {
1605 1.37 oster /* Copyback is already in progress! */
1606 1.37 oster return(EINVAL);
1607 1.37 oster }
1608 1.27 oster
1609 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1610 1.37 oster rf_CopybackThread,
1611 1.42 oster raidPtr,"raid_copyback");
1612 1.37 oster return (retcode);
1613 1.9 oster
1614 1.1 oster /* return the percentage completion of reconstruction */
1615 1.37 oster case RAIDFRAME_CHECK_RECON_STATUS:
1616 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1617 1.71 oster /* This makes no sense on a RAID 0, so tell the
1618 1.71 oster user it's done. */
1619 1.71 oster *(int *) data = 100;
1620 1.71 oster return(0);
1621 1.24 oster }
1622 1.166 oster if (raidPtr->status != rf_rs_reconstructing)
1623 1.1 oster *(int *) data = 100;
1624 1.171 oster else {
1625 1.171 oster if (raidPtr->reconControl->numRUsTotal > 0) {
1626 1.171 oster *(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
1627 1.171 oster } else {
1628 1.171 oster *(int *) data = 0;
1629 1.171 oster }
1630 1.171 oster }
1631 1.9 oster return (0);
1632 1.83 oster case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1633 1.83 oster progressInfoPtr = (RF_ProgressInfo_t **) data;
1634 1.166 oster if (raidPtr->status != rf_rs_reconstructing) {
1635 1.83 oster progressInfo.remaining = 0;
1636 1.83 oster progressInfo.completed = 100;
1637 1.83 oster progressInfo.total = 100;
1638 1.83 oster } else {
1639 1.186 perry progressInfo.total =
1640 1.166 oster raidPtr->reconControl->numRUsTotal;
1641 1.186 perry progressInfo.completed =
1642 1.166 oster raidPtr->reconControl->numRUsComplete;
1643 1.83 oster progressInfo.remaining = progressInfo.total -
1644 1.83 oster progressInfo.completed;
1645 1.83 oster }
1646 1.156 dsl retcode = copyout(&progressInfo, *progressInfoPtr,
1647 1.83 oster sizeof(RF_ProgressInfo_t));
1648 1.83 oster return (retcode);
1649 1.9 oster
1650 1.37 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1651 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1652 1.80 oster /* This makes no sense on a RAID 0, so tell the
1653 1.80 oster user it's done. */
1654 1.80 oster *(int *) data = 100;
1655 1.80 oster return(0);
1656 1.37 oster }
1657 1.42 oster if (raidPtr->parity_rewrite_in_progress == 1) {
1658 1.186 perry *(int *) data = 100 *
1659 1.186 perry raidPtr->parity_rewrite_stripes_done /
1660 1.83 oster raidPtr->Layout.numStripe;
1661 1.37 oster } else {
1662 1.37 oster *(int *) data = 100;
1663 1.37 oster }
1664 1.37 oster return (0);
1665 1.37 oster
1666 1.83 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1667 1.83 oster progressInfoPtr = (RF_ProgressInfo_t **) data;
1668 1.83 oster if (raidPtr->parity_rewrite_in_progress == 1) {
1669 1.83 oster progressInfo.total = raidPtr->Layout.numStripe;
1670 1.186 perry progressInfo.completed =
1671 1.83 oster raidPtr->parity_rewrite_stripes_done;
1672 1.83 oster progressInfo.remaining = progressInfo.total -
1673 1.83 oster progressInfo.completed;
1674 1.83 oster } else {
1675 1.83 oster progressInfo.remaining = 0;
1676 1.83 oster progressInfo.completed = 100;
1677 1.83 oster progressInfo.total = 100;
1678 1.83 oster }
1679 1.156 dsl retcode = copyout(&progressInfo, *progressInfoPtr,
1680 1.83 oster sizeof(RF_ProgressInfo_t));
1681 1.83 oster return (retcode);
1682 1.83 oster
1683 1.37 oster case RAIDFRAME_CHECK_COPYBACK_STATUS:
1684 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1685 1.37 oster /* This makes no sense on a RAID 0 */
1686 1.83 oster *(int *) data = 100;
1687 1.83 oster return(0);
1688 1.37 oster }
1689 1.42 oster if (raidPtr->copyback_in_progress == 1) {
1690 1.42 oster *(int *) data = 100 * raidPtr->copyback_stripes_done /
1691 1.42 oster raidPtr->Layout.numStripe;
1692 1.37 oster } else {
1693 1.37 oster *(int *) data = 100;
1694 1.37 oster }
1695 1.37 oster return (0);
1696 1.37 oster
1697 1.83 oster case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1698 1.93 oster progressInfoPtr = (RF_ProgressInfo_t **) data;
1699 1.83 oster if (raidPtr->copyback_in_progress == 1) {
1700 1.83 oster progressInfo.total = raidPtr->Layout.numStripe;
1701 1.186 perry progressInfo.completed =
1702 1.93 oster raidPtr->copyback_stripes_done;
1703 1.83 oster progressInfo.remaining = progressInfo.total -
1704 1.83 oster progressInfo.completed;
1705 1.83 oster } else {
1706 1.83 oster progressInfo.remaining = 0;
1707 1.83 oster progressInfo.completed = 100;
1708 1.83 oster progressInfo.total = 100;
1709 1.83 oster }
1710 1.156 dsl retcode = copyout(&progressInfo, *progressInfoPtr,
1711 1.83 oster sizeof(RF_ProgressInfo_t));
1712 1.83 oster return (retcode);
1713 1.37 oster
1714 1.9 oster /* the sparetable daemon calls this to wait for the kernel to
1715 1.9 oster * need a spare table. this ioctl does not return until a
1716 1.9 oster * spare table is needed. XXX -- calling mpsleep here in the
1717 1.9 oster * ioctl code is almost certainly wrong and evil. -- XXX XXX
1718 1.9 oster * -- I should either compute the spare table in the kernel,
1719 1.9 oster * or have a different -- XXX XXX -- interface (a different
1720 1.42 oster * character device) for delivering the table -- XXX */
1721 1.1 oster #if 0
1722 1.1 oster case RAIDFRAME_SPARET_WAIT:
1723 1.1 oster RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1724 1.9 oster while (!rf_sparet_wait_queue)
1725 1.9 oster mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1726 1.1 oster waitreq = rf_sparet_wait_queue;
1727 1.1 oster rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1728 1.1 oster RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1729 1.9 oster
1730 1.42 oster /* structure assignment */
1731 1.186 perry *((RF_SparetWait_t *) data) = *waitreq;
1732 1.9 oster
1733 1.1 oster RF_Free(waitreq, sizeof(*waitreq));
1734 1.9 oster return (0);
1735 1.9 oster
1736 1.9 oster /* wakes up a process waiting on SPARET_WAIT and puts an error
1737 1.9 oster 	 * code in it that will cause the daemon to exit */
1738 1.1 oster case RAIDFRAME_ABORT_SPARET_WAIT:
1739 1.1 oster RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1740 1.1 oster waitreq->fcol = -1;
1741 1.1 oster RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1742 1.1 oster waitreq->next = rf_sparet_wait_queue;
1743 1.1 oster rf_sparet_wait_queue = waitreq;
1744 1.1 oster RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1745 1.1 oster wakeup(&rf_sparet_wait_queue);
1746 1.9 oster return (0);
1747 1.1 oster
1748 1.9 oster /* used by the spare table daemon to deliver a spare table
1749 1.9 oster * into the kernel */
1750 1.1 oster case RAIDFRAME_SEND_SPARET:
1751 1.9 oster
1752 1.1 oster /* install the spare table */
1753 1.42 oster retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1754 1.9 oster
1755 1.9 oster /* respond to the requestor. the return status of the spare
1756 1.9 oster * table installation is passed in the "fcol" field */
1757 1.1 oster RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1758 1.1 oster waitreq->fcol = retcode;
1759 1.1 oster RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1760 1.1 oster waitreq->next = rf_sparet_resp_queue;
1761 1.1 oster rf_sparet_resp_queue = waitreq;
1762 1.1 oster wakeup(&rf_sparet_resp_queue);
1763 1.1 oster RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1764 1.9 oster
1765 1.9 oster return (retcode);
1766 1.1 oster #endif
1767 1.1 oster
1768 1.9 oster default:
1769 1.36 oster break; /* fall through to the os-specific code below */
1770 1.1 oster
1771 1.1 oster }
1772 1.9 oster
1773 1.42 oster if (!raidPtr->valid)
1774 1.9 oster return (EINVAL);
1775 1.9 oster
1776 1.1 oster /*
1777 1.1 oster * Add support for "regular" device ioctls here.
1778 1.1 oster */
1779 1.245.4.4 yamt
1780 1.245.4.4 yamt error = disk_ioctl(&rs->sc_dkdev, cmd, data, flag, l);
1781 1.245.4.4 yamt if (error != EPASSTHROUGH)
1782 1.245.4.4 yamt return (error);
1783 1.9 oster
1784 1.1 oster switch (cmd) {
1785 1.1 oster case DIOCGDINFO:
1786 1.9 oster *(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1787 1.1 oster break;
1788 1.102 fvdl #ifdef __HAVE_OLD_DISKLABEL
1789 1.102 fvdl case ODIOCGDINFO:
1790 1.102 fvdl newlabel = *(rs->sc_dkdev.dk_label);
1791 1.102 fvdl if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1792 1.103 fvdl return ENOTTY;
1793 1.102 fvdl memcpy(data, &newlabel, sizeof (struct olddisklabel));
1794 1.102 fvdl break;
1795 1.102 fvdl #endif
1796 1.1 oster
1797 1.1 oster case DIOCGPART:
1798 1.9 oster ((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1799 1.9 oster ((struct partinfo *) data)->part =
1800 1.1 oster &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1801 1.1 oster break;
1802 1.1 oster
1803 1.1 oster case DIOCWDINFO:
1804 1.1 oster case DIOCSDINFO:
1805 1.102 fvdl #ifdef __HAVE_OLD_DISKLABEL
1806 1.102 fvdl case ODIOCWDINFO:
1807 1.102 fvdl case ODIOCSDINFO:
1808 1.102 fvdl #endif
1809 1.102 fvdl {
1810 1.102 fvdl struct disklabel *lp;
1811 1.102 fvdl #ifdef __HAVE_OLD_DISKLABEL
1812 1.102 fvdl if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1813 1.102 fvdl memset(&newlabel, 0, sizeof newlabel);
1814 1.102 fvdl memcpy(&newlabel, data, sizeof (struct olddisklabel));
1815 1.102 fvdl lp = &newlabel;
1816 1.102 fvdl } else
1817 1.102 fvdl #endif
1818 1.102 fvdl lp = (struct disklabel *)data;
1819 1.102 fvdl
1820 1.1 oster if ((error = raidlock(rs)) != 0)
1821 1.1 oster return (error);
1822 1.1 oster
1823 1.1 oster rs->sc_flags |= RAIDF_LABELLING;
1824 1.1 oster
1825 1.1 oster error = setdisklabel(rs->sc_dkdev.dk_label,
1826 1.102 fvdl lp, 0, rs->sc_dkdev.dk_cpulabel);
1827 1.1 oster if (error == 0) {
1828 1.102 fvdl if (cmd == DIOCWDINFO
1829 1.102 fvdl #ifdef __HAVE_OLD_DISKLABEL
1830 1.102 fvdl || cmd == ODIOCWDINFO
1831 1.102 fvdl #endif
1832 1.102 fvdl )
1833 1.1 oster error = writedisklabel(RAIDLABELDEV(dev),
1834 1.1 oster raidstrategy, rs->sc_dkdev.dk_label,
1835 1.1 oster rs->sc_dkdev.dk_cpulabel);
1836 1.1 oster }
1837 1.1 oster rs->sc_flags &= ~RAIDF_LABELLING;
1838 1.1 oster
1839 1.1 oster raidunlock(rs);
1840 1.1 oster
1841 1.1 oster if (error)
1842 1.1 oster return (error);
1843 1.1 oster break;
1844 1.102 fvdl }
1845 1.1 oster
1846 1.1 oster case DIOCWLABEL:
1847 1.9 oster if (*(int *) data != 0)
1848 1.1 oster rs->sc_flags |= RAIDF_WLABEL;
1849 1.1 oster else
1850 1.1 oster rs->sc_flags &= ~RAIDF_WLABEL;
1851 1.1 oster break;
1852 1.1 oster
1853 1.1 oster case DIOCGDEFLABEL:
1854 1.102 fvdl raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
1855 1.1 oster break;
1856 1.102 fvdl
1857 1.102 fvdl #ifdef __HAVE_OLD_DISKLABEL
1858 1.102 fvdl case ODIOCGDEFLABEL:
1859 1.102 fvdl raidgetdefaultlabel(raidPtr, rs, &newlabel);
1860 1.102 fvdl if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1861 1.103 fvdl return ENOTTY;
1862 1.102 fvdl memcpy(data, &newlabel, sizeof (struct olddisklabel));
1863 1.102 fvdl break;
1864 1.102 fvdl #endif
1865 1.1 oster
1866 1.213 christos case DIOCAWEDGE:
1867 1.213 christos case DIOCDWEDGE:
1868 1.213 christos dkw = (void *)data;
1869 1.213 christos
1870 1.213 christos /* If the ioctl happens here, the parent is us. */
1871 1.213 christos (void)strcpy(dkw->dkw_parent, rs->sc_xname);
1872 1.213 christos return cmd == DIOCAWEDGE ? dkwedge_add(dkw) : dkwedge_del(dkw);
1873 1.213 christos
1874 1.213 christos case DIOCLWEDGES:
1875 1.213 christos return dkwedge_list(&rs->sc_dkdev,
1876 1.213 christos (struct dkwedge_list *)data, l);
1877 1.245.4.2 yamt case DIOCCACHESYNC:
1878 1.245.4.2 yamt return rf_sync_component_caches(raidPtr);
1879 1.1 oster default:
1880 1.39 oster retcode = ENOTTY;
1881 1.1 oster }
1882 1.9 oster return (retcode);
1883 1.1 oster
1884 1.1 oster }
1885 1.1 oster
1886 1.1 oster
1887 1.9 oster /* raidinit -- complete the rest of the initialization for the
1888 1.1 oster RAIDframe device. */
1889 1.1 oster
1890 1.1 oster
1891 1.59 oster static void
1892 1.219 oster raidinit(RF_Raid_t *raidPtr)
1893 1.1 oster {
1894 1.245.4.3 yamt cfdata_t cf;
1895 1.1 oster struct raid_softc *rs;
1896 1.59 oster int unit;
1897 1.1 oster
1898 1.59 oster unit = raidPtr->raidid;
1899 1.1 oster
1900 1.1 oster rs = &raid_softc[unit];
1901 1.1 oster
1902 1.1 oster /* XXX should check return code first... */
1903 1.1 oster rs->sc_flags |= RAIDF_INITED;
1904 1.1 oster
1905 1.179 itojun /* XXX doesn't check bounds. */
1906 1.179 itojun snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
1907 1.1 oster
1908 1.217 oster /* attach the pseudo device */
1909 1.217 oster cf = malloc(sizeof(*cf), M_RAIDFRAME, M_WAITOK);
1910 1.217 oster cf->cf_name = raid_cd.cd_name;
1911 1.217 oster cf->cf_atname = raid_cd.cd_name;
1912 1.217 oster cf->cf_unit = unit;
1913 1.217 oster cf->cf_fstate = FSTATE_STAR;
1914 1.217 oster
1915 1.217 oster rs->sc_dev = config_attach_pseudo(cf);
1916 1.217 oster
1917 1.245.4.6 yamt if (rs->sc_dev == NULL) {
1918 1.217 oster printf("raid%d: config_attach_pseudo failed\n",
1919 1.245.4.6 yamt raidPtr->raidid);
1920 1.245.4.4 yamt rs->sc_flags &= ~RAIDF_INITED;
1921 1.245.4.4 yamt free(cf, M_RAIDFRAME);
1922 1.245.4.4 yamt return;
1923 1.217 oster }
1924 1.217 oster
1925 1.1 oster /* disk_attach actually creates space for the CPU disklabel, among
1926 1.9 oster * other things, so it's critical to call this *BEFORE* we try putzing
1927 1.9 oster * with disklabels. */
1928 1.11 oster
1929 1.235 oster disk_init(&rs->sc_dkdev, rs->sc_xname, &rf_dkdriver);
1930 1.219 oster disk_attach(&rs->sc_dkdev);
1931 1.1 oster
1932 1.1 oster /* XXX There may be a weird interaction here between this, and
1933 1.9 oster * protectedSectors, as used in RAIDframe. */
1934 1.11 oster
1935 1.9 oster rs->sc_size = raidPtr->totalSectors;
1936 1.234 oster
1937 1.234 oster dkwedge_discover(&rs->sc_dkdev);
1938 1.234 oster
1939 1.234 oster rf_set_properties(rs, raidPtr);
1940 1.234 oster
1941 1.1 oster }
1942 1.150 oster #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
1943 1.1 oster /* wake up the daemon & tell it to get us a spare table
1944 1.1 oster * XXX
1945 1.9 oster * the entries in the queues should be tagged with the raidPtr
1946 1.186 perry * so that in the extremely rare case that two recons happen at once,
1947 1.11 oster  * we know for which device we're requesting a spare table
1948 1.1 oster * XXX
1949 1.186 perry *
1950 1.39 oster * XXX This code is not currently used. GO
1951 1.1 oster */
1952 1.186 perry int
1953 1.169 oster rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
1954 1.9 oster {
1955 1.9 oster int retcode;
1956 1.9 oster
1957 1.9 oster RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1958 1.9 oster req->next = rf_sparet_wait_queue;
1959 1.9 oster rf_sparet_wait_queue = req;
1960 1.9 oster wakeup(&rf_sparet_wait_queue);
1961 1.9 oster
1962 1.9 oster /* mpsleep unlocks the mutex */
1963 1.9 oster while (!rf_sparet_resp_queue) {
1964 1.15 oster tsleep(&rf_sparet_resp_queue, PRIBIO,
1965 1.9 oster "raidframe getsparetable", 0);
1966 1.9 oster }
1967 1.9 oster req = rf_sparet_resp_queue;
1968 1.9 oster rf_sparet_resp_queue = req->next;
1969 1.9 oster RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1970 1.9 oster
1971 1.9 oster retcode = req->fcol;
1972 1.9 oster RF_Free(req, sizeof(*req)); /* this is not the same req as we
1973 1.9 oster * alloc'd */
1974 1.9 oster return (retcode);
1975 1.1 oster }
1976 1.150 oster #endif
1977 1.39 oster
1978 1.186 perry /* a wrapper around rf_DoAccess that extracts appropriate info from the
1979 1.11 oster * bp & passes it down.
1980 1.1 oster * any calls originating in the kernel must use non-blocking I/O
1981 1.1 oster * do some extra sanity checking to return "appropriate" error values for
1982 1.1 oster * certain conditions (to make some standard utilities work)
1983 1.186 perry *
1984 1.34 oster * Formerly known as: rf_DoAccessKernel
1985 1.1 oster */
1986 1.34 oster void
1987 1.169 oster raidstart(RF_Raid_t *raidPtr)
1988 1.1 oster {
1989 1.1 oster RF_SectorCount_t num_blocks, pb, sum;
1990 1.1 oster RF_RaidAddr_t raid_addr;
1991 1.1 oster struct partition *pp;
1992 1.9 oster daddr_t blocknum;
1993 1.9 oster int unit;
1994 1.1 oster struct raid_softc *rs;
1995 1.9 oster int do_async;
1996 1.34 oster struct buf *bp;
1997 1.180 oster int rc;
1998 1.1 oster
1999 1.1 oster unit = raidPtr->raidid;
2000 1.1 oster rs = &raid_softc[unit];
2001 1.186 perry
2002 1.56 oster /* quick check to see if anything has died recently */
2003 1.56 oster RF_LOCK_MUTEX(raidPtr->mutex);
2004 1.56 oster if (raidPtr->numNewFailures > 0) {
2005 1.151 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
2006 1.186 perry rf_update_component_labels(raidPtr,
2007 1.91 oster RF_NORMAL_COMPONENT_UPDATE);
2008 1.151 oster RF_LOCK_MUTEX(raidPtr->mutex);
2009 1.56 oster raidPtr->numNewFailures--;
2010 1.56 oster }
2011 1.56 oster
2012 1.34 oster /* Check to see if we're at the limit... */
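	/*
	 * raidPtr->openings bounds the number of requests handed to
	 * rf_DoAccess() at once; it is decremented below before each
	 * dispatch and is presumably replenished elsewhere once the
	 * access completes (not shown in this routine).
	 */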
2013 1.34 oster while (raidPtr->openings > 0) {
2014 1.34 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
2015 1.34 oster
2016 1.34 oster /* get the next item, if any, from the queue */
2017 1.245.4.2 yamt if ((bp = bufq_get(rs->buf_queue)) == NULL) {
2018 1.34 oster /* nothing more to do */
2019 1.34 oster return;
2020 1.34 oster }
2021 1.34 oster
2022 1.34 oster /* Ok, for the bp we have here, bp->b_blkno is relative to the
2023 1.186 perry * partition.. Need to make it absolute to the underlying
2024 1.34 oster * device.. */
2025 1.1 oster
2026 1.34 oster blocknum = bp->b_blkno;
2027 1.34 oster if (DISKPART(bp->b_dev) != RAW_PART) {
2028 1.34 oster pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
2029 1.34 oster blocknum += pp->p_offset;
2030 1.34 oster }
2031 1.1 oster
2032 1.186 perry db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
2033 1.34 oster (int) blocknum));
2034 1.186 perry
2035 1.34 oster db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
2036 1.34 oster db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
2037 1.186 perry
2038 1.186 perry /* *THIS* is where we adjust what block we're going to...
2039 1.34 oster * but DO NOT TOUCH bp->b_blkno!!! */
2040 1.34 oster raid_addr = blocknum;
2041 1.186 perry
2042 1.34 oster num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
2043 1.34 oster pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
2044 1.34 oster sum = raid_addr + num_blocks + pb;
2045 1.34 oster if (1 || rf_debugKernelAccess) {
2046 1.34 oster db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
2047 1.34 oster (int) raid_addr, (int) sum, (int) num_blocks,
2048 1.34 oster (int) pb, (int) bp->b_resid));
2049 1.34 oster }
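		/*
		 * pb accounts for a final partial sector; the overlapping
		 * comparisons below catch both requests that run past the
		 * end of the RAID set and arithmetic wrap-around in the
		 * computed sum itself.
		 */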
2050 1.34 oster if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
2051 1.34 oster || (sum < num_blocks) || (sum < pb)) {
2052 1.34 oster bp->b_error = ENOSPC;
2053 1.34 oster bp->b_resid = bp->b_bcount;
2054 1.34 oster biodone(bp);
2055 1.34 oster RF_LOCK_MUTEX(raidPtr->mutex);
2056 1.34 oster continue;
2057 1.34 oster }
2058 1.34 oster /*
2059 1.34 oster * XXX rf_DoAccess() should do this, not just DoAccessKernel()
2060 1.34 oster */
2061 1.186 perry
2062 1.34 oster if (bp->b_bcount & raidPtr->sectorMask) {
2063 1.34 oster bp->b_error = EINVAL;
2064 1.34 oster bp->b_resid = bp->b_bcount;
2065 1.34 oster biodone(bp);
2066 1.34 oster RF_LOCK_MUTEX(raidPtr->mutex);
2067 1.34 oster continue;
2068 1.186 perry
2069 1.34 oster }
2070 1.34 oster db1_printf(("Calling DoAccess..\n"));
2071 1.186 perry
2072 1.1 oster
2073 1.34 oster RF_LOCK_MUTEX(raidPtr->mutex);
2074 1.34 oster raidPtr->openings--;
2075 1.34 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
2076 1.1 oster
2077 1.34 oster /*
2078 1.34 oster * Everything is async.
2079 1.34 oster */
2080 1.34 oster do_async = 1;
2081 1.186 perry
2082 1.99 oster disk_busy(&rs->sc_dkdev);
2083 1.99 oster
2084 1.186 perry /* XXX we're still at splbio() here... do we *really*
2085 1.34 oster need to be? */
2086 1.20 oster
2087 1.186 perry /* don't ever condition on bp->b_flags & B_WRITE.
2088 1.99 oster * always condition on B_READ instead */
2089 1.186 perry
2090 1.180 oster rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
2091 1.180 oster RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
2092 1.180 oster do_async, raid_addr, num_blocks,
2093 1.180 oster bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
2094 1.151 oster
2095 1.180 oster if (rc) {
2096 1.180 oster bp->b_error = rc;
2097 1.180 oster bp->b_resid = bp->b_bcount;
2098 1.180 oster biodone(bp);
2099 1.180 oster /* continue loop */
2100 1.186 perry }
2101 1.20 oster
2102 1.20 oster RF_LOCK_MUTEX(raidPtr->mutex);
2103 1.20 oster }
2104 1.34 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
2105 1.34 oster }
2106 1.20 oster
2107 1.20 oster
2108 1.7 explorer
2109 1.7 explorer
2110 1.1 oster /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
2111 1.1 oster
2112 1.186 perry int
2113 1.169 oster rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
2114 1.1 oster {
2115 1.9 oster int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
2116 1.1 oster struct buf *bp;
2117 1.9 oster
2118 1.1 oster req->queue = queue;
2119 1.1 oster bp = req->bp;
2120 1.1 oster
2121 1.1 oster switch (req->type) {
2122 1.9 oster case RF_IO_TYPE_NOP: /* used primarily to unlock a locked queue */
2123 1.1 oster /* XXX need to do something extra here.. */
2124 1.9 oster /* I'm leaving this in, as I've never actually seen it used,
2125 1.9 oster * and I'd like folks to report it... GO */
2126 1.1 oster 		printf("WAKEUP CALLED\n");
2127 1.1 oster queue->numOutstanding++;
2128 1.1 oster
2129 1.197 oster bp->b_flags = 0;
2130 1.207 simonb bp->b_private = req;
2131 1.1 oster
2132 1.194 oster KernelWakeupFunc(bp);
2133 1.1 oster break;
2134 1.9 oster
2135 1.1 oster case RF_IO_TYPE_READ:
2136 1.1 oster case RF_IO_TYPE_WRITE:
2137 1.175 oster #if RF_ACC_TRACE > 0
2138 1.1 oster if (req->tracerec) {
2139 1.1 oster RF_ETIMER_START(req->tracerec->timer);
2140 1.1 oster }
2141 1.175 oster #endif
2142 1.194 oster InitBP(bp, queue->rf_cinfo->ci_vp,
2143 1.197 oster op, queue->rf_cinfo->ci_dev,
2144 1.9 oster req->sectorOffset, req->numSector,
2145 1.9 oster req->buf, KernelWakeupFunc, (void *) req,
2146 1.9 oster queue->raidPtr->logBytesPerSector, req->b_proc);
2147 1.1 oster
2148 1.1 oster if (rf_debugKernelAccess) {
2149 1.9 oster db1_printf(("dispatch: bp->b_blkno = %ld\n",
2150 1.9 oster (long) bp->b_blkno));
2151 1.1 oster }
2152 1.1 oster queue->numOutstanding++;
2153 1.1 oster queue->last_deq_sector = req->sectorOffset;
2154 1.9 oster /* acc wouldn't have been let in if there were any pending
2155 1.9 oster * reqs at any other priority */
2156 1.1 oster queue->curPriority = req->priority;
2157 1.1 oster
2158 1.166 oster db1_printf(("Going for %c to unit %d col %d\n",
2159 1.186 perry req->type, queue->raidPtr->raidid,
2160 1.166 oster queue->col));
2161 1.1 oster db1_printf(("sector %d count %d (%d bytes) %d\n",
2162 1.9 oster (int) req->sectorOffset, (int) req->numSector,
2163 1.9 oster (int) (req->numSector <<
2164 1.9 oster queue->raidPtr->logBytesPerSector),
2165 1.9 oster (int) queue->raidPtr->logBytesPerSector));
2166 1.1 oster
2167 1.245.4.2 yamt /*
2168 1.245.4.2 yamt * XXX: drop lock here since this can block at
2169 1.245.4.2 yamt * least with backing SCSI devices. Retake it
2170 1.245.4.2 yamt * to minimize fuss with calling interfaces.
2171 1.245.4.2 yamt */
2172 1.245.4.2 yamt
2173 1.245.4.2 yamt RF_UNLOCK_QUEUE_MUTEX(queue, "unusedparam");
2174 1.245.4.2 yamt bdev_strategy(bp);
2175 1.245.4.2 yamt RF_LOCK_QUEUE_MUTEX(queue, "unusedparam");
2176 1.1 oster break;
2177 1.9 oster
2178 1.1 oster default:
2179 1.1 oster panic("bad req->type in rf_DispatchKernelIO");
2180 1.1 oster }
2181 1.1 oster db1_printf(("Exiting from DispatchKernelIO\n"));
2182 1.134 oster
2183 1.9 oster return (0);
2184 1.1 oster }
2185 1.9 oster /* this is the callback function associated with an I/O invoked from
2186 1.1 oster kernel code.
2187 1.1 oster */
2188 1.186 perry static void
2189 1.194 oster KernelWakeupFunc(struct buf *bp)
2190 1.9 oster {
2191 1.9 oster RF_DiskQueueData_t *req = NULL;
2192 1.9 oster RF_DiskQueue_t *queue;
2193 1.74 augustss int s;
2194 1.9 oster
2195 1.36 oster s = splbio();
2196 1.9 oster db1_printf(("recovering the request queue:\n"));
2197 1.207 simonb req = bp->b_private;
2198 1.1 oster
2199 1.9 oster queue = (RF_DiskQueue_t *) req->queue;
2200 1.1 oster
2201 1.175 oster #if RF_ACC_TRACE > 0
2202 1.9 oster if (req->tracerec) {
2203 1.9 oster RF_ETIMER_STOP(req->tracerec->timer);
2204 1.9 oster RF_ETIMER_EVAL(req->tracerec->timer);
2205 1.9 oster RF_LOCK_MUTEX(rf_tracing_mutex);
2206 1.9 oster req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
2207 1.9 oster req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
2208 1.9 oster req->tracerec->num_phys_ios++;
2209 1.9 oster RF_UNLOCK_MUTEX(rf_tracing_mutex);
2210 1.9 oster }
2211 1.175 oster #endif
2212 1.1 oster
2213 1.230 ad /* XXX Ok, let's get aggressive... If b_error is set, let's go
2214 1.9 oster * ballistic, and mark the component as hosed... */
2215 1.36 oster
2216 1.230 ad if (bp->b_error != 0) {
2217 1.9 oster /* Mark the disk as dead */
2218 1.9 oster /* but only mark it once... */
2219 1.186 perry /* and only if it wouldn't leave this RAID set
2220 1.183 oster completely broken */
2221 1.193 oster if (((queue->raidPtr->Disks[queue->col].status ==
2222 1.193 oster rf_ds_optimal) ||
2223 1.193 oster (queue->raidPtr->Disks[queue->col].status ==
2224 1.193 oster rf_ds_used_spare)) &&
2225 1.193 oster (queue->raidPtr->numFailures <
2226 1.204 simonb queue->raidPtr->Layout.map->faultsTolerated)) {
2227 1.9 oster printf("raid%d: IO Error. Marking %s as failed.\n",
2228 1.136 oster queue->raidPtr->raidid,
2229 1.166 oster queue->raidPtr->Disks[queue->col].devname);
2230 1.166 oster queue->raidPtr->Disks[queue->col].status =
2231 1.9 oster rf_ds_failed;
2232 1.166 oster queue->raidPtr->status = rf_rs_degraded;
2233 1.9 oster queue->raidPtr->numFailures++;
2234 1.56 oster queue->raidPtr->numNewFailures++;
2235 1.9 oster } else { /* Disk is already dead... */
2236 1.9 oster /* printf("Disk already marked as dead!\n"); */
2237 1.9 oster }
2238 1.4 oster
2239 1.9 oster }
2240 1.4 oster
2241 1.143 oster /* Fill in the error value */
2242 1.143 oster
2243 1.230 ad req->error = bp->b_error;
2244 1.143 oster
2245 1.143 oster simple_lock(&queue->raidPtr->iodone_lock);
2246 1.143 oster
2247 1.143 oster /* Drop this one on the "finished" queue... */
2248 1.143 oster TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
2249 1.143 oster
2250 1.143 oster /* Let the raidio thread know there is work to be done. */
2251 1.143 oster wakeup(&(queue->raidPtr->iodone));
2252 1.143 oster
2253 1.143 oster simple_unlock(&queue->raidPtr->iodone_lock);
2254 1.1 oster
2255 1.36 oster splx(s);
2256 1.1 oster }
2257 1.1 oster
2258 1.1 oster
2259 1.1 oster
2260 1.1 oster /*
2261 1.1 oster * initialize a buf structure for doing an I/O in the kernel.
2262 1.1 oster */
2263 1.186 perry static void
2264 1.169 oster InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
2265 1.225 christos RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
2266 1.169 oster void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
2267 1.169 oster struct proc *b_proc)
2268 1.9 oster {
2269 1.9 oster /* bp->b_flags = B_PHYS | rw_flag; */
2270 1.242 ad bp->b_flags = rw_flag; /* XXX need B_PHYS here too??? */
2271 1.242 ad bp->b_oflags = 0;
2272 1.242 ad bp->b_cflags = 0;
2273 1.9 oster bp->b_bcount = numSect << logBytesPerSector;
2274 1.9 oster bp->b_bufsize = bp->b_bcount;
2275 1.9 oster bp->b_error = 0;
2276 1.9 oster bp->b_dev = dev;
2277 1.187 christos bp->b_data = bf;
2278 1.9 oster bp->b_blkno = startSect;
2279 1.9 oster bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
2280 1.1 oster if (bp->b_bcount == 0) {
2281 1.141 provos panic("bp->b_bcount is zero in InitBP!!");
2282 1.1 oster }
2283 1.161 fvdl bp->b_proc = b_proc;
2284 1.9 oster bp->b_iodone = cbFunc;
2285 1.207 simonb bp->b_private = cbArg;
2286 1.1 oster }
2287 1.1 oster
2288 1.1 oster static void
2289 1.186 perry raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
2290 1.169 oster struct disklabel *lp)
2291 1.1 oster {
2292 1.108 thorpej memset(lp, 0, sizeof(*lp));
2293 1.1 oster
2294 1.1 oster /* fabricate a label... */
2295 1.1 oster lp->d_secperunit = raidPtr->totalSectors;
2296 1.1 oster lp->d_secsize = raidPtr->bytesPerSector;
2297 1.45 oster lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
2298 1.105 oster lp->d_ntracks = 4 * raidPtr->numCol;
2299 1.186 perry lp->d_ncylinders = raidPtr->totalSectors /
2300 1.45 oster (lp->d_nsectors * lp->d_ntracks);
2301 1.1 oster lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
2302 1.1 oster
2303 1.1 oster strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
2304 1.9 oster lp->d_type = DTYPE_RAID;
2305 1.1 oster strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
2306 1.1 oster lp->d_rpm = 3600;
2307 1.1 oster lp->d_interleave = 1;
2308 1.1 oster lp->d_flags = 0;
2309 1.1 oster
2310 1.1 oster lp->d_partitions[RAW_PART].p_offset = 0;
2311 1.1 oster lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
2312 1.1 oster lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
2313 1.1 oster lp->d_npartitions = RAW_PART + 1;
2314 1.1 oster
2315 1.1 oster lp->d_magic = DISKMAGIC;
2316 1.1 oster lp->d_magic2 = DISKMAGIC;
2317 1.1 oster lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
2318 1.1 oster
2319 1.1 oster }
2320 1.1 oster /*
2321 1.1 oster * Read the disklabel from the raid device. If one is not present, fake one
2322 1.1 oster * up.
2323 1.1 oster */
2324 1.1 oster static void
2325 1.169 oster raidgetdisklabel(dev_t dev)
2326 1.1 oster {
2327 1.9 oster int unit = raidunit(dev);
2328 1.1 oster struct raid_softc *rs = &raid_softc[unit];
2329 1.158 dsl const char *errstring;
2330 1.1 oster struct disklabel *lp = rs->sc_dkdev.dk_label;
2331 1.1 oster struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
2332 1.1 oster RF_Raid_t *raidPtr;
2333 1.1 oster
2334 1.1 oster db1_printf(("Getting the disklabel...\n"));
2335 1.1 oster
2336 1.108 thorpej memset(clp, 0, sizeof(*clp));
2337 1.1 oster
2338 1.1 oster raidPtr = raidPtrs[unit];
2339 1.1 oster
2340 1.1 oster raidgetdefaultlabel(raidPtr, rs, lp);
2341 1.1 oster
2342 1.1 oster /*
2343 1.1 oster * Call the generic disklabel extraction routine.
2344 1.1 oster */
2345 1.1 oster errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
2346 1.1 oster rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
2347 1.9 oster if (errstring)
2348 1.1 oster raidmakedisklabel(rs);
2349 1.1 oster else {
2350 1.9 oster int i;
2351 1.1 oster struct partition *pp;
2352 1.1 oster
2353 1.1 oster /*
2354 1.1 oster * Sanity check whether the found disklabel is valid.
2355 1.1 oster *
2356 1.1 oster * This is necessary since total size of the raid device
2357 1.1 oster * may vary when an interleave is changed even though exactly
2358 1.211 oster 		 * same components are used, and an old disklabel may be used
2359 1.1 oster * if that is found.
2360 1.1 oster */
2361 1.1 oster if (lp->d_secperunit != rs->sc_size)
2362 1.123 oster printf("raid%d: WARNING: %s: "
2363 1.245.4.2 yamt "total sector size in disklabel (%" PRIu32 ") != "
2364 1.245.4.2 yamt "the size of raid (%" PRIu64 ")\n", unit, rs->sc_xname,
2365 1.245.4.2 yamt lp->d_secperunit, rs->sc_size);
2366 1.1 oster for (i = 0; i < lp->d_npartitions; i++) {
2367 1.1 oster pp = &lp->d_partitions[i];
2368 1.1 oster if (pp->p_offset + pp->p_size > rs->sc_size)
2369 1.123 oster printf("raid%d: WARNING: %s: end of partition `%c' "
2370 1.245.4.2 yamt "exceeds the size of raid (%" PRIu64 ")\n",
2371 1.245.4.2 yamt unit, rs->sc_xname, 'a' + i, rs->sc_size);
2372 1.1 oster }
2373 1.1 oster }
2374 1.1 oster
2375 1.1 oster }
2376 1.1 oster /*
2377 1.1 oster * Take care of things one might want to take care of in the event
2378 1.1 oster * that a disklabel isn't present.
2379 1.1 oster */
2380 1.1 oster static void
2381 1.169 oster raidmakedisklabel(struct raid_softc *rs)
2382 1.1 oster {
2383 1.1 oster struct disklabel *lp = rs->sc_dkdev.dk_label;
2384 1.1 oster db1_printf(("Making a label..\n"));
2385 1.1 oster
2386 1.1 oster /*
2387 1.1 oster * For historical reasons, if there's no disklabel present
2388 1.1 oster * the raw partition must be marked FS_BSDFFS.
2389 1.1 oster */
2390 1.1 oster
2391 1.1 oster lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
2392 1.1 oster
2393 1.1 oster strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
2394 1.1 oster
2395 1.1 oster lp->d_checksum = dkcksum(lp);
2396 1.1 oster }
2397 1.1 oster /*
2398 1.1 oster * Wait interruptibly for an exclusive lock.
2399 1.1 oster *
2400 1.1 oster * XXX
2401 1.1 oster * Several drivers do this; it should be abstracted and made MP-safe.
2402 1.1 oster * (Hmm... where have we seen this warning before :-> GO )
2403 1.1 oster */
2404 1.1 oster static int
2405 1.169 oster raidlock(struct raid_softc *rs)
2406 1.1 oster {
2407 1.9 oster int error;
2408 1.1 oster
2409 1.1 oster while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2410 1.1 oster rs->sc_flags |= RAIDF_WANTED;
2411 1.9 oster if ((error =
2412 1.9 oster tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
2413 1.1 oster return (error);
2414 1.1 oster }
2415 1.1 oster rs->sc_flags |= RAIDF_LOCKED;
2416 1.1 oster return (0);
2417 1.1 oster }
2418 1.1 oster /*
2419 1.1 oster * Unlock and wake up any waiters.
2420 1.1 oster */
2421 1.1 oster static void
2422 1.169 oster raidunlock(struct raid_softc *rs)
2423 1.1 oster {
2424 1.1 oster
2425 1.1 oster rs->sc_flags &= ~RAIDF_LOCKED;
2426 1.1 oster if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2427 1.1 oster rs->sc_flags &= ~RAIDF_WANTED;
2428 1.1 oster wakeup(rs);
2429 1.1 oster }
2430 1.11 oster }
2431 1.186 perry
2432 1.11 oster
2433 1.11 oster #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2434 1.11 oster #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
2435 1.245.4.6 yamt #define RF_PARITY_MAP_OFFSET \
2436 1.245.4.6 yamt (RF_COMPONENT_INFO_OFFSET + RF_COMPONENT_INFO_SIZE)
2437 1.245.4.6 yamt #define RF_PARITY_MAP_SIZE RF_PARITYMAP_NBYTE
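/*
 * Reserved region near the start of each component, as laid out by the
 * macros above: the component label is read and written at byte offset
 * RF_COMPONENT_INFO_OFFSET and is allotted RF_COMPONENT_INFO_SIZE bytes;
 * the on-disk parity map follows immediately after that area.
 */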
2438 1.11 oster
2439 1.186 perry int
2440 1.245.4.6 yamt raidmarkclean(RF_Raid_t *raidPtr, RF_RowCol_t col)
2441 1.12 oster {
2442 1.245.4.6 yamt RF_ComponentLabel_t *clabel;
2443 1.245.4.6 yamt
2444 1.245.4.6 yamt clabel = raidget_component_label(raidPtr, col);
2445 1.245.4.6 yamt clabel->clean = RF_RAID_CLEAN;
2446 1.245.4.6 yamt raidflush_component_label(raidPtr, col);
2447 1.12 oster return(0);
2448 1.12 oster }
2449 1.12 oster
2450 1.12 oster
2451 1.186 perry int
2452 1.245.4.6 yamt raidmarkdirty(RF_Raid_t *raidPtr, RF_RowCol_t col)
2453 1.11 oster {
2454 1.245.4.6 yamt RF_ComponentLabel_t *clabel;
2455 1.245.4.6 yamt
2456 1.245.4.6 yamt clabel = raidget_component_label(raidPtr, col);
2457 1.245.4.6 yamt clabel->clean = RF_RAID_DIRTY;
2458 1.245.4.6 yamt raidflush_component_label(raidPtr, col);
2459 1.11 oster return(0);
2460 1.11 oster }
2461 1.11 oster
2462 1.11 oster int
2463 1.245.4.6 yamt raidfetch_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2464 1.245.4.6 yamt {
2465 1.245.4.6 yamt return raidread_component_label(raidPtr->Disks[col].dev,
2466 1.245.4.6 yamt raidPtr->raid_cinfo[col].ci_vp,
2467 1.245.4.6 yamt &raidPtr->raid_cinfo[col].ci_label);
2468 1.245.4.6 yamt }
2469 1.245.4.6 yamt
2470 1.245.4.6 yamt RF_ComponentLabel_t *
2471 1.245.4.6 yamt raidget_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2472 1.245.4.6 yamt {
2473 1.245.4.6 yamt return &raidPtr->raid_cinfo[col].ci_label;
2474 1.245.4.6 yamt }
2475 1.245.4.6 yamt
2476 1.245.4.6 yamt int
2477 1.245.4.6 yamt raidflush_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2478 1.245.4.6 yamt {
2479 1.245.4.6 yamt RF_ComponentLabel_t *label;
2480 1.245.4.6 yamt
2481 1.245.4.6 yamt label = &raidPtr->raid_cinfo[col].ci_label;
2482 1.245.4.6 yamt label->mod_counter = raidPtr->mod_counter;
2483 1.245.4.6 yamt #ifndef RF_NO_PARITY_MAP
2484 1.245.4.6 yamt label->parity_map_modcount = label->mod_counter;
2485 1.245.4.6 yamt #endif
2486 1.245.4.6 yamt return raidwrite_component_label(raidPtr->Disks[col].dev,
2487 1.245.4.6 yamt raidPtr->raid_cinfo[col].ci_vp, label);
2488 1.245.4.6 yamt }
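/*
 * Component label access pattern used above: raidfetch_component_label()
 * reads the on-disk label into the in-core copy, raidget_component_label()
 * returns a pointer to that in-core copy for callers to inspect or modify,
 * and raidflush_component_label() stamps the current mod_counter (and the
 * parity map mod count, when parity maps are compiled in) before writing
 * the copy back to the component.
 */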
2489 1.245.4.6 yamt
2490 1.245.4.6 yamt
2491 1.245.4.6 yamt static int
2492 1.186 perry raidread_component_label(dev_t dev, struct vnode *b_vp,
2493 1.245.4.6 yamt RF_ComponentLabel_t *clabel)
2494 1.245.4.6 yamt {
2495 1.245.4.6 yamt return raidread_component_area(dev, b_vp, clabel,
2496 1.245.4.6 yamt sizeof(RF_ComponentLabel_t),
2497 1.245.4.6 yamt RF_COMPONENT_INFO_OFFSET, RF_COMPONENT_INFO_SIZE);
2498 1.245.4.6 yamt }
2499 1.245.4.6 yamt
2500 1.245.4.6 yamt /* ARGSUSED */
2501 1.245.4.6 yamt static int
2502 1.245.4.6 yamt raidread_component_area(dev_t dev, struct vnode *b_vp, void *data,
2503 1.245.4.6 yamt size_t msize, daddr_t offset, daddr_t dsize)
2504 1.11 oster {
2505 1.11 oster struct buf *bp;
2506 1.130 gehenna const struct bdevsw *bdev;
2507 1.11 oster int error;
2508 1.186 perry
2509 1.11 oster /* XXX should probably ensure that we don't try to do this if
2510 1.186 perry someone has changed rf_protected_sectors. */
2511 1.11 oster
2512 1.98 oster if (b_vp == NULL) {
2513 1.98 oster /* For whatever reason, this component is not valid.
2514 1.98 oster Don't try to read a component label from it. */
2515 1.98 oster return(EINVAL);
2516 1.98 oster }
2517 1.98 oster
2518 1.11 oster /* get a block of the appropriate size... */
2519 1.245.4.6 yamt bp = geteblk((int)dsize);
2520 1.11 oster bp->b_dev = dev;
2521 1.11 oster
2522 1.11 oster /* get our ducks in a row for the read */
2523 1.245.4.6 yamt bp->b_blkno = offset / DEV_BSIZE;
2524 1.245.4.6 yamt bp->b_bcount = dsize;
2525 1.100 chs bp->b_flags |= B_READ;
2526 1.245.4.6 yamt bp->b_resid = dsize;
2527 1.11 oster
2528 1.130 gehenna bdev = bdevsw_lookup(bp->b_dev);
2529 1.130 gehenna if (bdev == NULL)
2530 1.130 gehenna return (ENXIO);
2531 1.130 gehenna (*bdev->d_strategy)(bp);
2532 1.11 oster
2533 1.186 perry error = biowait(bp);
2534 1.11 oster
2535 1.11 oster if (!error) {
2536 1.245.4.6 yamt memcpy(data, bp->b_data, msize);
2537 1.204 simonb }
2538 1.11 oster
2539 1.233 ad brelse(bp, 0);
2540 1.11 oster return(error);
2541 1.11 oster }
2542 1.245.4.6 yamt
2543 1.245.4.6 yamt
2544 1.245.4.6 yamt static int
2545 1.222 christos raidwrite_component_label(dev_t dev, struct vnode *b_vp,
2546 1.245.4.6 yamt RF_ComponentLabel_t *clabel)
2547 1.245.4.6 yamt {
2548 1.245.4.6 yamt return raidwrite_component_area(dev, b_vp, clabel,
2549 1.245.4.6 yamt sizeof(RF_ComponentLabel_t),
2550 1.245.4.6 yamt RF_COMPONENT_INFO_OFFSET, RF_COMPONENT_INFO_SIZE, 0);
2551 1.245.4.6 yamt }
2552 1.245.4.6 yamt
2553 1.245.4.6 yamt /* ARGSUSED */
2554 1.245.4.6 yamt static int
2555 1.245.4.6 yamt raidwrite_component_area(dev_t dev, struct vnode *b_vp, void *data,
2556 1.245.4.6 yamt size_t msize, daddr_t offset, daddr_t dsize, int asyncp)
2557 1.11 oster {
2558 1.11 oster struct buf *bp;
2559 1.130 gehenna const struct bdevsw *bdev;
2560 1.11 oster int error;
2561 1.11 oster
2562 1.11 oster /* get a block of the appropriate size... */
2563 1.245.4.6 yamt bp = geteblk((int)dsize);
2564 1.11 oster bp->b_dev = dev;
2565 1.11 oster
2566 1.11 oster /* get our ducks in a row for the write */
2567 1.245.4.6 yamt bp->b_blkno = offset / DEV_BSIZE;
2568 1.245.4.6 yamt bp->b_bcount = dsize;
2569 1.245.4.6 yamt bp->b_flags |= B_WRITE | (asyncp ? B_ASYNC : 0);
2570 1.245.4.6 yamt bp->b_resid = dsize;
2571 1.11 oster
2572 1.245.4.6 yamt memset(bp->b_data, 0, dsize);
2573 1.245.4.6 yamt memcpy(bp->b_data, data, msize);
2574 1.11 oster
2575 1.130 gehenna bdev = bdevsw_lookup(bp->b_dev);
2576 1.130 gehenna if (bdev == NULL)
2577 1.130 gehenna return (ENXIO);
2578 1.130 gehenna (*bdev->d_strategy)(bp);
2579 1.245.4.6 yamt if (asyncp)
2580 1.245.4.6 yamt return 0;
2581 1.186 perry error = biowait(bp);
2582 1.233 ad brelse(bp, 0);
2583 1.11 oster if (error) {
2584 1.48 oster #if 1
2585 1.11 oster printf("Failed to write RAID component info!\n");
2586 1.48 oster #endif
2587 1.11 oster }
2588 1.11 oster
2589 1.11 oster return(error);
2590 1.1 oster }
2591 1.12 oster
2592 1.186 perry void
2593 1.245.4.6 yamt rf_paritymap_kern_write(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
2594 1.245.4.6 yamt {
2595 1.245.4.6 yamt int c;
2596 1.245.4.6 yamt
2597 1.245.4.6 yamt for (c = 0; c < raidPtr->numCol; c++) {
2598 1.245.4.6 yamt /* Skip dead disks. */
2599 1.245.4.6 yamt if (RF_DEAD_DISK(raidPtr->Disks[c].status))
2600 1.245.4.6 yamt continue;
2601 1.245.4.6 yamt /* XXXjld: what if an error occurs here? */
2602 1.245.4.6 yamt raidwrite_component_area(raidPtr->Disks[c].dev,
2603 1.245.4.6 yamt raidPtr->raid_cinfo[c].ci_vp, map,
2604 1.245.4.6 yamt RF_PARITYMAP_NBYTE,
2605 1.245.4.6 yamt RF_PARITY_MAP_OFFSET, RF_PARITY_MAP_SIZE, 0);
2606 1.245.4.6 yamt }
2607 1.245.4.6 yamt }
2608 1.245.4.6 yamt
2609 1.245.4.6 yamt void
2610 1.245.4.6 yamt rf_paritymap_kern_read(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
2611 1.245.4.6 yamt {
2612 1.245.4.6 yamt struct rf_paritymap_ondisk tmp;
2613 1.245.4.6 yamt int c,first;
2614 1.245.4.6 yamt
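	/*
	 * Read the on-disk parity map from every live component: the first
	 * one found is copied into *map, and each subsequent map is folded
	 * in with rf_paritymap_merge() so the result reflects all surviving
	 * copies.
	 */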
2615 1.245.4.6 yamt first=1;
2616 1.245.4.6 yamt for (c = 0; c < raidPtr->numCol; c++) {
2617 1.245.4.6 yamt /* Skip dead disks. */
2618 1.245.4.6 yamt if (RF_DEAD_DISK(raidPtr->Disks[c].status))
2619 1.245.4.6 yamt continue;
2620 1.245.4.6 yamt raidread_component_area(raidPtr->Disks[c].dev,
2621 1.245.4.6 yamt raidPtr->raid_cinfo[c].ci_vp, &tmp,
2622 1.245.4.6 yamt RF_PARITYMAP_NBYTE,
2623 1.245.4.6 yamt RF_PARITY_MAP_OFFSET, RF_PARITY_MAP_SIZE);
2624 1.245.4.6 yamt if (first) {
2625 1.245.4.6 yamt memcpy(map, &tmp, sizeof(*map));
2626 1.245.4.6 yamt first = 0;
2627 1.245.4.6 yamt } else {
2628 1.245.4.6 yamt rf_paritymap_merge(map, &tmp);
2629 1.245.4.6 yamt }
2630 1.245.4.6 yamt }
2631 1.245.4.6 yamt }
2632 1.245.4.6 yamt
2633 1.245.4.6 yamt void
2634 1.169 oster rf_markalldirty(RF_Raid_t *raidPtr)
2635 1.12 oster {
2636 1.245.4.6 yamt RF_ComponentLabel_t *clabel;
2637 1.146 oster int sparecol;
2638 1.166 oster int c;
2639 1.166 oster int j;
2640 1.166 oster int scol = -1;
2641 1.12 oster
2642 1.12 oster raidPtr->mod_counter++;
2643 1.166 oster for (c = 0; c < raidPtr->numCol; c++) {
2644 1.166 oster /* we don't want to touch (at all) a disk that has
2645 1.166 oster failed */
2646 1.166 oster if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
2647 1.245.4.6 yamt clabel = raidget_component_label(raidPtr, c);
2648 1.245.4.6 yamt if (clabel->status == rf_ds_spared) {
2649 1.186 perry /* XXX do something special...
2650 1.186 perry but whatever you do, don't
2651 1.166 oster try to access it!! */
2652 1.166 oster } else {
2653 1.245.4.6 yamt raidmarkdirty(raidPtr, c);
2654 1.12 oster }
2655 1.166 oster }
2656 1.186 perry }
2657 1.146 oster
2658 1.12 oster for( c = 0; c < raidPtr->numSpare ; c++) {
2659 1.12 oster sparecol = raidPtr->numCol + c;
2660 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2661 1.186 perry /*
2662 1.186 perry
2663 1.186 perry we claim this disk is "optimal" if it's
2664 1.186 perry rf_ds_used_spare, as that means it should be
2665 1.186 perry directly substitutable for the disk it replaced.
2666 1.12 oster We note that too...
2667 1.12 oster
2668 1.12 oster */
2669 1.12 oster
2670 1.166 oster for(j=0;j<raidPtr->numCol;j++) {
2671 1.166 oster if (raidPtr->Disks[j].spareCol == sparecol) {
2672 1.166 oster scol = j;
2673 1.166 oster break;
2674 1.12 oster }
2675 1.12 oster }
2676 1.186 perry
2677 1.245.4.6 yamt clabel = raidget_component_label(raidPtr, sparecol);
2678 1.12 oster /* make sure status is noted */
2679 1.146 oster
2680 1.245.4.6 yamt raid_init_component_label(raidPtr, clabel);
2681 1.146 oster
2682 1.245.4.6 yamt clabel->row = 0;
2683 1.245.4.6 yamt clabel->column = scol;
2684 1.146 oster /* Note: we *don't* change status from rf_ds_used_spare
2685 1.146 oster to rf_ds_optimal */
2686 1.146 oster /* clabel.status = rf_ds_optimal; */
2687 1.186 perry
2688 1.245.4.6 yamt raidmarkdirty(raidPtr, sparecol);
2689 1.12 oster }
2690 1.12 oster }
2691 1.12 oster }
2692 1.12 oster
2693 1.13 oster
2694 1.13 oster void
2695 1.169 oster rf_update_component_labels(RF_Raid_t *raidPtr, int final)
2696 1.13 oster {
2697 1.245.4.6 yamt RF_ComponentLabel_t *clabel;
2698 1.13 oster int sparecol;
2699 1.166 oster int c;
2700 1.166 oster int j;
2701 1.166 oster int scol;
2702 1.13 oster
2703 1.13 oster scol = -1;
2704 1.13 oster
2705 1.186 perry /* XXX should do extra checks to make sure things really are clean,
2706 1.13 oster rather than blindly setting the clean bit... */
2707 1.13 oster
2708 1.13 oster raidPtr->mod_counter++;
2709 1.13 oster
2710 1.166 oster for (c = 0; c < raidPtr->numCol; c++) {
2711 1.166 oster if (raidPtr->Disks[c].status == rf_ds_optimal) {
2712 1.245.4.6 yamt clabel = raidget_component_label(raidPtr, c);
2713 1.201 oster /* make sure status is noted */
2714 1.245.4.6 yamt clabel->status = rf_ds_optimal;
2715 1.201 oster
2716 1.214 oster /* note what unit we are configured as */
2717 1.245.4.6 yamt clabel->last_unit = raidPtr->raidid;
2718 1.214 oster
2719 1.245.4.6 yamt raidflush_component_label(raidPtr, c);
2720 1.166 oster if (final == RF_FINAL_COMPONENT_UPDATE) {
2721 1.166 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2722 1.245.4.6 yamt raidmarkclean(raidPtr, c);
2723 1.91 oster }
2724 1.166 oster }
2725 1.186 perry }
2726 1.166 oster /* else we don't touch it.. */
2727 1.186 perry }
2728 1.63 oster
2729 1.63 oster for( c = 0; c < raidPtr->numSpare ; c++) {
2730 1.63 oster sparecol = raidPtr->numCol + c;
2731 1.110 oster /* Need to ensure that the reconstruct actually completed! */
2732 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2733 1.186 perry /*
2734 1.186 perry
2735 1.186 perry we claim this disk is "optimal" if it's
2736 1.186 perry rf_ds_used_spare, as that means it should be
2737 1.186 perry directly substitutable for the disk it replaced.
2738 1.63 oster We note that too...
2739 1.63 oster
2740 1.63 oster */
2741 1.63 oster
2742 1.166 oster for(j=0;j<raidPtr->numCol;j++) {
2743 1.166 oster if (raidPtr->Disks[j].spareCol == sparecol) {
2744 1.166 oster scol = j;
2745 1.166 oster break;
2746 1.63 oster }
2747 1.63 oster }
2748 1.186 perry
2749 1.63 oster /* XXX shouldn't *really* need this... */
2750 1.245.4.6 yamt clabel = raidget_component_label(raidPtr, sparecol);
2751 1.63 oster /* make sure status is noted */
2752 1.63 oster
2753 1.245.4.6 yamt raid_init_component_label(raidPtr, clabel);
2754 1.245.4.6 yamt
2755 1.245.4.6 yamt clabel->column = scol;
2756 1.245.4.6 yamt clabel->status = rf_ds_optimal;
2757 1.245.4.6 yamt clabel->last_unit = raidPtr->raidid;
2758 1.63 oster
2759 1.245.4.6 yamt raidflush_component_label(raidPtr, sparecol);
2760 1.91 oster if (final == RF_FINAL_COMPONENT_UPDATE) {
2761 1.13 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2762 1.245.4.6 yamt raidmarkclean(raidPtr, sparecol);
2763 1.13 oster }
2764 1.13 oster }
2765 1.13 oster }
2766 1.13 oster }
2767 1.68 oster }
2768 1.68 oster
2769 1.68 oster void
2770 1.169 oster rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
2771 1.69 oster {
2772 1.69 oster
2773 1.69 oster if (vp != NULL) {
2774 1.69 oster if (auto_configured == 1) {
2775 1.96 oster vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2776 1.238 pooka VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2777 1.69 oster vput(vp);
2778 1.186 perry
2779 1.186 perry } else {
2780 1.244 ad (void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred);
2781 1.69 oster }
2782 1.186 perry }
2783 1.69 oster }
2784 1.69 oster
2785 1.69 oster
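/*
 * rf_UnconfigureVnodes() -- release the vnodes associated with all
 * components and spares of this RAID set, clearing the cached vnode
 * pointers and auto_configured flags as we go.
 */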
2786 1.69 oster void
2787 1.169 oster rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
2788 1.68 oster {
2789 1.186 perry int r,c;
2790 1.69 oster struct vnode *vp;
2791 1.69 oster int acd;
2792 1.68 oster
2793 1.68 oster
2794 1.68 oster /* We take this opportunity to close the vnodes like we should.. */
2795 1.68 oster
2796 1.166 oster for (c = 0; c < raidPtr->numCol; c++) {
2797 1.166 oster vp = raidPtr->raid_cinfo[c].ci_vp;
2798 1.166 oster acd = raidPtr->Disks[c].auto_configured;
2799 1.166 oster rf_close_component(raidPtr, vp, acd);
2800 1.166 oster raidPtr->raid_cinfo[c].ci_vp = NULL;
2801 1.166 oster raidPtr->Disks[c].auto_configured = 0;
2802 1.68 oster }
2803 1.166 oster
2804 1.68 oster for (r = 0; r < raidPtr->numSpare; r++) {
2805 1.166 oster vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
2806 1.166 oster acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
2807 1.69 oster rf_close_component(raidPtr, vp, acd);
2808 1.166 oster raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
2809 1.166 oster raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
2810 1.68 oster }
2811 1.37 oster }
2812 1.63 oster
2813 1.37 oster
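/*
 * rf_ReconThread() -- kernel thread body for reconstruction.  Fails the
 * requested component via rf_FailDisk() (initiating reconstruction to a
 * spare when RF_FDFLAGS_RECON is set), frees the request, and exits.
 */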
2814 1.186 perry void
2815 1.169 oster rf_ReconThread(struct rf_recon_req *req)
2816 1.37 oster {
2817 1.37 oster int s;
2818 1.37 oster RF_Raid_t *raidPtr;
2819 1.37 oster
2820 1.37 oster s = splbio();
2821 1.37 oster raidPtr = (RF_Raid_t *) req->raidPtr;
2822 1.37 oster raidPtr->recon_in_progress = 1;
2823 1.37 oster
2824 1.166 oster rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
2825 1.37 oster ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2826 1.37 oster
2827 1.37 oster RF_Free(req, sizeof(*req));
2828 1.37 oster
2829 1.37 oster raidPtr->recon_in_progress = 0;
2830 1.37 oster splx(s);
2831 1.37 oster
2832 1.37 oster /* That's all... */
2833 1.204 simonb kthread_exit(0); /* does not return */
2834 1.37 oster }
2835 1.37 oster
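/*
 * rf_RewriteParityThread() -- kernel thread body for parity rewrites.
 * Runs rf_RewriteParity() over the whole set; on success the set is
 * flagged as having good parity.  Wakes any thread waiting in shutdown
 * for the rewrite to finish, then exits.
 */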
2836 1.37 oster void
2837 1.169 oster rf_RewriteParityThread(RF_Raid_t *raidPtr)
2838 1.37 oster {
2839 1.37 oster int retcode;
2840 1.37 oster int s;
2841 1.37 oster
2842 1.184 oster raidPtr->parity_rewrite_stripes_done = 0;
2843 1.37 oster raidPtr->parity_rewrite_in_progress = 1;
2844 1.37 oster s = splbio();
2845 1.37 oster retcode = rf_RewriteParity(raidPtr);
2846 1.37 oster splx(s);
2847 1.37 oster if (retcode) {
2848 1.37 oster printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
2849 1.37 oster } else {
2850 1.37 oster /* set the clean bit! If we shutdown correctly,
2851 1.37 oster the clean bit on each component label will get
2852 1.37 oster set */
2853 1.37 oster raidPtr->parity_good = RF_RAID_CLEAN;
2854 1.37 oster }
2855 1.37 oster raidPtr->parity_rewrite_in_progress = 0;
2856 1.85 oster
2857 1.85 oster /* Anyone waiting for us to stop? If so, inform them... */
2858 1.85 oster if (raidPtr->waitShutdown) {
2859 1.85 oster wakeup(&raidPtr->parity_rewrite_in_progress);
2860 1.85 oster }
2861 1.37 oster
2862 1.37 oster /* That's all... */
2863 1.204 simonb kthread_exit(0); /* does not return */
2864 1.37 oster }
2865 1.37 oster
2866 1.37 oster
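/*
 * rf_CopybackThread() -- kernel thread body that runs
 * rf_CopybackReconstructedData() to copy reconstructed data back from a
 * spare, then exits.
 */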
2867 1.37 oster void
2868 1.169 oster rf_CopybackThread(RF_Raid_t *raidPtr)
2869 1.37 oster {
2870 1.37 oster int s;
2871 1.37 oster
2872 1.37 oster raidPtr->copyback_in_progress = 1;
2873 1.37 oster s = splbio();
2874 1.37 oster rf_CopybackReconstructedData(raidPtr);
2875 1.37 oster splx(s);
2876 1.37 oster raidPtr->copyback_in_progress = 0;
2877 1.37 oster
2878 1.37 oster /* That's all... */
2879 1.204 simonb kthread_exit(0); /* does not return */
2880 1.37 oster }
2881 1.37 oster
2882 1.37 oster
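/*
 * rf_ReconstructInPlaceThread() -- kernel thread body that runs
 * rf_ReconstructInPlace() to rebuild the given column onto its own
 * component (rather than onto a hot spare), then exits.
 */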
2883 1.37 oster void
2884 1.169 oster rf_ReconstructInPlaceThread(struct rf_recon_req *req)
2885 1.37 oster {
2886 1.37 oster int s;
2887 1.37 oster RF_Raid_t *raidPtr;
2888 1.186 perry
2889 1.37 oster s = splbio();
2890 1.37 oster raidPtr = req->raidPtr;
2891 1.37 oster raidPtr->recon_in_progress = 1;
2892 1.166 oster rf_ReconstructInPlace(raidPtr, req->col);
2893 1.37 oster RF_Free(req, sizeof(*req));
2894 1.37 oster raidPtr->recon_in_progress = 0;
2895 1.37 oster splx(s);
2896 1.37 oster
2897 1.37 oster /* That's all... */
2898 1.204 simonb kthread_exit(0); /* does not return */
2899 1.48 oster }
2900 1.48 oster
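/*
 * rf_get_component() -- read the component label from the open vnode
 * and, if it looks reasonable and fits within the partition, prepend a
 * new RF_AutoConfig_t entry to ac_list.  If the component is rejected
 * the vnode is closed and released; if memory runs out the whole list
 * is torn down and NULL is returned.
 */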
2901 1.213 christos static RF_AutoConfig_t *
2902 1.213 christos rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
2903 1.213 christos const char *cname, RF_SectorCount_t size)
2904 1.213 christos {
2905 1.213 christos int good_one = 0;
2906 1.213 christos RF_ComponentLabel_t *clabel;
2907 1.213 christos RF_AutoConfig_t *ac;
2908 1.213 christos
2909 1.213 christos clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_NOWAIT);
2910 1.213 christos if (clabel == NULL) {
2911 1.213 christos oomem:
2912 1.213 christos while(ac_list) {
2913 1.213 christos ac = ac_list;
2914 1.213 christos if (ac->clabel)
2915 1.213 christos free(ac->clabel, M_RAIDFRAME);
2916 1.213 christos ac_list = ac_list->next;
2917 1.213 christos free(ac, M_RAIDFRAME);
2918 1.213 christos }
2919 1.213 christos printf("RAID auto config: out of memory!\n");
2920 1.213 christos return NULL; /* XXX probably should panic? */
2921 1.213 christos }
2922 1.213 christos
2923 1.213 christos if (!raidread_component_label(dev, vp, clabel)) {
2924 1.213 christos /* Got the label. Does it look reasonable? */
2925 1.213 christos if (rf_reasonable_label(clabel) &&
2926 1.213 christos (clabel->partitionSize <= size)) {
2927 1.224 oster #ifdef DEBUG
2928 1.213 christos printf("Component on: %s: %llu\n",
2929 1.213 christos cname, (unsigned long long)size);
2930 1.213 christos rf_print_component_label(clabel);
2931 1.213 christos #endif
2932 1.213 christos /* if it's reasonable, add it, else ignore it. */
2933 1.213 christos ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
2934 1.213 christos M_NOWAIT);
2935 1.213 christos if (ac == NULL) {
2936 1.213 christos free(clabel, M_RAIDFRAME);
2937 1.213 christos goto oomem;
2938 1.213 christos }
2939 1.213 christos strlcpy(ac->devname, cname, sizeof(ac->devname));
2940 1.213 christos ac->dev = dev;
2941 1.213 christos ac->vp = vp;
2942 1.213 christos ac->clabel = clabel;
2943 1.213 christos ac->next = ac_list;
2944 1.213 christos ac_list = ac;
2945 1.213 christos good_one = 1;
2946 1.213 christos }
2947 1.213 christos }
2948 1.213 christos if (!good_one) {
2949 1.213 christos /* cleanup */
2950 1.213 christos free(clabel, M_RAIDFRAME);
2951 1.213 christos vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2952 1.238 pooka VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2953 1.213 christos vput(vp);
2954 1.213 christos }
2955 1.213 christos return ac_list;
2956 1.213 christos }
2957 1.213 christos
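/*
 * rf_find_raid_components() -- walk every disk-class device in the
 * system, skipping floppies, CDs and memory disks, and probe each
 * RAIDframe wedge or FS_RAID-tagged partition for a component label.
 * Returns the list of candidate components for autoconfiguration.
 */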
2958 1.48 oster RF_AutoConfig_t *
2959 1.245.4.2 yamt rf_find_raid_components(void)
2960 1.48 oster {
2961 1.48 oster struct vnode *vp;
2962 1.48 oster struct disklabel label;
2963 1.245.4.2 yamt device_t dv;
2964 1.245.4.6 yamt deviter_t di;
2965 1.48 oster dev_t dev;
2966 1.213 christos int bmajor, bminor, wedge;
2967 1.48 oster int error;
2968 1.48 oster int i;
2969 1.48 oster RF_AutoConfig_t *ac_list;
2970 1.48 oster
2971 1.48 oster
2972 1.48 oster /* initialize the AutoConfig list */
2973 1.48 oster ac_list = NULL;
2974 1.48 oster
2975 1.48 oster /* we begin by trolling through *all* the devices on the system */
2976 1.48 oster
2977 1.245.4.6 yamt for (dv = deviter_first(&di, DEVITER_F_ROOT_FIRST); dv != NULL;
2978 1.245.4.6 yamt dv = deviter_next(&di)) {
2979 1.48 oster
2980 1.48 oster /* we are only interested in disks... */
2981 1.200 thorpej if (device_class(dv) != DV_DISK)
2982 1.48 oster continue;
2983 1.48 oster
2984 1.48 oster /* we don't care about floppies... */
2985 1.206 thorpej if (device_is_a(dv, "fd")) {
2986 1.119 leo continue;
2987 1.119 leo }
2988 1.129 oster
2989 1.129 oster /* we don't care about CD's... */
2990 1.206 thorpej if (device_is_a(dv, "cd")) {
2991 1.129 oster continue;
2992 1.129 oster }
2993 1.129 oster
2994 1.245.4.2 yamt /* we don't care about md's... */
2995 1.245.4.2 yamt if (device_is_a(dv, "md")) {
2996 1.245.4.2 yamt continue;
2997 1.245.4.2 yamt }
2998 1.245.4.2 yamt
2999 1.120 leo /* hdfd is the Atari/Hades floppy driver */
3000 1.206 thorpej if (device_is_a(dv, "hdfd")) {
3001 1.121 leo continue;
3002 1.121 leo }
3003 1.206 thorpej
3004 1.121 leo /* fdisa is the Atari/Milan floppy driver */
3005 1.206 thorpej if (device_is_a(dv, "fdisa")) {
3006 1.48 oster continue;
3007 1.48 oster }
3008 1.186 perry
3009 1.48 oster /* need to find the device_name_to_block_device_major stuff */
3010 1.245 cegger bmajor = devsw_name2blk(device_xname(dv), NULL, 0);
3011 1.48 oster
3012 1.48 oster /* get a vnode for the raw partition of this disk */
3013 1.48 oster
3014 1.213 christos wedge = device_is_a(dv, "dk");
3015 1.213 christos bminor = minor(device_unit(dv));
3016 1.213 christos dev = wedge ? makedev(bmajor, bminor) :
3017 1.213 christos MAKEDISKDEV(bmajor, bminor, RAW_PART);
3018 1.48 oster if (bdevvp(dev, &vp))
3019 1.48 oster panic("RAID can't alloc vnode");
3020 1.48 oster
3021 1.238 pooka error = VOP_OPEN(vp, FREAD, NOCRED);
3022 1.48 oster
3023 1.48 oster if (error) {
3024 1.186 perry /* "Who cares." Continue looking
3025 1.48 oster for something that exists */
3026 1.48 oster vput(vp);
3027 1.48 oster continue;
3028 1.48 oster }
3029 1.48 oster
3030 1.213 christos if (wedge) {
3031 1.213 christos struct dkwedge_info dkw;
3032 1.213 christos error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
3033 1.238 pooka NOCRED);
3034 1.213 christos if (error) {
3035 1.213 christos printf("RAIDframe: can't get wedge info for "
3036 1.245 cegger "dev %s (%d)\n", device_xname(dv), error);
3037 1.241 oster vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3038 1.241 oster VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
3039 1.241 oster vput(vp);
3040 1.213 christos continue;
3041 1.213 christos }
3042 1.213 christos
3043 1.241 oster if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0) {
3044 1.241 oster vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3045 1.241 oster VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
3046 1.241 oster vput(vp);
3047 1.228 christos continue;
3048 1.241 oster }
3049 1.213 christos
3050 1.213 christos ac_list = rf_get_component(ac_list, dev, vp,
3051 1.245 cegger device_xname(dv), dkw.dkw_size);
3052 1.213 christos continue;
3053 1.213 christos }
3054 1.213 christos
3055 1.48 oster /* Ok, the disk exists. Go get the disklabel. */
3056 1.238 pooka error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED);
3057 1.48 oster if (error) {
3058 1.48 oster /*
3059 1.48 oster * XXX can't happen - open() would
3060 1.48 oster * have errored out (or faked up one)
3061 1.48 oster */
3062 1.181 thorpej if (error != ENOTTY)
3063 1.181 thorpej printf("RAIDframe: can't get label for dev "
3064 1.245 cegger "%s (%d)\n", device_xname(dv), error);
3065 1.48 oster }
3066 1.48 oster
3067 1.48 oster /* don't need this any more. We'll allocate it again
3068 1.48 oster a little later if we really do... */
3069 1.96 oster vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3070 1.238 pooka VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
3071 1.48 oster vput(vp);
3072 1.48 oster
3073 1.181 thorpej if (error)
3074 1.181 thorpej continue;
3075 1.181 thorpej
3076 1.213 christos for (i = 0; i < label.d_npartitions; i++) {
3077 1.213 christos char cname[sizeof(ac_list->devname)];
3078 1.213 christos
3079 1.48 oster /* We only support partitions marked as RAID */
3080 1.48 oster if (label.d_partitions[i].p_fstype != FS_RAID)
3081 1.48 oster continue;
3082 1.48 oster
3083 1.206 thorpej dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
3084 1.48 oster if (bdevvp(dev, &vp))
3085 1.48 oster panic("RAID can't alloc vnode");
3086 1.48 oster
3087 1.238 pooka error = VOP_OPEN(vp, FREAD, NOCRED);
3088 1.48 oster if (error) {
3089 1.48 oster /* Whatever... */
3090 1.48 oster vput(vp);
3091 1.48 oster continue;
3092 1.48 oster }
3093 1.213 christos snprintf(cname, sizeof(cname), "%s%c",
3094 1.245 cegger device_xname(dv), 'a' + i);
3095 1.213 christos ac_list = rf_get_component(ac_list, dev, vp, cname,
3096 1.213 christos label.d_partitions[i].p_size);
3097 1.48 oster }
3098 1.48 oster }
3099 1.245.4.6 yamt deviter_release(&di);
3100 1.213 christos return ac_list;
3101 1.48 oster }
3102 1.186 perry
3103 1.213 christos
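/*
 * rf_reasonable_label() -- sanity-check a component label: the version,
 * clean flag, row/column ranges, and block size/count must all be
 * plausible before we will consider the component for
 * autoconfiguration.
 */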
3104 1.48 oster static int
3105 1.169 oster rf_reasonable_label(RF_ComponentLabel_t *clabel)
3106 1.48 oster {
3107 1.186 perry
3108 1.48 oster if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
3109 1.48 oster (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
3110 1.48 oster ((clabel->clean == RF_RAID_CLEAN) ||
3111 1.48 oster (clabel->clean == RF_RAID_DIRTY)) &&
3112 1.186 perry clabel->row >=0 &&
3113 1.186 perry clabel->column >= 0 &&
3114 1.48 oster clabel->num_rows > 0 &&
3115 1.48 oster clabel->num_columns > 0 &&
3116 1.186 perry clabel->row < clabel->num_rows &&
3117 1.48 oster clabel->column < clabel->num_columns &&
3118 1.48 oster clabel->blockSize > 0 &&
3119 1.48 oster clabel->numBlocks > 0) {
3120 1.48 oster /* label looks reasonable enough... */
3121 1.48 oster return(1);
3122 1.48 oster }
3123 1.48 oster return(0);
3124 1.48 oster }
3125 1.48 oster
3126 1.48 oster
3127 1.224 oster #ifdef DEBUG
3128 1.48 oster void
3129 1.169 oster rf_print_component_label(RF_ComponentLabel_t *clabel)
3130 1.48 oster {
3131 1.48 oster printf(" Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
3132 1.186 perry clabel->row, clabel->column,
3133 1.48 oster clabel->num_rows, clabel->num_columns);
3134 1.48 oster printf(" Version: %d Serial Number: %d Mod Counter: %d\n",
3135 1.48 oster clabel->version, clabel->serial_number,
3136 1.48 oster clabel->mod_counter);
3137 1.48 oster printf(" Clean: %s Status: %d\n",
3138 1.245.4.6 yamt clabel->clean ? "Yes" : "No", clabel->status);
3139 1.48 oster printf(" sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
3140 1.48 oster clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
3141 1.245.4.6 yamt printf(" RAID Level: %c blocksize: %d numBlocks: %u\n",
3142 1.186 perry (char) clabel->parityConfig, clabel->blockSize,
3143 1.48 oster clabel->numBlocks);
3144 1.245.4.6 yamt printf(" Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No");
3145 1.186 perry printf(" Contains root partition: %s\n",
3146 1.245.4.6 yamt clabel->root_partition ? "Yes" : "No");
3147 1.245.4.6 yamt printf(" Last configured as: raid%d\n", clabel->last_unit);
3148 1.51 oster #if 0
3149 1.51 oster printf(" Config order: %d\n", clabel->config_order);
3150 1.51 oster #endif
3151 1.186 perry
3152 1.48 oster }
3153 1.133 oster #endif
3154 1.48 oster
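/*
 * rf_create_auto_sets() -- sort the flat list of discovered components
 * into RF_ConfigSet_t groups, one per RAID set, using rf_does_it_fit()
 * to decide which set each component belongs to.
 */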
3155 1.48 oster RF_ConfigSet_t *
3156 1.169 oster rf_create_auto_sets(RF_AutoConfig_t *ac_list)
3157 1.48 oster {
3158 1.48 oster RF_AutoConfig_t *ac;
3159 1.48 oster RF_ConfigSet_t *config_sets;
3160 1.48 oster RF_ConfigSet_t *cset;
3161 1.48 oster RF_AutoConfig_t *ac_next;
3162 1.48 oster
3163 1.48 oster
3164 1.48 oster config_sets = NULL;
3165 1.48 oster
3166 1.48 oster /* Go through the AutoConfig list, and figure out which components
3167 1.48 oster belong to what sets. */
3168 1.48 oster ac = ac_list;
3169 1.48 oster while(ac!=NULL) {
3170 1.48 oster /* we're going to putz with ac->next, so save it here
3171 1.48 oster for use at the end of the loop */
3172 1.48 oster ac_next = ac->next;
3173 1.48 oster
3174 1.48 oster if (config_sets == NULL) {
3175 1.48 oster /* will need at least this one... */
3176 1.48 oster config_sets = (RF_ConfigSet_t *)
3177 1.186 perry malloc(sizeof(RF_ConfigSet_t),
3178 1.48 oster M_RAIDFRAME, M_NOWAIT);
3179 1.48 oster if (config_sets == NULL) {
3180 1.141 provos panic("rf_create_auto_sets: No memory!");
3181 1.48 oster }
3182 1.48 oster /* this one is easy :) */
3183 1.48 oster config_sets->ac = ac;
3184 1.48 oster config_sets->next = NULL;
3185 1.51 oster config_sets->rootable = 0;
3186 1.48 oster ac->next = NULL;
3187 1.48 oster } else {
3188 1.48 oster /* which set does this component fit into? */
3189 1.48 oster cset = config_sets;
3190 1.48 oster while(cset!=NULL) {
3191 1.49 oster if (rf_does_it_fit(cset, ac)) {
3192 1.86 oster /* looks like it matches... */
3193 1.86 oster ac->next = cset->ac;
3194 1.86 oster cset->ac = ac;
3195 1.48 oster break;
3196 1.48 oster }
3197 1.48 oster cset = cset->next;
3198 1.48 oster }
3199 1.48 oster if (cset==NULL) {
3200 1.48 oster /* didn't find a match above... new set..*/
3201 1.48 oster cset = (RF_ConfigSet_t *)
3202 1.186 perry malloc(sizeof(RF_ConfigSet_t),
3203 1.48 oster M_RAIDFRAME, M_NOWAIT);
3204 1.48 oster if (cset == NULL) {
3205 1.141 provos panic("rf_create_auto_sets: No memory!");
3206 1.48 oster }
3207 1.48 oster cset->ac = ac;
3208 1.48 oster ac->next = NULL;
3209 1.48 oster cset->next = config_sets;
3210 1.51 oster cset->rootable = 0;
3211 1.48 oster config_sets = cset;
3212 1.48 oster }
3213 1.48 oster }
3214 1.48 oster ac = ac_next;
3215 1.48 oster }
3216 1.48 oster
3217 1.48 oster
3218 1.48 oster return(config_sets);
3219 1.48 oster }
3220 1.48 oster
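/*
 * rf_does_it_fit() -- decide whether a component belongs to the given
 * config set by comparing its label against the label of the first
 * component already in the set.
 */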
3221 1.48 oster static int
3222 1.169 oster rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
3223 1.48 oster {
3224 1.48 oster RF_ComponentLabel_t *clabel1, *clabel2;
3225 1.48 oster
3226 1.48 oster /* If this one matches the *first* one in the set, that's good
3227 1.48 oster enough, since the other members of the set would have been
3228 1.48 oster through here too... */
3229 1.60 oster /* note that we are not checking partitionSize here..
3230 1.60 oster
3231 1.60 oster Note that we are also not checking the mod_counters here.
3232 1.186 perry If everything else matches except the mod_counter, that's
3233 1.60 oster good enough for this test. We will deal with the mod_counters
3234 1.186 perry a little later in the autoconfiguration process.
3235 1.60 oster
3236 1.60 oster (clabel1->mod_counter == clabel2->mod_counter) &&
3237 1.81 oster
3238 1.81 oster The reason we don't check for this is that failed disks
3239 1.81 oster will have lower modification counts. If those disks are
3240 1.81 oster not added to the set they used to belong to, then they will
3241 1.81 oster form their own set, which may result in 2 different sets,
3242 1.81 oster for example, competing to be configured at raid0, and
3243 1.81 oster perhaps competing to be the root filesystem set. If the
3244 1.81 oster wrong ones get configured, or both attempt to become /,
3245 1.81 oster weird behaviour and/or serious lossage will occur. Thus we
3246 1.81 oster need to bring them into the fold here, and kick them out at
3247 1.81 oster a later point.
3248 1.60 oster
3249 1.60 oster */
3250 1.48 oster
3251 1.48 oster clabel1 = cset->ac->clabel;
3252 1.48 oster clabel2 = ac->clabel;
3253 1.48 oster if ((clabel1->version == clabel2->version) &&
3254 1.48 oster (clabel1->serial_number == clabel2->serial_number) &&
3255 1.48 oster (clabel1->num_rows == clabel2->num_rows) &&
3256 1.48 oster (clabel1->num_columns == clabel2->num_columns) &&
3257 1.48 oster (clabel1->sectPerSU == clabel2->sectPerSU) &&
3258 1.48 oster (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
3259 1.48 oster (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
3260 1.48 oster (clabel1->parityConfig == clabel2->parityConfig) &&
3261 1.48 oster (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
3262 1.48 oster (clabel1->blockSize == clabel2->blockSize) &&
3263 1.48 oster (clabel1->numBlocks == clabel2->numBlocks) &&
3264 1.48 oster (clabel1->autoconfigure == clabel2->autoconfigure) &&
3265 1.48 oster (clabel1->root_partition == clabel2->root_partition) &&
3266 1.48 oster (clabel1->last_unit == clabel2->last_unit) &&
3267 1.48 oster (clabel1->config_order == clabel2->config_order)) {
3268 1.48 oster /* if it gets here, it almost *has* to be a match */
3269 1.48 oster } else {
3270 1.186 perry /* it's not consistent with somebody in the set..
3271 1.48 oster punt */
3272 1.48 oster return(0);
3273 1.48 oster }
3274 1.48 oster /* all was fine.. it must fit... */
3275 1.48 oster return(1);
3276 1.48 oster }
3277 1.48 oster
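/*
 * rf_have_enough_components() -- determine the highest mod_counter in
 * the set, count how many columns lack an up-to-date component, and
 * decide whether the set is still configurable (RAID 0 tolerates no
 * missing components, RAID 4/5 tolerate one, and RAID 1 tolerates one
 * failure per mirrored pair).
 */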
3278 1.48 oster int
3279 1.169 oster rf_have_enough_components(RF_ConfigSet_t *cset)
3280 1.48 oster {
3281 1.51 oster RF_AutoConfig_t *ac;
3282 1.51 oster RF_AutoConfig_t *auto_config;
3283 1.51 oster RF_ComponentLabel_t *clabel;
3284 1.166 oster int c;
3285 1.51 oster int num_cols;
3286 1.51 oster int num_missing;
3287 1.86 oster int mod_counter;
3288 1.87 oster int mod_counter_found;
3289 1.88 oster int even_pair_failed;
3290 1.88 oster char parity_type;
3291 1.186 perry
3292 1.51 oster
3293 1.48 oster /* check to see that we have enough 'live' components
3294 1.48 oster of this set. If so, we can configure it if necessary */
3295 1.48 oster
3296 1.51 oster num_cols = cset->ac->clabel->num_columns;
3297 1.88 oster parity_type = cset->ac->clabel->parityConfig;
3298 1.51 oster
3299 1.51 oster /* XXX Check for duplicate components!?!?!? */
3300 1.51 oster
3301 1.86 oster /* Determine what the mod_counter is supposed to be for this set. */
3302 1.86 oster
3303 1.87 oster mod_counter_found = 0;
3304 1.101 oster mod_counter = 0;
3305 1.86 oster ac = cset->ac;
3306 1.86 oster while(ac!=NULL) {
3307 1.87 oster if (mod_counter_found==0) {
3308 1.86 oster mod_counter = ac->clabel->mod_counter;
3309 1.87 oster mod_counter_found = 1;
3310 1.87 oster } else {
3311 1.87 oster if (ac->clabel->mod_counter > mod_counter) {
3312 1.87 oster mod_counter = ac->clabel->mod_counter;
3313 1.87 oster }
3314 1.86 oster }
3315 1.86 oster ac = ac->next;
3316 1.86 oster }
3317 1.86 oster
3318 1.51 oster num_missing = 0;
3319 1.51 oster auto_config = cset->ac;
3320 1.51 oster
3321 1.166 oster even_pair_failed = 0;
3322 1.166 oster for(c=0; c<num_cols; c++) {
3323 1.166 oster ac = auto_config;
3324 1.166 oster while(ac!=NULL) {
3325 1.186 perry if ((ac->clabel->column == c) &&
3326 1.166 oster (ac->clabel->mod_counter == mod_counter)) {
3327 1.166 oster /* it's this one... */
3328 1.224 oster #ifdef DEBUG
3329 1.166 oster printf("Found: %s at %d\n",
3330 1.166 oster ac->devname,c);
3331 1.51 oster #endif
3332 1.166 oster break;
3333 1.51 oster }
3334 1.166 oster ac=ac->next;
3335 1.166 oster }
3336 1.166 oster if (ac==NULL) {
3337 1.51 oster /* Didn't find one here! */
3338 1.88 oster /* special case for RAID 1, especially
3339 1.88 oster where there are more than 2
3340 1.88 oster components (where RAIDframe treats
3341 1.88 oster things a little differently :( ) */
3342 1.166 oster if (parity_type == '1') {
3343 1.166 oster if (c%2 == 0) { /* even component */
3344 1.166 oster even_pair_failed = 1;
3345 1.166 oster } else { /* odd component. If
3346 1.166 oster we're failed, and
3347 1.166 oster so is the even
3348 1.166 oster component, it's
3349 1.166 oster "Good Night, Charlie" */
3350 1.166 oster if (even_pair_failed == 1) {
3351 1.166 oster return(0);
3352 1.88 oster }
3353 1.88 oster }
3354 1.166 oster } else {
3355 1.166 oster /* normal accounting */
3356 1.166 oster num_missing++;
3357 1.88 oster }
3358 1.166 oster }
3359 1.166 oster if ((parity_type == '1') && (c%2 == 1)) {
3360 1.88 oster /* Just did an even component, and we didn't
3361 1.186 perry bail.. reset the even_pair_failed flag,
3362 1.88 oster and go on to the next component.... */
3363 1.166 oster even_pair_failed = 0;
3364 1.51 oster }
3365 1.51 oster }
3366 1.51 oster
3367 1.51 oster clabel = cset->ac->clabel;
3368 1.51 oster
3369 1.51 oster if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
3370 1.51 oster ((clabel->parityConfig == '4') && (num_missing > 1)) ||
3371 1.51 oster ((clabel->parityConfig == '5') && (num_missing > 1))) {
3372 1.51 oster /* XXX this needs to be made *much* more general */
3373 1.51 oster /* Too many failures */
3374 1.51 oster return(0);
3375 1.51 oster }
3376 1.51 oster /* otherwise, all is well, and we've got enough to take a kick
3377 1.51 oster at autoconfiguring this set */
3378 1.51 oster return(1);
3379 1.48 oster }
3380 1.48 oster
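/*
 * rf_create_configuration() -- build an RF_Config_t for an
 * autoconfigured set from its component labels, filling in the
 * geometry, parity configuration, and per-column device names needed
 * by rf_Configure().
 */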
3381 1.48 oster void
3382 1.169 oster rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
3383 1.222 christos RF_Raid_t *raidPtr)
3384 1.48 oster {
3385 1.48 oster RF_ComponentLabel_t *clabel;
3386 1.77 oster int i;
3387 1.48 oster
3388 1.48 oster clabel = ac->clabel;
3389 1.48 oster
3390 1.48 oster /* 1. Fill in the common stuff */
3391 1.166 oster config->numRow = clabel->num_rows = 1;
3392 1.48 oster config->numCol = clabel->num_columns;
3393 1.48 oster config->numSpare = 0; /* XXX should this be set here? */
3394 1.48 oster config->sectPerSU = clabel->sectPerSU;
3395 1.48 oster config->SUsPerPU = clabel->SUsPerPU;
3396 1.48 oster config->SUsPerRU = clabel->SUsPerRU;
3397 1.48 oster config->parityConfig = clabel->parityConfig;
3398 1.48 oster /* XXX... */
3399 1.48 oster strcpy(config->diskQueueType,"fifo");
3400 1.48 oster config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3401 1.48 oster config->layoutSpecificSize = 0; /* XXX ?? */
3402 1.48 oster
3403 1.48 oster while(ac!=NULL) {
3404 1.48 oster /* row/col values will be in range due to the checks
3405 1.48 oster in rf_reasonable_label() */
3406 1.166 oster strcpy(config->devnames[0][ac->clabel->column],
3407 1.48 oster ac->devname);
3408 1.48 oster ac = ac->next;
3409 1.48 oster }
3410 1.48 oster
3411 1.77 oster for(i=0;i<RF_MAXDBGV;i++) {
3412 1.163 fvdl config->debugVars[i][0] = 0;
3413 1.77 oster }
3414 1.48 oster }
3415 1.48 oster
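/*
 * rf_set_autoconfig() -- set the autoconfigure flag in the component
 * label of every optimal component and every in-use spare, and return
 * the new value.
 */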
3416 1.48 oster int
3417 1.169 oster rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
3418 1.48 oster {
3419 1.245.4.6 yamt RF_ComponentLabel_t *clabel;
3420 1.166 oster int column;
3421 1.148 oster int sparecol;
3422 1.48 oster
3423 1.54 oster raidPtr->autoconfigure = new_value;
3424 1.166 oster
3425 1.166 oster for(column=0; column<raidPtr->numCol; column++) {
3426 1.166 oster if (raidPtr->Disks[column].status == rf_ds_optimal) {
3427 1.245.4.6 yamt clabel = raidget_component_label(raidPtr, column);
3428 1.245.4.6 yamt clabel->autoconfigure = new_value;
3429 1.245.4.6 yamt raidflush_component_label(raidPtr, column);
3430 1.48 oster }
3431 1.48 oster }
3432 1.148 oster for(column = 0; column < raidPtr->numSpare ; column++) {
3433 1.148 oster sparecol = raidPtr->numCol + column;
3434 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3435 1.245.4.6 yamt clabel = raidget_component_label(raidPtr, sparecol);
3436 1.245.4.6 yamt clabel->autoconfigure = new_value;
3437 1.245.4.6 yamt raidflush_component_label(raidPtr, sparecol);
3438 1.148 oster }
3439 1.148 oster }
3440 1.48 oster return(new_value);
3441 1.48 oster }
3442 1.48 oster
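/*
 * rf_set_rootpartition() -- as above, but for the root_partition flag
 * in each component label.
 */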
3443 1.48 oster int
3444 1.169 oster rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
3445 1.48 oster {
3446 1.245.4.6 yamt RF_ComponentLabel_t *clabel;
3447 1.166 oster int column;
3448 1.148 oster int sparecol;
3449 1.48 oster
3450 1.54 oster raidPtr->root_partition = new_value;
3451 1.166 oster for(column=0; column<raidPtr->numCol; column++) {
3452 1.166 oster if (raidPtr->Disks[column].status == rf_ds_optimal) {
3453 1.245.4.6 yamt clabel = raidget_component_label(raidPtr, column);
3454 1.245.4.6 yamt clabel->root_partition = new_value;
3455 1.245.4.6 yamt raidflush_component_label(raidPtr, column);
3456 1.148 oster }
3457 1.148 oster }
3458 1.148 oster for(column = 0; column < raidPtr->numSpare ; column++) {
3459 1.148 oster sparecol = raidPtr->numCol + column;
3460 1.166 oster if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3461 1.245.4.6 yamt clabel = raidget_component_label(raidPtr, sparecol);
3462 1.245.4.6 yamt clabel->root_partition = new_value;
3463 1.245.4.6 yamt raidflush_component_label(raidPtr, sparecol);
3464 1.48 oster }
3465 1.48 oster }
3466 1.48 oster return(new_value);
3467 1.48 oster }
3468 1.48 oster
3469 1.48 oster void
3470 1.169 oster rf_release_all_vps(RF_ConfigSet_t *cset)
3471 1.48 oster {
3472 1.48 oster RF_AutoConfig_t *ac;
3473 1.186 perry
3474 1.48 oster ac = cset->ac;
3475 1.48 oster while(ac!=NULL) {
3476 1.48 oster /* Close the vp, and give it back */
3477 1.48 oster if (ac->vp) {
3478 1.96 oster vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3479 1.238 pooka VOP_CLOSE(ac->vp, FREAD, NOCRED);
3480 1.48 oster vput(ac->vp);
3481 1.86 oster ac->vp = NULL;
3482 1.48 oster }
3483 1.48 oster ac = ac->next;
3484 1.48 oster }
3485 1.48 oster }
3486 1.48 oster
3487 1.48 oster
3488 1.48 oster void
3489 1.169 oster rf_cleanup_config_set(RF_ConfigSet_t *cset)
3490 1.48 oster {
3491 1.48 oster RF_AutoConfig_t *ac;
3492 1.48 oster RF_AutoConfig_t *next_ac;
3493 1.186 perry
3494 1.48 oster ac = cset->ac;
3495 1.48 oster while(ac!=NULL) {
3496 1.48 oster next_ac = ac->next;
3497 1.48 oster /* nuke the label */
3498 1.48 oster free(ac->clabel, M_RAIDFRAME);
3499 1.48 oster /* cleanup the config structure */
3500 1.48 oster free(ac, M_RAIDFRAME);
3501 1.48 oster /* "next.." */
3502 1.48 oster ac = next_ac;
3503 1.48 oster }
3504 1.48 oster /* and, finally, nuke the config set */
3505 1.48 oster free(cset, M_RAIDFRAME);
3506 1.48 oster }
3507 1.48 oster
3508 1.48 oster
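/*
 * raid_init_component_label() -- fill in the generic (per-set) fields
 * of a component label from the current state of the RAID set,
 * including the parity-map fields when parity maps are enabled; the
 * caller fills in per-component fields such as the column.
 */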
3509 1.48 oster void
3510 1.169 oster raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
3511 1.48 oster {
3512 1.48 oster /* current version number */
3513 1.186 perry clabel->version = RF_COMPONENT_LABEL_VERSION;
3514 1.57 oster clabel->serial_number = raidPtr->serial_number;
3515 1.48 oster clabel->mod_counter = raidPtr->mod_counter;
3516 1.245.4.6 yamt
3517 1.166 oster clabel->num_rows = 1;
3518 1.48 oster clabel->num_columns = raidPtr->numCol;
3519 1.48 oster clabel->clean = RF_RAID_DIRTY; /* not clean */
3520 1.48 oster clabel->status = rf_ds_optimal; /* "It's good!" */
3521 1.186 perry
3522 1.48 oster clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
3523 1.48 oster clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
3524 1.48 oster clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
3525 1.54 oster
3526 1.54 oster clabel->blockSize = raidPtr->bytesPerSector;
3527 1.54 oster clabel->numBlocks = raidPtr->sectorsPerDisk;
3528 1.54 oster
3529 1.48 oster /* XXX not portable */
3530 1.48 oster clabel->parityConfig = raidPtr->Layout.map->parityConfig;
3531 1.54 oster clabel->maxOutstanding = raidPtr->maxOutstanding;
3532 1.54 oster clabel->autoconfigure = raidPtr->autoconfigure;
3533 1.54 oster clabel->root_partition = raidPtr->root_partition;
3534 1.48 oster clabel->last_unit = raidPtr->raidid;
3535 1.54 oster clabel->config_order = raidPtr->config_order;
3536 1.245.4.6 yamt
3537 1.245.4.6 yamt #ifndef RF_NO_PARITY_MAP
3538 1.245.4.6 yamt rf_paritymap_init_label(raidPtr->parity_map, clabel);
3539 1.245.4.6 yamt #endif
3540 1.51 oster }
3541 1.51 oster
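/*
 * rf_auto_config_set() -- configure one autodetected RAID set: pick a
 * free raid unit (preferring the unit it was last configured as,
 * otherwise searching from the highest unit down), build the
 * configuration, and call rf_Configure().  On success the unit number
 * is returned via *unit, and the set is noted as a root candidate if
 * its labels request it.
 */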
3542 1.51 oster int
3543 1.169 oster rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
3544 1.51 oster {
3545 1.51 oster RF_Raid_t *raidPtr;
3546 1.51 oster RF_Config_t *config;
3547 1.51 oster int raidID;
3548 1.51 oster int retcode;
3549 1.51 oster
3550 1.224 oster #ifdef DEBUG
3551 1.72 oster printf("RAID autoconfigure\n");
3552 1.127 oster #endif
3553 1.51 oster
3554 1.51 oster retcode = 0;
3555 1.51 oster *unit = -1;
3556 1.51 oster
3557 1.51 oster /* 1. Create a config structure */
3558 1.51 oster
3559 1.51 oster config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
3560 1.51 oster M_RAIDFRAME,
3561 1.51 oster M_NOWAIT);
3562 1.51 oster if (config==NULL) {
3563 1.51 oster printf("Out of mem!?!?\n");
3564 1.51 oster /* XXX do something more intelligent here. */
3565 1.51 oster return(1);
3566 1.51 oster }
3567 1.77 oster
3568 1.77 oster memset(config, 0, sizeof(RF_Config_t));
3569 1.51 oster
3570 1.186 perry /*
3571 1.186 perry 2. Figure out what RAID ID this one is supposed to live at.
3572 1.51 oster See if we can get the same RAID dev that it was configured
3573 1.186 perry on last time.
3574 1.51 oster */
3575 1.51 oster
3576 1.51 oster raidID = cset->ac->clabel->last_unit;
3577 1.52 oster if ((raidID < 0) || (raidID >= numraid)) {
3578 1.51 oster /* let's not wander off into lala land. */
3579 1.51 oster raidID = numraid - 1;
3580 1.51 oster }
3581 1.51 oster if (raidPtrs[raidID]->valid != 0) {
3582 1.51 oster
3583 1.186 perry /*
3584 1.186 perry Nope... Go looking for an alternative...
3585 1.51 oster Start high so we don't immediately use raid0 if that's
3586 1.186 perry not taken.
3587 1.51 oster */
3588 1.51 oster
3589 1.115 oster for(raidID = numraid - 1; raidID >= 0; raidID--) {
3590 1.51 oster if (raidPtrs[raidID]->valid == 0) {
3591 1.51 oster /* can use this one! */
3592 1.51 oster break;
3593 1.51 oster }
3594 1.51 oster }
3595 1.51 oster }
3596 1.51 oster
3597 1.51 oster if (raidID < 0) {
3598 1.51 oster /* punt... */
3599 1.51 oster printf("Unable to auto configure this set!\n");
3600 1.51 oster printf("(Out of RAID devs!)\n");
3601 1.204 simonb free(config, M_RAIDFRAME);
3602 1.51 oster return(1);
3603 1.51 oster }
3604 1.127 oster
3605 1.224 oster #ifdef DEBUG
3606 1.72 oster printf("Configuring raid%d:\n",raidID);
3607 1.127 oster #endif
3608 1.127 oster
3609 1.51 oster raidPtr = raidPtrs[raidID];
3610 1.51 oster
3611 1.51 oster /* XXX all this stuff should be done SOMEWHERE ELSE! */
3612 1.51 oster raidPtr->raidid = raidID;
3613 1.51 oster raidPtr->openings = RAIDOUTSTANDING;
3614 1.51 oster
3615 1.51 oster /* 3. Build the configuration structure */
3616 1.51 oster rf_create_configuration(cset->ac, config, raidPtr);
3617 1.51 oster
3618 1.51 oster /* 4. Do the configuration */
3619 1.51 oster retcode = rf_Configure(raidPtr, config, cset->ac);
3620 1.186 perry
3621 1.51 oster if (retcode == 0) {
3622 1.61 oster
3623 1.219 oster raidinit(raidPtrs[raidID]);
3624 1.59 oster
3625 1.59 oster rf_markalldirty(raidPtrs[raidID]);
3626 1.54 oster raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
3627 1.51 oster if (cset->ac->clabel->root_partition==1) {
3628 1.51 oster /* everything configured just fine. Make a note
3629 1.51 oster that this set is eligible to be root. */
3630 1.51 oster cset->rootable = 1;
3631 1.54 oster /* XXX do this here? */
3632 1.186 perry raidPtrs[raidID]->root_partition = 1;
3633 1.51 oster }
3634 1.51 oster }
3635 1.51 oster
3636 1.51 oster /* 5. Cleanup */
3637 1.51 oster free(config, M_RAIDFRAME);
3638 1.186 perry
3639 1.51 oster *unit = raidID;
3640 1.51 oster return(retcode);
3641 1.99 oster }
3642 1.99 oster
3643 1.99 oster void
3644 1.169 oster rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
3645 1.99 oster {
3646 1.99 oster struct buf *bp;
3647 1.99 oster
3648 1.99 oster bp = (struct buf *)desc->bp;
3649 1.186 perry disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
3650 1.145 mrg (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
3651 1.13 oster }
3652 1.177 oster
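/*
 * rf_pool_init() -- initialize a pool at IPL_BIO, pre-prime it with
 * xmin items, and set the low and high watermarks to xmin and xmax
 * respectively.
 */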
3653 1.177 oster void
3654 1.187 christos rf_pool_init(struct pool *p, size_t size, const char *w_chan,
3655 1.187 christos size_t xmin, size_t xmax)
3656 1.177 oster {
3657 1.227 ad pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
3658 1.187 christos pool_sethiwat(p, xmax);
3659 1.187 christos pool_prime(p, xmin);
3660 1.187 christos pool_setlowat(p, xmin);
3661 1.177 oster }
3662 1.190 oster
3663 1.190 oster /*
3664 1.190 oster * rf_buf_queue_check(int raidid) -- looks into the buf_queue to see
3665 1.190 oster * if there is IO pending and if that IO could possibly be done for a
3666 1.190 oster * given RAID set. Returns 0 if IO is waiting and can be done, 1
3667 1.190 oster * otherwise.
3668 1.190 oster *
3669 1.190 oster */
3670 1.190 oster
3671 1.190 oster int
3672 1.190 oster rf_buf_queue_check(int raidid)
3673 1.190 oster {
3674 1.245.4.2 yamt if ((bufq_peek(raid_softc[raidid].buf_queue) != NULL) &&
3675 1.190 oster raidPtrs[raidid]->openings > 0) {
3676 1.190 oster /* there is work to do */
3677 1.190 oster return 0;
3678 1.190 oster }
3679 1.190 oster /* default is nothing to do */
3680 1.190 oster return 1;
3681 1.190 oster }
3682 1.213 christos
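/*
 * rf_getdisksize() -- determine the size (and sector size) of a
 * component, first via DIOCGPART for regular partitions and then via
 * DIOCGWEDGEINFO for wedges, reserving rf_protectedSectors at the
 * front of the component.  Returns the last ioctl error if both fail.
 */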
3683 1.213 christos int
3684 1.213 christos rf_getdisksize(struct vnode *vp, struct lwp *l, RF_RaidDisk_t *diskPtr)
3685 1.213 christos {
3686 1.213 christos struct partinfo dpart;
3687 1.213 christos struct dkwedge_info dkw;
3688 1.213 christos int error;
3689 1.213 christos
3690 1.238 pooka error = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred);
3691 1.213 christos if (error == 0) {
3692 1.213 christos diskPtr->blockSize = dpart.disklab->d_secsize;
3693 1.213 christos diskPtr->numBlocks = dpart.part->p_size - rf_protectedSectors;
3694 1.213 christos diskPtr->partitionSize = dpart.part->p_size;
3695 1.213 christos return 0;
3696 1.213 christos }
3697 1.213 christos
3698 1.238 pooka error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD, l->l_cred);
3699 1.213 christos if (error == 0) {
3700 1.213 christos diskPtr->blockSize = 512; /* XXX */
3701 1.213 christos diskPtr->numBlocks = dkw.dkw_size - rf_protectedSectors;
3702 1.213 christos diskPtr->partitionSize = dkw.dkw_size;
3703 1.213 christos return 0;
3704 1.213 christos }
3705 1.213 christos return error;
3706 1.213 christos }
3707 1.217 oster
3708 1.217 oster static int
3709 1.245.4.2 yamt raid_match(device_t self, cfdata_t cfdata, void *aux)
3710 1.217 oster {
3711 1.217 oster return 1;
3712 1.217 oster }
3713 1.217 oster
3714 1.217 oster static void
3715 1.245.4.2 yamt raid_attach(device_t parent, device_t self, void *aux)
3716 1.217 oster {
3717 1.217 oster
3718 1.217 oster }
3719 1.217 oster
3720 1.217 oster
3721 1.217 oster static int
3722 1.245.4.2 yamt raid_detach(device_t self, int flags)
3723 1.217 oster {
3724 1.245.4.5 yamt int error;
3725 1.245.4.5 yamt struct raid_softc *rs = &raid_softc[device_unit(self)];
3726 1.217 oster
3727 1.245.4.5 yamt if ((error = raidlock(rs)) != 0)
3728 1.245.4.5 yamt return (error);
3729 1.217 oster
3730 1.245.4.5 yamt error = raid_detach_unlocked(rs);
3731 1.245.4.5 yamt
3732 1.245.4.5 yamt raidunlock(rs);
3733 1.245.4.5 yamt
3734 1.245.4.5 yamt return error;
3735 1.217 oster }
3736 1.217 oster
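/*
 * rf_set_properties() -- attach a synthetic "disk-info" geometry
 * dictionary to the raid device, derived from the set's total size,
 * sector size, and stripe geometry.
 */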
3737 1.234 oster static void
3738 1.234 oster rf_set_properties(struct raid_softc *rs, RF_Raid_t *raidPtr)
3739 1.234 oster {
3740 1.234 oster prop_dictionary_t disk_info, odisk_info, geom;
3741 1.234 oster disk_info = prop_dictionary_create();
3742 1.234 oster geom = prop_dictionary_create();
3743 1.234 oster prop_dictionary_set_uint64(geom, "sectors-per-unit",
3744 1.234 oster raidPtr->totalSectors);
3745 1.234 oster prop_dictionary_set_uint32(geom, "sector-size",
3746 1.234 oster raidPtr->bytesPerSector);
3747 1.234 oster
3748 1.234 oster prop_dictionary_set_uint16(geom, "sectors-per-track",
3749 1.234 oster raidPtr->Layout.dataSectorsPerStripe);
3750 1.234 oster prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
3751 1.234 oster 4 * raidPtr->numCol);
3752 1.234 oster
3753 1.234 oster prop_dictionary_set_uint64(geom, "cylinders-per-unit",
3754 1.234 oster raidPtr->totalSectors / (raidPtr->Layout.dataSectorsPerStripe *
3755 1.234 oster (4 * raidPtr->numCol)));
3756 1.234 oster
3757 1.234 oster prop_dictionary_set(disk_info, "geometry", geom);
3758 1.234 oster prop_object_release(geom);
3759 1.234 oster prop_dictionary_set(device_properties(rs->sc_dev),
3760 1.234 oster "disk-info", disk_info);
3761 1.234 oster odisk_info = rs->sc_dkdev.dk_info;
3762 1.234 oster rs->sc_dkdev.dk_info = disk_info;
3763 1.234 oster if (odisk_info)
3764 1.234 oster prop_object_release(odisk_info);
3765 1.234 oster }
3766 1.245.4.2 yamt
3767 1.245.4.2 yamt /*
3768 1.245.4.2 yamt * Implement forwarding of the DIOCCACHESYNC ioctl to each of the components.
3769 1.245.4.2 yamt * We end up returning whatever error was returned by the first cache flush
3770 1.245.4.2 yamt * that fails.
3771 1.245.4.2 yamt */
3772 1.245.4.2 yamt
3773 1.245.4.6 yamt int
3774 1.245.4.2 yamt rf_sync_component_caches(RF_Raid_t *raidPtr)
3775 1.245.4.2 yamt {
3776 1.245.4.2 yamt int c, sparecol;
3777 1.245.4.2 yamt int e,error;
3778 1.245.4.2 yamt int force = 1;
3779 1.245.4.2 yamt
3780 1.245.4.2 yamt error = 0;
3781 1.245.4.2 yamt for (c = 0; c < raidPtr->numCol; c++) {
3782 1.245.4.2 yamt if (raidPtr->Disks[c].status == rf_ds_optimal) {
3783 1.245.4.2 yamt e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp, DIOCCACHESYNC,
3784 1.245.4.2 yamt &force, FWRITE, NOCRED);
3785 1.245.4.2 yamt if (e) {
3786 1.245.4.2 yamt if (e != ENODEV)
3787 1.245.4.2 yamt printf("raid%d: cache flush to component %s failed.\n",
3788 1.245.4.2 yamt raidPtr->raidid, raidPtr->Disks[c].devname);
3789 1.245.4.2 yamt if (error == 0) {
3790 1.245.4.2 yamt error = e;
3791 1.245.4.2 yamt }
3792 1.245.4.2 yamt }
3793 1.245.4.2 yamt }
3794 1.245.4.2 yamt }
3795 1.245.4.2 yamt
3796 1.245.4.2 yamt for( c = 0; c < raidPtr->numSpare ; c++) {
3797 1.245.4.2 yamt sparecol = raidPtr->numCol + c;
3798 1.245.4.2 yamt /* Need to ensure that the reconstruct actually completed! */
3799 1.245.4.2 yamt if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3800 1.245.4.2 yamt e = VOP_IOCTL(raidPtr->raid_cinfo[sparecol].ci_vp,
3801 1.245.4.2 yamt DIOCCACHESYNC, &force, FWRITE, NOCRED);
3802 1.245.4.2 yamt if (e) {
3803 1.245.4.2 yamt if (e != ENODEV)
3804 1.245.4.2 yamt printf("raid%d: cache flush to component %s failed.\n",
3805 1.245.4.2 yamt raidPtr->raidid, raidPtr->Disks[sparecol].devname);
3806 1.245.4.2 yamt if (error == 0) {
3807 1.245.4.2 yamt error = e;
3808 1.245.4.2 yamt }
3809 1.245.4.2 yamt }
3810 1.245.4.2 yamt }
3811 1.245.4.2 yamt }
3812 1.245.4.2 yamt return error;
3813 1.245.4.2 yamt }
3814