/*	$NetBSD: rf_netbsdkintf.c,v 1.8 1999/01/26 02:33:59 oster Exp $	*/
/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Greg Oster; Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: cd.c 1.6 90/11/28$
 *
 *      @(#)cd.c        8.2 (Berkeley) 11/16/93
 */

/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Mark Holland, Jim Zelenka
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/***********************************************************
 *
 * rf_kintf.c -- the kernel interface routines for RAIDframe
 *
 ***********************************************************/

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/disk.h>
#include <sys/device.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/param.h>
#include <sys/types.h>
#include <machine/types.h>
#include <sys/disklabel.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/buf.h>
#include <sys/user.h>

#include "raid.h"
#include "rf_raid.h"
#include "rf_raidframe.h"
#include "rf_dag.h"
#include "rf_dagflags.h"
#include "rf_diskqueue.h"
#include "rf_acctrace.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_debugMem.h"
#include "rf_kintf.h"
#include "rf_options.h"
#include "rf_driver.h"
#include "rf_parityscan.h"
#include "rf_debugprint.h"
#include "rf_threadstuff.h"

int rf_kdebug_level = 0;

#define RFK_BOOT_NONE 0
#define RFK_BOOT_GOOD 1
#define RFK_BOOT_BAD  2
static int rf_kbooted = RFK_BOOT_NONE;

#ifdef DEBUG
#define db0_printf(a) printf a
#define db_printf(a)  if (rf_kdebug_level > 0) printf a
#define db1_printf(a) if (rf_kdebug_level > 0) printf a
#define db2_printf(a) if (rf_kdebug_level > 1) printf a
#define db3_printf(a) if (rf_kdebug_level > 2) printf a
#define db4_printf(a) if (rf_kdebug_level > 3) printf a
#define db5_printf(a) if (rf_kdebug_level > 4) printf a
#else				/* DEBUG */
#define db0_printf(a) printf a
#define db1_printf(a) { }
#define db2_printf(a) { }
#define db3_printf(a) { }
#define db4_printf(a) { }
#define db5_printf(a) { }
#endif				/* DEBUG */

static RF_Raid_t **raidPtrs;	/* global raid device descriptors */

static int rf_pending_testaccs;

RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
RF_DECLARE_STATIC_MUTEX(rf_async_done_q_mutex)
static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a spare table */
static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from installation process */
static struct rf_test_acc *rf_async_done_qh, *rf_async_done_qt;

static struct rf_recon_req *recon_queue = NULL;	/* used to communicate
						 * reconstruction requests */

decl_simple_lock_data(, recon_queue_mutex)

#define LOCK_RECON_Q_MUTEX()   simple_lock(&recon_queue_mutex)
#define UNLOCK_RECON_Q_MUTEX() simple_unlock(&recon_queue_mutex)

/* prototypes */
static void KernelWakeupFunc(struct buf *bp);
static void InitBP(struct buf *bp, struct vnode *, unsigned rw_flag, dev_t dev,
	RF_SectorNum_t startSect, RF_SectorCount_t numSect, caddr_t buf,
	void (*cbFunc)(struct buf *), void *cbArg, int logBytesPerSector,
	struct proc *b_proc);

#define Dprintf0(s)       if (rf_queueDebug) rf_debug_printf(s,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
#define Dprintf1(s,a)     if (rf_queueDebug) rf_debug_printf(s,a,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
#define Dprintf2(s,a,b)   if (rf_queueDebug) rf_debug_printf(s,a,b,NULL,NULL,NULL,NULL,NULL,NULL)
#define Dprintf3(s,a,b,c) if (rf_queueDebug) rf_debug_printf(s,a,b,c,NULL,NULL,NULL,NULL,NULL)

/* this is so that we can compile under 2.0 as well as 3.2 */
#ifndef proc_to_task
#define proc_to_task(x) ((x)->task)
#endif				/* !proc_to_task */

void raidattach __P((int));
int  raidsize __P((dev_t));

void rf_DiskIOComplete(RF_DiskQueue_t *, RF_DiskQueueData_t *, int);
void rf_CopybackReconstructedData(RF_Raid_t *raidPtr);
static int raidinit __P((dev_t, RF_Raid_t *, int));

int  raidopen __P((dev_t, int, int, struct proc *));
int  raidclose __P((dev_t, int, int, struct proc *));
int  raidioctl __P((dev_t, u_long, caddr_t, int, struct proc *));
int  raidwrite __P((dev_t, struct uio *, int));
int  raidread __P((dev_t, struct uio *, int));
void raidstrategy __P((struct buf *));
int  raiddump __P((dev_t, daddr_t, caddr_t, size_t));

/*
 * Pilfered from ccd.c
 */

struct raidbuf {
	struct buf rf_buf;	/* new I/O buf.  MUST BE FIRST!!! */
	struct buf *rf_obp;	/* ptr. to original I/O buf */
	int rf_flags;		/* misc. flags */
	RF_DiskQueueData_t *req;	/* the request that this was part of.. */
};

#define RAIDGETBUF(rs) pool_get(&(rs)->sc_cbufpool, PR_NOWAIT)
#define RAIDPUTBUF(rs, cbp) pool_put(&(rs)->sc_cbufpool, cbp)

/* XXX Not sure if the following should be replacing the raidPtrs above,
   or if it should be used in conjunction with that... */

struct raid_softc {
	int sc_unit;		/* logical unit number */
	int sc_flags;		/* flags */
	int sc_cflags;		/* configuration flags */
	size_t sc_size;		/* size of the raid device */
	dev_t sc_dev;		/* our device.. */
	char sc_xname[20];	/* XXX external name */
	struct disk sc_dkdev;	/* generic disk device info */
	struct pool sc_cbufpool;	/* component buffer pool */
};

/* sc_flags */
#define RAIDF_INITED	0x01	/* unit has been initialized */
#define RAIDF_WLABEL	0x02	/* label area is writable */
#define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
#define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
#define RAIDF_LOCKED	0x80	/* unit is locked */

#define raidunit(x) DISKUNIT(x)
static int numraid = 0;

#define RAIDLABELDEV(dev)	\
	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))

/* declared here, and made public, for the benefit of KVM stuff.. */
struct raid_softc *raid_softc;

static void raidgetdefaultlabel __P((RF_Raid_t *, struct raid_softc *,
	struct disklabel *));
static void raidgetdisklabel __P((dev_t));
static void raidmakedisklabel __P((struct raid_softc *));

static int raidlock __P((struct raid_softc *));
static void raidunlock __P((struct raid_softc *));
int raidlookup __P((char *, struct proc *p, struct vnode **));

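/*
 * raidattach() -- attach routine for the raid pseudo-device.  Allocates
 * the global raidPtrs and raid_softc arrays for `num' units and boots
 * the RAIDframe core.
 */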
void
raidattach(num)
	int num;
{
	int raidID;

#ifdef DEBUG
	printf("raidattach: Asked for %d units\n", num);
#endif

	if (num <= 0) {
#ifdef DIAGNOSTIC
		panic("raidattach: count <= 0");
#endif
		return;
	}
	/*
	   This is where all the initialization stuff gets done.
	 */

	/* Make some space for requested number of units... */

	RF_Calloc(raidPtrs, num, sizeof(RF_Raid_t *), (RF_Raid_t **));
	if (raidPtrs == NULL) {
		panic("raidPtrs is NULL!!\n");
	}

	rf_kbooted = rf_boot();
	if (rf_kbooted) {
		panic("Serious error booting RAID!!\n");
	}

	rf_kbooted = RFK_BOOT_GOOD;

	/*
	   put together some datastructures like the CCD device does..
	   This lets us lock the device and what-not when it gets opened.
	 */

	raid_softc = (struct raid_softc *)
	    malloc(num * sizeof(struct raid_softc),
	    M_RAIDFRAME, M_NOWAIT);
	if (raid_softc == NULL) {
		printf("WARNING: no memory for RAIDframe driver\n");
		return;
	}
	numraid = num;
	bzero(raid_softc, num * sizeof(struct raid_softc));

	for (raidID = 0; raidID < num; raidID++) {
		RF_Calloc(raidPtrs[raidID], 1, sizeof(RF_Raid_t),
		    (RF_Raid_t *));
		if (raidPtrs[raidID] == NULL) {
			printf("raidPtrs[%d] is NULL\n", raidID);
		}
	}
}

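/*
 * raidsize() -- return the size (in DEV_BSIZE blocks) of the indicated
 * partition, for swapping/dumping.  Only FS_SWAP partitions are reported;
 * anything else yields -1.
 */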
int
raidsize(dev)
	dev_t dev;
{
	struct raid_softc *rs;
	struct disklabel *lp;
	int part, unit, omask, size;

	unit = raidunit(dev);
	if (unit >= numraid)
		return (-1);
	rs = &raid_softc[unit];

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return (-1);

	part = DISKPART(dev);
	omask = rs->sc_dkdev.dk_openmask & (1 << part);
	lp = rs->sc_dkdev.dk_label;

	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
		return (-1);

	if (lp->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = lp->d_partitions[part].p_size *
		    (lp->d_secsize / DEV_BSIZE);

	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
		return (-1);

	return (size);
}

int
raiddump(dev, blkno, va, size)
	dev_t dev;
	daddr_t blkno;
	caddr_t va;
	size_t size;
{
	/* Not implemented. */
	return ENXIO;
}

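/*
 * raidopen() -- open entry point.  Validates the unit and partition,
 * (re)reads the disklabel on the first open, and records the partition
 * in the open masks so the unit can't be unconfigured while in use.
 */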
/* ARGSUSED */
int
raidopen(dev, flags, fmt, p)
	dev_t dev;
	int flags, fmt;
	struct proc *p;
{
	int unit = raidunit(dev);
	struct raid_softc *rs;
	struct disklabel *lp;
	int part, pmask;
	unsigned int raidID;
	int rc;
	int error = 0;

	/* This whole next chunk of code is somewhat suspect... Not sure
	   it's needed here at all... XXX */

	if (rf_kbooted == RFK_BOOT_NONE) {
		printf("Doing restart on raidopen.\n");
		rf_kbooted = RFK_BOOT_GOOD;
		rc = rf_boot();
		if (rc) {
			rf_kbooted = RFK_BOOT_BAD;
			printf("Someone is unhappy...\n");
			return (rc);
		}
	}
	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((error = raidlock(rs)) != 0)
		return (error);
	lp = rs->sc_dkdev.dk_label;

	raidID = raidunit(dev);

	part = DISKPART(dev);
	pmask = (1 << part);

	db1_printf(("Opening raid device number: %d partition: %d\n",
	    raidID, part));

	if ((rs->sc_flags & RAIDF_INITED) &&
	    (rs->sc_dkdev.dk_openmask == 0))
		raidgetdisklabel(dev);

	/* make sure that this partition exists */

	if (part != RAW_PART) {
		db1_printf(("Not a raw partition..\n"));
		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
		    ((part >= lp->d_npartitions) ||
		     (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
			error = ENXIO;
			raidunlock(rs);
			db1_printf(("Bailing out...\n"));
			return (error);
		}
	}

	/* Prevent this unit from being unconfigured while open. */
	switch (fmt) {
	case S_IFCHR:
		rs->sc_dkdev.dk_copenmask |= pmask;
		break;

	case S_IFBLK:
		rs->sc_dkdev.dk_bopenmask |= pmask;
		break;
	}
	rs->sc_dkdev.dk_openmask =
	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;

	raidunlock(rs);

	return (error);
}

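/*
 * raidclose() -- close entry point.  Clears the partition from the open
 * masks; once nothing is open the unit may be unconfigured (e.g. via the
 * RAIDFRAME_SHUTDOWN ioctl).
 */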
/* ARGSUSED */
int
raidclose(dev, flags, fmt, p)
	dev_t dev;
	int flags, fmt;
	struct proc *p;
{
	int unit = raidunit(dev);
	struct raid_softc *rs;
	int error = 0;
	int part;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((error = raidlock(rs)) != 0)
		return (error);

	part = DISKPART(dev);

	/* ...that much closer to allowing unconfiguration... */
	switch (fmt) {
	case S_IFCHR:
		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
		break;

	case S_IFBLK:
		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
		break;
	}
	rs->sc_dkdev.dk_openmask =
	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;

	raidunlock(rs);
	return (0);
}

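/*
 * raidstrategy() -- strategy entry point.  Validates the unit and the
 * buffer, bounds-checks the transfer against the disklabel, and then
 * hands the buf to RAIDframe via rf_DoAccessKernel().
 */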
void
raidstrategy(bp)
	register struct buf *bp;
{
	register int s;

	unsigned int raidID = raidunit(bp->b_dev);
	RF_Raid_t *raidPtr;
	struct raid_softc *rs = &raid_softc[raidID];
	struct disklabel *lp;
	int wlabel;

#if 0
	db1_printf(("Strategy: 0x%x 0x%x\n", bp, bp->b_data));
	db1_printf(("Strategy(2): bp->b_bufsize%d\n", (int) bp->b_bufsize));
	db1_printf(("bp->b_count=%d\n", (int) bp->b_bcount));
	db1_printf(("bp->b_resid=%d\n", (int) bp->b_resid));
	db1_printf(("bp->b_blkno=%d\n", (int) bp->b_blkno));

	if (bp->b_flags & B_READ)
		db1_printf(("READ\n"));
	else
		db1_printf(("WRITE\n"));
#endif
	if (rf_kbooted != RFK_BOOT_GOOD)
		return;
	if (raidID >= numraid || !raidPtrs[raidID]) {
		bp->b_error = ENODEV;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}
	raidPtr = raidPtrs[raidID];
	if (!raidPtr->valid) {
		bp->b_error = ENODEV;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}
	if (bp->b_bcount == 0) {
		db1_printf(("b_bcount is zero..\n"));
		biodone(bp);
		return;
	}
	lp = rs->sc_dkdev.dk_label;

	/*
	 * Do bounds checking and adjust transfer.  If there's an
	 * error, the bounds check will flag that for us.
	 */

	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
	if (DISKPART(bp->b_dev) != RAW_PART)
		if (bounds_check_with_label(bp, lp, wlabel) <= 0) {
			db1_printf(("Bounds check failed!!:%d %d\n",
			    (int) bp->b_blkno, (int) wlabel));
			biodone(bp);
			return;
		}

	s = splbio();		/* XXX Needed? */
	db1_printf(("Beginning strategy...\n"));

	bp->b_resid = 0;
	bp->b_error = rf_DoAccessKernel(raidPtrs[raidID], bp,
	    NULL, NULL, NULL);
	if (bp->b_error) {
		bp->b_flags |= B_ERROR;
		db1_printf(("bp->b_flags HAS B_ERROR SET!!!: %d\n",
		    bp->b_error));
	}
	splx(s);
#if 0
	db1_printf(("Strategy exiting: 0x%x 0x%x %d %d\n",
	    bp, bp->b_data,
	    (int) bp->b_bcount, (int) bp->b_resid));
#endif
}

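/*
 * raidread() -- character-device read entry point; simply funnels the
 * request through physio() and raidstrategy().
 */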
/* ARGSUSED */
int
raidread(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	int unit = raidunit(dev);
	struct raid_softc *rs;
	int result;
	int part;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return (ENXIO);
	part = DISKPART(dev);

	db1_printf(("raidread: unit: %d partition: %d\n", unit, part));

#if 0
	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
#endif
	result = physio(raidstrategy, NULL, dev, B_READ, minphys, uio);
	db1_printf(("raidread done.  Result is %d %d\n",
	    result, uio->uio_resid));
	return (result);
}

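/*
 * raidwrite() -- character-device write entry point; the mirror image of
 * raidread(), again via physio() and raidstrategy().
 */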
/* ARGSUSED */
int
raidwrite(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	int unit = raidunit(dev);
	struct raid_softc *rs;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return (ENXIO);
	db1_printf(("raidwrite\n"));
	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
}

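/*
 * raidioctl() -- handle both the RAIDframe-specific ioctls (configure,
 * shutdown, rewrite parity, fail disk, reconstruction status, etc.) and
 * the standard disklabel ioctls for the raid device.
 */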
int
raidioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int unit = raidunit(dev);
	int error = 0;
	int part, pmask;
	struct raid_softc *rs;
#if 0
	int r, c;
#endif
	/* struct raid_ioctl *ccio = (struct ccd_ioctl *)data; */

	/* struct ccdbuf *cbp; */
	/* struct raidbuf *raidbp; */
	RF_Config_t *k_cfg, *u_cfg;
	u_char *specific_buf;
	int retcode = 0;

	int row;
	struct rf_recon_req *rrcopy, *rr;
#if 0
	int nbytes, spl, rw, row;
	struct rf_test_acc *ta;
	struct buf *bp;
	RF_SparetWait_t *waitreq;
	struct rf_test_acc *ta_p, *ta_copy;
#endif

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
	    (int) DISKPART(dev), (int) unit, (int) cmd));

	/* Must be open for writes for these commands... */
	switch (cmd) {
	case DIOCSDINFO:
	case DIOCWDINFO:
	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
	}

	/* Must be initialized for these... */
	switch (cmd) {
	case DIOCGDINFO:
	case DIOCSDINFO:
	case DIOCWDINFO:
	case DIOCGPART:
	case DIOCWLABEL:
	case DIOCGDEFLABEL:
	case RAIDFRAME_SHUTDOWN:
	case RAIDFRAME_REWRITEPARITY:
	case RAIDFRAME_GET_INFO:
	case RAIDFRAME_RESET_ACCTOTALS:
	case RAIDFRAME_GET_ACCTOTALS:
	case RAIDFRAME_KEEP_ACCTOTALS:
	case RAIDFRAME_GET_SIZE:
	case RAIDFRAME_FAIL_DISK:
	case RAIDFRAME_COPYBACK:
	case RAIDFRAME_CHECKRECON:
		if ((rs->sc_flags & RAIDF_INITED) == 0)
			return (ENXIO);
	}

	switch (cmd) {

		/* configure the system */
	case RAIDFRAME_CONFIGURE:

		db3_printf(("rf_ioctl: RAIDFRAME_CONFIGURE\n"));
		/* copy-in the configuration information */
		/* data points to a pointer to the configuration structure */
		u_cfg = *((RF_Config_t **) data);
		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
		if (k_cfg == NULL) {
			db3_printf(("rf_ioctl: ENOMEM for config.  Code is %d\n", retcode));
			return (ENOMEM);
		}
		retcode = copyin((caddr_t) u_cfg, (caddr_t) k_cfg,
		    sizeof(RF_Config_t));
		if (retcode) {
			db3_printf(("rf_ioctl: retcode=%d copyin.1\n",
			    retcode));
			return (retcode);
		}

		/* allocate a buffer for the layout-specific data,
		   and copy it in */
		if (k_cfg->layoutSpecificSize) {
			if (k_cfg->layoutSpecificSize > 10000) {
				/* sanity check */
				db3_printf(("rf_ioctl: EINVAL %d\n", retcode));
				return (EINVAL);
			}
			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
			    (u_char *));
			if (specific_buf == NULL) {
				RF_Free(k_cfg, sizeof(RF_Config_t));
				db3_printf(("rf_ioctl: ENOMEM %d\n", retcode));
				return (ENOMEM);
			}
			retcode = copyin(k_cfg->layoutSpecific,
			    (caddr_t) specific_buf,
			    k_cfg->layoutSpecificSize);
			if (retcode) {
				db3_printf(("rf_ioctl: retcode=%d copyin.2\n",
				    retcode));
				return (retcode);
			}
		} else
			specific_buf = NULL;
		k_cfg->layoutSpecific = specific_buf;

		/* should do some kind of sanity check on the configuration.
		   Store the sum of all the bytes in the last byte?
		 */

#if 0
		db1_printf(("Considering configuring the system.:%d 0x%x\n",
		    unit, p));
#endif

		/* We need the pointer to this a little deeper, so
		   stash it here... */

		raidPtrs[unit]->proc = p;

		/* configure the system */
		rf_pending_testaccs = 0;

		raidPtrs[unit]->raidid = unit;
		retcode = rf_Configure(raidPtrs[unit], k_cfg);

		if (retcode == 0) {
			retcode = raidinit(dev, raidPtrs[unit], unit);
		}

		/* free the buffers.  No return code here. */
		if (k_cfg->layoutSpecificSize) {
			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
		}
		RF_Free(k_cfg, sizeof(RF_Config_t));

		db3_printf(("rf_ioctl: retcode=%d RAIDFRAME_CONFIGURE\n",
		    retcode));
		return (retcode);

		/* shutdown the system */
	case RAIDFRAME_SHUTDOWN:

		if ((error = raidlock(rs)) != 0)
			return (error);

		/*
		 * If somebody has a partition mounted, we shouldn't
		 * shutdown.
		 */

		part = DISKPART(dev);
		pmask = (1 << part);
		if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
		    ((rs->sc_dkdev.dk_bopenmask & pmask) &&
		     (rs->sc_dkdev.dk_copenmask & pmask))) {
			raidunlock(rs);
			return (EBUSY);
		}

		/* the intention here was to disallow shutdowns while
		   raidframe is mounted, but it doesn't work because the
		   shutdown ioctl calls rf_open
		 */
		if (rf_pending_testaccs > 0) {
			printf("RAIDFRAME: Can't shutdown because there are %d pending test accs\n",
			    rf_pending_testaccs);
			return (EINVAL);
		}
		if (rf_debugKernelAccess) {
			printf("call shutdown\n");
		}
		raidPtrs[unit]->proc = p;	/* XXX necessary evil */
		retcode = rf_Shutdown(raidPtrs[unit]);

		db1_printf(("Done main shutdown\n"));

		pool_destroy(&rs->sc_cbufpool);
		db1_printf(("Done freeing component buffer freelist\n"));

		/* It's no longer initialized... */
		rs->sc_flags &= ~RAIDF_INITED;

		/* Detach the disk. */
		disk_detach(&rs->sc_dkdev);

		raidunlock(rs);

		return (retcode);

		/* initialize all parity */
	case RAIDFRAME_REWRITEPARITY:

		if (raidPtrs[unit]->Layout.map->faultsTolerated == 0)
			return (EINVAL);
		/* borrow the thread of the requesting process */
		raidPtrs[unit]->proc = p;	/* Blah... :-p  GO */
		retcode = rf_RewriteParity(raidPtrs[unit]);
		/* return I/O Error if the parity rewrite fails */

		if (retcode)
			retcode = EIO;
		return (retcode);

		/* issue a test-unit-ready through raidframe to the
		   indicated device */
#if 0				/* XXX not supported yet (ever?) */
	case RAIDFRAME_TUR:
		/* debug only */
		retcode = rf_SCSI_DoTUR(0, 0, 0, 0, *(dev_t *) data);
		return (retcode);
#endif
	case RAIDFRAME_GET_INFO:
		{
			RF_Raid_t *raid = raidPtrs[unit];
			RF_DeviceConfig_t *cfg, **ucfgp;
			int i, j, d;

			if (!raid->valid)
				return (ENODEV);
			ucfgp = (RF_DeviceConfig_t **) data;
			RF_Malloc(cfg, sizeof(RF_DeviceConfig_t),
			    (RF_DeviceConfig_t *));
			if (cfg == NULL)
				return (ENOMEM);
			bzero((char *) cfg, sizeof(RF_DeviceConfig_t));
			cfg->rows = raid->numRow;
			cfg->cols = raid->numCol;
			cfg->ndevs = raid->numRow * raid->numCol;
			if (cfg->ndevs >= RF_MAX_DISKS) {
				cfg->ndevs = 0;
				return (ENOMEM);
			}
			cfg->nspares = raid->numSpare;
			if (cfg->nspares >= RF_MAX_DISKS) {
				cfg->nspares = 0;
				return (ENOMEM);
			}
			cfg->maxqdepth = raid->maxQueueDepth;
			d = 0;
			for (i = 0; i < cfg->rows; i++) {
				for (j = 0; j < cfg->cols; j++) {
					cfg->devs[d] = raid->Disks[i][j];
					d++;
				}
			}
			for (j = cfg->cols, i = 0; i < cfg->nspares; i++, j++) {
				cfg->spares[i] = raid->Disks[0][j];
			}
			retcode = copyout((caddr_t) cfg, (caddr_t) *ucfgp,
			    sizeof(RF_DeviceConfig_t));
			RF_Free(cfg, sizeof(RF_DeviceConfig_t));

			return (retcode);
		}
		break;

	case RAIDFRAME_RESET_ACCTOTALS:
		{
			RF_Raid_t *raid = raidPtrs[unit];

			bzero(&raid->acc_totals, sizeof(raid->acc_totals));
			return (0);
		}
		break;

	case RAIDFRAME_GET_ACCTOTALS:
		{
			RF_AccTotals_t *totals = (RF_AccTotals_t *) data;
			RF_Raid_t *raid = raidPtrs[unit];

			*totals = raid->acc_totals;
			return (0);
		}
		break;

	case RAIDFRAME_KEEP_ACCTOTALS:
		{
			RF_Raid_t *raid = raidPtrs[unit];
			int *keep = (int *) data;

			raid->keep_acc_totals = *keep;
			return (0);
		}
		break;

	case RAIDFRAME_GET_SIZE:
		*(int *) data = raidPtrs[unit]->totalSectors;
		return (0);

#define RAIDFRAME_RECON 1
		/* XXX The above should probably be set somewhere else!! GO */
#if RAIDFRAME_RECON > 0

		/* fail a disk & optionally start reconstruction */
	case RAIDFRAME_FAIL_DISK:
		rr = (struct rf_recon_req *) data;

		if (rr->row < 0 || rr->row >= raidPtrs[unit]->numRow
		    || rr->col < 0 || rr->col >= raidPtrs[unit]->numCol)
			return (EINVAL);

		printf("Failing the disk: row: %d col: %d\n", rr->row, rr->col);

		/* make a copy of the recon request so that we don't
		   rely on the user's buffer */
		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
		bcopy(rr, rrcopy, sizeof(*rr));
		rrcopy->raidPtr = (void *) raidPtrs[unit];

		LOCK_RECON_Q_MUTEX();
		rrcopy->next = recon_queue;
		recon_queue = rrcopy;
		wakeup(&recon_queue);
		UNLOCK_RECON_Q_MUTEX();

		return (0);

		/* invoke a copyback operation after recon on whatever
		   disk needs it, if any */
	case RAIDFRAME_COPYBACK:
		/* borrow the current thread to get this done */
		raidPtrs[unit]->proc = p;	/* ICK.. but needed :-p  GO */
		rf_CopybackReconstructedData(raidPtrs[unit]);
		return (0);

		/* return the percentage completion of reconstruction */
	case RAIDFRAME_CHECKRECON:
		row = *(int *) data;
		if (row < 0 || row >= raidPtrs[unit]->numRow)
			return (EINVAL);
		if (raidPtrs[unit]->status[row] != rf_rs_reconstructing)
			*(int *) data = 100;
		else
			*(int *) data = raidPtrs[unit]->reconControl[row]->percentComplete;
		return (0);

		/* the sparetable daemon calls this to wait for the
		   kernel to need a spare table.
		 * this ioctl does not return until a spare table is needed.
		 * XXX -- calling mpsleep here in the ioctl code is almost
		   certainly wrong and evil. -- XXX
		 * XXX -- I should either compute the spare table in the
		   kernel, or have a different -- XXX
		 * XXX -- interface (a different character device) for
		   delivering the table -- XXX
		 */
#if 0
	case RAIDFRAME_SPARET_WAIT:
		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
		while (!rf_sparet_wait_queue)
			mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH,
			    "sparet wait", 0,
			    (void *) simple_lock_addr(rf_sparet_wait_mutex),
			    MS_LOCK_SIMPLE);
		waitreq = rf_sparet_wait_queue;
		rf_sparet_wait_queue = rf_sparet_wait_queue->next;
		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);

		*((RF_SparetWait_t *) data) = *waitreq;	/* structure assignment */

		RF_Free(waitreq, sizeof(*waitreq));
		return (0);

		/* wakes up a process waiting on SPARET_WAIT and puts an
		   error code in it that will cause the daemon to exit */
	case RAIDFRAME_ABORT_SPARET_WAIT:
		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
		waitreq->fcol = -1;
		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
		waitreq->next = rf_sparet_wait_queue;
		rf_sparet_wait_queue = waitreq;
		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
		wakeup(&rf_sparet_wait_queue);
		return (0);

		/* used by the spare table daemon to deliver a spare table
		   into the kernel */
	case RAIDFRAME_SEND_SPARET:

		/* install the spare table */
		retcode = rf_SetSpareTable(raidPtrs[unit], *(void **) data);

		/* respond to the requestor.  the return status of the
		   spare table installation is passed in the "fcol" field */
		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
		waitreq->fcol = retcode;
		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
		waitreq->next = rf_sparet_resp_queue;
		rf_sparet_resp_queue = waitreq;
		wakeup(&rf_sparet_resp_queue);
		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);

		return (retcode);
#endif

#endif				/* RAIDFRAME_RECON > 0 */

	default:
		break;		/* fall through to the os-specific code below */

	}

	if (!raidPtrs[unit]->valid)
		return (EINVAL);

	/*
	 * Add support for "regular" device ioctls here.
	 */

	switch (cmd) {
	case DIOCGDINFO:
		db1_printf(("DIOCGDINFO %d %d\n", (int) dev,
		    (int) DISKPART(dev)));
		*(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
		break;

	case DIOCGPART:
		db1_printf(("DIOCGPART: %d %d\n", (int) dev,
		    (int) DISKPART(dev)));
		((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
		((struct partinfo *) data)->part =
		    &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
		break;

	case DIOCWDINFO:
		db1_printf(("DIOCWDINFO\n"));
	case DIOCSDINFO:
		db1_printf(("DIOCSDINFO\n"));
		if ((error = raidlock(rs)) != 0)
			return (error);

		rs->sc_flags |= RAIDF_LABELLING;

		error = setdisklabel(rs->sc_dkdev.dk_label,
		    (struct disklabel *) data, 0, rs->sc_dkdev.dk_cpulabel);
		if (error == 0) {
			if (cmd == DIOCWDINFO)
				error = writedisklabel(RAIDLABELDEV(dev),
				    raidstrategy, rs->sc_dkdev.dk_label,
				    rs->sc_dkdev.dk_cpulabel);
		}

		rs->sc_flags &= ~RAIDF_LABELLING;

		raidunlock(rs);

		if (error)
			return (error);
		break;

	case DIOCWLABEL:
		db1_printf(("DIOCWLABEL\n"));
		if (*(int *) data != 0)
			rs->sc_flags |= RAIDF_WLABEL;
		else
			rs->sc_flags &= ~RAIDF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		db1_printf(("DIOCGDEFLABEL\n"));
		raidgetdefaultlabel(raidPtrs[unit], rs,
		    (struct disklabel *) data);
		break;

	default:
		retcode = ENOTTY;	/* XXXX ?? OR EINVAL ? */
	}
	return (retcode);
}


/* raidinit -- complete the rest of the initialization for the
   RAIDframe device. */


static int
raidinit(dev, raidPtr, unit)
	dev_t dev;
	RF_Raid_t *raidPtr;
	int unit;
{
	int retcode;
	/* int ix; */
	/* struct raidbuf *raidbp; */
	struct raid_softc *rs;

	retcode = 0;

	rs = &raid_softc[unit];
	pool_init(&rs->sc_cbufpool, sizeof(struct raidbuf), 0,
	    0, 0, "raidpl", 0, NULL, NULL, M_RAIDFRAME);


	/* XXX should check return code first... */
	rs->sc_flags |= RAIDF_INITED;

	sprintf(rs->sc_xname, "raid%d", unit);	/* XXX doesn't check bounds. */

	rs->sc_dkdev.dk_name = rs->sc_xname;
	/* disk_attach actually creates space for the CPU disklabel, among
	   other things, so it's critical to call this *BEFORE* we
	   try putzing with disklabels. */
	disk_attach(&rs->sc_dkdev);

	/* XXX There may be a weird interaction here between this, and
	   protectedSectors, as used in RAIDframe.  */
	rs->sc_size = raidPtr->totalSectors;
	rs->sc_dev = dev;
	return (retcode);
}


/*********************************************************
 *
 * initialization code called at boot time (startup.c)
 *
 ********************************************************/
int rf_boot()
{
	int i, rc;

	rc = rf_mutex_init(&rf_sparet_wait_mutex);
	if (rc) {
		RF_PANIC();
	}
	rc = rf_mutex_init(&rf_async_done_q_mutex);
	if (rc) {
		RF_PANIC();
	}
	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
	recon_queue = NULL;
	rf_async_done_qh = rf_async_done_qt = NULL;
	for (i = 0; i < numraid; i++)
		raidPtrs[i] = NULL;
	rc = rf_BootRaidframe();
	if (rc == 0)
		printf("Kernelized RAIDframe activated\n");
	else
		rf_kbooted = RFK_BOOT_BAD;
	return (rc);
}

/*
 * This kernel thread never exits.  It is created once, and persists
 * until the system reboots.
 */
void rf_ReconKernelThread()
{
	struct rf_recon_req *req;
	int s;

	/* XXX not sure what spl() level we should be at here...
	   probably splbio() */
	s = splbio();

	while (1) {
		/* grab the next reconstruction request from the queue */
		LOCK_RECON_Q_MUTEX();
		while (!recon_queue) {
			UNLOCK_RECON_Q_MUTEX();
			tsleep(&recon_queue, PRIBIO | PCATCH,
			    "raidframe recon", 0);
			LOCK_RECON_Q_MUTEX();
		}
		req = recon_queue;
		recon_queue = recon_queue->next;
		UNLOCK_RECON_Q_MUTEX();

		/*
		 * If flags specifies that we should start recon, this call
		 * will not return until reconstruction completes, fails,
		 * or is aborted.
		 */
		rf_FailDisk((RF_Raid_t *) req->raidPtr, req->row, req->col,
		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));

		RF_Free(req, sizeof(*req));
	}
}
/* wake up the daemon & tell it to get us a spare table
 * XXX
 * the entries in the queues should be tagged with the raidPtr so that in
 * the extremely rare case that two recons happen at once, we know which
 * device we're requesting a spare table for
 * XXX
 */
int rf_GetSpareTableFromDaemon(req)
	RF_SparetWait_t *req;
{
	int retcode;

	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
	req->next = rf_sparet_wait_queue;
	rf_sparet_wait_queue = req;
	wakeup(&rf_sparet_wait_queue);

	/* mpsleep unlocks the mutex */
	while (!rf_sparet_resp_queue) {
		tsleep(&rf_sparet_resp_queue, PRIBIO | PCATCH,
		    "raidframe getsparetable", 0);
#if 0
		mpsleep(&rf_sparet_resp_queue, PZERO, "sparet resp", 0,
		    (void *) simple_lock_addr(rf_sparet_wait_mutex),
		    MS_LOCK_SIMPLE);
#endif
	}
	req = rf_sparet_resp_queue;
	rf_sparet_resp_queue = req->next;
	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);

	retcode = req->fcol;
	RF_Free(req, sizeof(*req));	/* this is not the same req as we
					 * alloc'd */
	return (retcode);
}
1272 1.1 oster
1273 1.1 oster /* a wrapper around rf_DoAccess that extracts appropriate info from the bp & passes it down.
1274 1.1 oster * any calls originating in the kernel must use non-blocking I/O
1275 1.1 oster * do some extra sanity checking to return "appropriate" error values for
1276 1.1 oster * certain conditions (to make some standard utilities work)
1277 1.1 oster */
1278 1.1 oster int rf_DoAccessKernel(raidPtr, bp, flags, cbFunc, cbArg)
1279 1.1 oster RF_Raid_t *raidPtr;
1280 1.1 oster struct buf *bp;
1281 1.1 oster RF_RaidAccessFlags_t flags;
1282 1.1 oster void (*cbFunc)(struct buf *);
1283 1.1 oster void *cbArg;
1284 1.1 oster {
1285 1.1 oster RF_SectorCount_t num_blocks, pb, sum;
1286 1.1 oster RF_RaidAddr_t raid_addr;
1287 1.1 oster int retcode;
1288 1.1 oster struct partition *pp;
1289 1.1 oster daddr_t blocknum;
1290 1.1 oster int unit;
1291 1.1 oster struct raid_softc *rs;
1292 1.7 explorer int do_async;
1293 1.1 oster
1294 1.1 oster /* XXX The dev_t used here should be for /dev/[r]raid* !!! */
1295 1.1 oster
1296 1.1 oster unit = raidPtr->raidid;
1297 1.1 oster rs = &raid_softc[unit];
1298 1.1 oster
1299 1.1 oster /* Ok, for the bp we have here, bp->b_blkno is relative to the
1300 1.1 oster partition.. Need to make it absolute to the underlying
1301 1.1 oster device.. */
1302 1.1 oster
1303 1.1 oster blocknum = bp->b_blkno;
1304 1.1 oster if (DISKPART(bp->b_dev) != RAW_PART) {
1305 1.1 oster pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
1306 1.1 oster blocknum += pp->p_offset;
1307 1.1 oster db1_printf(("updated: %d %d\n",DISKPART(bp->b_dev),
1308 1.1 oster pp->p_offset));
1309 1.1 oster } else {
1310 1.1 oster db1_printf(("Is raw..\n"));
1311 1.1 oster }
1312 1.1 oster db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno, (int) blocknum));
1313 1.1 oster
1314 1.1 oster db1_printf(("bp->b_bcount = %d\n",(int)bp->b_bcount));
1315 1.1 oster db1_printf(("bp->b_resid = %d\n",(int)bp->b_resid));
1316 1.1 oster
1317 1.1 oster /* *THIS* is where we adjust what block we're going to... but
1318 1.1 oster DO NOT TOUCH bp->b_blkno!!! */
1319 1.1 oster raid_addr = blocknum;
1320 1.1 oster
1321 1.1 oster num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
1322 1.1 oster pb = (bp->b_bcount&raidPtr->sectorMask) ? 1 : 0;
1323 1.1 oster sum = raid_addr + num_blocks + pb;
1324 1.1 oster if (1 || rf_debugKernelAccess) {
1325 1.1 oster db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
1326 1.1 oster (int)raid_addr, (int)sum,(int)num_blocks,
1327 1.1 oster (int)pb,(int)bp->b_resid));
1328 1.1 oster }
1329 1.1 oster
1330 1.1 oster
1331 1.1 oster if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
1332 1.1 oster || (sum < num_blocks) || (sum < pb))
1333 1.1 oster {
1334 1.1 oster bp->b_error = ENOSPC;
1335 1.1 oster bp->b_flags |= B_ERROR;
1336 1.1 oster bp->b_resid = bp->b_bcount;
1337 1.1 oster biodone(bp);
1338 1.1 oster return(bp->b_error);
1339 1.1 oster }
1340 1.1 oster
1341 1.1 oster /*
1342 1.1 oster * XXX rf_DoAccess() should do this, not just DoAccessKernel()
1343 1.1 oster */
1344 1.1 oster
1345 1.1 oster if (bp->b_bcount & raidPtr->sectorMask) {
1346 1.1 oster bp->b_error = EINVAL;
1347 1.1 oster bp->b_flags |= B_ERROR;
1348 1.1 oster bp->b_resid = bp->b_bcount;
1349 1.1 oster biodone(bp);
1350 1.1 oster return(bp->b_error);
1351 1.1 oster }
1352 1.1 oster db1_printf(("Calling DoAccess..\n"));
1353 1.1 oster
1354 1.7 explorer /*
1355 1.7 explorer * XXX For now, all writes are sync
1356 1.7 explorer */
1357 1.7 explorer do_async = 1;
1358 1.7 explorer if ((bp->b_flags & B_READ) == 0)
1359 1.7 explorer do_async = 0;
1360 1.7 explorer
1361 1.1 oster /* don't ever condition on bp->b_flags & B_WRITE.
1362 1.1 oster always condition on B_READ instead */
1363 1.1 oster retcode = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
1364 1.1 oster RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
1365 1.7 explorer do_async, raid_addr, num_blocks,
1366 1.7 explorer bp->b_un.b_addr,
1367 1.1 oster bp, NULL, NULL, RF_DAG_NONBLOCKING_IO|flags,
1368 1.1 oster NULL, cbFunc, cbArg);
1369 1.5 oster #if 0
1370 1.5 oster db1_printf(("After call to DoAccess: 0x%x 0x%x %d\n",bp,
1371 1.5 oster bp->b_data,(int)bp->b_resid));
1372 1.5 oster #endif
1373 1.7 explorer
1374 1.7 explorer /*
1375 1.7 explorer * If we requested sync I/O, sleep here.
1376 1.7 explorer */
1377 1.7 explorer if ((retcode == 0) && (do_async == 0))
1378 1.7 explorer tsleep(bp, PRIBIO, "raidsyncio", 0);
1379 1.7 explorer
1380 1.1 oster return(retcode);
1381 1.1 oster }
1382 1.1 oster
1383 1.1 oster /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
1384 1.1 oster
1385 1.1 oster int rf_DispatchKernelIO(queue, req)
1386 1.1 oster RF_DiskQueue_t *queue;
1387 1.1 oster RF_DiskQueueData_t *req;
1388 1.1 oster {
1389 1.1 oster int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
1390 1.1 oster struct buf *bp;
1391 1.1 oster struct raidbuf *raidbp=NULL;
1392 1.1 oster struct raid_softc *rs;
1393 1.1 oster int unit;
1394 1.1 oster
1395 1.1 oster /* XXX along with the vnode, we also need the softc associated with
1396 1.1 oster this device.. */
1397 1.1 oster
1398 1.1 oster req->queue = queue;
1399 1.1 oster
1400 1.1 oster unit = queue->raidPtr->raidid;
1401 1.1 oster
1402 1.1 oster db1_printf(("DispatchKernelIO unit: %d\n",unit));
1403 1.1 oster
1404 1.1 oster if (unit >= numraid) {
1405 1.1 oster printf("Invalid unit number: %d %d\n",unit,numraid);
1406 1.1 oster panic("Invalid Unit number in rf_DispatchKernelIO\n");
1407 1.1 oster }
1408 1.1 oster
1409 1.1 oster rs = &raid_softc[unit];
1410 1.1 oster
1411 1.1 oster /* XXX is this the right place? */
1412 1.1 oster disk_busy(&rs->sc_dkdev);
1413 1.1 oster
1414 1.1 oster bp = req->bp;
1415 1.1 oster
1416 1.4 oster /*
1417 1.4 oster XXX when there is a physical disk failure, someone is passing
1418 1.4 oster us a buffer that contains old stuff!! Attempt to deal with
1419 1.4 oster this problem without taking a performance hit...
1420 1.4 oster (not sure where the real bug is. It's buried in RAIDframe
1421 1.4 oster somewhere) :-( GO )
1422 1.4 oster */
1423 1.4 oster
1424 1.4 oster if (bp->b_flags & B_ERROR) {
1425 1.4 oster bp->b_flags &= ~B_ERROR;
1426 1.4 oster }
1427 1.4 oster if (bp->b_error!=0) {
1428 1.4 oster bp->b_error = 0;
1429 1.4 oster }
1430 1.4 oster
1431 1.1 oster raidbp = RAIDGETBUF(rs);
1432 1.1 oster
1433 1.1 oster raidbp->rf_flags = 0; /* XXX not really used anywhere... */
1434 1.1 oster
1435 1.1 oster /*
1436 1.1 oster * context for raidiodone
1437 1.1 oster */
1438 1.1 oster raidbp->rf_obp = bp;
1439 1.1 oster raidbp->req = req;
1440 1.1 oster
1441 1.1 oster switch (req->type) {
1442 1.1 oster case RF_IO_TYPE_NOP: /* used primarily to unlock a locked queue */
1443 1.1 oster /*
1444 1.1 oster Dprintf2("rf_DispatchKernelIO: NOP to r %d c %d\n",
1445 1.1 oster queue->row, queue->col);
1446 1.1 oster */
1447 1.1 oster /* XXX need to do something extra here.. */
1448 1.1 oster /* I'm leaving this in, as I've never actually seen it
1449 1.1 oster used, and I'd like folks to report it... GO */
1450 1.1  oster 	  printf("WAKEUP CALLED\n");
1451 1.1 oster queue->numOutstanding++;
1452 1.1 oster
1453 1.1 oster /* XXX need to glue the original buffer into this?? */
1454 1.1 oster
1455 1.1 oster KernelWakeupFunc(&raidbp->rf_buf);
1456 1.1 oster break;
1457 1.1 oster
1458 1.1 oster case RF_IO_TYPE_READ:
1459 1.1 oster case RF_IO_TYPE_WRITE:
1460 1.1 oster
1461 1.1 oster if (req->tracerec) {
1462 1.1 oster RF_ETIMER_START(req->tracerec->timer);
1463 1.1 oster }
1464 1.1 oster
1465 1.1 oster
1466 1.1 oster InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
1467 1.1 oster op | bp->b_flags, queue->rf_cinfo->ci_dev,
1468 1.1 oster req->sectorOffset, req->numSector,
1469 1.1 oster req->buf, KernelWakeupFunc, (void *) req,
1470 1.1 oster queue->raidPtr->logBytesPerSector, req->b_proc);
1471 1.1 oster
1472 1.1 oster if (rf_debugKernelAccess) {
1473 1.1 oster db1_printf(("dispatch: bp->b_blkno = %ld\n",
1474 1.1 oster (long) bp->b_blkno));
1475 1.1 oster }
1476 1.1 oster queue->numOutstanding++;
1477 1.1 oster queue->last_deq_sector = req->sectorOffset;
1478 1.1  oster 	  /* the access wouldn't have been let in if there were any
1479 1.1  oster 	     pending requests at any other priority */
1480 1.1 oster queue->curPriority = req->priority;
1481 1.1 oster /*
1482 1.1 oster Dprintf3("rf_DispatchKernelIO: %c to row %d col %d\n",
1483 1.1 oster req->type, queue->row, queue->col);
1484 1.1 oster */
1485 1.1 oster
1486 1.1 oster db1_printf(("Going for %c to unit %d row %d col %d\n",
1487 1.1 oster req->type, unit, queue->row, queue->col));
1488 1.1 oster db1_printf(("sector %d count %d (%d bytes) %d\n",
1489 1.1 oster (int) req->sectorOffset, (int) req->numSector,
1490 1.1 oster (int) (req->numSector <<
1491 1.1 oster queue->raidPtr->logBytesPerSector),
1492 1.1 oster (int) queue->raidPtr->logBytesPerSector));
1493 1.1 oster if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
1494 1.1 oster raidbp->rf_buf.b_vp->v_numoutput++;
1495 1.1 oster }
1496 1.1 oster
1497 1.1 oster VOP_STRATEGY(&raidbp->rf_buf);
1498 1.1 oster
1499 1.1 oster break;
1500 1.1 oster
1501 1.1 oster default:
1502 1.1 oster panic("bad req->type in rf_DispatchKernelIO");
1503 1.1 oster }
1504 1.1 oster db1_printf(("Exiting from DispatchKernelIO\n"));
1505 1.1 oster return(0);
1506 1.1 oster }
1507 1.1 oster
1508 1.1  oster /* this is the callback function associated with an I/O invoked from
1509 1.1 oster kernel code.
1510 1.1 oster */
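/*
 * In outline: recover the request and the original buf from the raidbuf
 * wrapper, propagate any error and the residual count back to the
 * original buf, update the trace timers, mark the component as failed if
 * the I/O errored, release the raidbuf, and finally notify both the disk
 * queue (rf_DiskIOComplete()) and the requester (req->CompleteFunc()).
 */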
1511 1.1 oster static void KernelWakeupFunc(vbp)
1512 1.1 oster struct buf *vbp;
1513 1.1 oster {
1514 1.1 oster RF_DiskQueueData_t *req = NULL;
1515 1.1 oster RF_DiskQueue_t *queue;
1516 1.1 oster struct raidbuf *raidbp = (struct raidbuf *)vbp;
1517 1.1 oster struct buf *bp;
1518 1.1 oster struct raid_softc *rs;
1519 1.1 oster int unit;
1520 1.1 oster register int s;
1521 1.1 oster
1522 1.1 oster s=splbio(); /* XXX */
1523 1.1 oster db1_printf(("recovering the request queue:\n"));
1524 1.1 oster req = raidbp->req;
1525 1.1 oster
1526 1.1 oster bp = raidbp->rf_obp;
1527 1.5 oster #if 0
1528 1.5 oster db1_printf(("bp=0x%x\n",bp));
1529 1.5 oster #endif
1530 1.1 oster
1531 1.1 oster queue = (RF_DiskQueue_t *) req->queue;
1532 1.1 oster
1533 1.1 oster if (raidbp->rf_buf.b_flags & B_ERROR) {
1534 1.1 oster #if 0
1535 1.1 oster printf("Setting bp->b_flags!!! %d\n",raidbp->rf_buf.b_error);
1536 1.1 oster #endif
1537 1.1 oster bp->b_flags |= B_ERROR;
1538 1.1 oster bp->b_error = raidbp->rf_buf.b_error ?
1539 1.1 oster raidbp->rf_buf.b_error : EIO;
1540 1.1 oster }
1541 1.1 oster
1542 1.5 oster #if 0
1543 1.1 oster db1_printf(("raidbp->rf_buf.b_bcount=%d\n",(int)raidbp->rf_buf.b_bcount));
1544 1.1 oster db1_printf(("raidbp->rf_buf.b_bufsize=%d\n",(int)raidbp->rf_buf.b_bufsize));
1545 1.1 oster db1_printf(("raidbp->rf_buf.b_resid=%d\n",(int)raidbp->rf_buf.b_resid));
1546 1.5 oster db1_printf(("raidbp->rf_buf.b_data=0x%x\n",raidbp->rf_buf.b_data));
1547 1.5 oster #endif
1548 1.1 oster
1549 1.1 oster /* XXX methinks this could be wrong... */
1550 1.1 oster #if 1
1551 1.1 oster bp->b_resid = raidbp->rf_buf.b_resid;
1552 1.1 oster #endif
1553 1.1 oster
1554 1.1 oster if (req->tracerec) {
1555 1.1 oster RF_ETIMER_STOP(req->tracerec->timer);
1556 1.1 oster RF_ETIMER_EVAL(req->tracerec->timer);
1557 1.1 oster RF_LOCK_MUTEX(rf_tracing_mutex);
1558 1.1 oster req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1559 1.1 oster req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1560 1.1 oster req->tracerec->num_phys_ios++;
1561 1.1 oster RF_UNLOCK_MUTEX(rf_tracing_mutex);
1562 1.1 oster }
1563 1.1 oster
1564 1.1  oster 	bp->b_bcount = raidbp->rf_buf.b_bcount;	/* XXXX ?? */
1565 1.1 oster
1566 1.1 oster unit = queue->raidPtr->raidid; /* *Much* simpler :-> */
1567 1.1 oster
1568 1.4 oster
1569 1.4 oster /* XXX Ok, let's get aggressive... If B_ERROR is set, let's go ballistic,
1570 1.4 oster and mark the component as hosed... */
1571 1.4 oster #if 1
1572 1.4 oster if (bp->b_flags&B_ERROR) {
1573 1.4 oster /* Mark the disk as dead */
1574 1.4 oster /* but only mark it once... */
1575 1.4 oster if (queue->raidPtr->Disks[queue->row][queue->col].status ==
1576 1.4 oster rf_ds_optimal) {
1577 1.4 oster printf("raid%d: IO Error. Marking %s as failed.\n",
1578 1.4 oster unit, queue->raidPtr->Disks[queue->row][queue->col].devname );
1579 1.4 oster queue->raidPtr->Disks[queue->row][queue->col].status =
1580 1.4 oster rf_ds_failed;
1581 1.4 oster queue->raidPtr->status[queue->row] = rf_rs_degraded;
1582 1.4 oster queue->raidPtr->numFailures++;
1583 1.4 oster } else { /* Disk is already dead... */
1584 1.4 oster /* printf("Disk already marked as dead!\n"); */
1585 1.4 oster }
1586 1.4 oster
1587 1.4 oster }
1588 1.4 oster #endif
1589 1.4 oster
1590 1.1 oster rs = &raid_softc[unit];
1591 1.1 oster RAIDPUTBUF(rs,raidbp);
1592 1.4 oster
1593 1.1 oster
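	/* The disk_unbusy() below pairs with the disk_busy() done when the
	   request was dispatched in rf_DispatchKernelIO(). */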
1594 1.1 oster if (bp->b_resid==0) {
1595 1.1 oster db1_printf(("Disk is no longer busy for this buffer... %d %ld %ld\n",
1596 1.1 oster unit, bp->b_resid, bp->b_bcount));
1597 1.1 oster /* XXX is this the right place for a disk_unbusy()??!??!?!? */
1598 1.1 oster disk_unbusy(&rs->sc_dkdev, (bp->b_bcount - bp->b_resid));
1599 1.1 oster } else {
1600 1.1 oster db1_printf(("b_resid is still %ld\n",bp->b_resid));
1601 1.1 oster }
1602 1.1 oster
1603 1.1 oster rf_DiskIOComplete(queue, req, (bp->b_flags & B_ERROR) ? 1 : 0);
1604 1.1 oster (req->CompleteFunc)(req->argument, (bp->b_flags & B_ERROR) ? 1 : 0);
1605 1.1 oster /* printf("Exiting KernelWakeupFunc\n"); */
1606 1.1 oster
1607 1.1 oster splx(s); /* XXX */
1608 1.1 oster }
1609 1.1 oster
1610 1.1 oster
1611 1.1 oster
1612 1.1 oster /*
1613 1.1 oster * initialize a buf structure for doing an I/O in the kernel.
1614 1.1 oster */
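/*
 * Note on the flags set below: B_CALL asks biodone() to call
 * bp->b_iodone (cbFunc, which in this file is KernelWakeupFunc) when the
 * component I/O completes.  Whether B_PHYS is also wanted here is still
 * an open question (see the XXX below).
 */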
1615 1.1 oster static void InitBP(
1616 1.1 oster struct buf *bp,
1617 1.1 oster struct vnode *b_vp,
1618 1.1 oster unsigned rw_flag,
1619 1.1 oster dev_t dev,
1620 1.1 oster RF_SectorNum_t startSect,
1621 1.1 oster RF_SectorCount_t numSect,
1622 1.1 oster caddr_t buf,
1623 1.1 oster void (*cbFunc)(struct buf *),
1624 1.1 oster void *cbArg,
1625 1.1 oster int logBytesPerSector,
1626 1.1 oster struct proc *b_proc)
1627 1.1 oster {
1628 1.1 oster /* bp->b_flags = B_PHYS | rw_flag; */
1629 1.1 oster bp->b_flags = B_CALL | rw_flag; /* XXX need B_PHYS here too??? */
1630 1.1 oster bp->b_bcount = numSect << logBytesPerSector;
1631 1.1 oster bp->b_bufsize = bp->b_bcount;
1632 1.1 oster bp->b_error = 0;
1633 1.1 oster bp->b_dev = dev;
1634 1.1 oster db1_printf(("bp->b_dev is %d\n", dev));
1635 1.1 oster bp->b_un.b_addr = buf;
1636 1.5 oster #if 0
1637 1.5 oster db1_printf(("bp->b_data=0x%x\n",bp->b_data));
1638 1.5 oster #endif
1639 1.1 oster
1640 1.1 oster bp->b_blkno = startSect;
1641 1.1 oster bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
1642 1.1 oster db1_printf(("b_bcount is: %d\n",(int)bp->b_bcount));
1643 1.1 oster if (bp->b_bcount == 0) {
1644 1.1 oster panic("bp->b_bcount is zero in InitBP!!\n");
1645 1.1 oster }
1646 1.1 oster bp->b_proc = b_proc;
1647 1.1 oster bp->b_iodone = cbFunc;
1648 1.1 oster bp->b_vp = b_vp;
1649 1.1 oster
1650 1.1 oster }
1651 1.1 oster
1652 1.1 oster /* Extras... */
1653 1.1 oster
1654 1.1 oster unsigned int rpcc()
1655 1.1 oster {
1656 1.1 oster /* XXX no clue what this is supposed to do.. my guess is
1657 1.1 oster that it's supposed to read the CPU cycle counter... */
1658 1.1 oster /* db1_printf("this is supposed to do something useful too!??\n"); */
1659 1.1 oster return(0);
1660 1.1 oster }
1661 1.1 oster
1662 1.1 oster #if 0
1663 1.1 oster int rf_GetSpareTableFromDaemon(req)
1664 1.1 oster RF_SparetWait_t *req;
1665 1.1 oster {
1666 1.1 oster int retcode=1;
1667 1.1 oster printf("This is supposed to do something useful!!\n"); /* XXX */
1668 1.1 oster
1669 1.1 oster return(retcode);
1670 1.1 oster
1671 1.1 oster }
1672 1.1 oster #endif
1673 1.1 oster
1674 1.1 oster static void
1675 1.1 oster raidgetdefaultlabel(raidPtr, rs, lp)
1676 1.1 oster RF_Raid_t *raidPtr;
1677 1.1 oster struct raid_softc *rs;
1678 1.1 oster struct disklabel *lp;
1679 1.1 oster {
1680 1.1 oster db1_printf(("Building a default label...\n"));
1681 1.1 oster bzero(lp, sizeof(*lp));
1682 1.1 oster
1683 1.1 oster /* fabricate a label... */
1684 1.1 oster lp->d_secperunit = raidPtr->totalSectors;
1685 1.1 oster lp->d_secsize = raidPtr->bytesPerSector;
1686 1.1 oster lp->d_nsectors = 1024 * (1024 / raidPtr->bytesPerSector);
1687 1.1 oster lp->d_ntracks = 1;
1688 1.1 oster lp->d_ncylinders = raidPtr->totalSectors / lp->d_nsectors;
1689 1.1 oster lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
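	/* The geometry above is synthetic: d_nsectors comes out to one
	   megabyte worth of sectors per track (1024 * (1024 / 512) = 2048
	   for 512-byte sectors), and since there is a single track per
	   cylinder each fictitious "cylinder" covers 1MB of the array. */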
1690 1.1 oster
1691 1.1 oster strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
1692 1.1 oster lp->d_type = DTYPE_RAID;
1693 1.1 oster strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
1694 1.1 oster lp->d_rpm = 3600;
1695 1.1 oster lp->d_interleave = 1;
1696 1.1 oster lp->d_flags = 0;
1697 1.1 oster
1698 1.1 oster lp->d_partitions[RAW_PART].p_offset = 0;
1699 1.1 oster lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
1700 1.1 oster lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1701 1.1 oster lp->d_npartitions = RAW_PART + 1;
1702 1.1 oster
1703 1.1 oster lp->d_magic = DISKMAGIC;
1704 1.1 oster lp->d_magic2 = DISKMAGIC;
1705 1.1 oster lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
1706 1.1 oster
1707 1.1 oster }
1708 1.1 oster
1709 1.1 oster /*
1710 1.1 oster * Read the disklabel from the raid device. If one is not present, fake one
1711 1.1 oster * up.
1712 1.1 oster */
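/*
 * The flow below: start from a fabricated default label, let the generic
 * readdisklabel() try to read a real one off the raid device (using
 * raidstrategy), fall back to raidmakedisklabel() if that fails, and
 * otherwise sanity-check the label that was found against the actual
 * size of the array.
 */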
1713 1.1 oster static void
1714 1.1 oster raidgetdisklabel(dev)
1715 1.1 oster dev_t dev;
1716 1.1 oster {
1717 1.1 oster int unit = raidunit(dev);
1718 1.1 oster struct raid_softc *rs = &raid_softc[unit];
1719 1.1 oster char *errstring;
1720 1.1 oster struct disklabel *lp = rs->sc_dkdev.dk_label;
1721 1.1 oster struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
1722 1.1 oster RF_Raid_t *raidPtr;
1723 1.1 oster
1724 1.1 oster db1_printf(("Getting the disklabel...\n"));
1725 1.1 oster
1726 1.1 oster bzero(clp, sizeof(*clp));
1727 1.1 oster
1728 1.1 oster raidPtr = raidPtrs[unit];
1729 1.1 oster
1730 1.1 oster raidgetdefaultlabel(raidPtr, rs, lp);
1731 1.1 oster
1732 1.1 oster /*
1733 1.1 oster * Call the generic disklabel extraction routine.
1734 1.1 oster */
1735 1.1 oster errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
1736 1.1 oster rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
1737 1.1 oster if (errstring)
1738 1.1 oster raidmakedisklabel(rs);
1739 1.1 oster else {
1740 1.1 oster int i;
1741 1.1 oster struct partition *pp;
1742 1.1 oster
1743 1.1 oster /*
1744 1.1 oster * Sanity check whether the found disklabel is valid.
1745 1.1 oster *
1746 1.1  oster 		 * This is necessary since the total size of the raid device
1747 1.1  oster 		 * may vary when the interleave is changed even though exactly
1748 1.1  oster 		 * the same components are used, and an old disklabel may be
1749 1.1  oster 		 * used if one is found.
1750 1.1 oster */
1751 1.1 oster if (lp->d_secperunit != rs->sc_size)
1752 1.1 oster printf("WARNING: %s: "
1753 1.1 oster "total sector size in disklabel (%d) != "
1754 1.1 oster "the size of raid (%d)\n", rs->sc_xname,
1755 1.1 oster lp->d_secperunit, rs->sc_size);
1756 1.1 oster for (i = 0; i < lp->d_npartitions; i++) {
1757 1.1 oster pp = &lp->d_partitions[i];
1758 1.1 oster if (pp->p_offset + pp->p_size > rs->sc_size)
1759 1.1 oster printf("WARNING: %s: end of partition `%c' "
1760 1.1 oster "exceeds the size of raid (%d)\n",
1761 1.1 oster rs->sc_xname, 'a' + i, rs->sc_size);
1762 1.1 oster }
1763 1.1 oster }
1764 1.1 oster
1765 1.1 oster }
1766 1.1 oster
1767 1.1 oster /*
1768 1.1 oster * Take care of things one might want to take care of in the event
1769 1.1 oster * that a disklabel isn't present.
1770 1.1 oster */
1771 1.1 oster static void
1772 1.1 oster raidmakedisklabel(rs)
1773 1.1 oster struct raid_softc *rs;
1774 1.1 oster {
1775 1.1 oster struct disklabel *lp = rs->sc_dkdev.dk_label;
1776 1.1 oster db1_printf(("Making a label..\n"));
1777 1.1 oster
1778 1.1 oster /*
1779 1.1 oster * For historical reasons, if there's no disklabel present
1780 1.1 oster * the raw partition must be marked FS_BSDFFS.
1781 1.1 oster */
1782 1.1 oster
1783 1.1 oster lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
1784 1.1 oster
1785 1.1 oster strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
1786 1.1 oster
1787 1.1 oster lp->d_checksum = dkcksum(lp);
1788 1.1 oster }
1789 1.1 oster
1790 1.1 oster /*
1791 1.1 oster * Lookup the provided name in the filesystem. If the file exists,
1792 1.1 oster * is a valid block device, and isn't being used by anyone else,
1793 1.1 oster * set *vpp to the file's vnode.
1794 1.1 oster * You'll find the original of this in ccd.c
1795 1.1 oster */
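/*
 * Note that vn_open() returns with the vnode locked, which is why every
 * exit path below does a VOP_UNLOCK() first.
 */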
1796 1.1 oster int
1797 1.1 oster raidlookup(path, p, vpp)
1798 1.1 oster char *path;
1799 1.1 oster struct proc *p;
1800 1.1 oster struct vnode **vpp; /* result */
1801 1.1 oster {
1802 1.1 oster struct nameidata nd;
1803 1.1 oster struct vnode *vp;
1804 1.1 oster struct vattr va;
1805 1.1 oster int error;
1806 1.1 oster
1807 1.1 oster NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
1808 1.1 oster if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
1809 1.1 oster #ifdef DEBUG
1810 1.1 oster printf("RAIDframe: vn_open returned %d\n",error);
1811 1.1 oster #endif
1812 1.1 oster return (error);
1813 1.1 oster }
1814 1.1 oster vp = nd.ni_vp;
1815 1.1 oster if (vp->v_usecount > 1) {
1816 1.1 oster VOP_UNLOCK(vp, 0);
1817 1.1 oster (void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
1818 1.1 oster return (EBUSY);
1819 1.1 oster }
1820 1.1 oster if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
1821 1.1 oster VOP_UNLOCK(vp, 0);
1822 1.1 oster (void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
1823 1.1 oster return (error);
1824 1.1 oster }
1825 1.1 oster /* XXX: eventually we should handle VREG, too. */
1826 1.1 oster if (va.va_type != VBLK) {
1827 1.1 oster VOP_UNLOCK(vp, 0);
1828 1.1 oster (void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
1829 1.1 oster return (ENOTBLK);
1830 1.1 oster }
1831 1.1 oster VOP_UNLOCK(vp, 0);
1832 1.1 oster *vpp = vp;
1833 1.1 oster return (0);
1834 1.1 oster }
1835 1.1 oster
1836 1.1 oster /*
1837 1.1 oster * Wait interruptibly for an exclusive lock.
1838 1.1 oster *
1839 1.1 oster * XXX
1840 1.1 oster * Several drivers do this; it should be abstracted and made MP-safe.
1841 1.1 oster * (Hmm... where have we seen this warning before :-> GO )
1842 1.1 oster */
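/*
 * A minimal usage sketch (an assumption, modelled on the other drivers
 * that use this idiom, e.g. ccd):
 *
 *	if ((error = raidlock(rs)) != 0)
 *		return (error);
 *	... modify the softc ...
 *	raidunlock(rs);
 */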
1843 1.1 oster static int
1844 1.1 oster raidlock(rs)
1845 1.1 oster struct raid_softc *rs;
1846 1.1 oster {
1847 1.1 oster int error;
1848 1.1 oster
1849 1.1 oster while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
1850 1.1 oster rs->sc_flags |= RAIDF_WANTED;
1851 1.1 oster if ((error =
1852 1.1 oster tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
1853 1.1 oster return (error);
1854 1.1 oster }
1855 1.1 oster rs->sc_flags |= RAIDF_LOCKED;
1856 1.1 oster return (0);
1857 1.1 oster }
1858 1.1 oster
1859 1.1 oster /*
1860 1.1 oster * Unlock and wake up any waiters.
1861 1.1 oster */
1862 1.1 oster static void
1863 1.1 oster raidunlock(rs)
1864 1.1 oster struct raid_softc *rs;
1865 1.1 oster {
1866 1.1 oster
1867 1.1 oster rs->sc_flags &= ~RAIDF_LOCKED;
1868 1.1 oster if ((rs->sc_flags & RAIDF_WANTED) != 0) {
1869 1.1 oster rs->sc_flags &= ~RAIDF_WANTED;
1870 1.1 oster wakeup(rs);
1871 1.1 oster }
1872 1.1 oster }
1873