1 /* $NetBSD: rf_netbsdkintf.c,v 1.2 1998/11/13 11:51:55 simonb Exp $ */
2 /*-
3 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Greg Oster; Jason R. Thorpe.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1988 University of Utah.
40 * Copyright (c) 1990, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * This code is derived from software contributed to Berkeley by
44 * the Systems Programming Group of the University of Utah Computer
45 * Science Department.
46 *
47 * Redistribution and use in source and binary forms, with or without
48 * modification, are permitted provided that the following conditions
49 * are met:
50 * 1. Redistributions of source code must retain the above copyright
51 * notice, this list of conditions and the following disclaimer.
52 * 2. Redistributions in binary form must reproduce the above copyright
53 * notice, this list of conditions and the following disclaimer in the
54 * documentation and/or other materials provided with the distribution.
55 * 3. All advertising materials mentioning features or use of this software
56 * must display the following acknowledgement:
57 * This product includes software developed by the University of
58 * California, Berkeley and its contributors.
59 * 4. Neither the name of the University nor the names of its contributors
60 * may be used to endorse or promote products derived from this software
61 * without specific prior written permission.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE.
74 *
75 * from: Utah $Hdr: cd.c 1.6 90/11/28$
76 *
77 * @(#)cd.c 8.2 (Berkeley) 11/16/93
78 */
79
80
81
82
83 /*
84 * Copyright (c) 1995 Carnegie-Mellon University.
85 * All rights reserved.
86 *
87 * Authors: Mark Holland, Jim Zelenka
88 *
89 * Permission to use, copy, modify and distribute this software and
90 * its documentation is hereby granted, provided that both the copyright
91 * notice and this permission notice appear in all copies of the
92 * software, derivative works or modified versions, and any portions
93 * thereof, and that both notices appear in supporting documentation.
94 *
95 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
96 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
97 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
98 *
99 * Carnegie Mellon requests users of this software to return to
100 *
101 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
102 * School of Computer Science
103 * Carnegie Mellon University
104 * Pittsburgh PA 15213-3890
105 *
106 * any improvements or extensions that they make and grant Carnegie the
107 * rights to redistribute these changes.
108 */
109
110 /***********************************************************
111 *
112 * rf_kintf.c -- the kernel interface routines for RAIDframe
113 *
114 ***********************************************************/
115 /*
116 *
117 * $Log: rf_kintf.c,v $
118 * Revision 1.57 1996/07/19 16:12:20 jimz
119 * remove addition of protectedSectors in InitBP- it's already
120 * done in the diskqueue code
121 *
122 * Revision 1.56 1996/07/17 21:00:58 jimz
123 * clean up timer interface, tracing
124 *
125 * Revision 1.55 1996/06/17 03:00:54 jimz
126 * Change RAIDFRAME_GET_INFO interface to do its own copyout()
127 * (because size of device config structure now exceeds 8k)
128 *
129 * Revision 1.54 1996/06/09 02:36:46 jimz
130 * lots of little crufty cleanup- fixup whitespace
131 * issues, comment #ifdefs, improve typing in some
132 * places (esp size-related)
133 *
134 * Revision 1.53 1996/06/07 21:33:04 jimz
135 * begin using consistent types for sector numbers,
136 * stripe numbers, row+col numbers, recon unit numbers
137 *
138 * Revision 1.52 1996/06/06 17:28:08 jimz
139 * track sector number of last I/O dequeued
140 *
141 * Revision 1.51 1996/06/05 18:06:02 jimz
142 * Major code cleanup. The Great Renaming is now done.
143 * Better modularity. Better typing. Fixed a bunch of
144 * synchronization bugs. Made a lot of global stuff
145 * per-desc or per-array. Removed dead code.
146 *
147 * Revision 1.50 1996/06/03 23:28:26 jimz
148 * more bugfixes
149 * check in tree to sync for IPDS runs with current bugfixes
150 * there still may be a problem with threads in the script test
151 * getting I/Os stuck- not trivially reproducible (runs ~50 times
152 * in a row without getting stuck)
153 *
154 * Revision 1.49 1996/06/02 17:31:48 jimz
155 * Moved a lot of global stuff into array structure, where it belongs.
156 * Fixed up paritylogging, pss modules in this manner. Some general
157 * code cleanup. Removed lots of dead code, some dead files.
158 *
159 * Revision 1.48 1996/05/31 22:26:54 jimz
160 * fix a lot of mapping problems, memory allocation problems
161 * found some weird lock issues, fixed 'em
162 * more code cleanup
163 *
164 * Revision 1.47 1996/05/30 12:59:18 jimz
165 * make etimer happier, more portable
166 *
167 * Revision 1.46 1996/05/30 11:29:41 jimz
168 * Numerous bug fixes. Stripe lock release code disagreed with the taking code
169 * about when stripes should be locked (I made it consistent: no parity, no lock)
170 * There was a lot of extra serialization of I/Os which I've removed- a lot of
171 * it was to calculate values for the cache code, which is no longer with us.
172 * More types, function, macro cleanup. Added code to properly quiesce the array
173 * on shutdown. Made a lot of stuff array-specific which was (bogusly) general
174 * before. Fixed memory allocation, freeing bugs.
175 *
176 * Revision 1.45 1996/05/27 18:56:37 jimz
177 * more code cleanup
178 * better typing
179 * compiles in all 3 environments
180 *
181 * Revision 1.44 1996/05/24 22:17:04 jimz
182 * continue code + namespace cleanup
183 * typed a bunch of flags
184 *
185 * Revision 1.43 1996/05/24 01:59:45 jimz
186 * another checkpoint in code cleanup for release
187 * time to sync kernel tree
188 *
189 * Revision 1.42 1996/05/23 22:17:54 jimz
190 * fix sector size hardcoding problems
191 *
192 * Revision 1.41 1996/05/23 21:46:35 jimz
193 * checkpoint in code cleanup (release prep)
194 * lots of types, function names have been fixed
195 *
196 * Revision 1.40 1996/05/23 13:18:07 jimz
197 * tracing_mutex -> rf_tracing_mutex
198 *
199 * Revision 1.39 1996/05/23 00:33:23 jimz
200 * code cleanup: move all debug decls to rf_options.c, all extern
201 * debug decls to rf_options.h, all debug vars preceded by rf_
202 *
203 * Revision 1.38 1996/05/20 16:15:32 jimz
204 * switch to rf_{mutex,cond}_{init,destroy}
205 *
206 * Revision 1.37 1996/05/10 16:23:47 jimz
207 * RF_offset -> RF_Offset
208 *
209 * Revision 1.36 1996/05/08 21:01:24 jimz
210 * fixed up enum type names that were conflicting with other
211 * enums and function names (ie, "panic")
212 * future naming trends will be towards RF_ and rf_ for
213 * everything raidframe-related
214 *
215 * Revision 1.35 1996/05/03 19:10:48 jimz
216 * change sanity checking for bogus I/Os to return more appropriate
217 * values (to make some user-level utilities happier with RAIDframe)
218 *
219 * Revision 1.34 1996/05/02 22:17:00 jimz
220 * When using DKUSAGE, send a bogus IO after configuring to let DKUSAGE know
221 * that we exist. This will let user-level programs doing group stats on the
222 * RF device function without error before RF gets its first IO
223 *
224 * Changed rf_device_config devs and spares fields to RF_RaidDisk_t
225 *
226 * Inc numOutstanding for the disk queue in rf_DispatchKernelIO if
227 * type is IO_TYPE_NOP. I'm not sure this is right, but it seems to be,
228 * because the disk IO completion routine wants to dec it, and doesn't
229 * care if there was no such IO.
230 *
231 * Revision 1.33 1996/05/02 15:05:44 jimz
232 * for now, rf_DoAccessKernel will reject non-sector-sized I/Os
233 * eventually, it should do something more clever...
234 * (and do it in DoAccess(), not just DoAccessKernel())
235 *
236 * Revision 1.32 1996/05/01 16:28:39 jimz
237 * get rid of uses of ccmn_ functions
238 *
239 * Revision 1.31 1996/05/01 15:42:17 jimz
240 * ccmn_* memory management is on the way out. This is an archival checkpoint-
241 * both the old and new code are in place (all the ccmn_ calls are #if 0). After
242 * this, the ccmn_ code will no longer appear.
243 *
244 * Revision 1.30 1996/04/22 15:53:13 jimz
245 * MAX_RAIDS -> NRAIDFRAME
246 *
247 * Revision 1.29 1995/12/12 18:10:06 jimz
248 * MIN -> RF_MIN, MAX -> RF_MAX, ASSERT -> RF_ASSERT
249 * fix 80-column brain damage in comments
250 *
251 * Revision 1.28 1995/12/01 19:11:01 root
252 * added copyright info
253 *
254 * Revision 1.27 1995/11/28 18:56:40 wvcii
255 * disabled buffer copy in rf_write
256 *
257 * Revision 1.26 1995/10/06 16:37:08 jimz
258 * get struct bufs from ubc, not cam
259 * copy all write data, and operate on copy
260 * (temporary hack to get around dags in PQ that want
261 * to Xor into user write buffers)
262 *
263 * Revision 1.25 1995/09/30 22:23:08 jimz
264 * do not require raid to be active to perform ACCTOTAL ioctl
265 *
266 * Revision 1.24 1995/09/30 20:39:08 jimz
267 * added new ioctls:
268 * RAIDFRAME_RESET_ACCTOTALS
269 * RAIDFRAME_GET_ACCTOTALS
270 * RAIDFRAME_KEEP_ACCTOTALS
271 *
272 * Revision 1.23 1995/09/20 21:11:59 jimz
273 * include dfstrace.h in KERNEL block
274 * (even though it's a kernel-only file, this makes the depend process
275 * at user-level happy. Why the user-level Makefile wants to depend
276 * kintf.c is less clear, but this is a workaround).
277 *
278 * Revision 1.22 1995/09/19 23:19:03 jimz
279 * added DKUSAGE support
280 *
281 */
282
283
284
285
286 #ifdef _KERNEL
287 #define KERNEL
288 #endif
289
290
291
292 #ifdef KERNEL
293
294 #include <sys/errno.h>
295
296 #ifdef __NetBSD__
297 #include "raid.h"
298 #include <sys/param.h>
299 #include <sys/pool.h>
300 #include <sys/queue.h>
301 #include <sys/disk.h>
302 #include <sys/device.h>
303 #include <sys/stat.h>
304 #include <sys/ioctl.h>
305 #include <sys/fcntl.h>
306 #include <sys/systm.h>
307 #include <sys/namei.h>
308 #include <sys/vnode.h>
309 #endif
310
311 #include <sys/param.h>
312 #include <sys/types.h>
313
314 #include <machine/types.h>
315
316 #include <sys/disklabel.h>
317
318 #include <sys/conf.h>
319
320
321 #ifdef __NetBSD__
322 #include <sys/lock.h>
323 #endif /* __NetBSD__ */
324
325
326 #include <sys/buf.h>
327 #include <sys/user.h>
328 #include "rf_raid.h"
329 #include "rf_raidframe.h"
330 #include "rf_dag.h"
331 #include "rf_dagflags.h"
332 #include "rf_diskqueue.h"
333 #include "rf_acctrace.h"
334 #include "rf_etimer.h"
335 #include "rf_general.h"
336 #include "rf_debugMem.h"
337 #include "rf_kintf.h"
338 #include "rf_options.h"
339 #include "rf_driver.h"
340 #include "rf_parityscan.h"
341 #include "rf_debugprint.h"
342 #include "rf_threadstuff.h"
343
344 int rf_kdebug_level = 0;
345
346 #define RFK_BOOT_NONE 0
347 #define RFK_BOOT_GOOD 1
348 #define RFK_BOOT_BAD 2
349 static int rf_kbooted = RFK_BOOT_NONE;
350
351 #ifdef DEBUG
352 #define db0_printf(a) printf a
353 #define db_printf(a) if (rf_kdebug_level > 0) printf a
354 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
355 #define db2_printf(a) if (rf_kdebug_level > 1) printf a
356 #define db3_printf(a) if (rf_kdebug_level > 2) printf a
357 #define db4_printf(a) if (rf_kdebug_level > 3) printf a
358 #define db5_printf(a) if (rf_kdebug_level > 4) printf a
359 #else /* DEBUG */
360 #define db0_printf(a) printf a
361 #define db1_printf(a) { }
362 #define db2_printf(a) { }
363 #define db3_printf(a) { }
364 #define db4_printf(a) { }
365 #define db5_printf(a) { }
366 #endif /* DEBUG */
367
368 static RF_Raid_t **raidPtrs; /* global raid device descriptors */
369
370 static int rf_pending_testaccs;
371
372 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
373 RF_DECLARE_STATIC_MUTEX(rf_async_done_q_mutex)
374 static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a spare table */
375 static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from installation process */
376 static struct rf_test_acc *rf_async_done_qh, *rf_async_done_qt;
377
378 static struct rf_recon_req *recon_queue = NULL; /* used to communicate reconstruction requests */
379
380
381 decl_simple_lock_data(,recon_queue_mutex)
382
383
384 #define LOCK_RECON_Q_MUTEX() simple_lock(&recon_queue_mutex)
385 #define UNLOCK_RECON_Q_MUTEX() simple_unlock(&recon_queue_mutex)
386
387 /* prototypes */
388 static void KernelWakeupFunc(struct buf *bp);
389 static void InitBP(struct buf *bp, struct vnode *, unsigned rw_flag, dev_t dev,
390 RF_SectorNum_t startSect, RF_SectorCount_t numSect, caddr_t buf,
391 void (*cbFunc)(struct buf *), void *cbArg, int logBytesPerSector,
392 struct proc *b_proc);
393
394 #define Dprintf0(s) if (rf_queueDebug) rf_debug_printf(s,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
395 #define Dprintf1(s,a) if (rf_queueDebug) rf_debug_printf(s,a,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
396 #define Dprintf2(s,a,b) if (rf_queueDebug) rf_debug_printf(s,a,b,NULL,NULL,NULL,NULL,NULL,NULL)
397 #define Dprintf3(s,a,b,c) if (rf_queueDebug) rf_debug_printf(s,a,b,c,NULL,NULL,NULL,NULL,NULL)
398
399
400 /* this is so that we can compile under 2.0 as well as 3.2 */
401 #ifndef proc_to_task
402 #define proc_to_task(x) ((x)->task)
403 #endif /* !proc_to_task */
404
405 void raidattach __P((int));
406 int raidsize __P((dev_t));
407
408 void rf_DiskIOComplete(RF_DiskQueue_t *, RF_DiskQueueData_t *, int);
409 void rf_CopybackReconstructedData(RF_Raid_t *raidPtr);
410 static int raidinit __P((dev_t,RF_Raid_t *,int));
411
412 int raidopen __P((dev_t, int, int, struct proc *));
413 int raidclose __P((dev_t, int, int, struct proc *));
414 int raidioctl __P((dev_t, u_long, caddr_t, int, struct proc *));
415 int raidwrite __P((dev_t, struct uio *, int));
416 int raidread __P((dev_t, struct uio *, int));
417 void raidstrategy __P((struct buf *));
418 int raiddump __P((dev_t, daddr_t, caddr_t, size_t));
419
420 /*
421 * Pilfered from ccd.c
422 */
423
424 struct raidbuf {
425 struct buf rf_buf; /* new I/O buf. MUST BE FIRST!!! */
426 struct buf *rf_obp; /* ptr. to original I/O buf */
427 int rf_flags; /* misc. flags */
428 RF_DiskQueueData_t *req; /* the request that this was part of.. */
429 };
430
431
432 #define RAIDGETBUF(rs) pool_get(&(rs)->sc_cbufpool, PR_NOWAIT)
433 #define RAIDPUTBUF(rs, cbp) pool_put(&(rs)->sc_cbufpool, cbp)
434
435 /* XXX Not sure if the following should be replacing the raidPtrs above,
436 or if it should be used in conjunction with that... */
437
438 struct raid_softc {
439 int sc_unit; /* logical unit number */
440 int sc_flags; /* flags */
441 int sc_cflags; /* configuration flags */
442 size_t sc_size; /* size of the raid device */
443 dev_t sc_dev; /* our device..*/
444 char sc_xname[20]; /* XXX external name */
445 struct disk sc_dkdev; /* generic disk device info */
446 struct pool sc_cbufpool; /* component buffer pool */
447 };
448
449 /* sc_flags */
450 #define RAIDF_INITED 0x01 /* unit has been initialized */
451 #define RAIDF_WLABEL 0x02 /* label area is writable */
452 #define RAIDF_LABELLING 0x04 /* unit is currently being labelled */
453 #define RAIDF_WANTED 0x40 /* someone is waiting to obtain a lock */
454 #define RAIDF_LOCKED 0x80 /* unit is locked */
455
456 #define raidunit(x) DISKUNIT(x)
457 static int numraid=0;
458
459 #define RAIDLABELDEV(dev) \
460 (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
461
462 /* declared here, and made public, for the benefit of KVM stuff.. */
463 struct raid_softc *raid_softc;
464
465 static void raidgetdefaultlabel __P((RF_Raid_t *, struct raid_softc *, struct disklabel *));
466 static void raidgetdisklabel __P((dev_t));
467 static void raidmakedisklabel __P((struct raid_softc *));
468
469 static int raidlock __P((struct raid_softc *));
470 static void raidunlock __P((struct raid_softc *));
471 int raidlookup __P((char *, struct proc *p, struct vnode **));
472
473
474 void
475 raidattach(num)
476 int num;
477 {
478 int raidID;
479
480 #ifdef DEBUG
481 printf("raidattach: Asked for %d units\n",num);
482 #endif
483
484 if (num <= 0) {
485 #ifdef DIAGNOSTIC
486 panic("raidattach: count <= 0");
487 #endif
488 return;
489 }
490 /*
491 This is where all the initialization stuff gets done.
492 */
493
494 /* Make some space for requested number of units... */
495
496 RF_Calloc(raidPtrs, num, sizeof(RF_Raid_t *), (RF_Raid_t **));
497 if (raidPtrs == NULL) {
498 panic("raidPtrs is NULL!!\n");
499 }
500
501
502
503 rf_kbooted = rf_boot();
504 if (rf_kbooted) {
505 panic("Serious error booting RAID!!\n");
506 }
507
508 rf_kbooted = RFK_BOOT_GOOD;
509
510 /*
511 put together some datastructures like the CCD device does..
512 This lets us lock the device and what-not when it gets opened.
513 */
514
515 raid_softc = (struct raid_softc *)
516 malloc(num * sizeof(struct raid_softc),
517 M_DEVBUF, M_NOWAIT);
518 if (raid_softc == NULL) {
519 printf("WARNING: no memory for RAIDframe driver\n");
520 return;
521 }
522 numraid = num;
523 bzero(raid_softc, num * sizeof(struct raid_softc));
524
525 for(raidID=0;raidID < num;raidID++) {
526 RF_Calloc(raidPtrs[raidID], 1, sizeof(RF_Raid_t),
527 (RF_Raid_t *));
528 if (raidPtrs[raidID]==NULL) {
529 printf("raidPtrs[%d] is NULL\n",raidID);
530 }
531 }
532 }
533
534
535 int
536 raidsize(dev)
537 dev_t dev;
538 {
539 struct raid_softc *rs;
540 struct disklabel *lp;
541 int part, unit, omask, size;
542
543 unit = raidunit(dev);
544 if (unit >= numraid)
545 return (-1);
546 rs = &raid_softc[unit];
547
548 if ((rs->sc_flags & RAIDF_INITED) == 0)
549 return (-1);
550
551 part = DISKPART(dev);
552 omask = rs->sc_dkdev.dk_openmask & (1 << part);
553 lp = rs->sc_dkdev.dk_label;
554
555 if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
556 return (-1);
557
558 if (lp->d_partitions[part].p_fstype != FS_SWAP)
559 size = -1;
560 else
561 size = lp->d_partitions[part].p_size *
562 (lp->d_secsize / DEV_BSIZE);
563
564 if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
565 return (-1);
566
567 return (size);
568
569 }
570
571 int
572 raiddump(dev, blkno, va, size)
573 dev_t dev;
574 daddr_t blkno;
575 caddr_t va;
576 size_t size;
577 {
578 /* Not implemented. */
579 return ENXIO;
580 }
581
582 /* ARGSUSED */
583 int
584 raidopen(dev, flags, fmt, p)
585 dev_t dev;
586 int flags, fmt;
587 struct proc *p;
588 {
589 int unit = raidunit(dev);
590 struct raid_softc *rs;
591 struct disklabel *lp;
592 int part,pmask;
593 unsigned int raidID;
594 int rc;
595 int error = 0;
596
597 /* This whole next chunk of code is somewhat suspect... Not sure
598 it's needed here at all... XXX */
599
600 if (rf_kbooted == RFK_BOOT_NONE) {
601 printf("Doing restart on raidopen.\n");
602 rf_kbooted = RFK_BOOT_GOOD;
603 rc = rf_boot();
604 if (rc) {
605 rf_kbooted = RFK_BOOT_BAD;
606 printf("Someone is unhappy...\n");
607 return(rc);
608 }
609 }
610
611 if (unit >= numraid)
612 return (ENXIO);
613 rs = &raid_softc[unit];
614
615 if ((error = raidlock(rs)) != 0)
616 return(error);
617 lp = rs->sc_dkdev.dk_label;
618
619 raidID = raidunit(dev);
620
621 part = DISKPART(dev);
622 pmask = (1 << part);
623
624 db1_printf(("Opening raid device number: %d partition: %d\n",
625 raidID,part));
626
627
628 if ((rs->sc_flags & RAIDF_INITED) &&
629 (rs->sc_dkdev.dk_openmask == 0))
630 raidgetdisklabel(dev);
631
632 /* make sure that this partition exists */
633
634 if (part != RAW_PART) {
635 db1_printf(("Not a raw partition..\n"));
636 if (((rs->sc_flags & RAIDF_INITED) == 0) ||
637 ((part >= lp->d_npartitions) ||
638 (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
639 error = ENXIO;
640 raidunlock(rs);
641 db1_printf(("Bailing out...\n"));
642 return(error);
643 }
644 }
645
646 /* Prevent this unit from being unconfigured while open. */
647 switch (fmt) {
648 case S_IFCHR:
649 rs->sc_dkdev.dk_copenmask |= pmask;
650 break;
651
652 case S_IFBLK:
653 rs->sc_dkdev.dk_bopenmask |= pmask;
654 break;
655 }
656 rs->sc_dkdev.dk_openmask =
657 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
658
659 raidunlock(rs);
660
661 return(error);
662
663
664 }
665
666 /* ARGSUSED */
667 int
668 raidclose(dev, flags, fmt, p)
669 dev_t dev;
670 int flags, fmt;
671 struct proc *p;
672 {
673 int unit = raidunit(dev);
674 struct raid_softc *rs;
675 int error = 0;
676 int part;
677
678 if (unit >= numraid)
679 return (ENXIO);
680 rs = &raid_softc[unit];
681
682 if ((error = raidlock(rs)) != 0)
683 return (error);
684
685 part = DISKPART(dev);
686
687 /* ...that much closer to allowing unconfiguration... */
688 switch (fmt) {
689 case S_IFCHR:
690 rs->sc_dkdev.dk_copenmask &= ~(1 << part);
691 break;
692
693 case S_IFBLK:
694 rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
695 break;
696 }
697 rs->sc_dkdev.dk_openmask =
698 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
699
700 raidunlock(rs);
701 return (0);
702
703 }
704
705 void
706 raidstrategy(bp)
707 register struct buf *bp;
708 {
709 register int s;
710
711 unsigned int raidID = raidunit(bp->b_dev);
712 RF_Raid_t *raidPtr;
713 struct raid_softc *rs = &raid_softc[raidID];
714 struct disklabel *lp;
715 int wlabel;
716
717 db1_printf(("Strategy: 0x%x 0x%x\n",(int)bp,(int)bp->b_data));
718 db1_printf(("Strategy(2): bp->b_bufsize=%d\n", (int)bp->b_bufsize));
719 db1_printf(("bp->b_bcount=%d\n",(int)bp->b_bcount));
720 db1_printf(("bp->b_resid=%d\n",(int)bp->b_resid));
721 db1_printf(("bp->b_blkno=%d\n",(int)bp->b_blkno));
722 #if 0
723 if (bp->b_flags&B_READ)
724 db1_printf(("READ\n"));
725 else
726 db1_printf(("WRITE\n"));
727 #endif
728 if (rf_kbooted != RFK_BOOT_GOOD)
729 return;
730 if (raidID >= numraid || !raidPtrs[raidID]) {
731 bp->b_error = ENODEV;
732 bp->b_flags |= B_ERROR;
733 bp->b_resid = bp->b_bcount;
734 biodone(bp);
735 return;
736 }
737 raidPtr = raidPtrs[raidID];
738 if (!raidPtr->valid) {
739 bp->b_error = ENODEV;
740 bp->b_flags |= B_ERROR;
741 bp->b_resid = bp->b_bcount;
742 biodone(bp);
743 return;
744 }
745 if (bp->b_bcount == 0) {
746 db1_printf(("b_bcount is zero..\n"));
747 biodone(bp);
748 return;
749 }
750 lp = rs->sc_dkdev.dk_label;
751
752 /*
753 * Do bounds checking and adjust transfer. If there's an
754 * error, the bounds check will flag that for us.
755 */
756
757 wlabel = rs->sc_flags & (RAIDF_WLABEL|RAIDF_LABELLING);
758 if (DISKPART(bp->b_dev) != RAW_PART)
759 if (bounds_check_with_label(bp, lp, wlabel) <= 0) {
760 db1_printf(("Bounds check failed!!:%d %d\n",
761 (int)bp->b_blkno,(int)wlabel));
762 biodone(bp);
763 return;
764 }
765
766 s = splbio(); /* XXX Needed? */
767 db1_printf(("Beginning strategy...\n"));
768
769 bp->b_resid = 0;
770 bp->b_error = rf_DoAccessKernel(raidPtrs[raidID], bp,
771 NULL, NULL, NULL);
772 if (bp->b_error) {
773 bp->b_flags |= B_ERROR;
774 db1_printf(("bp->b_flags HAS B_ERROR SET!!!: %d\n",
775 bp->b_error));
776 }
777 splx(s);
778 db1_printf(("Strategy exiting: 0x%x 0x%x %d %d\n",
779 (int)bp,(int)bp->b_data,
780 (int)bp->b_bcount,(int)bp->b_resid));
781 }
782
783 /* ARGSUSED */
784 int
785 raidread(dev, uio, flags)
786 dev_t dev;
787 struct uio *uio;
788 int flags;
789 {
790 int unit = raidunit(dev);
791 struct raid_softc *rs;
792 int result;
793 int part;
794
795 if (unit >= numraid)
796 return (ENXIO);
797 rs = &raid_softc[unit];
798
799 if ((rs->sc_flags & RAIDF_INITED) == 0)
800 return (ENXIO);
801 part = DISKPART(dev);
802
803 db1_printf(("raidread: unit: %d partition: %d\n",unit,part));
804
805 #if 0
806 return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
807 #endif
808 result=physio(raidstrategy, NULL, dev, B_READ, minphys, uio);
809 db1_printf(("raidread done. Result is %d %d\n",
810 result,uio->uio_resid));
811 return(result);
812
813 }
814
815 /* ARGSUSED */
816 int
817 raidwrite(dev, uio, flags)
818 dev_t dev;
819 struct uio *uio;
820 int flags;
821 {
822 int unit = raidunit(dev);
823 struct raid_softc *rs;
824
825 if (unit >= numraid)
826 return (ENXIO);
827 rs = &raid_softc[unit];
828
829 if ((rs->sc_flags & RAIDF_INITED) == 0)
830 return (ENXIO);
831 db1_printf(("raidwrite\n"));
832 return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
833
834
835 }
836
837 int
838 raidioctl(dev, cmd, data, flag, p)
839 dev_t dev;
840 u_long cmd;
841 caddr_t data;
842 int flag;
843 struct proc *p;
844 {
845 int unit = raidunit(dev);
846 int error = 0;
847 int part, pmask;
848 struct raid_softc *rs;
849 #if 0
850 int r,c;
851 #endif
852 /* struct raid_ioctl *ccio = (struct ccd_ioctl *)data; */
853
854 /* struct ccdbuf *cbp; */
855 /* struct raidbuf *raidbp; */
856 RF_Config_t *k_cfg, *u_cfg;
857 u_char *specific_buf;
858 int retcode = 0;
859
860 int row;
861 struct rf_recon_req *rrcopy, *rr;
862 #if 0
863 int nbytes, spl, rw, row;
864 struct rf_test_acc *ta;
865 struct buf *bp;
866 RF_SparetWait_t *waitreq;
867 struct rf_test_acc *ta_p, *ta_copy;
868 #endif
869
870 if (unit >= numraid)
871 return (ENXIO);
872 rs = &raid_softc[unit];
873
874 db1_printf(("raidioctl: %d %d %d %d\n",(int)dev,
875 (int)DISKPART(dev),(int)unit,(int)cmd));
876
877 /* Must be open for writes for these commands... */
878 switch (cmd) {
879 case DIOCSDINFO:
880 case DIOCWDINFO:
881 case DIOCWLABEL:
882 if ((flag & FWRITE) == 0)
883 return (EBADF);
884 }
885
886 /* Must be initialized for these... */
887 switch (cmd) {
888 case DIOCGDINFO:
889 case DIOCSDINFO:
890 case DIOCWDINFO:
891 case DIOCGPART:
892 case DIOCWLABEL:
893 case DIOCGDEFLABEL:
894 case RAIDFRAME_SHUTDOWN:
895 case RAIDFRAME_REWRITEPARITY:
896 case RAIDFRAME_GET_INFO:
897 case RAIDFRAME_RESET_ACCTOTALS:
898 case RAIDFRAME_GET_ACCTOTALS:
899 case RAIDFRAME_KEEP_ACCTOTALS:
900 case RAIDFRAME_GET_SIZE:
901 case RAIDFRAME_FAIL_DISK:
902 case RAIDFRAME_COPYBACK:
903 case RAIDFRAME_CHECKRECON:
904 if ((rs->sc_flags & RAIDF_INITED) == 0)
905 return (ENXIO);
906 }
907
908 switch (cmd) {
909
910
911 /* configure the system */
912 case RAIDFRAME_CONFIGURE:
913
914 db3_printf(("rf_ioctl: RAIDFRAME_CONFIGURE\n"));
915 /* copy-in the configuration information */
916 /* data points to a pointer to the configuration structure */
917 u_cfg = *((RF_Config_t **) data);
918 RF_Malloc(k_cfg,sizeof(RF_Config_t),(RF_Config_t *));
919 if (k_cfg == NULL) {
920 db3_printf(("rf_ioctl: ENOMEM for config. Code is %d\n", retcode));
921 return(ENOMEM);
922 }
923 retcode = copyin((caddr_t) u_cfg, (caddr_t) k_cfg,
924 sizeof(RF_Config_t));
925 if (retcode) {
926 db3_printf(("rf_ioctl: retcode=%d copyin.1\n",
927 retcode));
928 return(retcode);
929 }
930
931 /* allocate a buffer for the layout-specific data,
932 and copy it in */
933 if (k_cfg->layoutSpecificSize) {
934 if (k_cfg->layoutSpecificSize > 10000) {
935 /* sanity check */
936 db3_printf(("rf_ioctl: EINVAL %d\n", retcode));
937 return(EINVAL);
938 }
939 RF_Malloc(specific_buf,k_cfg->layoutSpecificSize,
940 (u_char *));
941 if (specific_buf == NULL) {
942 RF_Free(k_cfg,sizeof(RF_Config_t));
943 db3_printf(("rf_ioctl: ENOMEM %d\n", retcode));
944 return(ENOMEM);
945 }
946 retcode = copyin(k_cfg->layoutSpecific,
947 (caddr_t) specific_buf,
948 k_cfg->layoutSpecificSize);
949 if (retcode) {
950 db3_printf(("rf_ioctl: retcode=%d copyin.2\n",
951 retcode));
952 return(retcode);
953 }
954 } else specific_buf = NULL;
955 k_cfg->layoutSpecific = specific_buf;
956
957 /* should do some kind of sanity check on the configuration.
958 Store the sum of all the bytes in the last byte?
959 */
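		/*
		 * Illustrative sketch only, not part of the original driver:
		 * one way to implement the byte-sum idea in the comment
		 * above.  It assumes the configuration utility stores the
		 * sum of all preceding bytes in the last byte of RF_Config_t
		 * (no such convention exists yet), and it would have to run
		 * before k_cfg->layoutSpecific is overwritten above, so it
		 * is left disabled.
		 */
#if 0
		{
			u_char *cp = (u_char *) k_cfg;
			u_char sum = 0;
			int i;

			/* sum everything except the stored check byte */
			for (i = 0; i < sizeof(RF_Config_t) - 1; i++)
				sum += cp[i];
			if (sum != cp[sizeof(RF_Config_t) - 1]) {
				if (k_cfg->layoutSpecificSize)
					RF_Free(specific_buf,
					    k_cfg->layoutSpecificSize);
				RF_Free(k_cfg, sizeof(RF_Config_t));
				return(EINVAL);
			}
		}
#endif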
960
961 db1_printf(("Considering configuring the system.:%d 0x%x\n",
962 unit,(int)p));
963
964 /* We need the pointer to this a little deeper, so
965 stash it here... */
966
967 raidPtrs[unit]->proc = p;
968
969 /* configure the system */
970 rf_pending_testaccs = 0;
971
972
973 raidPtrs[unit]->raidid = unit;
974 retcode = rf_Configure(raidPtrs[unit], k_cfg);
975
976
977 if (retcode == 0) {
978 retcode = raidinit(dev, raidPtrs[unit],unit);
979 }
980
981 /* free the buffers. No return code here. */
982 if (k_cfg->layoutSpecificSize) {
983 RF_Free(specific_buf,k_cfg->layoutSpecificSize);
984 }
985 RF_Free(k_cfg,sizeof(RF_Config_t));
986
987 db3_printf(("rf_ioctl: retcode=%d RAIDFRAME_CONFIGURE\n",
988 retcode));
989 return(retcode);
990
991 /* shutdown the system */
992 case RAIDFRAME_SHUTDOWN:
993
994 if ((error = raidlock(rs)) != 0)
995 return(error);
996
997 /*
998 * If somebody has a partition mounted, we shouldn't
999 * shutdown.
1000 */
1001
1002 part = DISKPART(dev);
1003 pmask = (1 << part);
1004 if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
1005 ((rs->sc_dkdev.dk_bopenmask & pmask) &&
1006 (rs->sc_dkdev.dk_copenmask & pmask))) {
1007 raidunlock(rs);
1008 return (EBUSY);
1009 }
1010
1011 /* the intention here was to disallow shutdowns while
1012 raidframe is mounted, but it doesn't work because the
1013 shutdown ioctl calls rf_open
1014 */
1015 if (rf_pending_testaccs > 0) {
1016 printf("RAIDFRAME: Can't shutdown because there are %d pending test accs\n",
1017 rf_pending_testaccs);
1018 return(EINVAL);
1019 }
1020 if (rf_debugKernelAccess) {
1021 printf("call shutdown\n");
1022 }
1023 raidPtrs[unit]->proc = p; /* XXX necessary evil */
1024 retcode = rf_Shutdown(raidPtrs[unit]);
1025
1026 db1_printf(("Done main shutdown\n"));
1027
1028 pool_destroy(&rs->sc_cbufpool);
1029 db1_printf(("Done freeing component buffer freelist\n"));
1030
1031 /* It's no longer initialized... */
1032 rs->sc_flags &= ~RAIDF_INITED;
1033
1034 /* Detach the disk. */
1035 disk_detach(&rs->sc_dkdev);
1036
1037 raidunlock(rs);
1038
1039 return(retcode);
1040
1041 /* initialize all parity */
1042 case RAIDFRAME_REWRITEPARITY:
1043
1044 if (raidPtrs[unit]->Layout.map->faultsTolerated == 0)
1045 return(EINVAL);
1046 /* borrow the thread of the requesting process */
1047 raidPtrs[unit]->proc = p; /* Blah... :-p GO */
1048 retcode = rf_RewriteParity(raidPtrs[unit]);
1049 /* return I/O Error if the parity rewrite fails */
1050
1051 if (retcode)
1052 retcode = EIO;
1053 return(retcode);
1054
1055 /* issue a test-unit-ready through raidframe to the
1056 indicated device */
1057 #if 0 /* XXX not supported yet (ever?) */
1058 case RAIDFRAME_TUR:
1059 /* debug only */
1060 retcode = rf_SCSI_DoTUR(0, 0, 0, 0, *(dev_t *) data);
1061 return(retcode);
1062 #endif
1063 case RAIDFRAME_GET_INFO:
1064 {
1065 RF_Raid_t *raid = raidPtrs[unit];
1066 RF_DeviceConfig_t *cfg, **ucfgp;
1067 int i, j, d;
1068
1069 if (!raid->valid)
1070 return(ENODEV);
1071 ucfgp = (RF_DeviceConfig_t **)data;
1072 RF_Malloc(cfg,sizeof(RF_DeviceConfig_t),
1073 (RF_DeviceConfig_t *));
1074 if (cfg == NULL)
1075 return(ENOMEM);
1076 bzero((char *)cfg, sizeof(RF_DeviceConfig_t));
1077 cfg->rows = raid->numRow;
1078 cfg->cols = raid->numCol;
1079 cfg->ndevs = raid->numRow * raid->numCol;
1080 if (cfg->ndevs >= RF_MAX_DISKS) {
1081 cfg->ndevs = 0;
1082 return(ENOMEM);
1083 }
1084 cfg->nspares = raid->numSpare;
1085 if (cfg->nspares >= RF_MAX_DISKS) {
1086 cfg->nspares = 0;
1087 return(ENOMEM);
1088 }
1089 cfg->maxqdepth = raid->maxQueueDepth;
1090 d = 0;
1091 for(i=0;i<cfg->rows;i++) {
1092 for(j=0;j<cfg->cols;j++) {
1093 cfg->devs[d] = raid->Disks[i][j];
1094 d++;
1095 }
1096 }
1097 for(j=cfg->cols,i=0;i<cfg->nspares;i++,j++) {
1098 cfg->spares[i] = raid->Disks[0][j];
1099 }
1100 retcode = copyout((caddr_t)cfg, (caddr_t)*ucfgp,
1101 sizeof(RF_DeviceConfig_t));
1102 RF_Free(cfg,sizeof(RF_DeviceConfig_t));
1103
1104 return(retcode);
1105 }
1106 break;
1107
1108 case RAIDFRAME_RESET_ACCTOTALS:
1109 {
1110 RF_Raid_t *raid = raidPtrs[unit];
1111
1112 bzero(&raid->acc_totals, sizeof(raid->acc_totals));
1113 return(0);
1114 }
1115 break;
1116
1117 case RAIDFRAME_GET_ACCTOTALS:
1118 {
1119 RF_AccTotals_t *totals = (RF_AccTotals_t *)data;
1120 RF_Raid_t *raid = raidPtrs[unit];
1121
1122 *totals = raid->acc_totals;
1123 return(0);
1124 }
1125 break;
1126
1127 case RAIDFRAME_KEEP_ACCTOTALS:
1128 {
1129 RF_Raid_t *raid = raidPtrs[unit];
1130 int *keep = (int *)data;
1131
1132 raid->keep_acc_totals = *keep;
1133 return(0);
1134 }
1135 break;
1136
1137 case RAIDFRAME_GET_SIZE:
1138 *(int *) data = raidPtrs[unit]->totalSectors;
1139 return(0);
1140
1141 #define RAIDFRAME_RECON 1
1142 /* XXX The above should probably be set somewhere else!! GO */
1143 #if RAIDFRAME_RECON > 0
1144
1145 /* fail a disk & optionally start reconstruction */
1146 case RAIDFRAME_FAIL_DISK:
1147 rr = (struct rf_recon_req *) data;
1148
1149 if (rr->row < 0 || rr->row >= raidPtrs[unit]->numRow
1150 || rr->col < 0 || rr->col >= raidPtrs[unit]->numCol)
1151 return(EINVAL);
1152
1153 printf("Failing the disk: row: %d col: %d\n",rr->row,rr->col);
1154
1155 /* make a copy of the recon request so that we don't
1156 rely on the user's buffer */
1157 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1158 bcopy(rr, rrcopy, sizeof(*rr));
1159 rrcopy->raidPtr = (void *) raidPtrs[unit];
1160
1161 LOCK_RECON_Q_MUTEX();
1162 rrcopy->next = recon_queue;
1163 recon_queue = rrcopy;
1164 wakeup(&recon_queue);
1165 UNLOCK_RECON_Q_MUTEX();
1166
1167 return(0);
1168
1169 /* invoke a copyback operation after recon on whatever
1170 disk needs it, if any */
1171 case RAIDFRAME_COPYBACK:
1172 /* borrow the current thread to get this done */
1173 raidPtrs[unit]->proc = p; /* ICK.. but needed :-p GO */
1174 rf_CopybackReconstructedData(raidPtrs[unit]);
1175 return(0);
1176
1177 /* return the percentage completion of reconstruction */
1178 case RAIDFRAME_CHECKRECON:
1179 row = *(int *) data;
1180 if (row < 0 || row >= raidPtrs[unit]->numRow)
1181 return(EINVAL);
1182 if (raidPtrs[unit]->status[row] != rf_rs_reconstructing)
1183 *(int *) data = 100;
1184 else
1185 *(int *) data = raidPtrs[unit]->reconControl[row]->percentComplete;
1186 return(0);
1187
1188 /* the sparetable daemon calls this to wait for the
1189 kernel to need a spare table.
1190 * this ioctl does not return until a spare table is needed.
1191 * XXX -- calling mpsleep here in the ioctl code is almost
1192 certainly wrong and evil. -- XXX
1193 * XXX -- I should either compute the spare table in the
1194 kernel, or have a different -- XXX
1195 * XXX -- interface (a different character device) for
1196 delivering the table -- XXX
1197 */
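	/*
	 * For illustration only (not part of this driver): a user-level
	 * spare-table daemon would drive this interface roughly as follows.
	 * The device path and the compute_spare_table() helper are
	 * hypothetical.
	 *
	 *	fd = open("/dev/rraid0c", O_RDWR);
	 *	for (;;) {
	 *		RF_SparetWait_t req;
	 *		if (ioctl(fd, RAIDFRAME_SPARET_WAIT, &req) < 0)
	 *			break;	(woken by RAIDFRAME_ABORT_SPARET_WAIT)
	 *		table = compute_spare_table(&req);
	 *		ioctl(fd, RAIDFRAME_SEND_SPARET, &table);
	 *	}
	 */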
1198 #if 0
1199 case RAIDFRAME_SPARET_WAIT:
1200 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1201 while (!rf_sparet_wait_queue) mpsleep(&rf_sparet_wait_queue, (PZERO+1)|PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1202 waitreq = rf_sparet_wait_queue;
1203 rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1204 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1205
1206 *((RF_SparetWait_t *) data) = *waitreq; /* structure assignment */
1207
1208 RF_Free(waitreq, sizeof(*waitreq));
1209 return(0);
1210
1211
1212 /* wakes up a process waiting on SPARET_WAIT and puts an
1213 error code in it that will cause the daemon to exit */
1214 case RAIDFRAME_ABORT_SPARET_WAIT:
1215 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1216 waitreq->fcol = -1;
1217 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1218 waitreq->next = rf_sparet_wait_queue;
1219 rf_sparet_wait_queue = waitreq;
1220 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1221 wakeup(&rf_sparet_wait_queue);
1222 return(0);
1223
1224 /* used by the spare table daemon to deliver a spare table
1225 into the kernel */
1226 case RAIDFRAME_SEND_SPARET:
1227
1228 /* install the spare table */
1229 retcode = rf_SetSpareTable(raidPtrs[unit],*(void **) data);
1230
1231 /* respond to the requestor. the return status of the
1232 spare table installation is passed in the "fcol" field */
1233 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1234 waitreq->fcol = retcode;
1235 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1236 waitreq->next = rf_sparet_resp_queue;
1237 rf_sparet_resp_queue = waitreq;
1238 wakeup(&rf_sparet_resp_queue);
1239 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1240
1241 return(retcode);
1242 #endif
1243
1244
1245 #endif /* RAIDFRAME_RECON > 0 */
1246
1247 default: break; /* fall through to the os-specific code below */
1248
1249 }
1250
1251 if (!raidPtrs[unit]->valid)
1252 return(EINVAL);
1253
1254 /*
1255 * Add support for "regular" device ioctls here.
1256 */
1257
1258 switch (cmd) {
1259 case DIOCGDINFO:
1260 db1_printf(("DIOCGDINFO %d %d\n",(int)dev,(int)DISKPART(dev)));
1261 *(struct disklabel *)data = *(rs->sc_dkdev.dk_label);
1262 break;
1263
1264 case DIOCGPART:
1265 db1_printf(("DIOCGPART: %d %d\n",(int)dev,(int)DISKPART(dev)));
1266 ((struct partinfo *)data)->disklab = rs->sc_dkdev.dk_label;
1267 ((struct partinfo *)data)->part =
1268 &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1269 break;
1270
1271 case DIOCWDINFO:
1272 db1_printf(("DIOCWDINFO\n"));
1273 case DIOCSDINFO:
1274 db1_printf(("DIOCSDINFO\n"));
1275 if ((error = raidlock(rs)) != 0)
1276 return (error);
1277
1278 rs->sc_flags |= RAIDF_LABELLING;
1279
1280 error = setdisklabel(rs->sc_dkdev.dk_label,
1281 (struct disklabel *)data, 0, rs->sc_dkdev.dk_cpulabel);
1282 if (error == 0) {
1283 if (cmd == DIOCWDINFO)
1284 error = writedisklabel(RAIDLABELDEV(dev),
1285 raidstrategy, rs->sc_dkdev.dk_label,
1286 rs->sc_dkdev.dk_cpulabel);
1287 }
1288
1289 rs->sc_flags &= ~RAIDF_LABELLING;
1290
1291 raidunlock(rs);
1292
1293 if (error)
1294 return (error);
1295 break;
1296
1297 case DIOCWLABEL:
1298 db1_printf(("DIOCWLABEL\n"));
1299 if (*(int *)data != 0)
1300 rs->sc_flags |= RAIDF_WLABEL;
1301 else
1302 rs->sc_flags &= ~RAIDF_WLABEL;
1303 break;
1304
1305 case DIOCGDEFLABEL:
1306 db1_printf(("DIOCGDEFLABEL\n"));
1307 raidgetdefaultlabel(raidPtrs[unit], rs,
1308 (struct disklabel *)data);
1309 break;
1310
1311 default:
1312 retcode = ENOTTY; /* XXXX ?? OR EINVAL ? */
1313 }
1314 return(retcode);
1315
1316 }
1317
1318
1319 /* raidinit -- complete the rest of the initialization for the
1320 RAIDframe device. */
1321
1322
1323 static int
1324 raidinit(dev, raidPtr,unit)
1325 dev_t dev;
1326 RF_Raid_t *raidPtr;
1327 int unit;
1328 {
1329 int retcode;
1330 /* int ix; */
1331 /* struct raidbuf *raidbp; */
1332 struct raid_softc *rs;
1333
1334 retcode = 0;
1335
1336 rs = &raid_softc[unit];
1337 pool_init(&rs->sc_cbufpool, sizeof(struct raidbuf), 0,
1338 0, 0, "raidpl", 0, NULL, NULL, M_DEVBUF);
1339
1340
1341 /* XXX should check return code first... */
1342 rs->sc_flags |= RAIDF_INITED;
1343
1344 sprintf(rs->sc_xname, "raid%d", unit); /* XXX doesn't check bounds.*/
1345
1346 rs->sc_dkdev.dk_name = rs->sc_xname;
1347 /* disk_attach actually creates space for the CPU disklabel, among
1348 other things, so it's critical to call this *BEFORE* we
1349 try putzing with disklabels. */
1350 disk_attach(&rs->sc_dkdev);
1351
1352 /* XXX There may be a weird interaction here between this, and
1353 protectedSectors, as used in RAIDframe. */
1354 rs->sc_size = raidPtr->totalSectors;
1355 rs->sc_dev = dev;
1356 return(retcode);
1357 }
1358
1359
1360 /*********************************************************
1361 *
1362 * initialization code called at boot time (startup.c)
1363 *
1364 ********************************************************/
1365 int rf_boot()
1366 {
1367 int i, rc;
1368
1369 rc = rf_mutex_init(&rf_sparet_wait_mutex);
1370 if (rc) {
1371 RF_PANIC();
1372 }
1373 rc = rf_mutex_init(&rf_async_done_q_mutex);
1374 if (rc) {
1375 RF_PANIC();
1376 }
1377 rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
1378 recon_queue = NULL;
1379 rf_async_done_qh = rf_async_done_qt = NULL;
1380 for (i=0; i<numraid; i++)
1381 raidPtrs[i] = NULL;
1382 rc = rf_BootRaidframe();
1383 if (rc == 0)
1384 printf("Kernelized RAIDframe activated\n");
1385 else
1386 rf_kbooted = RFK_BOOT_BAD;
1387 return(rc);
1388 }
1389
1390 /*
1391 * This kernel thread never exits. It is created once, and persists
1392 * until the system reboots.
1393 */
1394 void rf_ReconKernelThread()
1395 {
1396 struct rf_recon_req *req;
1397 int s;
1398
1399 /* XXX not sure what spl() level we should be at here... probably splbio() */
1400 s=splbio();
1401
1402 while (1) {
1403 /* grab the next reconstruction request from the queue */
1404 LOCK_RECON_Q_MUTEX();
1405 while (!recon_queue) {
1406 UNLOCK_RECON_Q_MUTEX();
1407 tsleep(&recon_queue, PRIBIO | PCATCH, "raidframe recon", 0);
1408 LOCK_RECON_Q_MUTEX();
1409 }
1410 req = recon_queue;
1411 recon_queue = recon_queue->next;
1412 UNLOCK_RECON_Q_MUTEX();
1413
1414 /*
1415 * If flags specifies that we should start recon, this call
1416 * will not return until reconstruction completes, fails, or is aborted.
1417 */
1418 rf_FailDisk((RF_Raid_t *) req->raidPtr, req->row, req->col,
1419 ((req->flags&RF_FDFLAGS_RECON) ? 1 : 0));
1420
1421 RF_Free(req, sizeof(*req));
1422 }
1423 }
1424 /* wake up the daemon & tell it to get us a spare table
1425 * XXX
1426 * the entries in the queues should be tagged with the raidPtr
1427 * so that in the extremely rare case that two recons happen at once, we know for
1428 * which device we're requesting a spare table
1429 * XXX
1430 */
1431 int rf_GetSpareTableFromDaemon(req)
1432 RF_SparetWait_t *req;
1433 {
1434 int retcode;
1435
1436 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1437 req->next = rf_sparet_wait_queue;
1438 rf_sparet_wait_queue = req;
1439 wakeup(&rf_sparet_wait_queue);
1440
1441 /* mpsleep unlocks the mutex */
1442 while (!rf_sparet_resp_queue) {
1443 tsleep(&rf_sparet_resp_queue, PRIBIO | PCATCH,
1444 "raidframe getsparetable", 0);
1445 #if 0
1446 mpsleep(&rf_sparet_resp_queue, PZERO, "sparet resp", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1447 #endif
1448 }
1449 req = rf_sparet_resp_queue;
1450 rf_sparet_resp_queue = req->next;
1451 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1452
1453 retcode = req->fcol;
1454 RF_Free(req, sizeof(*req)); /* this is not the same req as we alloc'd */
1455 return(retcode);
1456 }
1457
1458 /* a wrapper around rf_DoAccess that extracts appropriate info from the bp & passes it down.
1459 * any calls originating in the kernel must use non-blocking I/O
1460 * do some extra sanity checking to return "appropriate" error values for
1461 * certain conditions (to make some standard utilities work)
1462 */
1463 int rf_DoAccessKernel(raidPtr, bp, flags, cbFunc, cbArg)
1464 RF_Raid_t *raidPtr;
1465 struct buf *bp;
1466 RF_RaidAccessFlags_t flags;
1467 void (*cbFunc)(struct buf *);
1468 void *cbArg;
1469 {
1470 RF_SectorCount_t num_blocks, pb, sum;
1471 RF_RaidAddr_t raid_addr;
1472 int retcode;
1473 struct partition *pp;
1474 daddr_t blocknum;
1475 int unit;
1476 struct raid_softc *rs;
1477
1478 /* XXX The dev_t used here should be for /dev/[r]raid* !!! */
1479
1480 unit = raidPtr->raidid;
1481 rs = &raid_softc[unit];
1482
1483 /* Ok, for the bp we have here, bp->b_blkno is relative to the
1484 partition.. Need to make it absolute to the underlying
1485 device.. */
1486
1487 blocknum = bp->b_blkno;
1488 if (DISKPART(bp->b_dev) != RAW_PART) {
1489 pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
1490 blocknum += pp->p_offset;
1491 db1_printf(("updated: %d %d\n",DISKPART(bp->b_dev),
1492 pp->p_offset));
1493 } else {
1494 db1_printf(("Is raw..\n"));
1495 }
1496 db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno, (int) blocknum));
1497
1498 db1_printf(("bp->b_bcount = %d\n",(int)bp->b_bcount));
1499 db1_printf(("bp->b_resid = %d\n",(int)bp->b_resid));
1500
1501 /* *THIS* is where we adjust what block we're going to... but
1502 DO NOT TOUCH bp->b_blkno!!! */
1503 raid_addr = blocknum;
1504
1505 num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
1506 pb = (bp->b_bcount&raidPtr->sectorMask) ? 1 : 0;
1507 sum = raid_addr + num_blocks + pb;
1508 if (1 || rf_debugKernelAccess) {
1509 db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
1510 (int)raid_addr, (int)sum,(int)num_blocks,
1511 (int)pb,(int)bp->b_resid));
1512 }
1513
1514
1515 if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
1516 || (sum < num_blocks) || (sum < pb))
1517 {
1518 bp->b_error = ENOSPC;
1519 bp->b_flags |= B_ERROR;
1520 bp->b_resid = bp->b_bcount;
1521 biodone(bp);
1522 return(bp->b_error);
1523 }
1524
1525 /*
1526 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
1527 */
1528
1529 if (bp->b_bcount & raidPtr->sectorMask) {
1530 bp->b_error = EINVAL;
1531 bp->b_flags |= B_ERROR;
1532 bp->b_resid = bp->b_bcount;
1533 biodone(bp);
1534 return(bp->b_error);
1535 }
1536 db1_printf(("Calling DoAccess..\n"));
1537
1538 /* don't ever condition on bp->b_flags & B_WRITE.
1539 always condition on B_READ instead */
1540 retcode = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
1541 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
1542 0, raid_addr, num_blocks, bp->b_un.b_addr,
1543 bp, NULL, NULL, RF_DAG_NONBLOCKING_IO|flags,
1544 NULL, cbFunc, cbArg);
1545 db1_printf(("After call to DoAccess: 0x%x 0x%x %d\n",(int)bp,
1546 (int)bp->b_data,(int)bp->b_resid));
1547 return(retcode);
1548 }
1549
1550 /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
1551
1552 int rf_DispatchKernelIO(queue, req)
1553 RF_DiskQueue_t *queue;
1554 RF_DiskQueueData_t *req;
1555 {
1556 int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
1557 struct buf *bp;
1558 struct raidbuf *raidbp=NULL;
1559 struct raid_softc *rs;
1560 int unit;
1561
1562 /* XXX along with the vnode, we also need the softc associated with
1563 this device.. */
1564
1565 req->queue = queue;
1566
1567 unit = queue->raidPtr->raidid;
1568
1569 db1_printf(("DispatchKernelIO unit: %d\n",unit));
1570
1571 if (unit >= numraid) {
1572 printf("Invalid unit number: %d %d\n",unit,numraid);
1573 panic("Invalid Unit number in rf_DispatchKernelIO\n");
1574 }
1575
1576 rs = &raid_softc[unit];
1577
1578 /* XXX is this the right place? */
1579 disk_busy(&rs->sc_dkdev);
1580
1581 bp = req->bp;
1582
1583 raidbp = RAIDGETBUF(rs);
1584
1585 raidbp->rf_flags = 0; /* XXX not really used anywhere... */
1586
1587 /*
1588 * context for raidiodone
1589 */
1590 raidbp->rf_obp = bp;
1591 raidbp->req = req;
1592
1593 switch (req->type) {
1594 case RF_IO_TYPE_NOP: /* used primarily to unlock a locked queue */
1595 /*
1596 Dprintf2("rf_DispatchKernelIO: NOP to r %d c %d\n",
1597 queue->row, queue->col);
1598 */
1599 /* XXX need to do something extra here.. */
1600 /* I'm leaving this in, as I've never actually seen it
1601 used, and I'd like folks to report it... GO */
1602 printf("WAKEUP CALLED\n");
1603 queue->numOutstanding++;
1604
1605 /* XXX need to glue the original buffer into this?? */
1606
1607 KernelWakeupFunc(&raidbp->rf_buf);
1608 break;
1609
1610 case RF_IO_TYPE_READ:
1611 case RF_IO_TYPE_WRITE:
1612
1613 if (req->tracerec) {
1614 RF_ETIMER_START(req->tracerec->timer);
1615 }
1616
1617
1618 InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
1619 op | bp->b_flags, queue->rf_cinfo->ci_dev,
1620 req->sectorOffset, req->numSector,
1621 req->buf, KernelWakeupFunc, (void *) req,
1622 queue->raidPtr->logBytesPerSector, req->b_proc);
1623
1624 if (rf_debugKernelAccess) {
1625 db1_printf(("dispatch: bp->b_blkno = %ld\n",
1626 (long) bp->b_blkno));
1627 }
1628 queue->numOutstanding++;
1629 queue->last_deq_sector = req->sectorOffset;
1630 /* acc wouldn't have been let in if there were any
1631 pending reqs at any other priority */
1632 queue->curPriority = req->priority;
1633 /*
1634 Dprintf3("rf_DispatchKernelIO: %c to row %d col %d\n",
1635 req->type, queue->row, queue->col);
1636 */
1637
1638 db1_printf(("Going for %c to unit %d row %d col %d\n",
1639 req->type, unit, queue->row, queue->col));
1640 db1_printf(("sector %d count %d (%d bytes) %d\n",
1641 (int) req->sectorOffset, (int) req->numSector,
1642 (int) (req->numSector <<
1643 queue->raidPtr->logBytesPerSector),
1644 (int) queue->raidPtr->logBytesPerSector));
1645 if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
1646 raidbp->rf_buf.b_vp->v_numoutput++;
1647 }
1648
1649 VOP_STRATEGY(&raidbp->rf_buf);
1650
1651 break;
1652
1653 default:
1654 panic("bad req->type in rf_DispatchKernelIO");
1655 }
1656 db1_printf(("Exiting from DispatchKernelIO\n"));
1657 return(0);
1658 }
1659
1660 /* this is the callback function associated with a I/O invoked from
1661 kernel code.
1662 */
1663 static void KernelWakeupFunc(vbp)
1664 struct buf *vbp;
1665 {
1666 RF_DiskQueueData_t *req = NULL;
1667 RF_DiskQueue_t *queue;
1668 struct raidbuf *raidbp = (struct raidbuf *)vbp;
1669 struct buf *bp;
1670 struct raid_softc *rs;
1671 int unit;
1672 register int s;
1673
1674 s=splbio(); /* XXX */
1675 db1_printf(("recovering the request queue:\n"));
1676 req = raidbp->req;
1677
1678 bp = raidbp->rf_obp;
1679 db1_printf(("bp=0x%x\n",(int)bp));
1680
1681 queue = (RF_DiskQueue_t *) req->queue;
1682
1683 if (raidbp->rf_buf.b_flags & B_ERROR) {
1684 #if 0
1685 printf("Setting bp->b_flags!!! %d\n",raidbp->rf_buf.b_error);
1686 #endif
1687 bp->b_flags |= B_ERROR;
1688 bp->b_error = raidbp->rf_buf.b_error ?
1689 raidbp->rf_buf.b_error : EIO;
1690 }
1691
1692 db1_printf(("raidbp->rf_buf.b_bcount=%d\n",(int)raidbp->rf_buf.b_bcount));
1693 db1_printf(("raidbp->rf_buf.b_bufsize=%d\n",(int)raidbp->rf_buf.b_bufsize));
1694 db1_printf(("raidbp->rf_buf.b_resid=%d\n",(int)raidbp->rf_buf.b_resid));
1695 db1_printf(("raidbp->rf_buf.b_data=0x%x\n",(int)raidbp->rf_buf.b_data));
1696
1697 /* XXX methinks this could be wrong... */
1698 #if 1
1699 bp->b_resid = raidbp->rf_buf.b_resid;
1700 #endif
1701
1702 if (req->tracerec) {
1703 RF_ETIMER_STOP(req->tracerec->timer);
1704 RF_ETIMER_EVAL(req->tracerec->timer);
1705 RF_LOCK_MUTEX(rf_tracing_mutex);
1706 req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1707 req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1708 req->tracerec->num_phys_ios++;
1709 RF_UNLOCK_MUTEX(rf_tracing_mutex);
1710 }
1711
1712 bp->b_bcount = raidbp->rf_buf.b_bcount;/* XXXX ?? */
1713
1714 unit = queue->raidPtr->raidid; /* *Much* simpler :-> */
1715
1716 rs = &raid_softc[unit];
1717 RAIDPUTBUF(rs,raidbp);
1718
1719 if (bp->b_resid==0) {
1720 db1_printf(("Disk is no longer busy for this buffer... %d %ld %ld\n",
1721 unit, bp->b_resid, bp->b_bcount));
1722 /* XXX is this the right place for a disk_unbusy()??!??!?!? */
1723 disk_unbusy(&rs->sc_dkdev, (bp->b_bcount - bp->b_resid));
1724 } else {
1725 db1_printf(("b_resid is still %ld\n",bp->b_resid));
1726 }
1727
1728 rf_DiskIOComplete(queue, req, (bp->b_flags & B_ERROR) ? 1 : 0);
1729 (req->CompleteFunc)(req->argument, (bp->b_flags & B_ERROR) ? 1 : 0);
1730 /* printf("Exiting KernelWakeupFunc\n"); */
1731
1732 splx(s); /* XXX */
1733 }
1734
1735
1736
1737 /*
1738 * initialize a buf structure for doing an I/O in the kernel.
1739 */
1740 static void InitBP(
1741 struct buf *bp,
1742 struct vnode *b_vp,
1743 unsigned rw_flag,
1744 dev_t dev,
1745 RF_SectorNum_t startSect,
1746 RF_SectorCount_t numSect,
1747 caddr_t buf,
1748 void (*cbFunc)(struct buf *),
1749 void *cbArg,
1750 int logBytesPerSector,
1751 struct proc *b_proc)
1752 {
1753 /* bp->b_flags = B_PHYS | rw_flag; */
1754 bp->b_flags = B_CALL | rw_flag; /* XXX need B_PHYS here too??? */
1755 bp->b_bcount = numSect << logBytesPerSector;
1756 bp->b_bufsize = bp->b_bcount;
1757 bp->b_error = 0;
1758 bp->b_dev = dev;
1759 db1_printf(("bp->b_dev is %d\n", dev));
1760 bp->b_un.b_addr = buf;
1761 db1_printf(("bp->b_data=0x%x\n",(int)bp->b_data));
1762
1763 bp->b_blkno = startSect;
1764 bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
1765 db1_printf(("b_bcount is: %d\n",(int)bp->b_bcount));
1766 if (bp->b_bcount == 0) {
1767 panic("bp->b_bcount is zero in InitBP!!\n");
1768 }
1769 bp->b_proc = b_proc;
1770 bp->b_iodone = cbFunc;
1771 bp->b_vp = b_vp;
1772
1773 }
1774 #endif /* KERNEL */
1775
1776 /* Extras... */
1777
1778 unsigned int rpcc()
1779 {
1780 /* XXX no clue what this is supposed to do.. my guess is
1781 that it's supposed to read the CPU cycle counter... */
1782 /* db1_printf("this is supposed to do something useful too!??\n"); */
1783 return(0);
1784 }
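/*
 * A guess at what rpcc() was meant to do: on the Alpha, where RAIDframe
 * was originally developed, "rpcc" is the read-process-cycle-counter
 * instruction.  The sketch below is illustrative only, assumes an Alpha
 * target with GCC-style inline assembly, and is not part of the original
 * driver.
 */
#if 0
unsigned int rpcc()
{
	unsigned long ccnt;

	/* the low 32 bits of the returned value hold the cycle count */
	__asm__ __volatile__("rpcc %0" : "=r" (ccnt));
	return((unsigned int)(ccnt & 0xffffffff));
}
#endif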
1785
1786 #if 0
1787 int rf_GetSpareTableFromDaemon(req)
1788 RF_SparetWait_t *req;
1789 {
1790 int retcode=1;
1791 printf("This is supposed to do something useful!!\n"); /* XXX */
1792
1793 return(retcode);
1794
1795 }
1796 #endif
1797
1798 static void
1799 raidgetdefaultlabel(raidPtr, rs, lp)
1800 RF_Raid_t *raidPtr;
1801 struct raid_softc *rs;
1802 struct disklabel *lp;
1803 {
1804 db1_printf(("Building a default label...\n"));
1805 bzero(lp, sizeof(*lp));
1806
1807 /* fabricate a label... */
1808 lp->d_secperunit = raidPtr->totalSectors;
1809 lp->d_secsize = raidPtr->bytesPerSector;
1810 lp->d_nsectors = 1024 * (1024 / raidPtr->bytesPerSector);
1811 lp->d_ntracks = 1;
1812 lp->d_ncylinders = raidPtr->totalSectors / lp->d_nsectors;
1813 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1814
1815 strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
1816 lp->d_type = DTYPE_RAID;
1817 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
1818 lp->d_rpm = 3600;
1819 lp->d_interleave = 1;
1820 lp->d_flags = 0;
1821
1822 lp->d_partitions[RAW_PART].p_offset = 0;
1823 lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
1824 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1825 lp->d_npartitions = RAW_PART + 1;
1826
1827 lp->d_magic = DISKMAGIC;
1828 lp->d_magic2 = DISKMAGIC;
1829 lp->d_checksum = dkcksum(lp);
1830
1831 }
1832
1833 /*
1834 * Read the disklabel from the raid device. If one is not present, fake one
1835 * up.
1836 */
1837 static void
1838 raidgetdisklabel(dev)
1839 dev_t dev;
1840 {
1841 int unit = raidunit(dev);
1842 struct raid_softc *rs = &raid_softc[unit];
1843 char *errstring;
1844 struct disklabel *lp = rs->sc_dkdev.dk_label;
1845 struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
1846 RF_Raid_t *raidPtr;
1847
1848 db1_printf(("Getting the disklabel...\n"));
1849
1850 bzero(clp, sizeof(*clp));
1851
1852 raidPtr = raidPtrs[unit];
1853
1854 raidgetdefaultlabel(raidPtr, rs, lp);
1855
1856 /*
1857 * Call the generic disklabel extraction routine.
1858 */
1859 errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
1860 rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
1861 if (errstring)
1862 raidmakedisklabel(rs);
1863 else {
1864 int i;
1865 struct partition *pp;
1866
1867 /*
1868 * Sanity check whether the found disklabel is valid.
1869 *
1870 * This is necessary since total size of the raid device
1871 * may vary when an interleave is changed even though exactly
1872 * same components are used, and an old disklabel may be used
1873 * if one is found.
1874 */
1875 if (lp->d_secperunit != rs->sc_size)
1876 printf("WARNING: %s: "
1877 "total sector size in disklabel (%d) != "
1878 "the size of raid (%d)\n", rs->sc_xname,
1879 lp->d_secperunit, rs->sc_size);
1880 for (i = 0; i < lp->d_npartitions; i++) {
1881 pp = &lp->d_partitions[i];
1882 if (pp->p_offset + pp->p_size > rs->sc_size)
1883 printf("WARNING: %s: end of partition `%c' "
1884 "exceeds the size of raid (%d)\n",
1885 rs->sc_xname, 'a' + i, rs->sc_size);
1886 }
1887 }
1888
1889 }
1890
1891 /*
1892 * Take care of things one might want to take care of in the event
1893 * that a disklabel isn't present.
1894 */
1895 static void
1896 raidmakedisklabel(rs)
1897 struct raid_softc *rs;
1898 {
1899 struct disklabel *lp = rs->sc_dkdev.dk_label;
1900 db1_printf(("Making a label..\n"));
1901
1902 /*
1903 * For historical reasons, if there's no disklabel present
1904 * the raw partition must be marked FS_BSDFFS.
1905 */
1906
1907 lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
1908
1909 strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
1910
1911 lp->d_checksum = dkcksum(lp);
1912 }
1913
1914 /*
1915 * Lookup the provided name in the filesystem. If the file exists,
1916 * is a valid block device, and isn't being used by anyone else,
1917 * set *vpp to the file's vnode.
1918 * You'll find the original of this in ccd.c
1919 */
1920 int
1921 raidlookup(path, p, vpp)
1922 char *path;
1923 struct proc *p;
1924 struct vnode **vpp; /* result */
1925 {
1926 struct nameidata nd;
1927 struct vnode *vp;
1928 struct vattr va;
1929 int error;
1930
1931 db1_printf(("Doing raidlookup...: %s 0x%x\n",path,(int)p));
1932
1933 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
1934 if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
1935 #ifdef DEBUG
1936 printf("RAIDframe: vn_open returned %d\n",error);
1937 #endif
1938 return (error);
1939 }
1940 vp = nd.ni_vp;
1941 if (vp->v_usecount > 1) {
1942 VOP_UNLOCK(vp, 0);
1943 (void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
1944 return (EBUSY);
1945 }
1946 if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
1947 VOP_UNLOCK(vp, 0);
1948 (void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
1949 return (error);
1950 }
1951 /* XXX: eventually we should handle VREG, too. */
1952 if (va.va_type != VBLK) {
1953 VOP_UNLOCK(vp, 0);
1954 (void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
1955 return (ENOTBLK);
1956 }
1957 VOP_UNLOCK(vp, 0);
1958 *vpp = vp;
1959 return (0);
1960 }
1961
1962 /*
1963 * Wait interruptibly for an exclusive lock.
1964 *
1965 * XXX
1966 * Several drivers do this; it should be abstracted and made MP-safe.
1967 * (Hmm... where have we seen this warning before :-> GO )
1968 */
1969 static int
1970 raidlock(rs)
1971 struct raid_softc *rs;
1972 {
1973 int error;
1974
1975 while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
1976 rs->sc_flags |= RAIDF_WANTED;
1977 if ((error =
1978 tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
1979 return (error);
1980 }
1981 rs->sc_flags |= RAIDF_LOCKED;
1982 return (0);
1983 }
1984
1985 /*
1986 * Unlock and wake up any waiters.
1987 */
1988 static void
1989 raidunlock(rs)
1990 struct raid_softc *rs;
1991 {
1992
1993 rs->sc_flags &= ~RAIDF_LOCKED;
1994 if ((rs->sc_flags & RAIDF_WANTED) != 0) {
1995 rs->sc_flags &= ~RAIDF_WANTED;
1996 wakeup(rs);
1997 }
1998 }
1999