rf_netbsdkintf.c revision 1.1 1 /* $NetBSD: rf_netbsdkintf.c,v 1.1 1998/11/13 04:20:31 oster Exp $ */
2 /*-
3 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Greg Oster; Jason R. Thorpe.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1988 University of Utah.
40 * Copyright (c) 1990, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * This code is derived from software contributed to Berkeley by
44 * the Systems Programming Group of the University of Utah Computer
45 * Science Department.
46 *
47 * Redistribution and use in source and binary forms, with or without
48 * modification, are permitted provided that the following conditions
49 * are met:
50 * 1. Redistributions of source code must retain the above copyright
51 * notice, this list of conditions and the following disclaimer.
52 * 2. Redistributions in binary form must reproduce the above copyright
53 * notice, this list of conditions and the following disclaimer in the
54 * documentation and/or other materials provided with the distribution.
55 * 3. All advertising materials mentioning features or use of this software
56 * must display the following acknowledgement:
57 * This product includes software developed by the University of
58 * California, Berkeley and its contributors.
59 * 4. Neither the name of the University nor the names of its contributors
60 * may be used to endorse or promote products derived from this software
61 * without specific prior written permission.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE.
74 *
75 * from: Utah $Hdr: cd.c 1.6 90/11/28$
76 *
77 * @(#)cd.c 8.2 (Berkeley) 11/16/93
78 */
79
80
81
82
83 /*
84 * Copyright (c) 1995 Carnegie-Mellon University.
85 * All rights reserved.
86 *
87 * Authors: Mark Holland, Jim Zelenka
88 *
89 * Permission to use, copy, modify and distribute this software and
90 * its documentation is hereby granted, provided that both the copyright
91 * notice and this permission notice appear in all copies of the
92 * software, derivative works or modified versions, and any portions
93 * thereof, and that both notices appear in supporting documentation.
94 *
95 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
96 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
97 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
98 *
99 * Carnegie Mellon requests users of this software to return to
100 *
101 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
102 * School of Computer Science
103 * Carnegie Mellon University
104 * Pittsburgh PA 15213-3890
105 *
106 * any improvements or extensions that they make and grant Carnegie the
107 * rights to redistribute these changes.
108 */
109
110 /***********************************************************
111 *
112 * rf_kintf.c -- the kernel interface routines for RAIDframe
113 *
114 ***********************************************************/
115 /*
116 * :
117 * Log: rf_kintf.c,v
118 * Revision 1.57 1996/07/19 16:12:20 jimz
119 * remove addition of protectedSectors in InitBP- it's already
120 * done in the diskqueue code
121 *
122 * Revision 1.56 1996/07/17 21:00:58 jimz
123 * clean up timer interface, tracing
124 *
125 * Revision 1.55 1996/06/17 03:00:54 jimz
126 * Change RAIDFRAME_GET_INFO interface to do its own copyout()
127 * (because size of device config structure now exceeds 8k)
128 *
129 * Revision 1.54 1996/06/09 02:36:46 jimz
130 * lots of little crufty cleanup- fixup whitespace
131 * issues, comment #ifdefs, improve typing in some
132 * places (esp size-related)
133 *
134 * Revision 1.53 1996/06/07 21:33:04 jimz
135 * begin using consistent types for sector numbers,
136 * stripe numbers, row+col numbers, recon unit numbers
137 *
138 * Revision 1.52 1996/06/06 17:28:08 jimz
139 * track sector number of last I/O dequeued
140 *
141 * Revision 1.51 1996/06/05 18:06:02 jimz
142 * Major code cleanup. The Great Renaming is now done.
143 * Better modularity. Better typing. Fixed a bunch of
144 * synchronization bugs. Made a lot of global stuff
145 * per-desc or per-array. Removed dead code.
146 *
147 * Revision 1.50 1996/06/03 23:28:26 jimz
148 * more bugfixes
149 * check in tree to sync for IPDS runs with current bugfixes
150 * there still may be a problem with threads in the script test
151 * getting I/Os stuck- not trivially reproducible (runs ~50 times
152 * in a row without getting stuck)
153 *
154 * Revision 1.49 1996/06/02 17:31:48 jimz
155 * Moved a lot of global stuff into array structure, where it belongs.
156 * Fixed up paritylogging, pss modules in this manner. Some general
157 * code cleanup. Removed lots of dead code, some dead files.
158 *
159 * Revision 1.48 1996/05/31 22:26:54 jimz
160 * fix a lot of mapping problems, memory allocation problems
161 * found some weird lock issues, fixed 'em
162 * more code cleanup
163 *
164 * Revision 1.47 1996/05/30 12:59:18 jimz
165 * make etimer happier, more portable
166 *
167 * Revision 1.46 1996/05/30 11:29:41 jimz
168 * Numerous bug fixes. Stripe lock release code disagreed with the taking code
169 * about when stripes should be locked (I made it consistent: no parity, no lock)
170 * There was a lot of extra serialization of I/Os which I've removed- a lot of
171 * it was to calculate values for the cache code, which is no longer with us.
172 * More types, function, macro cleanup. Added code to properly quiesce the array
173 * on shutdown. Made a lot of stuff array-specific which was (bogusly) general
174 * before. Fixed memory allocation, freeing bugs.
175 *
176 * Revision 1.45 1996/05/27 18:56:37 jimz
177 * more code cleanup
178 * better typing
179 * compiles in all 3 environments
180 *
181 * Revision 1.44 1996/05/24 22:17:04 jimz
182 * continue code + namespace cleanup
183 * typed a bunch of flags
184 *
185 * Revision 1.43 1996/05/24 01:59:45 jimz
186 * another checkpoint in code cleanup for release
187 * time to sync kernel tree
188 *
189 * Revision 1.42 1996/05/23 22:17:54 jimz
190 * fix sector size hardcoding problems
191 *
192 * Revision 1.41 1996/05/23 21:46:35 jimz
193 * checkpoint in code cleanup (release prep)
194 * lots of types, function names have been fixed
195 *
196 * Revision 1.40 1996/05/23 13:18:07 jimz
197 * tracing_mutex -> rf_tracing_mutex
198 *
199 * Revision 1.39 1996/05/23 00:33:23 jimz
200 * code cleanup: move all debug decls to rf_options.c, all extern
201 * debug decls to rf_options.h, all debug vars preceded by rf_
202 *
203 * Revision 1.38 1996/05/20 16:15:32 jimz
204 * switch to rf_{mutex,cond}_{init,destroy}
205 *
206 * Revision 1.37 1996/05/10 16:23:47 jimz
207 * RF_offset -> RF_Offset
208 *
209 * Revision 1.36 1996/05/08 21:01:24 jimz
210 * fixed up enum type names that were conflicting with other
211 * enums and function names (ie, "panic")
212 * future naming trends will be towards RF_ and rf_ for
213 * everything raidframe-related
214 *
215 * Revision 1.35 1996/05/03 19:10:48 jimz
216 * change sanity checking for bogus I/Os to return more appropriate
217  * values (to make some user-level utilities happier with RAIDframe)
218 *
219 * Revision 1.34 1996/05/02 22:17:00 jimz
220 * When using DKUSAGE, send a bogus IO after configuring to let DKUSAGE know
221 * that we exist. This will let user-level programs doing group stats on the
222 * RF device function without error before RF gets its first IO
223 *
224 * Changed rf_device_config devs and spares fields to RF_RaidDisk_t
225 *
226 * Inc numOutstanding for the disk queue in rf_DispatchKernelIO if
227 * type is IO_TYPE_NOP. I'm not sure this is right, but it seems to be,
228 * because the disk IO completion routine wants to dec it, and doesn't
229 * care if there was no such IO.
230 *
231 * Revision 1.33 1996/05/02 15:05:44 jimz
232 * for now, rf_DoAccessKernel will reject non-sector-sized I/Os
233 * eventually, it should do something more clever...
234 * (and do it in DoAccess(), not just DoAccessKernel())
235 *
236 * Revision 1.32 1996/05/01 16:28:39 jimz
237 * get rid of uses of ccmn_ functions
238 *
239 * Revision 1.31 1996/05/01 15:42:17 jimz
240 * ccmn_* memory management is on the way out. This is an archival checkpoint-
241 * both the old and new code are in place (all the ccmn_ calls are #if 0). After
242 * this, the ccmn_ code will no longer appear.
243 *
244 * Revision 1.30 1996/04/22 15:53:13 jimz
245 * MAX_RAIDS -> NRAIDFRAME
246 *
247 * Revision 1.29 1995/12/12 18:10:06 jimz
248 * MIN -> RF_MIN, MAX -> RF_MAX, ASSERT -> RF_ASSERT
249 * fix 80-column brain damage in comments
250 *
251 * Revision 1.28 1995/12/01 19:11:01 root
252 * added copyright info
253 *
254 * Revision 1.27 1995/11/28 18:56:40 wvcii
255 * disabled buffer copy in rf_write
256 *
257 * Revision 1.26 1995/10/06 16:37:08 jimz
258 * get struct bufs from ubc, not cam
259 * copy all write data, and operate on copy
260 * (temporary hack to get around dags in PQ that want
261 * to Xor into user write buffers)
262 *
263 * Revision 1.25 1995/09/30 22:23:08 jimz
264 * do not require raid to be active to perform ACCTOTAL ioctl
265 *
266 * Revision 1.24 1995/09/30 20:39:08 jimz
267 * added new ioctls:
268 * RAIDFRAME_RESET_ACCTOTALS
269 * RAIDFRAME_GET_ACCTOTALS
270 * RAIDFRAME_KEEP_ACCTOTALS
271 *
272 * Revision 1.23 1995/09/20 21:11:59 jimz
273 * include dfstrace.h in KERNEL block
274 * (even though it's a kernel-only file, this makes the depend process
275 * at user-level happy. Why the user-level Makefile wants to depend
276 * kintf.c is less clear, but this is a workaround).
277 *
278 * Revision 1.22 1995/09/19 23:19:03 jimz
279 * added DKUSAGE support
280 *
281 */
282
283
284
285
286 #ifdef _KERNEL
287 #define KERNEL
288 #endif
289
290
291
292 #ifdef KERNEL
293
294 #include <sys/errno.h>
295
296 #ifdef __NetBSD__
297 #include "raid.h"
298 #include <sys/param.h>
299 #include <sys/pool.h>
300 #include <sys/queue.h>
301 #include <sys/disk.h>
302 #include <sys/device.h>
303 #include <sys/stat.h>
304 #include <sys/ioctl.h>
305 #include <sys/fcntl.h>
306 #include <sys/systm.h>
307 #include <sys/namei.h>
308 #include <sys/vnode.h>
309 #endif
310
311 #include <sys/param.h>
312 #include <sys/types.h>
313
314 #include <machine/types.h>
315
316 #include <sys/disklabel.h>
317
318 #include <sys/conf.h>
319
320
321 #ifdef __NetBSD__
322 #include <sys/lock.h>
323 #endif /* __NetBSD__ */
324
325
326 #include <sys/buf.h>
327 #include <sys/user.h>
328 #include "rf_raid.h"
329 #include "rf_raidframe.h"
330 #include "rf_dag.h"
331 #include "rf_dagflags.h"
332 #include "rf_diskqueue.h"
333 #include "rf_acctrace.h"
334 #include "rf_etimer.h"
335 #include "rf_general.h"
336 #include "rf_debugMem.h"
337 #include "rf_kintf.h"
338 #include "rf_options.h"
339 #include "rf_driver.h"
340 #include "rf_parityscan.h"
341 #include "rf_debugprint.h"
342 #include "rf_threadstuff.h"
343
344 int rf_kdebug_level = 0;
345
346 #define RFK_BOOT_NONE 0
347 #define RFK_BOOT_GOOD 1
348 #define RFK_BOOT_BAD 2
349 static int rf_kbooted = RFK_BOOT_NONE;
350
351 #ifdef DEBUG
352 #define db0_printf(a) printf a
353 #define db_printf(a) if (rf_kdebug_level > 0) printf a
354 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
355 #define db2_printf(a) if (rf_kdebug_level > 1) printf a
356 #define db3_printf(a) if (rf_kdebug_level > 2) printf a
357 #define db4_printf(a) if (rf_kdebug_level > 3) printf a
358 #define db5_printf(a) if (rf_kdebug_level > 4) printf a
359 #else /* DEBUG */
360 #define db0_printf(a) printf a
361 #define db1_printf(a) { }
362 #define db2_printf(a) { }
363 #define db3_printf(a) { }
364 #define db4_printf(a) { }
365 #define db5_printf(a) { }
366 #endif /* DEBUG */
367
368 static RF_Raid_t **raidPtrs; /* global raid device descriptors */
369
370 static int rf_pending_testaccs;
371
372 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
373 RF_DECLARE_STATIC_MUTEX(rf_async_done_q_mutex)
374 static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a spare table */
375 static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from installation process */
376 static struct rf_test_acc *rf_async_done_qh, *rf_async_done_qt;
377
378 static struct rf_recon_req *recon_queue = NULL; /* used to communicate reconstruction requests */
379
380
381 decl_simple_lock_data(,recon_queue_mutex)
382
383
384 #define LOCK_RECON_Q_MUTEX() simple_lock(&recon_queue_mutex)
385 #define UNLOCK_RECON_Q_MUTEX() simple_unlock(&recon_queue_mutex)
386
387 /* prototypes */
388 static void KernelWakeupFunc(struct buf *bp);
389 static void InitBP(struct buf *bp, struct vnode *, unsigned rw_flag, dev_t dev,
390 RF_SectorNum_t startSect, RF_SectorCount_t numSect, caddr_t buf,
391 void (*cbFunc)(struct buf *), void *cbArg, int logBytesPerSector,
392 struct proc *b_proc);
393
394 #define Dprintf0(s) if (rf_queueDebug) rf_debug_printf(s,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
395 #define Dprintf1(s,a) if (rf_queueDebug) rf_debug_printf(s,a,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
396 #define Dprintf2(s,a,b) if (rf_queueDebug) rf_debug_printf(s,a,b,NULL,NULL,NULL,NULL,NULL,NULL)
397 #define Dprintf3(s,a,b,c) if (rf_queueDebug) rf_debug_printf(s,a,b,c,NULL,NULL,NULL,NULL,NULL)
398
399
400 /* this is so that we can compile under 2.0 as well as 3.2 */
401 #ifndef proc_to_task
402 #define proc_to_task(x) ((x)->task)
403 #endif /* !proc_to_task */
404
405 void raidattach __P((int));
406 int raidsize __P((dev_t));
407
408 void rf_DiskIOComplete(RF_DiskQueue_t *, RF_DiskQueueData_t *, int);
409 void rf_CopybackReconstructedData(RF_Raid_t *raidPtr);
410 static int raidinit __P((dev_t,RF_Raid_t *,int));
411
412
413 /*
414 * Pilfered from ccd.c
415 */
416
/*
 * Per-component I/O bookkeeping (pilfered from ccd.c): pairs the fresh
 * struct buf issued to a component device with the original buf it was
 * derived from, and the disk-queue request it belongs to.  Allocated
 * from sc_cbufpool via RAIDGETBUF()/RAIDPUTBUF().
 */
struct raidbuf {
	struct buf rf_buf;	/* new I/O buf.  MUST BE FIRST!!! (so a
				 * struct raidbuf * can be used where a
				 * struct buf * is expected) */
	struct buf *rf_obp;	/* ptr. to original I/O buf */
	int rf_flags;		/* misc. flags */
	RF_DiskQueueData_t *req;/* the request that this was part of.. */
};
423
424
425 #define RAIDGETBUF(rs) pool_get(&(rs)->sc_cbufpool, PR_NOWAIT)
426 #define RAIDPUTBUF(rs, cbp) pool_put(&(rs)->sc_cbufpool, cbp)
427
428 /* XXX Not sure if the following should be replacing the raidPtrs above,
429 or if it should be used in conjunction with that... */
430
/*
 * Per-unit software state, modelled on the ccd driver's softc.  One of
 * these exists for each configured RAID unit; the array (raid_softc)
 * is allocated in raidattach().  Holds the open/lock state used by
 * raidopen()/raidclose() and the generic disk bookkeeping.
 */
struct raid_softc {
	int sc_unit;	/* logical unit number */
	int sc_flags;	/* flags (RAIDF_*, see below) */
	int sc_cflags;	/* configuration flags */
	size_t sc_size;	/* size of the raid device */
	dev_t sc_dev;	/* our device..*/
	char sc_xname[20];	/* XXX external name */
	struct disk sc_dkdev;	/* generic disk device info */
	struct pool sc_cbufpool;	/* component buffer pool */
};
441
442 /* sc_flags */
443 #define RAIDF_INITED 0x01 /* unit has been initialized */
444 #define RAIDF_WLABEL 0x02 /* label area is writable */
445 #define RAIDF_LABELLING 0x04 /* unit is currently being labelled */
446 #define RAIDF_WANTED 0x40 /* someone is waiting to obtain a lock */
447 #define RAIDF_LOCKED 0x80 /* unit is locked */
448
449 #define raidunit(x) DISKUNIT(x)
450 static int numraid=0;
451
452 #define RAIDLABELDEV(dev) \
453 (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
454
455 /* declared here, and made public, for the benefit of KVM stuff.. */
456 struct raid_softc *raid_softc;
457
458 static void raidgetdefaultlabel __P((RF_Raid_t *, struct raid_softc *, struct disklabel *));
459 static void raidgetdisklabel __P((dev_t));
460 static void raidmakedisklabel __P((struct raid_softc *));
461
462 static int raidlock __P((struct raid_softc *));
463 static void raidunlock __P((struct raid_softc *));
464 int raidlookup __P((char *, struct proc *p, struct vnode **));
465
466
467 void
468 raidattach(num)
469 int num;
470 {
471 int raidID;
472
473 #ifdef DEBUG
474 printf("raidattach: Asked for %d units\n",num);
475 #endif
476
477 if (num <= 0) {
478 #ifdef DIAGNOSTIC
479 panic("raidattach: count <= 0");
480 #endif
481 return;
482 }
483 /*
484 This is where all the initialization stuff gets done.
485 */
486
487 /* Make some space for requested number of units... */
488
489 RF_Calloc(raidPtrs, num, sizeof(RF_Raid_t *), (RF_Raid_t **));
490 if (raidPtrs == NULL) {
491 panic("raidPtrs is NULL!!\n");
492 }
493
494
495
496 rf_kbooted = rf_boot();
497 if (rf_kbooted) {
498 panic("Serious error booting RAID!!\n");
499 }
500
501 rf_kbooted = RFK_BOOT_GOOD;
502
503 /*
504 put together some datastructures like the CCD device does..
505 This lets us lock the device and what-not when it gets opened.
506 */
507
508 raid_softc = (struct raid_softc *)
509 malloc(num * sizeof(struct raid_softc),
510 M_DEVBUF, M_NOWAIT);
511 if (raid_softc == NULL) {
512 printf("WARNING: no memory for RAIDframe driver\n");
513 return;
514 }
515 numraid = num;
516 bzero(raid_softc, num * sizeof(struct raid_softc));
517
518 for(raidID=0;raidID < num;raidID++) {
519 RF_Calloc(raidPtrs[raidID], 1, sizeof(RF_Raid_t),
520 (RF_Raid_t *));
521 if (raidPtrs[raidID]==NULL) {
522 printf("raidPtrs[%d] is NULL\n",raidID);
523 }
524 }
525 }
526
527
528 int
529 raidsize(dev)
530 dev_t dev;
531 {
532 struct raid_softc *rs;
533 struct disklabel *lp;
534 int part, unit, omask, size;
535
536 unit = raidunit(dev);
537 if (unit >= numraid)
538 return (-1);
539 rs = &raid_softc[unit];
540
541 if ((rs->sc_flags & RAIDF_INITED) == 0)
542 return (-1);
543
544 part = DISKPART(dev);
545 omask = rs->sc_dkdev.dk_openmask & (1 << part);
546 lp = rs->sc_dkdev.dk_label;
547
548 if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
549 return (-1);
550
551 if (lp->d_partitions[part].p_fstype != FS_SWAP)
552 size = -1;
553 else
554 size = lp->d_partitions[part].p_size *
555 (lp->d_secsize / DEV_BSIZE);
556
557 if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
558 return (-1);
559
560 return (size);
561
562 }
563
564 int
565 raiddump(dev, blkno, va, size)
566 dev_t dev;
567 daddr_t blkno;
568 caddr_t va;
569 size_t size;
570 {
571 /* Not implemented. */
572 return ENXIO;
573 }
574
575 /* ARGSUSED */
576 int
577 raidopen(dev, flags, fmt, p)
578 dev_t dev;
579 int flags, fmt;
580 struct proc *p;
581 {
582 int unit = raidunit(dev);
583 struct raid_softc *rs;
584 struct disklabel *lp;
585 int part,pmask;
586 unsigned int raidID;
587 int rc;
588 int error = 0;
589
590 /* This whole next chunk of code is somewhat suspect... Not sure
591 it's needed here at all... XXX */
592
593 if (rf_kbooted == RFK_BOOT_NONE) {
594 printf("Doing restart on raidopen.\n");
595 rf_kbooted = RFK_BOOT_GOOD;
596 rc = rf_boot();
597 if (rc) {
598 rf_kbooted = RFK_BOOT_BAD;
599 printf("Someone is unhappy...\n");
600 return(rc);
601 }
602 }
603
604 if (unit >= numraid)
605 return (ENXIO);
606 rs = &raid_softc[unit];
607
608 if ((error = raidlock(rs)) != 0)
609 return(error);
610 lp = rs->sc_dkdev.dk_label;
611
612 raidID = raidunit(dev);
613
614 part = DISKPART(dev);
615 pmask = (1 << part);
616
617 db1_printf(("Opening raid device number: %d partition: %d\n",
618 raidID,part));
619
620
621 if ((rs->sc_flags & RAIDF_INITED) &&
622 (rs->sc_dkdev.dk_openmask == 0))
623 raidgetdisklabel(dev);
624
625 /* make sure that this partition exists */
626
627 if (part != RAW_PART) {
628 db1_printf(("Not a raw partition..\n"));
629 if (((rs->sc_flags & RAIDF_INITED) == 0) ||
630 ((part >= lp->d_npartitions) ||
631 (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
632 error = ENXIO;
633 raidunlock(rs);
634 db1_printf(("Bailing out...\n"));
635 return(error);
636 }
637 }
638
639 /* Prevent this unit from being unconfigured while open. */
640 switch (fmt) {
641 case S_IFCHR:
642 rs->sc_dkdev.dk_copenmask |= pmask;
643 break;
644
645 case S_IFBLK:
646 rs->sc_dkdev.dk_bopenmask |= pmask;
647 break;
648 }
649 rs->sc_dkdev.dk_openmask =
650 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
651
652 raidunlock(rs);
653
654 return(error);
655
656
657 }
658
659 /* ARGSUSED */
660 int
661 raidclose(dev, flags, fmt, p)
662 dev_t dev;
663 int flags, fmt;
664 struct proc *p;
665 {
666 int unit = raidunit(dev);
667 struct raid_softc *rs;
668 int error = 0;
669 int part;
670
671 if (unit >= numraid)
672 return (ENXIO);
673 rs = &raid_softc[unit];
674
675 if ((error = raidlock(rs)) != 0)
676 return (error);
677
678 part = DISKPART(dev);
679
680 /* ...that much closer to allowing unconfiguration... */
681 switch (fmt) {
682 case S_IFCHR:
683 rs->sc_dkdev.dk_copenmask &= ~(1 << part);
684 break;
685
686 case S_IFBLK:
687 rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
688 break;
689 }
690 rs->sc_dkdev.dk_openmask =
691 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
692
693 raidunlock(rs);
694 return (0);
695
696 }
697
698 void
699 raidstrategy(bp)
700 register struct buf *bp;
701 {
702 register int s;
703
704 unsigned int raidID = raidunit(bp->b_dev);
705 RF_Raid_t *raidPtr;
706 struct raid_softc *rs = &raid_softc[raidID];
707 struct disklabel *lp;
708 int wlabel;
709
710 db1_printf(("Strategy: 0x%x 0x%x\n",(int)bp,(int)bp->b_data));
711 db1_printf(("Strategy(2): bp->b_bufsize%d\n", (int)bp->b_bufsize));
712 db1_printf(("bp->b_count=%d\n",(int)bp->b_bcount));
713 db1_printf(("bp->b_resid=%d\n",(int)bp->b_resid));
714 db1_printf(("bp->b_blkno=%d\n",(int)bp->b_blkno));
715 #if 0
716 if (bp->b_flags&B_READ)
717 db1_printf(("READ\n"));
718 else
719 db1_printf(("WRITE\n"));
720 #endif
721 if (rf_kbooted != RFK_BOOT_GOOD)
722 return;
723 if (raidID >= numraid || !raidPtrs[raidID]) {
724 bp->b_error = ENODEV;
725 bp->b_flags |= B_ERROR;
726 bp->b_resid = bp->b_bcount;
727 biodone(bp);
728 return;
729 }
730 raidPtr = raidPtrs[raidID];
731 if (!raidPtr->valid) {
732 bp->b_error = ENODEV;
733 bp->b_flags |= B_ERROR;
734 bp->b_resid = bp->b_bcount;
735 biodone(bp);
736 return;
737 }
738 if (bp->b_bcount == 0) {
739 db1_printf(("b_bcount is zero..\n"));
740 biodone(bp);
741 return;
742 }
743 lp = rs->sc_dkdev.dk_label;
744
745 /*
746 * Do bounds checking and adjust transfer. If there's an
747 * error, the bounds check will flag that for us.
748 */
749
750 wlabel = rs->sc_flags & (RAIDF_WLABEL|RAIDF_LABELLING);
751 if (DISKPART(bp->b_dev) != RAW_PART)
752 if (bounds_check_with_label(bp, lp, wlabel) <= 0) {
753 db1_printf(("Bounds check failed!!:%d %d\n",
754 (int)bp->b_blkno,(int)wlabel));
755 biodone(bp);
756 return;
757 }
758
759 s = splbio(); /* XXX Needed? */
760 db1_printf(("Beginning strategy...\n"));
761
762 bp->b_resid = 0;
763 bp->b_error = rf_DoAccessKernel(raidPtrs[raidID], bp,
764 NULL, NULL, NULL);
765 if (bp->b_error) {
766 bp->b_flags |= B_ERROR;
767 db1_printf(("bp->b_flags HAS B_ERROR SET!!!: %d\n",
768 bp->b_error));
769 }
770 splx(s);
771 db1_printf(("Strategy exiting: 0x%x 0x%x %d %d\n",
772 (int)bp,(int)bp->b_data,
773 (int)bp->b_bcount,(int)bp->b_resid));
774 }
775
776 /* ARGSUSED */
777 int
778 raidread(dev, uio, flags)
779 dev_t dev;
780 struct uio *uio;
781 int flags;
782 {
783 int unit = raidunit(dev);
784 struct raid_softc *rs;
785 int result;
786 int part;
787
788 if (unit >= numraid)
789 return (ENXIO);
790 rs = &raid_softc[unit];
791
792 if ((rs->sc_flags & RAIDF_INITED) == 0)
793 return (ENXIO);
794 part = DISKPART(dev);
795
796 db1_printf(("raidread: unit: %d partition: %d\n",unit,part));
797
798 #if 0
799 return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
800 #endif
801 result=physio(raidstrategy, NULL, dev, B_READ, minphys, uio);
802 db1_printf(("raidread done. Result is %d %d\n",
803 result,uio->uio_resid));
804 return(result);
805
806 }
807
808 /* ARGSUSED */
809 int
810 raidwrite(dev, uio, flags)
811 dev_t dev;
812 struct uio *uio;
813 int flags;
814 {
815 int unit = raidunit(dev);
816 struct raid_softc *rs;
817
818 if (unit >= numraid)
819 return (ENXIO);
820 rs = &raid_softc[unit];
821
822 if ((rs->sc_flags & RAIDF_INITED) == 0)
823 return (ENXIO);
824 db1_printf(("raidwrite\n"));
825 return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
826
827
828 }
829
830 int
831 raidioctl(dev, cmd, data, flag, p)
832 dev_t dev;
833 u_long cmd;
834 caddr_t data;
835 int flag;
836 struct proc *p;
837 {
838 int unit = raidunit(dev);
839 int error = 0;
840 int part, pmask;
841 struct raid_softc *rs;
842 #if 0
843 int r,c;
844 #endif
845 /* struct raid_ioctl *ccio = (struct ccd_ioctl *)data; */
846
847 /* struct ccdbuf *cbp; */
848 /* struct raidbuf *raidbp; */
849 RF_Config_t *k_cfg, *u_cfg;
850 u_char *specific_buf;
851 int retcode = 0;
852
853 int row;
854 struct rf_recon_req *rrcopy, *rr;
855 #if 0
856 int nbytes, spl, rw, row;
857 struct rf_test_acc *ta;
858 struct buf *bp;
859 RF_SparetWait_t *waitreq;
860 struct rf_test_acc *ta_p, *ta_copy;
861 #endif
862
863 if (unit >= numraid)
864 return (ENXIO);
865 rs = &raid_softc[unit];
866
867 db1_printf(("raidioctl: %d %d %d %d\n",(int)dev,
868 (int)DISKPART(dev),(int)unit,(int)cmd));
869
870 /* Must be open for writes for these commands... */
871 switch (cmd) {
872 case DIOCSDINFO:
873 case DIOCWDINFO:
874 case DIOCWLABEL:
875 if ((flag & FWRITE) == 0)
876 return (EBADF);
877 }
878
879 /* Must be initialized for these... */
880 switch (cmd) {
881 case DIOCGDINFO:
882 case DIOCSDINFO:
883 case DIOCWDINFO:
884 case DIOCGPART:
885 case DIOCWLABEL:
886 case DIOCGDEFLABEL:
887 case RAIDFRAME_SHUTDOWN:
888 case RAIDFRAME_REWRITEPARITY:
889 case RAIDFRAME_GET_INFO:
890 case RAIDFRAME_RESET_ACCTOTALS:
891 case RAIDFRAME_GET_ACCTOTALS:
892 case RAIDFRAME_KEEP_ACCTOTALS:
893 case RAIDFRAME_GET_SIZE:
894 case RAIDFRAME_FAIL_DISK:
895 case RAIDFRAME_COPYBACK:
896 case RAIDFRAME_CHECKRECON:
897 if ((rs->sc_flags & RAIDF_INITED) == 0)
898 return (ENXIO);
899 }
900
901 switch (cmd) {
902
903
904 /* configure the system */
905 case RAIDFRAME_CONFIGURE:
906
907 db3_printf(("rf_ioctl: RAIDFRAME_CONFIGURE\n"));
908 /* copy-in the configuration information */
909 /* data points to a pointer to the configuration structure */
910 u_cfg = *((RF_Config_t **) data);
911 RF_Malloc(k_cfg,sizeof(RF_Config_t),(RF_Config_t *));
912 if (k_cfg == NULL) {
913 db3_printf(("rf_ioctl: ENOMEM for config. Code is %d\n", retcode));
914 return(ENOMEM);
915 }
916 retcode = copyin((caddr_t) u_cfg, (caddr_t) k_cfg,
917 sizeof(RF_Config_t));
918 if (retcode) {
919 db3_printf(("rf_ioctl: retcode=%d copyin.1\n",
920 retcode));
921 return(retcode);
922 }
923
924 /* allocate a buffer for the layout-specific data,
925 and copy it in */
926 if (k_cfg->layoutSpecificSize) {
927 if (k_cfg->layoutSpecificSize > 10000) {
928 /* sanity check */
929 db3_printf(("rf_ioctl: EINVAL %d\n", retcode));
930 return(EINVAL);
931 }
932 RF_Malloc(specific_buf,k_cfg->layoutSpecificSize,
933 (u_char *));
934 if (specific_buf == NULL) {
935 RF_Free(k_cfg,sizeof(RF_Config_t));
936 db3_printf(("rf_ioctl: ENOMEM %d\n", retcode));
937 return(ENOMEM);
938 }
939 retcode = copyin(k_cfg->layoutSpecific,
940 (caddr_t) specific_buf,
941 k_cfg->layoutSpecificSize);
942 if (retcode) {
943 db3_printf(("rf_ioctl: retcode=%d copyin.2\n",
944 retcode));
945 return(retcode);
946 }
947 } else specific_buf = NULL;
948 k_cfg->layoutSpecific = specific_buf;
949
950 /* should do some kind of sanity check on the configuration.
951 Store the sum of all the bytes in the last byte?
952 */
953
954 db1_printf(("Considering configuring the system.:%d 0x%x\n",
955 unit,(int)p));
956
957 /* We need the pointer to this a little deeper, so
958 stash it here... */
959
960 raidPtrs[unit]->proc = p;
961
962 /* configure the system */
963 rf_pending_testaccs = 0;
964
965
966 raidPtrs[unit]->raidid = unit;
967 retcode = rf_Configure(raidPtrs[unit], k_cfg);
968
969
970 if (retcode == 0) {
971 retcode = raidinit(dev, raidPtrs[unit],unit);
972 }
973
974 /* free the buffers. No return code here. */
975 if (k_cfg->layoutSpecificSize) {
976 RF_Free(specific_buf,k_cfg->layoutSpecificSize);
977 }
978 RF_Free(k_cfg,sizeof(RF_Config_t));
979
980 db3_printf(("rf_ioctl: retcode=%d RAIDFRAME_CONFIGURE\n",
981 retcode));
982 return(retcode);
983
984 /* shutdown the system */
985 case RAIDFRAME_SHUTDOWN:
986
987 if ((error = raidlock(rs)) != 0)
988 return(error);
989
990 /*
991 * If somebody has a partition mounted, we shouldn't
992 * shutdown.
993 */
994
995 part = DISKPART(dev);
996 pmask = (1 << part);
997 if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
998 ((rs->sc_dkdev.dk_bopenmask & pmask) &&
999 (rs->sc_dkdev.dk_copenmask & pmask))) {
1000 raidunlock(rs);
1001 return (EBUSY);
1002 }
1003
1004 /* the intention here was to disallow shutdowns while
1005 raidframe is mounted, but it doesn't work because the
1006 shutdown ioctl calls rf_open
1007 */
1008 if (rf_pending_testaccs > 0) {
1009 printf("RAIDFRAME: Can't shutdown because there are %d pending test accs\n",
1010 rf_pending_testaccs);
1011 return(EINVAL);
1012 }
1013 if (rf_debugKernelAccess) {
1014 printf("call shutdown\n");
1015 }
1016 raidPtrs[unit]->proc = p; /* XXX necessary evil */
1017 retcode = rf_Shutdown(raidPtrs[unit]);
1018
1019 db1_printf("Done main shutdown\n");
1020
1021 pool_destroy(&rs->sc_cbufpool);
1022 db1_printf("Done freeing component buffer freelist\n");
1023
1024 /* It's no longer initialized... */
1025 rs->sc_flags &= ~RAIDF_INITED;
1026
1027 /* Detach the disk. */
1028 disk_detach(&rs->sc_dkdev);
1029
1030 raidunlock(rs);
1031
1032 return(retcode);
1033
1034 /* initialize all parity */
1035 case RAIDFRAME_REWRITEPARITY:
1036
1037 if (raidPtrs[unit]->Layout.map->faultsTolerated == 0)
1038 return(EINVAL);
1039 /* borrow the thread of the requesting process */
1040 raidPtrs[unit]->proc = p; /* Blah... :-p GO */
1041 retcode = rf_RewriteParity(raidPtrs[unit]);
1042 /* return I/O Error if the parity rewrite fails */
1043
1044 if (retcode)
1045 retcode = EIO;
1046 return(retcode);
1047
1048 /* issue a test-unit-ready through raidframe to the
1049 indicated device */
1050 #if 0 /* XXX not supported yet (ever?) */
1051 case RAIDFRAME_TUR:
1052 /* debug only */
1053 retcode = rf_SCSI_DoTUR(0, 0, 0, 0, *(dev_t *) data);
1054 return(retcode);
1055 #endif
1056 case RAIDFRAME_GET_INFO:
1057 {
1058 RF_Raid_t *raid = raidPtrs[unit];
1059 RF_DeviceConfig_t *cfg, **ucfgp;
1060 int i, j, d;
1061
1062 if (!raid->valid)
1063 return(ENODEV);
1064 ucfgp = (RF_DeviceConfig_t **)data;
1065 RF_Malloc(cfg,sizeof(RF_DeviceConfig_t),
1066 (RF_DeviceConfig_t *));
1067 if (cfg == NULL)
1068 return(ENOMEM);
1069 bzero((char *)cfg, sizeof(RF_DeviceConfig_t));
1070 cfg->rows = raid->numRow;
1071 cfg->cols = raid->numCol;
1072 cfg->ndevs = raid->numRow * raid->numCol;
1073 if (cfg->ndevs >= RF_MAX_DISKS) {
1074 cfg->ndevs = 0;
1075 return(ENOMEM);
1076 }
1077 cfg->nspares = raid->numSpare;
1078 if (cfg->nspares >= RF_MAX_DISKS) {
1079 cfg->nspares = 0;
1080 return(ENOMEM);
1081 }
1082 cfg->maxqdepth = raid->maxQueueDepth;
1083 d = 0;
1084 for(i=0;i<cfg->rows;i++) {
1085 for(j=0;j<cfg->cols;j++) {
1086 cfg->devs[d] = raid->Disks[i][j];
1087 d++;
1088 }
1089 }
1090 for(j=cfg->cols,i=0;i<cfg->nspares;i++,j++) {
1091 cfg->spares[i] = raid->Disks[0][j];
1092 }
1093 retcode = copyout((caddr_t)cfg, (caddr_t)*ucfgp,
1094 sizeof(RF_DeviceConfig_t));
1095 RF_Free(cfg,sizeof(RF_DeviceConfig_t));
1096
1097 return(retcode);
1098 }
1099 break;
1100
1101 case RAIDFRAME_RESET_ACCTOTALS:
1102 {
1103 RF_Raid_t *raid = raidPtrs[unit];
1104
1105 bzero(&raid->acc_totals, sizeof(raid->acc_totals));
1106 return(0);
1107 }
1108 break;
1109
1110 case RAIDFRAME_GET_ACCTOTALS:
1111 {
1112 RF_AccTotals_t *totals = (RF_AccTotals_t *)data;
1113 RF_Raid_t *raid = raidPtrs[unit];
1114
1115 *totals = raid->acc_totals;
1116 return(0);
1117 }
1118 break;
1119
1120 case RAIDFRAME_KEEP_ACCTOTALS:
1121 {
1122 RF_Raid_t *raid = raidPtrs[unit];
1123 int *keep = (int *)data;
1124
1125 raid->keep_acc_totals = *keep;
1126 return(0);
1127 }
1128 break;
1129
1130 case RAIDFRAME_GET_SIZE:
1131 *(int *) data = raidPtrs[unit]->totalSectors;
1132 return(0);
1133
1134 #define RAIDFRAME_RECON 1
1135 /* XXX The above should probably be set somewhere else!! GO */
1136 #if RAIDFRAME_RECON > 0
1137
1138 /* fail a disk & optionally start reconstruction */
1139 case RAIDFRAME_FAIL_DISK:
1140 rr = (struct rf_recon_req *) data;
1141
1142 if (rr->row < 0 || rr->row >= raidPtrs[unit]->numRow
1143 || rr->col < 0 || rr->col >= raidPtrs[unit]->numCol)
1144 return(EINVAL);
1145
1146 printf("Failing the disk: row: %d col: %d\n",rr->row,rr->col);
1147
1148 /* make a copy of the recon request so that we don't
1149 rely on the user's buffer */
1150 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1151 bcopy(rr, rrcopy, sizeof(*rr));
1152 rrcopy->raidPtr = (void *) raidPtrs[unit];
1153
1154 LOCK_RECON_Q_MUTEX();
1155 rrcopy->next = recon_queue;
1156 recon_queue = rrcopy;
1157 wakeup(&recon_queue);
1158 UNLOCK_RECON_Q_MUTEX();
1159
1160 return(0);
1161
1162 /* invoke a copyback operation after recon on whatever
1163 disk needs it, if any */
1164 case RAIDFRAME_COPYBACK:
1165 /* borrow the current thread to get this done */
1166 raidPtrs[unit]->proc = p; /* ICK.. but needed :-p GO */
1167 rf_CopybackReconstructedData(raidPtrs[unit]);
1168 return(0);
1169
1170 /* return the percentage completion of reconstruction */
1171 case RAIDFRAME_CHECKRECON:
1172 row = *(int *) data;
1173 if (row < 0 || row >= raidPtrs[unit]->numRow)
1174 return(EINVAL);
1175 if (raidPtrs[unit]->status[row] != rf_rs_reconstructing)
1176 *(int *) data = 100;
1177 else
1178 *(int *) data = raidPtrs[unit]->reconControl[row]->percentComplete;
1179 return(0);
1180
1181 /* the sparetable daemon calls this to wait for the
1182 kernel to need a spare table.
1183 * this ioctl does not return until a spare table is needed.
1184 * XXX -- calling mpsleep here in the ioctl code is almost
1185 certainly wrong and evil. -- XXX
1186 * XXX -- I should either compute the spare table in the
1187 kernel, or have a different -- XXX
1188 * XXX -- interface (a different character device) for
1189 delivering the table -- XXX
1190 */
1191 #if 0
1192 case RAIDFRAME_SPARET_WAIT:
1193 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1194 while (!rf_sparet_wait_queue) mpsleep(&rf_sparet_wait_queue, (PZERO+1)|PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1195 waitreq = rf_sparet_wait_queue;
1196 rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1197 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1198
1199 *((RF_SparetWait_t *) data) = *waitreq; /* structure assignment */
1200
1201 RF_Free(waitreq, sizeof(*waitreq));
1202 return(0);
1203
1204
1205 /* wakes up a process waiting on SPARET_WAIT and puts an
1206 error code in it that will cause the dameon to exit */
1207 case RAIDFRAME_ABORT_SPARET_WAIT:
1208 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1209 waitreq->fcol = -1;
1210 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1211 waitreq->next = rf_sparet_wait_queue;
1212 rf_sparet_wait_queue = waitreq;
1213 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1214 wakeup(&rf_sparet_wait_queue);
1215 return(0);
1216
1217 /* used by the spare table daemon to deliver a spare table
1218 into the kernel */
1219 case RAIDFRAME_SEND_SPARET:
1220
1221 /* install the spare table */
1222 retcode = rf_SetSpareTable(raidPtrs[unit],*(void **) data);
1223
1224 /* respond to the requestor. the return status of the
1225 spare table installation is passed in the "fcol" field */
1226 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1227 waitreq->fcol = retcode;
1228 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1229 waitreq->next = rf_sparet_resp_queue;
1230 rf_sparet_resp_queue = waitreq;
1231 wakeup(&rf_sparet_resp_queue);
1232 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1233
1234 return(retcode);
1235 #endif
1236
1237
1238 #endif /* RAIDFRAME_RECON > 0 */
1239
1240 default: break; /* fall through to the os-specific code below */
1241
1242 }
1243
1244 if (!raidPtrs[unit]->valid)
1245 return(EINVAL);
1246
1247 /*
1248 * Add support for "regular" device ioctls here.
1249 */
1250
1251 switch (cmd) {
1252 case DIOCGDINFO:
1253 db1_printf(("DIOCGDINFO %d %d\n",(int)dev,(int)DISKPART(dev)));
1254 *(struct disklabel *)data = *(rs->sc_dkdev.dk_label);
1255 break;
1256
1257 case DIOCGPART:
1258 db1_printf(("DIOCGPART: %d %d\n",(int)dev,(int)DISKPART(dev)));
1259 ((struct partinfo *)data)->disklab = rs->sc_dkdev.dk_label;
1260 ((struct partinfo *)data)->part =
1261 &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1262 break;
1263
1264 case DIOCWDINFO:
1265 db1_printf(("DIOCWDINFO\n"));
1266 case DIOCSDINFO:
1267 db1_printf(("DIOCSDINFO\n"));
1268 if ((error = raidlock(rs)) != 0)
1269 return (error);
1270
1271 rs->sc_flags |= RAIDF_LABELLING;
1272
1273 error = setdisklabel(rs->sc_dkdev.dk_label,
1274 (struct disklabel *)data, 0, rs->sc_dkdev.dk_cpulabel);
1275 if (error == 0) {
1276 if (cmd == DIOCWDINFO)
1277 error = writedisklabel(RAIDLABELDEV(dev),
1278 raidstrategy, rs->sc_dkdev.dk_label,
1279 rs->sc_dkdev.dk_cpulabel);
1280 }
1281
1282 rs->sc_flags &= ~RAIDF_LABELLING;
1283
1284 raidunlock(rs);
1285
1286 if (error)
1287 return (error);
1288 break;
1289
1290 case DIOCWLABEL:
1291 db1_printf(("DIOCWLABEL\n"));
1292 if (*(int *)data != 0)
1293 rs->sc_flags |= RAIDF_WLABEL;
1294 else
1295 rs->sc_flags &= ~RAIDF_WLABEL;
1296 break;
1297
1298 case DIOCGDEFLABEL:
1299 db1_printf(("DIOCGDEFLABEL\n"));
1300 raidgetdefaultlabel(raidPtrs[unit], rs,
1301 (struct disklabel *)data);
1302 break;
1303
1304 default:
1305 retcode = ENOTTY; /* XXXX ?? OR EINVAL ? */
1306 }
1307 return(retcode);
1308
1309 }
1310
1311
1312 /* raidinit -- complete the rest of the initialization for the
1313 RAIDframe device. */
1314
1315
1316 static int
1317 raidinit(dev, raidPtr,unit)
1318 dev_t dev;
1319 RF_Raid_t *raidPtr;
1320 int unit;
1321 {
1322 int retcode;
1323 /* int ix; */
1324 /* struct raidbuf *raidbp; */
1325 struct raid_softc *rs;
1326
1327 retcode = 0;
1328
1329 rs = &raid_softc[unit];
1330 pool_init(&rs->sc_cbufpool, sizeof(struct raidbuf), 0,
1331 0, 0, "raidpl", 0, NULL, NULL, M_DEVBUF);
1332
1333
1334 /* XXX should check return code first... */
1335 rs->sc_flags |= RAIDF_INITED;
1336
1337 sprintf(rs->sc_xname, "raid%d", unit); /* XXX doesn't check bounds.*/
1338
1339 rs->sc_dkdev.dk_name = rs->sc_xname;
1340 /* disk_attach actually creates space for the CPU disklabel, among
1341 other things, so it's critical to call this *BEFORE* we
1342 try putzing with disklabels. */
1343 disk_attach(&rs->sc_dkdev);
1344
1345 /* XXX There may be a weird interaction here between this, and
1346 protectedSectors, as used in RAIDframe. */
1347 rs->sc_size = raidPtr->totalSectors;
1348 rs->sc_dev = dev;
1349 return(retcode);
1350 }
1351
1352
1353 /*********************************************************
1354 *
1355 * initialization code called at boot time (startup.c)
1356 *
1357 ********************************************************/
1358 int rf_boot()
1359 {
1360 int i, rc;
1361
1362 rc = rf_mutex_init(&rf_sparet_wait_mutex);
1363 if (rc) {
1364 RF_PANIC();
1365 }
1366 rc = rf_mutex_init(&rf_async_done_q_mutex);
1367 if (rc) {
1368 RF_PANIC();
1369 }
1370 rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
1371 recon_queue = NULL;
1372 rf_async_done_qh = rf_async_done_qt = NULL;
1373 for (i=0; i<numraid; i++)
1374 raidPtrs[i] = NULL;
1375 rc = rf_BootRaidframe();
1376 if (rc == 0)
1377 printf("Kernelized RAIDframe activated\n");
1378 else
1379 rf_kbooted = RFK_BOOT_BAD;
1380 return(rc);
1381 }
1382
/*
 * This kernel thread never exits. It is created once, and persists
 * until the system reboots.
 *
 * It services the reconstruction request queue: the
 * RAIDFRAME_FAIL_DISK ioctl links requests onto recon_queue and
 * wakeup()s us; we dequeue them one at a time and hand each to
 * rf_FailDisk(), which (when recon was requested) does not return
 * until the reconstruction finishes.
 */
void rf_ReconKernelThread()
{
	struct rf_recon_req *req;
	int s;

	/* XXX not sure what spl() level we should be at here... probably splbio() */
	/* NOTE(review): this splbio() is never paired with splx(); since the
	   thread never exits that may be deliberate -- confirm. */
	s=splbio();

	while (1) {
		/* grab the next reconstruction request from the queue */
		LOCK_RECON_Q_MUTEX();
		while (!recon_queue) {
			/* NOTE(review): a wakeup() issued between the unlock
			   below and the tsleep() would be lost; the queued
			   request then waits for the next wakeup -- verify
			   this window is acceptable. */
			UNLOCK_RECON_Q_MUTEX();
			tsleep(&recon_queue, PRIBIO | PCATCH, "raidframe recon", 0);
			LOCK_RECON_Q_MUTEX();
		}
		req = recon_queue;
		recon_queue = recon_queue->next;
		UNLOCK_RECON_Q_MUTEX();

		/*
		 * If flags specifies that we should start recon, this call
		 * will not return until reconstruction completes, fails, or is aborted.
		 */
		rf_FailDisk((RF_Raid_t *) req->raidPtr, req->row, req->col,
		    ((req->flags&RF_FDFLAGS_RECON) ? 1 : 0));

		/* The ioctl handler malloc'd a private copy of the request
		   for us; we own it and free it here. */
		RF_Free(req, sizeof(*req));
	}
}
/* wake up the daemon & tell it to get us a spare table
 * XXX
 * the entries in the queues should be tagged with the raidPtr
 * so that in the extremely rare case that two recons happen at once, we know for
 * which device we're requesting a spare table
 * XXX
 *
 * Blocks until the user-level spare-table daemon posts a response;
 * returns the status the daemon placed in the response's fcol field.
 */
int rf_GetSpareTableFromDaemon(req)
	RF_SparetWait_t *req;
{
	int retcode;

	/* post our request on the wait queue and wake the daemon,
	   which is sleeping in the RAIDFRAME_SPARET_WAIT ioctl */
	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
	req->next = rf_sparet_wait_queue;
	rf_sparet_wait_queue = req;
	wakeup(&rf_sparet_wait_queue);

	/* NOTE(review): the Digital UNIX mpsleep() this replaced dropped the
	   mutex while asleep; tsleep() takes no interlock here, so we appear
	   to sleep while still holding rf_sparet_wait_mutex -- verify that
	   RF_LOCK_MUTEX's semantics on this platform make that safe. */
	while (!rf_sparet_resp_queue) {
		tsleep(&rf_sparet_resp_queue, PRIBIO | PCATCH,
		    "raidframe getsparetable", 0);
#if 0
		mpsleep(&rf_sparet_resp_queue, PZERO, "sparet resp", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
#endif
	}
	/* dequeue the daemon's response; its fcol carries the status */
	req = rf_sparet_resp_queue;
	rf_sparet_resp_queue = req->next;
	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);

	retcode = req->fcol;
	RF_Free(req, sizeof(*req));	/* this is not the same req as we alloc'd */
	return(retcode);
}
1450
/* a wrapper around rf_DoAccess that extracts appropriate info from the bp & passes it down.
 * any calls originating in the kernel must use non-blocking I/O
 * do some extra sanity checking to return "appropriate" error values for
 * certain conditions (to make some standard utilities work)
 *
 * On a range or alignment error the buffer is failed with biodone()
 * right here; otherwise completion is signalled later through cbFunc.
 */
int rf_DoAccessKernel(raidPtr, bp, flags, cbFunc, cbArg)
	RF_Raid_t *raidPtr;
	struct buf *bp;
	RF_RaidAccessFlags_t flags;
	void (*cbFunc)(struct buf *);
	void *cbArg;
{
	RF_SectorCount_t num_blocks, pb, sum;
	RF_RaidAddr_t raid_addr;
	int retcode;
	struct partition *pp;
	daddr_t blocknum;
	int unit;
	struct raid_softc *rs;

	/* XXX The dev_t used here should be for /dev/[r]raid* !!! */

	unit = raidPtr->raidid;
	rs = &raid_softc[unit];

	/* Ok, for the bp we have here, bp->b_blkno is relative to the
	   partition.. Need to make it absolute to the underlying
	   device.. */

	blocknum = bp->b_blkno;
	if (DISKPART(bp->b_dev) != RAW_PART) {
		pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
		blocknum += pp->p_offset;
		db1_printf(("updated: %d %d\n",DISKPART(bp->b_dev),
		    pp->p_offset));
	} else {
		db1_printf(("Is raw..\n"));
	}
	db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno, (int) blocknum));

	db1_printf(("bp->b_bcount = %d\n",(int)bp->b_bcount));
	db1_printf(("bp->b_resid = %d\n",(int)bp->b_resid));

	/* *THIS* is where we adjust what block we're going to... but
	   DO NOT TOUCH bp->b_blkno!!! */
	raid_addr = blocknum;

	/* pb is 1 iff the byte count ends in a partial sector */
	num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
	pb = (bp->b_bcount&raidPtr->sectorMask) ? 1 : 0;
	sum = raid_addr + num_blocks + pb;
	if (1 || rf_debugKernelAccess) {
		db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
		    (int)raid_addr, (int)sum,(int)num_blocks,
		    (int)pb,(int)bp->b_resid));
	}

	/* refuse accesses past the end of the array; the extra "sum <"
	   comparisons catch wrap-around in the unsigned addition above */
	if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
	    || (sum < num_blocks) || (sum < pb))
	{
		bp->b_error = ENOSPC;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return(bp->b_error);
	}

	/*
	 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
	 */

	/* refuse transfers that are not a whole number of sectors */
	if (bp->b_bcount & raidPtr->sectorMask) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return(bp->b_error);
	}
	db1_printf(("Calling DoAccess..\n"));

	/* don't ever condition on bp->b_flags & B_WRITE.
	   always condition on B_READ instead */
	retcode = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
	    RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
	    0, raid_addr, num_blocks, bp->b_un.b_addr,
	    bp, NULL, NULL, RF_DAG_NONBLOCKING_IO|flags,
	    NULL, cbFunc, cbArg);
	db1_printf(("After call to DoAccess: 0x%x 0x%x %d\n",(int)bp,
	    (int)bp->b_data,(int)bp->b_resid));
	return(retcode);
}
1542
/* invoke an I/O from kernel mode. Disk queue should be locked upon entry
 *
 * Wraps the request in a struct raidbuf from the per-unit pool,
 * fills in the component buf, and fires it at the component device
 * via VOP_STRATEGY().  Completion arrives in KernelWakeupFunc().
 * Always returns 0.
 */
int rf_DispatchKernelIO(queue, req)
	RF_DiskQueue_t *queue;
	RF_DiskQueueData_t *req;
{
	int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
	struct buf *bp;
	struct raidbuf *raidbp=NULL;
	struct raid_softc *rs;
	int unit;

	/* XXX along with the vnode, we also need the softc associated with
	   this device.. */

	/* remember which queue this request came from, for completion */
	req->queue = queue;

	unit = queue->raidPtr->raidid;

	db1_printf(("DispatchKernelIO unit: %d\n",unit));

	if (unit >= numraid) {
		printf("Invalid unit number: %d %d\n",unit,numraid);
		panic("Invalid Unit number in rf_DispatchKernelIO\n");
	}

	rs = &raid_softc[unit];

	/* XXX is this the right place? */
	disk_busy(&rs->sc_dkdev);

	bp = req->bp;

	/* allocate a component buffer from the per-unit pool */
	raidbp = RAIDGETBUF(rs);

	raidbp->rf_flags = 0;	/* XXX not really used anywhere... */

	/*
	 * context for raidiodone
	 */
	raidbp->rf_obp = bp;
	raidbp->req = req;

	switch (req->type) {
	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
		/*
		   Dprintf2("rf_DispatchKernelIO: NOP to r %d c %d\n",
		   queue->row, queue->col);
		 */
		/* XXX need to do something extra here.. */
		/* I'm leaving this in, as I've never actually seen it
		   used, and I'd like folks to report it... GO */
		printf(("WAKEUP CALLED\n"));
		queue->numOutstanding++;

		/* XXX need to glue the original buffer into this?? */

		/* complete immediately; no real I/O is issued for a NOP */
		KernelWakeupFunc(&raidbp->rf_buf);
		break;

	case RF_IO_TYPE_READ:
	case RF_IO_TYPE_WRITE:

		if (req->tracerec) {
			RF_ETIMER_START(req->tracerec->timer);
		}

		/* set up the component buf for the component's vnode/dev */
		InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
		    op | bp->b_flags, queue->rf_cinfo->ci_dev,
		    req->sectorOffset, req->numSector,
		    req->buf, KernelWakeupFunc, (void *) req,
		    queue->raidPtr->logBytesPerSector, req->b_proc);

		if (rf_debugKernelAccess) {
			db1_printf(("dispatch: bp->b_blkno = %ld\n",
			    (long) bp->b_blkno));
		}
		queue->numOutstanding++;
		queue->last_deq_sector = req->sectorOffset;
		/* acc wouldn't have been let in if there were any
		   pending reqs at any other priority */
		queue->curPriority = req->priority;
		/*
		   Dprintf3("rf_DispatchKernelIO: %c to row %d col %d\n",
		   req->type, queue->row, queue->col);
		 */

		db1_printf(("Going for %c to unit %d row %d col %d\n",
		    req->type, unit, queue->row, queue->col));
		db1_printf(("sector %d count %d (%d bytes) %d\n",
		    (int) req->sectorOffset, (int) req->numSector,
		    (int) (req->numSector <<
		    queue->raidPtr->logBytesPerSector),
		    (int) queue->raidPtr->logBytesPerSector));
		/* writes must bump the vnode's output counter */
		if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
			raidbp->rf_buf.b_vp->v_numoutput++;
		}

		VOP_STRATEGY(&raidbp->rf_buf);

		break;

	default:
		panic("bad req->type in rf_DispatchKernelIO");
	}
	db1_printf(("Exiting from DispatchKernelIO\n"));
	return(0);
}
1652
/* this is the callback function associated with a I/O invoked from
   kernel code.

   Runs as the biodone handler for a component buf issued by
   rf_DispatchKernelIO(): propagates error state and residual count
   back to the original buf, collects trace timing, returns the
   raidbuf to the pool, and notifies RAIDframe via rf_DiskIOComplete()
   and the request's CompleteFunc.
 */
static void KernelWakeupFunc(vbp)
	struct buf *vbp;
{
	RF_DiskQueueData_t *req = NULL;
	RF_DiskQueue_t *queue;
	/* the raidbuf starts with its embedded buf, so the buf pointer
	   we were handed is also a pointer to the whole raidbuf */
	struct raidbuf *raidbp = (struct raidbuf *)vbp;
	struct buf *bp;
	struct raid_softc *rs;
	int unit;
	register int s;

	s=splbio();	/* XXX */
	db1_printf(("recovering the request queue:\n"));
	req = raidbp->req;

	bp = raidbp->rf_obp;
	db1_printf(("bp=0x%x\n",(int)bp));

	queue = (RF_DiskQueue_t *) req->queue;

	/* propagate any component error to the original buf */
	if (raidbp->rf_buf.b_flags & B_ERROR) {
#if 0
		printf("Setting bp->b_flags!!! %d\n",raidbp->rf_buf.b_error);
#endif
		bp->b_flags |= B_ERROR;
		bp->b_error = raidbp->rf_buf.b_error ?
		    raidbp->rf_buf.b_error : EIO;
	}

	db1_printf(("raidbp->rf_buf.b_bcount=%d\n",(int)raidbp->rf_buf.b_bcount));
	db1_printf(("raidbp->rf_buf.b_bufsize=%d\n",(int)raidbp->rf_buf.b_bufsize));
	db1_printf(("raidbp->rf_buf.b_resid=%d\n",(int)raidbp->rf_buf.b_resid));
	db1_printf(("raidbp->rf_buf.b_data=0x%x\n",(int)raidbp->rf_buf.b_data));

	/* XXX methinks this could be wrong... */
#if 1
	bp->b_resid = raidbp->rf_buf.b_resid;
#endif

	/* account the physical I/O time in the access trace record */
	if (req->tracerec) {
		RF_ETIMER_STOP(req->tracerec->timer);
		RF_ETIMER_EVAL(req->tracerec->timer);
		RF_LOCK_MUTEX(rf_tracing_mutex);
		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->num_phys_ios++;
		RF_UNLOCK_MUTEX(rf_tracing_mutex);
	}

	bp->b_bcount = raidbp->rf_buf.b_bcount;/* XXXX ?? */

	unit = queue->raidPtr->raidid;	/* *Much* simpler :-> */

	/* done with the component buffer; give it back to the pool */
	rs = &raid_softc[unit];
	RAIDPUTBUF(rs,raidbp);

	if (bp->b_resid==0) {
		db1_printf(("Disk is no longer busy for this buffer... %d %ld %ld\n",
		    unit, bp->b_resid, bp->b_bcount));
		/* XXX is this the right place for a disk_unbusy()??!??!?!? */
		disk_unbusy(&rs->sc_dkdev, (bp->b_bcount - bp->b_resid));
	} else {
		db1_printf(("b_resid is still %ld\n",bp->b_resid));
	}

	/* tell RAIDframe this component I/O is done (error flag = 0/1) */
	rf_DiskIOComplete(queue, req, (bp->b_flags & B_ERROR) ? 1 : 0);
	(req->CompleteFunc)(req->argument, (bp->b_flags & B_ERROR) ? 1 : 0);
	/* printf("Exiting KernelWakeupFunc\n"); */

	splx(s);	/* XXX */
}
1727
1728
1729
/*
 * initialize a buf structure for doing an I/O in the kernel.
 *
 * Fills every field of *bp that the strategy/biodone path reads:
 * flags, byte count, target device/vnode, starting block, data
 * pointer, owning process, and the completion callback (invoked via
 * B_CALL/b_iodone).  numSect is converted to bytes using
 * logBytesPerSector; a zero-length transfer is a bug and panics.
 */
static void InitBP(
	struct buf *bp,
	struct vnode *b_vp,
	unsigned rw_flag,
	dev_t dev,
	RF_SectorNum_t startSect,
	RF_SectorCount_t numSect,
	caddr_t buf,
	void (*cbFunc)(struct buf *),
	void *cbArg,
	int logBytesPerSector,
	struct proc *b_proc)
{
	/* bp->b_flags = B_PHYS | rw_flag; */
	bp->b_flags = B_CALL | rw_flag;	/* XXX need B_PHYS here too??? */
	bp->b_bcount = numSect << logBytesPerSector;
	bp->b_bufsize = bp->b_bcount;
	bp->b_error = 0;
	bp->b_dev = dev;
	db1_printf(("bp->b_dev is %d\n", dev));
	/* b_un.b_addr aliases b_data, hence the b_data debug print below */
	bp->b_un.b_addr = buf;
	db1_printf(("bp->b_data=0x%x\n",(int)bp->b_data));

	bp->b_blkno = startSect;
	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
	db1_printf(("b_bcount is: %d\n",(int)bp->b_bcount));
	if (bp->b_bcount == 0) {
		panic("bp->b_bcount is zero in InitBP!!\n");
	}
	bp->b_proc = b_proc;
	/* B_CALL above makes biodone() invoke this callback with cbArg
	   implied via the enclosing raidbuf (cbArg itself is unused here
	   -- NOTE(review): confirm that is intentional). */
	bp->b_iodone = cbFunc;
	bp->b_vp = b_vp;

}
1767 #endif /* KERNEL */
1768
1769 /* Extras... */
1770
unsigned int rpcc()
{
	/*
	 * Stub.  This presumably ought to read the CPU cycle counter
	 * (an Alpha-ism from the original port), but there is no
	 * portable way to do that here, so just report zero.
	 */
	return(0);
}
1778
#if 0
/*
 * Dead code: an old stub of rf_GetSpareTableFromDaemon().  The live
 * implementation is earlier in this file; this copy is never compiled.
 */
int rf_GetSpareTableFromDaemon(req)
	RF_SparetWait_t *req;
{
	int retcode=1;
	printf("This is supposed to do something useful!!\n");	/* XXX */

	return(retcode);

}
#endif
1790
1791 static void
1792 raidgetdefaultlabel(raidPtr, rs, lp)
1793 RF_Raid_t *raidPtr;
1794 struct raid_softc *rs;
1795 struct disklabel *lp;
1796 {
1797 db1_printf(("Building a default label...\n"));
1798 bzero(lp, sizeof(*lp));
1799
1800 /* fabricate a label... */
1801 lp->d_secperunit = raidPtr->totalSectors;
1802 lp->d_secsize = raidPtr->bytesPerSector;
1803 lp->d_nsectors = 1024 * (1024 / raidPtr->bytesPerSector);
1804 lp->d_ntracks = 1;
1805 lp->d_ncylinders = raidPtr->totalSectors / lp->d_nsectors;
1806 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1807
1808 strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
1809 lp->d_type = DTYPE_RAID;
1810 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
1811 lp->d_rpm = 3600;
1812 lp->d_interleave = 1;
1813 lp->d_flags = 0;
1814
1815 lp->d_partitions[RAW_PART].p_offset = 0;
1816 lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
1817 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1818 lp->d_npartitions = RAW_PART + 1;
1819
1820 lp->d_magic = DISKMAGIC;
1821 lp->d_magic2 = DISKMAGIC;
1822 lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
1823
1824 }
1825
/*
 * Read the disklabel from the raid device. If one is not present, fake one
 * up.
 *
 * The in-core label (rs->sc_dkdev.dk_label) is first seeded with the
 * fabricated default, then overwritten by readdisklabel() if a valid
 * on-disk label exists; sanity warnings are printed when the on-disk
 * label disagrees with the array's actual size.
 */
static void
raidgetdisklabel(dev)
	dev_t dev;
{
	int unit = raidunit(dev);
	struct raid_softc *rs = &raid_softc[unit];
	char *errstring;
	struct disklabel *lp = rs->sc_dkdev.dk_label;
	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
	RF_Raid_t *raidPtr;

	db1_printf(("Getting the disklabel...\n"));

	bzero(clp, sizeof(*clp));

	raidPtr = raidPtrs[unit];

	/* start from the fabricated default label */
	raidgetdefaultlabel(raidPtr, rs, lp);

	/*
	 * Call the generic disklabel extraction routine.
	 */
	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
	if (errstring)
		/* no usable on-disk label; finish off the fake one */
		raidmakedisklabel(rs);
	else {
		int i;
		struct partition *pp;

		/*
		 * Sanity check whether the found disklabel is valid.
		 *
		 * This is necessary since total size of the raid device
		 * may vary when an interleave is changed even though exactly
		 * same componets are used, and old disklabel may used
		 * if that is found.
		 */
		if (lp->d_secperunit != rs->sc_size)
			printf("WARNING: %s: "
			    "total sector size in disklabel (%d) != "
			    "the size of raid (%d)\n", rs->sc_xname,
			    lp->d_secperunit, rs->sc_size);
		for (i = 0; i < lp->d_npartitions; i++) {
			pp = &lp->d_partitions[i];
			if (pp->p_offset + pp->p_size > rs->sc_size)
				printf("WARNING: %s: end of partition `%c' "
				    "exceeds the size of raid (%d)\n",
				    rs->sc_xname, 'a' + i, rs->sc_size);
		}
	}

}
1883
1884 /*
1885 * Take care of things one might want to take care of in the event
1886 * that a disklabel isn't present.
1887 */
1888 static void
1889 raidmakedisklabel(rs)
1890 struct raid_softc *rs;
1891 {
1892 struct disklabel *lp = rs->sc_dkdev.dk_label;
1893 db1_printf(("Making a label..\n"));
1894
1895 /*
1896 * For historical reasons, if there's no disklabel present
1897 * the raw partition must be marked FS_BSDFFS.
1898 */
1899
1900 lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
1901
1902 strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
1903
1904 lp->d_checksum = dkcksum(lp);
1905 }
1906
1907 /*
1908 * Lookup the provided name in the filesystem. If the file exists,
1909 * is a valid block device, and isn't being used by anyone else,
1910 * set *vpp to the file's vnode.
1911 * You'll find the original of this in ccd.c
1912 */
1913 int
1914 raidlookup(path, p, vpp)
1915 char *path;
1916 struct proc *p;
1917 struct vnode **vpp; /* result */
1918 {
1919 struct nameidata nd;
1920 struct vnode *vp;
1921 struct vattr va;
1922 int error;
1923
1924 db1_printf(("Doing raidlookup...: %s 0x%x\n",path,(int)p));
1925
1926 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
1927 if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
1928 #ifdef DEBUG
1929 printf("RAIDframe: vn_open returned %d\n",error);
1930 #endif
1931 return (error);
1932 }
1933 vp = nd.ni_vp;
1934 if (vp->v_usecount > 1) {
1935 VOP_UNLOCK(vp, 0);
1936 (void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
1937 return (EBUSY);
1938 }
1939 if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
1940 VOP_UNLOCK(vp, 0);
1941 (void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
1942 return (error);
1943 }
1944 /* XXX: eventually we should handle VREG, too. */
1945 if (va.va_type != VBLK) {
1946 VOP_UNLOCK(vp, 0);
1947 (void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
1948 return (ENOTBLK);
1949 }
1950 VOP_UNLOCK(vp, 0);
1951 *vpp = vp;
1952 return (0);
1953 }
1954
1955 /*
1956 * Wait interruptibly for an exclusive lock.
1957 *
1958 * XXX
1959 * Several drivers do this; it should be abstracted and made MP-safe.
1960 * (Hmm... where have we seen this warning before :-> GO )
1961 */
1962 static int
1963 raidlock(rs)
1964 struct raid_softc *rs;
1965 {
1966 int error;
1967
1968 while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
1969 rs->sc_flags |= RAIDF_WANTED;
1970 if ((error =
1971 tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
1972 return (error);
1973 }
1974 rs->sc_flags |= RAIDF_LOCKED;
1975 return (0);
1976 }
1977
1978 /*
1979 * Unlock and wake up any waiters.
1980 */
1981 static void
1982 raidunlock(rs)
1983 struct raid_softc *rs;
1984 {
1985
1986 rs->sc_flags &= ~RAIDF_LOCKED;
1987 if ((rs->sc_flags & RAIDF_WANTED) != 0) {
1988 rs->sc_flags &= ~RAIDF_WANTED;
1989 wakeup(rs);
1990 }
1991 }
1992