1 /* $NetBSD: xd.c,v 1.21 1998/06/30 04:35:37 mrg Exp $ */
2
3 /*
4 *
5 * Copyright (c) 1995 Charles D. Cranor
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Charles D. Cranor.
19 * 4. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 *
36 * x d . c x y l o g i c s 7 5 3 / 7 0 5 3 v m e / s m d d r i v e r
37 *
38 * author: Chuck Cranor <chuck (at) ccrc.wustl.edu>
39 * id: &Id: xd.c,v 1.9 1995/09/25 20:12:44 chuck Exp &
40 * started: 27-Feb-95
41 * references: [1] Xylogics Model 753 User's Manual
42 * part number: 166-753-001, Revision B, May 21, 1988.
43 * "Your Partner For Performance"
44 * [2] other NetBSD disk device drivers
45 *
46 * Special thanks go to Scott E. Campbell of Xylogics, Inc. for taking
47 * the time to answer some of my questions about the 753/7053.
48 *
49 * note: the 753 and the 7053 are programmed the same way, but are
50 * different sizes. the 753 is a 6U VME card, while the 7053 is a 9U
51 * VME card (found in many VME based suns).
52 */
53
54 #undef XDC_DEBUG /* full debug */
55 #define XDC_DIAG /* extra sanity checks */
56 #if defined(DIAGNOSTIC) && !defined(XDC_DIAG)
57 #define XDC_DIAG /* link in with master DIAG option */
58 #endif
59
60 #include <sys/param.h>
61 #include <sys/proc.h>
62 #include <sys/systm.h>
63 #include <sys/kernel.h>
64 #include <sys/file.h>
65 #include <sys/stat.h>
66 #include <sys/ioctl.h>
67 #include <sys/buf.h>
68 #include <sys/uio.h>
69 #include <sys/malloc.h>
70 #include <sys/device.h>
71 #include <sys/disklabel.h>
72 #include <sys/disk.h>
73 #include <sys/syslog.h>
74 #include <sys/dkbad.h>
75 #include <sys/conf.h>
76
77 #include <vm/vm.h>
78 #include <vm/vm_kern.h>
79
80 #include <machine/autoconf.h>
81 #include <machine/dvma.h>
82
83 #include <dev/sun/disklabel.h>
84
85 #include <sun3/dev/xdreg.h>
86 #include <sun3/dev/xdvar.h>
87 #include <sun3/dev/xio.h>
88
89 #include "locators.h"
90
91 /*
92 * macros
93 */
94
95 /*
96 * XDC_TWAIT: add iorq "N" to tail of SC's wait queue
97 */
98 #define XDC_TWAIT(SC, N) { \
99 (SC)->waitq[(SC)->waitend] = (N); \
100 (SC)->waitend = ((SC)->waitend + 1) % XDC_MAXIOPB; \
101 (SC)->nwait++; \
102 }
103
104 /*
105 * XDC_HWAIT: add iorq "N" to head of SC's wait queue
106 */
107 #define XDC_HWAIT(SC, N) { \
108 (SC)->waithead = ((SC)->waithead == 0) ? \
109 (XDC_MAXIOPB - 1) : ((SC)->waithead - 1); \
110 (SC)->waitq[(SC)->waithead] = (N); \
111 (SC)->nwait++; \
112 }
113
114 /*
115 * XDC_GET_WAITER: gets the first request waiting on the waitq
116 * and removes it (so it can be submitted)
117 */
118 #define XDC_GET_WAITER(XDCSC, RQ) { \
119 (RQ) = (XDCSC)->waitq[(XDCSC)->waithead]; \
120 (XDCSC)->waithead = ((XDCSC)->waithead + 1) % XDC_MAXIOPB; \
121 (XDCSC)->nwait--; \
122 }
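/*
 * note on the wait queue: waitq[] is a circular buffer of iorq numbers,
 * XDC_MAXIOPB entries long, indexed by "waithead" (next entry to remove)
 * and "waitend" (next free slot), with "nwait" counting the entries in
 * use.  XDC_TWAIT appends at the tail, XDC_HWAIT pushes at the head
 * (used when resubmitting a request), and XDC_GET_WAITER pops the head.
 */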
123
124 /*
125 * XDC_FREE: add iorq "N" to SC's free list
126 */
127 #define XDC_FREE(SC, N) { \
128 (SC)->freereq[(SC)->nfree++] = (N); \
129 (SC)->reqs[N].mode = 0; \
130 if ((SC)->nfree == 1) wakeup(&(SC)->nfree); \
131 }
132
133
134 /*
135 * XDC_RQALLOC: allocate an iorq off the free list (assume nfree > 0).
136 */
137 #define XDC_RQALLOC(XDCSC) (XDCSC)->freereq[--((XDCSC)->nfree)]
138
139 /*
140 * XDC_GO: start iopb ADDR (DVMA addr in a u_long) on XDC
141 */
142 #define XDC_GO(XDC, ADDR) { \
143 (XDC)->xdc_iopbaddr0 = ((ADDR) & 0xff); \
144 (ADDR) = ((ADDR) >> 8); \
145 (XDC)->xdc_iopbaddr1 = ((ADDR) & 0xff); \
146 (ADDR) = ((ADDR) >> 8); \
147 (XDC)->xdc_iopbaddr2 = ((ADDR) & 0xff); \
148 (ADDR) = ((ADDR) >> 8); \
149 (XDC)->xdc_iopbaddr3 = (ADDR); \
150 (XDC)->xdc_iopbamod = XDC_ADDRMOD; \
151 (XDC)->xdc_csr = XDC_ADDIOPB; /* go! */ \
152 }
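/*
 * note: XDC_GO hands the controller the 32-bit DVMA address of an iopb
 * one byte at a time (least significant byte first) through the four
 * xdc_iopbaddr registers, sets the VME address modifier, and finally
 * writes XDC_ADDIOPB to the csr to start the command.
 */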
153
154 /*
155 * XDC_WAIT: wait for XDC's csr "BITS" to come on in "TIME".
156 * LCV is a counter. If it goes to zero then we timed out.
157 */
158 #define XDC_WAIT(XDC, LCV, TIME, BITS) { \
159 (LCV) = (TIME); \
160 while ((LCV) > 0) { \
161 if ((XDC)->xdc_csr & (BITS)) break; \
162 (LCV) = (LCV) - 1; \
163 DELAY(1); \
164 } \
165 }
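/*
 * note: XDC_WAIT busy-waits, calling DELAY(1) between polls of the csr,
 * so TIME is roughly a timeout in microseconds.  on return the caller
 * checks LCV: zero means we timed out, non-zero means one of the
 * requested BITS came on in time.
 */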
166
167 /*
168 * XDC_DONE: don't need IORQ, get error code and free (done after xdc_cmd)
169 */
170 #define XDC_DONE(SC,RQ,ER) { \
171 if ((RQ) == XD_ERR_FAIL) { \
172 (ER) = (RQ); \
173 } else { \
174 if ((SC)->ndone-- == XDC_SUBWAITLIM) \
175 wakeup(&(SC)->ndone); \
176 (ER) = (SC)->reqs[RQ].errno; \
177 XDC_FREE((SC), (RQ)); \
178 } \
179 }
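/*
 * note: XDC_DONE is the normal way to finish up after xdc_cmd.  RQ is the
 * value xdc_cmd returned: if it is XD_ERR_FAIL no iorq was ever allocated,
 * so that code is simply passed back in ER.  otherwise the iorq's errno is
 * copied to ER and the iorq/iopb pair is returned to the free list; the
 * wakeup fires when the decrement takes "ndone" down from XDC_SUBWAITLIM,
 * releasing a process sleeping on "ndone" in xdc_cmd.
 */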
180
181 /*
182 * XDC_ADVANCE: advance iorq's pointers by a number of sectors
183 */
184 #define XDC_ADVANCE(IORQ, N) { \
185 if (N) { \
186 (IORQ)->sectcnt -= (N); \
187 (IORQ)->blockno += (N); \
188 (IORQ)->dbuf += ((N)*XDFM_BPS); \
189 } \
190 }
191
192 /*
193 * note - addresses you can sleep on:
194 * [1] & of xd_softc's "state" (waiting for a chance to attach a drive)
195 * [2] & of xdc_softc's "nfree" (waiting for a free iorq/iopb)
196 * [3] & of xdc_softc's "ndone" (waiting for number of done iorq/iopb's
197 * to drop below XDC_SUBWAITLIM)
198 * [4] & an iorq (waiting for an XD_SUB_WAIT iorq to finish)
199 */
200
201
202 /*
203 * function prototypes
204 * "xdc_*" functions are internal, all others are external interfaces
205 */
206
207 /* internals */
208 int xdc_cmd __P((struct xdc_softc *, int, int, int, int, int, char *, int));
209 char *xdc_e2str __P((int));
210 int xdc_error __P((struct xdc_softc *, struct xd_iorq *,
211 struct xd_iopb *, int, int));
212 int xdc_ioctlcmd __P((struct xd_softc *, dev_t dev, struct xd_iocmd *));
213 void xdc_perror __P((struct xd_iorq *, struct xd_iopb *, int));
214 int xdc_piodriver __P((struct xdc_softc *, int, int));
215 int xdc_remove_iorq __P((struct xdc_softc *));
216 int xdc_reset __P((struct xdc_softc *, int, int, int, struct xd_softc *));
217 inline void xdc_rqinit __P((struct xd_iorq *, struct xdc_softc *,
218 struct xd_softc *, int, u_long, int,
219 caddr_t, struct buf *));
220 void xdc_rqtopb __P((struct xd_iorq *, struct xd_iopb *, int, int));
221 void xdc_start __P((struct xdc_softc *, int));
222 int xdc_startbuf __P((struct xdc_softc *, struct xd_softc *, struct buf *));
223 int xdc_submit_iorq __P((struct xdc_softc *, int, int));
224 void xdc_tick __P((void *));
225 void xdc_xdreset __P((struct xdc_softc *, struct xd_softc *));
226
227 /* machine interrupt hook */
228 int xdcintr __P((void *));
229
230 /* bdevsw, cdevsw */
231 bdev_decl(xd);
232 cdev_decl(xd);
233
234 /* autoconf */
235 static int xdcmatch __P((struct device *, struct cfdata *, void *));
236 static void xdcattach __P((struct device *, struct device *, void *));
237 static int xdc_print __P((void *, const char *name));
238
239 static int xdmatch __P((struct device *, struct cfdata *, void *));
240 static void xdattach __P((struct device *, struct device *, void *));
241 static void xd_init __P((struct xd_softc *));
242
243 static void xddummystrat __P((struct buf *));
244 int xdgetdisklabel __P((struct xd_softc *, void *));
245
246 /*
247 * cfattach's: device driver interface to autoconfig
248 */
249
250 struct cfattach xdc_ca = {
251 sizeof(struct xdc_softc), xdcmatch, xdcattach
252 };
253
254 struct cfattach xd_ca = {
255 sizeof(struct xd_softc), xdmatch, xdattach
256 };
257
258 extern struct cfdriver xd_cd;
259
260 struct xdc_attach_args { /* this is the "aux" args to xdattach */
261 int driveno; /* unit number */
262 char *dvmabuf; /* scratch buffer for reading disk label */
263 int fullmode; /* submit mode */
264 int booting; /* are we booting or not? */
265 };
266
267 /*
268 * dkdriver
269 */
270
271 struct dkdriver xddkdriver = {xdstrategy};
272
273 /*
274 * start: disk label fix code (XXX)
275 */
276
277 static void *xd_labeldata;
278
279 static void
280 xddummystrat(bp)
281 struct buf *bp;
282 {
283 if (bp->b_bcount != XDFM_BPS)
284 panic("xddummystrat");
285 bcopy(xd_labeldata, bp->b_un.b_addr, XDFM_BPS);
286 bp->b_flags |= B_DONE;
287 bp->b_flags &= ~B_BUSY;
288 }
289
290 int
291 xdgetdisklabel(xd, b)
292 struct xd_softc *xd;
293 void *b;
294 {
295 char *err;
296 struct sun_disklabel *sdl;
297
298 /* We already have the label data in `b'; setup for dummy strategy */
299 xd_labeldata = b;
300
301 /* Required parameter for readdisklabel() */
302 xd->sc_dk.dk_label->d_secsize = XDFM_BPS;
303
304 err = readdisklabel(MAKEDISKDEV(0, xd->sc_dev.dv_unit, RAW_PART),
305 xddummystrat,
306 xd->sc_dk.dk_label, xd->sc_dk.dk_cpulabel);
307 if (err) {
308 printf("%s: %s\n", xd->sc_dev.dv_xname, err);
309 return(XD_ERR_FAIL);
310 }
311
312 /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */
313 sdl = (struct sun_disklabel *)xd->sc_dk.dk_cpulabel->cd_block;
314 if (sdl->sl_magic == SUN_DKMAGIC)
315 xd->pcyl = sdl->sl_pcyl;
316 else {
317 printf("%s: WARNING: no `pcyl' in disk label.\n",
318 xd->sc_dev.dv_xname);
319 xd->pcyl = xd->sc_dk.dk_label->d_ncylinders +
320 xd->sc_dk.dk_label->d_acylinders;
321 printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n",
322 xd->sc_dev.dv_xname, xd->pcyl);
323 }
324
325 xd->ncyl = xd->sc_dk.dk_label->d_ncylinders;
326 xd->acyl = xd->sc_dk.dk_label->d_acylinders;
327 xd->nhead = xd->sc_dk.dk_label->d_ntracks;
328 xd->nsect = xd->sc_dk.dk_label->d_nsectors;
329 xd->sectpercyl = xd->nhead * xd->nsect;
330 xd->sc_dk.dk_label->d_secsize = XDFM_BPS; /* not handled by
331 * sun->bsd */
332 return(XD_ERR_AOK);
333 }
334
335 /*
336 * end: disk label fix code (XXX)
337 */
338
339 /*
340 * a u t o c o n f i g f u n c t i o n s
341 */
342
343 /*
344 * xdcmatch: determine if xdc is present or not. we do a
345 * soft reset to detect the xdc.
346 */
347
348 int xdcmatch(parent, cf, aux)
349 struct device *parent;
350 struct cfdata *cf;
351 void *aux;
352 {
353 struct confargs *ca = aux;
354
355 /* No default VME address. */
356 if (ca->ca_paddr == -1)
357 return (0);
358
359 /* Make sure something is there... */
360 if (bus_peek(ca->ca_bustype, ca->ca_paddr + 11, 1) == -1)
361 return (0);
362
363 /* Default interrupt priority. */
364 if (ca->ca_intpri == -1)
365 ca->ca_intpri = 2;
366
367 return (1);
368 }
369
370 /*
371 * xdcattach: attach controller
372 */
373 void
374 xdcattach(parent, self, aux)
375 struct device *parent, *self;
376 void *aux;
377 {
378 struct xdc_softc *xdc = (void *) self;
379 struct confargs *ca = aux;
380 struct xdc_attach_args xa;
381 int lcv, rqno, err;
382 struct xd_iopb_ctrl *ctl;
383
384 /* get addressing and intr level stuff from autoconfig and load it
385 * into our xdc_softc. */
386
387 xdc->xdc = (struct xdc *)
388 bus_mapin(ca->ca_bustype, ca->ca_paddr, sizeof(struct xdc));
389 xdc->bustype = ca->ca_bustype;
390 xdc->ipl = ca->ca_intpri;
391 xdc->vector = ca->ca_intvec;
392
393 for (lcv = 0; lcv < XDC_MAXDEV; lcv++)
394 xdc->sc_drives[lcv] = (struct xd_softc *) 0;
395
396 /* allocate and zero buffers
397 *
398 * note: we simplify the code by allocating the max number of iopbs and
399 * iorq's up front. thus, we avoid linked lists and the costs
400 * associated with them in exchange for wasting a little memory. */
401
402 xdc->iopbase = (struct xd_iopb *)
403 dvma_malloc(XDC_MAXIOPB * sizeof(struct xd_iopb)); /* KVA */
404 bzero(xdc->iopbase, XDC_MAXIOPB * sizeof(struct xd_iopb));
405 xdc->dvmaiopb = (struct xd_iopb *)
406 dvma_kvtopa(xdc->iopbase, xdc->bustype);
407 xdc->reqs = (struct xd_iorq *)
408 malloc(XDC_MAXIOPB * sizeof(struct xd_iorq), M_DEVBUF, M_NOWAIT);
409 if (xdc->reqs == NULL)
410 panic("xdc malloc");
411 bzero(xdc->reqs, XDC_MAXIOPB * sizeof(struct xd_iorq));
412
413 /* init free list, iorq to iopb pointers, and non-zero fields in the
414 * iopb which never change. */
415
416 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
417 xdc->reqs[lcv].iopb = &xdc->iopbase[lcv];
418 xdc->freereq[lcv] = lcv;
419 xdc->iopbase[lcv].fixd = 1; /* always the same */
420 xdc->iopbase[lcv].naddrmod = XDC_ADDRMOD; /* always the same */
421 xdc->iopbase[lcv].intr_vec = xdc->vector; /* always the same */
422 }
423 xdc->nfree = XDC_MAXIOPB;
424 xdc->nrun = 0;
425 xdc->waithead = xdc->waitend = xdc->nwait = 0;
426 xdc->ndone = 0;
427
428 /* init queue of waiting bufs */
429
430 xdc->sc_wq.b_active = 0;
431 xdc->sc_wq.b_actf = 0;
432 xdc->sc_wq.b_actb = &xdc->sc_wq.b_actf;
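	/*
	 * note: sc_wq is a dummy buf header; its b_actf/b_actb fields form
	 * the list of bufs that arrived while no iorq was free.  xdstrategy
	 * (and xdc_startbuf, on DVMA exhaustion) appends at the tail through
	 * b_actb, and xdc_startbuf pulls bufs back off the front when it is
	 * called with a null bp (from xdcintr and xdc_piodriver).
	 */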
433
434 /*
435 * section 7 of the manual tells us how to init the controller:
436 * - read controller parameters (6/0)
437 * - write controller parameters (5/0)
438 */
439
440 /* read controller parameters and ensure we have a 753/7053 */
441
442 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL);
443 if (rqno == XD_ERR_FAIL) {
444 printf(": couldn't read controller params\n");
445 return; /* shouldn't ever happen */
446 }
447 ctl = (struct xd_iopb_ctrl *) & xdc->iopbase[rqno];
448 if (ctl->ctype != XDCT_753) {
449 if (xdc->reqs[rqno].errno)
450 printf(": %s: ", xdc_e2str(xdc->reqs[rqno].errno));
451 printf(": doesn't identify as a 753/7053\n");
452 XDC_DONE(xdc, rqno, err);
453 return;
454 }
455 printf(": Xylogics 753/7053, PROM=0x%x.%02x.%02x\n",
456 ctl->eprom_partno, ctl->eprom_lvl, ctl->eprom_rev);
457 XDC_DONE(xdc, rqno, err);
458
459 /* now write controller parameters (xdc_cmd sets all params for us) */
460
461 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL);
462 XDC_DONE(xdc, rqno, err);
463 if (err) {
464 printf("%s: controller config error: %s\n",
465 xdc->sc_dev.dv_xname, xdc_e2str(err));
466 return;
467 }
468
469 /* link in interrupt with higher level software */
470 isr_add_vectored(xdcintr, (void *)xdc,
471 ca->ca_intpri, ca->ca_intvec);
472 evcnt_attach(&xdc->sc_dev, "intr", &xdc->sc_intrcnt);
473
474 /* now we must look for disks using autoconfig */
475 xa.booting = 1;
476 for (xa.driveno = 0; xa.driveno < XDC_MAXDEV; xa.driveno++)
477 (void) config_found(self, (void *) &xa, xdc_print);
478
479 /* start the watchdog clock */
480 timeout(xdc_tick, xdc, XDC_TICKCNT);
481 }
482
483 int
484 xdc_print(aux, name)
485 void *aux;
486 const char *name;
487 {
488 struct xdc_attach_args *xa = aux;
489
490 if (name != NULL)
491 printf("%s: ", name);
492
493 if (xa->driveno != -1)
494 printf(" drive %d", xa->driveno);
495
496 return UNCONF;
497 }
498
499 /*
500 * xdmatch: probe for disk.
501 *
502 * note: we almost always say disk is present. this allows us to
503 * spin up and configure a disk after the system is booted (we can
504 * call xdattach!). Also, wire down the relationship between the
505 * xd* and xdc* devices, to simplify boot device identification.
506 */
507 int
508 xdmatch(parent, cf, aux)
509 struct device *parent;
510 struct cfdata *cf;
511 void *aux;
512 {
513 struct xdc_attach_args *xa = aux;
514 int xd_unit;
515
516 /* Match only on the "wired-down" controller+disk. */
517 xd_unit = parent->dv_unit * 2 + xa->driveno;
518 if (cf->cf_unit != xd_unit)
519 return (0);
520
521 return (1);
522 }
523
524 /*
525 * xdattach: attach a disk.
526 */
527 void
528 xdattach(parent, self, aux)
529 struct device *parent, *self;
530 void *aux;
531
532 {
533 struct xd_softc *xd = (void *) self;
534 struct xdc_softc *xdc = (void *) parent;
535 struct xdc_attach_args *xa = aux;
536
537 printf("\n");
538
539 /*
540 * Always re-initialize the disk structure. We want statistics
541 * to start with a clean slate.
542 */
543 bzero(&xd->sc_dk, sizeof(xd->sc_dk));
544 xd->sc_dk.dk_driver = &xddkdriver;
545 xd->sc_dk.dk_name = xd->sc_dev.dv_xname;
546
547 xd->state = XD_DRIVE_UNKNOWN; /* to start */
548 xd->flags = 0;
549 xd->parent = xdc;
550
551 xd->xd_drive = xa->driveno;
552 xdc->sc_drives[xa->driveno] = xd;
553
554 /* Do init work common to attach and open. */
555 xd_init(xd);
556 dk_establish(&xd->sc_dk, &xd->sc_dev);
557 }
558
559 /*
560 * end of autoconfig functions
561 */
562
563 /*
564 * Initialize a disk. This can be called from both autoconf and
565 * also from xdopen/xdstrategy.
566 */
567 static void
568 xd_init(xd)
569 struct xd_softc *xd;
570 {
571 struct xdc_softc *xdc;
572 struct dkbad *dkb;
573 struct xd_iopb_drive *driopb;
574 void *dvmabuf;
575 int rqno, err, spt, mb, blk, lcv, fullmode, newstate;
576 extern int cold;
577
578 xdc = xd->parent;
579 xd->state = XD_DRIVE_ATTACHING;
580 newstate = XD_DRIVE_UNKNOWN;
581 fullmode = (cold) ? XD_SUB_POLL : XD_SUB_WAIT;
582 dvmabuf = dvma_malloc(XDFM_BPS);
583
584 /* first try and reset the drive */
585 rqno = xdc_cmd(xdc, XDCMD_RST, 0, xd->xd_drive, 0, 0, 0, fullmode);
586 XDC_DONE(xdc, rqno, err);
587 if (err == XD_ERR_NRDY) {
588 printf("%s: drive %d: off-line\n",
589 xd->sc_dev.dv_xname, xd->xd_drive);
590 goto done;
591 }
592 if (err) {
593 printf("%s: ERROR 0x%02x (%s)\n",
594 xd->sc_dev.dv_xname, err, xdc_e2str(err));
595 goto done;
596 }
597 printf("%s: drive %d ready\n",
598 xd->sc_dev.dv_xname, xd->xd_drive);
599
600 /* now set format parameters */
601
602 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_FMT, xd->xd_drive,
603 0, 0, 0, fullmode);
604 XDC_DONE(xdc, rqno, err);
605 if (err) {
606 printf("%s: write format parameters failed: %s\n",
607 xd->sc_dev.dv_xname, xdc_e2str(err));
608 goto done;
609 }
610
611 /* get drive parameters */
612 spt = 0;
613 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_DRV, xd->xd_drive,
614 0, 0, 0, fullmode);
615 if (rqno != XD_ERR_FAIL) {
616 driopb = (struct xd_iopb_drive *) & xdc->iopbase[rqno];
617 spt = driopb->sectpertrk;
618 }
619 XDC_DONE(xdc, rqno, err);
620 if (err) {
621 printf("%s: read drive parameters failed: %s\n",
622 xd->sc_dev.dv_xname, xdc_e2str(err));
623 goto done;
624 }
625
626 /*
627 * now set drive parameters (to semi-bogus values) so we can read the
628 * disk label.
629 */
630 xd->pcyl = xd->ncyl = 1;
631 xd->acyl = 0;
632 xd->nhead = 1;
633 xd->nsect = 1;
634 xd->sectpercyl = 1;
635 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */
636 xd->dkb.bt_bad[lcv].bt_cyl =
637 xd->dkb.bt_bad[lcv].bt_trksec = 0xffff;
638 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive,
639 0, 0, 0, fullmode);
640 XDC_DONE(xdc, rqno, err);
641 if (err) {
642 printf("%s: write drive parameters failed: %s\n",
643 xd->sc_dev.dv_xname, xdc_e2str(err));
644 goto done;
645 }
646
647 /* read disk label */
648 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive,
649 0, 1, dvmabuf, fullmode);
650 XDC_DONE(xdc, rqno, err);
651 if (err) {
652 printf("%s: reading disk label failed: %s\n",
653 xd->sc_dev.dv_xname, xdc_e2str(err));
654 goto done;
655 }
656 newstate = XD_DRIVE_NOLABEL;
657
658 xd->hw_spt = spt;
659 /* Attach the disk: must be before getdisklabel to malloc label */
660 disk_attach(&xd->sc_dk);
661
662 if (xdgetdisklabel(xd, dvmabuf) != XD_ERR_AOK)
663 goto done;
664
665 /* inform the user of what is up */
666 printf("%s: <%s>, pcyl %d, hw_spt %d\n",
667 xd->sc_dev.dv_xname,
668 (char *)dvmabuf, xd->pcyl, spt);
669 mb = xd->ncyl * (xd->nhead * xd->nsect) / (1048576 / XDFM_BPS);
670 printf("%s: %dMB, %d cyl, %d head, %d sec\n",
671 xd->sc_dev.dv_xname, mb,
672 xd->ncyl, xd->nhead, xd->nsect);
673
674 /* now set the real drive parameters! */
675 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive,
676 0, 0, 0, fullmode);
677 XDC_DONE(xdc, rqno, err);
678 if (err) {
679 printf("%s: write real drive parameters failed: %s\n",
680 xd->sc_dev.dv_xname, xdc_e2str(err));
681 goto done;
682 }
683 newstate = XD_DRIVE_ONLINE;
684
685 /*
686 * read bad144 table. this table resides on the first sector of the
687 * last track of the disk (i.e. second cyl of "acyl" area).
688 */
689 blk = (xd->ncyl + xd->acyl - 1) * (xd->nhead * xd->nsect) + /* last cyl */
690 (xd->nhead - 1) * xd->nsect; /* last head */
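	/*
	 * e.g. with a (purely hypothetical, for illustration) geometry of
	 * ncyl=842, acyl=2, nhead=15, nsect=67:
	 *   blk = (842+2-1)*15*67 + (15-1)*67 = 848153,
	 * i.e. sector 0 of the last track of the last cylinder.
	 */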
691 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive,
692 blk, 1, dvmabuf, fullmode);
693 XDC_DONE(xdc, rqno, err);
694 if (err) {
695 printf("%s: reading bad144 failed: %s\n",
696 xd->sc_dev.dv_xname, xdc_e2str(err));
697 goto done;
698 }
699
700 /* check dkbad for sanity */
701 dkb = (struct dkbad *) dvmabuf;
702 for (lcv = 0; lcv < 126; lcv++) {
703 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff ||
704 dkb->bt_bad[lcv].bt_cyl == 0) &&
705 dkb->bt_bad[lcv].bt_trksec == 0xffff)
706 continue; /* blank */
707 if (dkb->bt_bad[lcv].bt_cyl >= xd->ncyl)
708 break;
709 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xd->nhead)
710 break;
711 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xd->nsect)
712 break;
713 }
714 if (lcv != 126) {
715 printf("%s: warning: invalid bad144 sector!\n",
716 xd->sc_dev.dv_xname);
717 } else {
718 bcopy(dvmabuf, &xd->dkb, XDFM_BPS);
719 }
720
721 done:
722 xd->state = newstate;
723 dvma_free(dvmabuf, XDFM_BPS);
724 }
725
726 /*
727 * { b , c } d e v s w f u n c t i o n s
728 */
729
730 /*
731 * xdclose: close device
732 */
733 int
734 xdclose(dev, flag, fmt, p)
735 dev_t dev;
736 int flag, fmt;
737 struct proc *p;
738 {
739 struct xd_softc *xd = xd_cd.cd_devs[DISKUNIT(dev)];
740 int part = DISKPART(dev);
741
742 /* clear mask bits */
743
744 switch (fmt) {
745 case S_IFCHR:
746 xd->sc_dk.dk_copenmask &= ~(1 << part);
747 break;
748 case S_IFBLK:
749 xd->sc_dk.dk_bopenmask &= ~(1 << part);
750 break;
751 }
752 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
753
754 return 0;
755 }
756
757 /*
758 * xddump: crash dump system
759 */
760 int
761 xddump(dev, blkno, va, sz)
762 dev_t dev;
763 daddr_t blkno;
764 caddr_t va;
765 size_t sz;
766 {
767 int unit, part;
768 struct xd_softc *xd;
769
770 unit = DISKUNIT(dev);
771 if (unit >= xd_cd.cd_ndevs)
772 return ENXIO;
773 part = DISKPART(dev);
774
775 xd = xd_cd.cd_devs[unit];
776
777 printf("%s%c: crash dump not supported (yet)\n",
778 xd->sc_dev.dv_xname, 'a' + part);
779
780 return ENXIO;
781
782 /* outline: globals: "dumplo" == sector number of partition to start
783 * dump at (convert to physical sector with partition table)
784 * "dumpsize" == size of dump in clicks "physmem" == size of physical
785 * memory (clicks, ctob() to get bytes) (normal case: dumpsize ==
786 * physmem)
787 *
788 * dump a copy of physical memory to the dump device starting at sector
789 * "dumplo" in the swap partition (make sure > 0). map in pages as
790 * we go. use polled I/O.
791 *
792 * XXX how to handle NON_CONTIG?
793 */
794 }
795
796 /*
797 * xdioctl: ioctls on XD drives. based on ioctl's of other netbsd disks.
798 */
799 int
800 xdioctl(dev, command, addr, flag, p)
801 dev_t dev;
802 u_long command;
803 caddr_t addr;
804 int flag;
805 struct proc *p;
806
807 {
808 struct xd_softc *xd;
809 struct xd_iocmd *xio;
810 int error, s, unit;
811
812 unit = DISKUNIT(dev);
813
814 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL)
815 return (ENXIO);
816
817 /* switch on ioctl type */
818
819 switch (command) {
820 case DIOCSBAD: /* set bad144 info */
821 if ((flag & FWRITE) == 0)
822 return EBADF;
823 s = splbio();
824 bcopy(addr, &xd->dkb, sizeof(xd->dkb));
825 splx(s);
826 return 0;
827
828 case DIOCGDINFO: /* get disk label */
829 bcopy(xd->sc_dk.dk_label, addr, sizeof(struct disklabel));
830 return 0;
831
832 case DIOCGPART: /* get partition info */
833 ((struct partinfo *) addr)->disklab = xd->sc_dk.dk_label;
834 ((struct partinfo *) addr)->part =
835 &xd->sc_dk.dk_label->d_partitions[DISKPART(dev)];
836 return 0;
837
838 case DIOCSDINFO: /* set disk label */
839 if ((flag & FWRITE) == 0)
840 return EBADF;
841 error = setdisklabel(xd->sc_dk.dk_label,
842 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0,
843 xd->sc_dk.dk_cpulabel);
844 if (error == 0) {
845 if (xd->state == XD_DRIVE_NOLABEL)
846 xd->state = XD_DRIVE_ONLINE;
847 }
848 return error;
849
850 case DIOCWLABEL: /* change write status of disk label */
851 if ((flag & FWRITE) == 0)
852 return EBADF;
853 if (*(int *) addr)
854 xd->flags |= XD_WLABEL;
855 else
856 xd->flags &= ~XD_WLABEL;
857 return 0;
858
859 case DIOCWDINFO: /* write disk label */
860 if ((flag & FWRITE) == 0)
861 return EBADF;
862 error = setdisklabel(xd->sc_dk.dk_label,
863 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0,
864 xd->sc_dk.dk_cpulabel);
865 if (error == 0) {
866 if (xd->state == XD_DRIVE_NOLABEL)
867 xd->state = XD_DRIVE_ONLINE;
868
869 /* Simulate opening partition 0 so write succeeds. */
870 xd->sc_dk.dk_openmask |= (1 << 0);
871 error = writedisklabel(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART),
872 xdstrategy, xd->sc_dk.dk_label,
873 xd->sc_dk.dk_cpulabel);
874 xd->sc_dk.dk_openmask =
875 xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
876 }
877 return error;
878
879 case DIOSXDCMD:
880 xio = (struct xd_iocmd *) addr;
881 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
882 return (error);
883 return (xdc_ioctlcmd(xd, dev, xio));
884
885 default:
886 return ENOTTY;
887 }
888 }
889
890 /*
891 * xdopen: open drive
892 */
893 int
894 xdopen(dev, flag, fmt, p)
895 dev_t dev;
896 int flag, fmt;
897 struct proc *p;
898 {
899 int err, unit, part, s;
900 struct xd_softc *xd;
901
902 /* first, could it be a valid target? */
903 unit = DISKUNIT(dev);
904 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL)
905 return (ENXIO);
906 part = DISKPART(dev);
907 err = 0;
908
909 /*
910 * If some other process is doing the init, sleep.
911 */
912 s = splbio();
913 while (xd->state == XD_DRIVE_ATTACHING) {
914 if (tsleep(&xd->state, PRIBIO, "xdopen", 0)) {
915 err = EINTR;
916 goto done;
917 }
918 }
919 /* Do we need to init the drive? */
920 if (xd->state == XD_DRIVE_UNKNOWN) {
921 xd_init(xd);
922 wakeup(&xd->state);
923 }
924 /* Was the init successful? */
925 if (xd->state == XD_DRIVE_UNKNOWN) {
926 err = EIO;
927 goto done;
928 }
929
930 /* check for partition */
931 if (part != RAW_PART &&
932 (part >= xd->sc_dk.dk_label->d_npartitions ||
933 xd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
934 err = ENXIO;
935 goto done;
936 }
937
938 /* set open masks */
939 switch (fmt) {
940 case S_IFCHR:
941 xd->sc_dk.dk_copenmask |= (1 << part);
942 break;
943 case S_IFBLK:
944 xd->sc_dk.dk_bopenmask |= (1 << part);
945 break;
946 }
947 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
948
949 done:
950 splx(s);
951 return (err);
952 }
953
954 int
955 xdread(dev, uio, flags)
956 dev_t dev;
957 struct uio *uio;
958 int flags;
959 {
960
961 return (physio(xdstrategy, NULL, dev, B_READ, minphys, uio));
962 }
963
964 int
965 xdwrite(dev, uio, flags)
966 dev_t dev;
967 struct uio *uio;
968 int flags;
969 {
970
971 return (physio(xdstrategy, NULL, dev, B_WRITE, minphys, uio));
972 }
973
974
975 /*
976 * xdsize: return size of a partition for a dump
977 */
978 int
979 xdsize(dev)
980 dev_t dev;
981
982 {
983 struct xd_softc *xdsc;
984 int unit, part, size, omask;
985
986 /* valid unit? */
987 unit = DISKUNIT(dev);
988 if (unit >= xd_cd.cd_ndevs || (xdsc = xd_cd.cd_devs[unit]) == NULL)
989 return (-1);
990
991 part = DISKPART(dev);
992 omask = xdsc->sc_dk.dk_openmask & (1 << part);
993
994 if (omask == 0 && xdopen(dev, 0, S_IFBLK, NULL) != 0)
995 return (-1);
996
997 /* do it */
998 if (xdsc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
999 size = -1; /* only give valid size for swap partitions */
1000 else
1001 size = xdsc->sc_dk.dk_label->d_partitions[part].p_size *
1002 (xdsc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1003 if (omask == 0 && xdclose(dev, 0, S_IFBLK, NULL) != 0)
1004 return (-1);
1005 return (size);
1006 }
1007
1008 /*
1009 * xdstrategy: buffering system interface to xd.
1010 */
1011 void
1012 xdstrategy(bp)
1013 struct buf *bp;
1014
1015 {
1016 struct xd_softc *xd;
1017 struct xdc_softc *parent;
1018 struct buf *wq;
1019 int s, unit;
1020
1021 unit = DISKUNIT(bp->b_dev);
1022
1023 /* check for live device */
1024
1025 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == 0 ||
1026 bp->b_blkno < 0 ||
1027 (bp->b_bcount % xd->sc_dk.dk_label->d_secsize) != 0) {
1028 bp->b_error = EINVAL;
1029 goto bad;
1030 }
1031
1032 /* There should always be an open first. */
1033 if (xd->state == XD_DRIVE_UNKNOWN) {
1034 bp->b_error = EIO;
1035 goto bad;
1036 }
1037
1038 if (xd->state != XD_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) {
1039 /* no I/O to unlabeled disks, unless raw partition */
1040 bp->b_error = EIO;
1041 goto bad;
1042 }
1043 /* short circuit zero length request */
1044
1045 if (bp->b_bcount == 0)
1046 goto done;
1047
1048 /* check bounds with label (disksubr.c). Determine the size of the
1049 * transfer, and make sure it is within the boundaries of the
1050 * partition. Adjust transfer if needed, and signal errors or early
1051 * completion. */
1052
1053 if (bounds_check_with_label(bp, xd->sc_dk.dk_label,
1054 (xd->flags & XD_WLABEL) != 0) <= 0)
1055 goto done;
1056
1057 /*
1058 * now we know we have a valid buf structure that we need to do I/O
1059 * on.
1060 *
1061 * note that we don't disksort because the controller has a sorting
1062 * algorithm built into the hardware.
1063 */
1064
1065 s = splbio(); /* protect the queues */
1066
1067 /* first, give jobs in front of us a chance */
1068 parent = xd->parent;
1069 while (parent->nfree > 0 && parent->sc_wq.b_actf)
1070 if (xdc_startbuf(parent, NULL, NULL) != XD_ERR_AOK)
1071 break;
1072
1073 /*
1074 * if there are no free iorq's, then we just queue and return. the
1075 * buffs will get picked up later by xdcintr().
1076 */
1077 if (parent->nfree == 0) {
1078 wq = &xd->parent->sc_wq;
1079 bp->b_actf = 0;
1080 bp->b_actb = wq->b_actb;
1081 *wq->b_actb = bp;
1082 wq->b_actb = &bp->b_actf;
1083 splx(s);
1084 return;
1085 }
1086
1087 /* now we have free iopb's and we are at splbio... start 'em up */
1088 if (xdc_startbuf(parent, xd, bp) != XD_ERR_AOK) {
1089 splx(s);
return;
1090 }
1091
1092 /* done! */
1093
1094 splx(s);
1095 return;
1096
1097 bad: /* tells upper layers we have an error */
1098 bp->b_flags |= B_ERROR;
1099 done: /* tells upper layers we are done with this
1100 * buf */
1101 bp->b_resid = bp->b_bcount;
1102 biodone(bp);
1103 }
1104 /*
1105 * end of {b,c}devsw functions
1106 */
1107
1108 /*
1109 * i n t e r r u p t f u n c t i o n
1110 *
1111 * xdcintr: hardware interrupt.
1112 */
1113 int
1114 xdcintr(v)
1115 void *v;
1116
1117 {
1118 struct xdc_softc *xdcsc = v;
1119
1120 /* kick the event counter */
1121 xdcsc->sc_intrcnt.ev_count++;
1122
1123 /* remove as many done IOPBs as possible */
1124 xdc_remove_iorq(xdcsc);
1125
1126 /* start any iorq's already waiting */
1127 xdc_start(xdcsc, XDC_MAXIOPB);
1128
1129 /* fill up any remaining iorq's with queue'd buffers */
1130 while (xdcsc->nfree > 0 && xdcsc->sc_wq.b_actf)
1131 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK)
1132 break;
1133
1134 return (1);
1135 }
1136 /*
1137 * end of interrupt function
1138 */
1139
1140 /*
1141 * i n t e r n a l f u n c t i o n s
1142 */
1143
1144 /*
1145 * xdc_rqinit: fill out the fields of an I/O request
1146 */
1147
1148 inline void
1149 xdc_rqinit(rq, xdc, xd, md, blk, cnt, db, bp)
1150 struct xd_iorq *rq;
1151 struct xdc_softc *xdc;
1152 struct xd_softc *xd;
1153 int md;
1154 u_long blk;
1155 int cnt;
1156 caddr_t db;
1157 struct buf *bp;
1158 {
1159 rq->xdc = xdc;
1160 rq->xd = xd;
1161 rq->ttl = XDC_MAXTTL + 10;
1162 rq->mode = md;
1163 rq->tries = rq->errno = rq->lasterror = 0;
1164 rq->blockno = blk;
1165 rq->sectcnt = cnt;
1166 rq->dbuf = rq->dbufbase = db;
1167 rq->buf = bp;
1168 }
1169
1170 /*
1171 * xdc_rqtopb: load up an IOPB based on an iorq
1172 */
1173 void
1174 xdc_rqtopb(iorq, iopb, cmd, subfun)
1175 struct xd_iorq *iorq;
1176 struct xd_iopb *iopb;
1177 int cmd, subfun;
1178
1179 {
1180 u_long block, dp;
1181
1182 /* standard stuff */
1183
1184 iopb->errs = iopb->done = 0;
1185 iopb->comm = cmd;
1186 iopb->errno = iopb->status = 0;
1187 iopb->subfun = subfun;
1188 if (iorq->xd)
1189 iopb->unit = iorq->xd->xd_drive;
1190 else
1191 iopb->unit = 0;
1192
1193 /* check for alternate IOPB format */
1194
1195 if (cmd == XDCMD_WRP) {
1196 switch (subfun) {
1197 case XDFUN_CTL:{
1198 struct xd_iopb_ctrl *ctrl =
1199 (struct xd_iopb_ctrl *) iopb;
1200 iopb->lll = 0;
1201 iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL)
1202 ? 0
1203 : iorq->xdc->ipl;
1204 ctrl->param_a = XDPA_TMOD | XDPA_DACF;
1205 ctrl->param_b = XDPB_ROR | XDPB_TDT_3_2USEC;
1206 ctrl->param_c = XDPC_OVS | XDPC_COP | XDPC_ASR |
1207 XDPC_RBC | XDPC_ECC2;
1208 ctrl->throttle = XDC_THROTTLE;
1209 #ifdef sparc
1210 if (CPU_ISSUN4 && cpuinfo.cpu_type == CPUTYP_4_300)
1211 ctrl->delay = XDC_DELAY_4_300;
1212 else
1213 ctrl->delay = XDC_DELAY_SPARC;
1214 #endif
1215 #ifdef sun3
1216 ctrl->delay = XDC_DELAY_SUN3;
1217 #endif
1218 break;
1219 }
1220 case XDFUN_DRV:{
1221 struct xd_iopb_drive *drv =
1222 (struct xd_iopb_drive *)iopb;
1223 /* we assume that the disk label has the right
1224 * info */
1225 if (XD_STATE(iorq->mode) == XD_SUB_POLL)
1226 drv->dparam_ipl = (XDC_DPARAM << 3);
1227 else
1228 drv->dparam_ipl = (XDC_DPARAM << 3) |
1229 iorq->xdc->ipl;
1230 drv->maxsect = iorq->xd->nsect - 1;
1231 drv->maxsector = drv->maxsect;
1232 /* note: maxsector != maxsect only if you are
1233 * doing cyl sparing */
1234 drv->headoff = 0;
1235 drv->maxcyl = iorq->xd->pcyl - 1;
1236 drv->maxhead = iorq->xd->nhead - 1;
1237 break;
1238 }
1239 case XDFUN_FMT:{
1240 struct xd_iopb_format *form =
1241 (struct xd_iopb_format *) iopb;
1242 if (XD_STATE(iorq->mode) == XD_SUB_POLL)
1243 form->interleave_ipl = (XDC_INTERLEAVE << 3);
1244 else
1245 form->interleave_ipl = (XDC_INTERLEAVE << 3) |
1246 iorq->xdc->ipl;
1247 form->field1 = XDFM_FIELD1;
1248 form->field2 = XDFM_FIELD2;
1249 form->field3 = XDFM_FIELD3;
1250 form->field4 = XDFM_FIELD4;
1251 form->bytespersec = XDFM_BPS;
1252 form->field6 = XDFM_FIELD6;
1253 form->field7 = XDFM_FIELD7;
1254 break;
1255 }
1256 }
1257 } else {
1258
1259 /* normal IOPB case (harmless to RDP command) */
1260
1261 iopb->lll = 0;
1262 iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL)
1263 ? 0
1264 : iorq->xdc->ipl;
1265 iopb->sectcnt = iorq->sectcnt;
1266 block = iorq->blockno;
1267 if (iorq->xd == NULL || block == 0) {
1268 iopb->sectno = iopb->headno = iopb->cylno = 0;
1269 } else {
1270 iopb->sectno = block % iorq->xd->nsect;
1271 block = block / iorq->xd->nsect;
1272 iopb->headno = block % iorq->xd->nhead;
1273 block = block / iorq->xd->nhead;
1274 iopb->cylno = block;
1275 }
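	/*
	 * the block -> cyl/head/sect split above, worked through for a
	 * hypothetical geometry (nsect=67, nhead=15), block 12345:
	 *   sectno = 12345 % 67 = 17; 12345 / 67 = 184;
	 *   headno = 184 % 15 = 4; cylno = 184 / 15 = 12.
	 */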
1276 iopb->daddr = dp = (iorq->dbuf == NULL) ? 0 :
1277 dvma_kvtopa(iorq->dbuf, iorq->xdc->bustype);
1278 iopb->addrmod = XDC_ADDRMOD;
1279 }
1280 }
1281
1282 /*
1283 * xdc_cmd: front end for POLL'd and WAIT'd commands. Returns rqno.
1284 * If you've already got an IORQ, you can call submit directly (currently
1285 * there is no need to do this). NORM requests are handled separately.
1286 */
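/*
 * typical caller pattern (cf. xdcattach and xd_init in this file):
 *
 *	rqno = xdc_cmd(xdcsc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL);
 *	...examine xdcsc->iopbase[rqno] if the command returned data...
 *	XDC_DONE(xdcsc, rqno, err);
 *	if (err) ...report xdc_e2str(err)...
 */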
1287 int
1288 xdc_cmd(xdcsc, cmd, subfn, unit, block, scnt, dptr, fullmode)
1289 struct xdc_softc *xdcsc;
1290 int cmd, subfn, unit, block, scnt;
1291 char *dptr;
1292 int fullmode;
1293
1294 {
1295 struct xd_iorq *iorq;
1296 struct xd_iopb *iopb;
1297 int rqno, retry;
1298 int submode = XD_STATE(fullmode);
1299
1300 /* get iorq/iopb */
1301 switch (submode) {
1302 case XD_SUB_POLL:
1303 while (xdcsc->nfree == 0) {
1304 if (xdc_piodriver(xdcsc, 0, 1) != XD_ERR_AOK)
1305 return (XD_ERR_FAIL);
1306 }
1307 break;
1308 case XD_SUB_WAIT:
1309 retry = 1;
1310 while (retry) {
1311 while (xdcsc->nfree == 0) {
1312 if (tsleep(&xdcsc->nfree, PRIBIO, "xdnfree", 0))
1313 return (XD_ERR_FAIL);
1314 }
1315 while (xdcsc->ndone > XDC_SUBWAITLIM) {
1316 if (tsleep(&xdcsc->ndone, PRIBIO, "xdsubwait", 0))
1317 return (XD_ERR_FAIL);
1318 }
1319 if (xdcsc->nfree)
1320 retry = 0; /* got it */
1321 }
1322 break;
1323 default:
1324 return (XD_ERR_FAIL); /* illegal */
1325 }
1326 if (xdcsc->nfree == 0)
1327 panic("xdcmd nfree");
1328 rqno = XDC_RQALLOC(xdcsc);
1329 iorq = &xdcsc->reqs[rqno];
1330 iopb = iorq->iopb;
1331
1332
1333 /* init iorq/iopb */
1334 xdc_rqinit(iorq, xdcsc,
1335 (unit == XDC_NOUNIT) ? NULL : xdcsc->sc_drives[unit],
1336 fullmode, block, scnt, dptr, NULL);
1337
1338 /* load IOPB from iorq */
1339 xdc_rqtopb(iorq, iopb, cmd, subfn);
1340
1341 /* submit it for processing */
1342 xdc_submit_iorq(xdcsc, rqno, fullmode); /* error code will be in iorq */
1343
1344 return (rqno);
1345 }
1346
1347 /*
1348 * xdc_startbuf
1349 * start a buffer running, assumes nfree > 0
1350 */
1351 int
1352 xdc_startbuf(xdcsc, xdsc, bp)
1353 struct xdc_softc *xdcsc;
1354 struct xd_softc *xdsc;
1355 struct buf *bp;
1356
1357 {
1358 int rqno, partno;
1359 struct xd_iorq *iorq;
1360 struct xd_iopb *iopb;
1361 struct buf *wq;
1362 u_long block;
1363 caddr_t dbuf;
1364
1365 if (!xdcsc->nfree)
1366 panic("xdc_startbuf free");
1367 rqno = XDC_RQALLOC(xdcsc);
1368 iorq = &xdcsc->reqs[rqno];
1369 iopb = iorq->iopb;
1370
1371 /* get buf */
1372
1373 if (bp == NULL) {
1374 bp = xdcsc->sc_wq.b_actf;
1375 if (!bp)
1376 panic("xdc_startbuf bp");
1377 wq = bp->b_actf;
1378 if (wq)
1379 wq->b_actb = bp->b_actb;
1380 else
1381 xdcsc->sc_wq.b_actb = bp->b_actb;
1382 *bp->b_actb = wq;
1383 xdsc = xdcsc->sc_drives[DISKUNIT(bp->b_dev)];
1384 }
1385 partno = DISKPART(bp->b_dev);
1386 #ifdef XDC_DEBUG
1387 printf("xdc_startbuf: %s%c: %s block %d\n", xdsc->sc_dev.dv_xname,
1388 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno);
1389 printf("xdc_startbuf: b_bcount %d, b_data 0x%x\n",
1390 bp->b_bcount, bp->b_data);
1391 #endif
1392
1393 /*
1394 * load request. we have to calculate the correct block number based
1395 * on partition info.
1396 *
1397 * also, note that there are two kinds of buf structures, those with
1398 * B_PHYS set and those without B_PHYS. if B_PHYS is set, then it is
1399 * a raw I/O (to a cdevsw) and we are doing I/O directly to the users'
1400 * buffer which has already been mapped into DVMA space. (Not on sun3)
1401 * However, if B_PHYS is not set, then the buffer is a normal system
1402 * buffer which does *not* live in DVMA space. In that case we call
1403 * dvma_mapin to map it into DVMA space so we can do the DMA to it.
1404 *
1405 * in cases where we do a dvma_mapin, note that iorq points to the buffer
1406 * as mapped into DVMA space, whereas the bp->b_data points to its
1407 * non-DVMA mapping.
1408 *
1409 * XXX - On the sun3, B_PHYS does NOT mean the buffer is mapped
1410 * into dvma space, only that it was remapped into the kernel.
1411 * We ALWAYS have to remap the kernel buf into DVMA space.
1412 * (It is done inexpensively, using whole segments!)
1413 */
1414
1415 block = bp->b_blkno + ((partno == RAW_PART) ? 0 :
1416 xdsc->sc_dk.dk_label->d_partitions[partno].p_offset);
1417
1418 dbuf = dvma_mapin(bp->b_data, bp->b_bcount, 0);
1419 if (dbuf == NULL) { /* out of DVMA space */
1420 printf("%s: warning: out of DVMA space\n",
1421 xdcsc->sc_dev.dv_xname);
1422 XDC_FREE(xdcsc, rqno);
1423 wq = &xdcsc->sc_wq; /* put at end of queue */
1424 bp->b_actf = 0;
1425 bp->b_actb = wq->b_actb;
1426 *wq->b_actb = bp;
1427 wq->b_actb = &bp->b_actf;
1428 return (XD_ERR_FAIL); /* XXX: need some sort of
1429 * call-back scheme here? */
1430 }
1431
1432 /* init iorq and load iopb from it */
1433
1434 xdc_rqinit(iorq, xdcsc, xdsc, XD_SUB_NORM | XD_MODE_VERBO, block,
1435 bp->b_bcount / XDFM_BPS, dbuf, bp);
1436
1437 xdc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XDCMD_RD : XDCMD_WR, 0);
1438
1439 /* Instrumentation. */
1440 disk_busy(&xdsc->sc_dk);
1441
1442 /* now submit [note that xdc_submit_iorq can never fail on NORM reqs] */
1443
1444 xdc_submit_iorq(xdcsc, rqno, XD_SUB_NORM);
1445 return (XD_ERR_AOK);
1446 }
1447
1448
1449 /*
1450 * xdc_submit_iorq: submit an iorq for processing. returns XD_ERR_AOK
1451 * if ok. if it fails it returns an error code. type is XD_SUB_*.
1452 *
1453 * note: caller frees iorq in all cases except NORM
1454 *
1455 * return value:
1456 * NORM: XD_AOK (req pending), XD_FAIL (couldn't submit request)
1457 * WAIT: XD_AOK (success), <error-code> (failed)
1458 * POLL: <same as WAIT>
1459 * NOQ : <same as NORM>
1460 *
1461 * there are three sources for i/o requests:
1462 * [1] xdstrategy: normal block I/O, using "struct buf" system.
1463 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts.
1464 * [3] open/ioctl: these are I/O requests done in the context of a process,
1465 * and the process should block until they are done.
1466 *
1467 * software state is stored in the iorq structure. each iorq has an
1468 * iopb structure. the hardware understands the iopb structure.
1469 * every command must go through an iopb. a 7053 can only handle
1470 * XDC_MAXIOPB (31) active iopbs at one time. iopbs are allocated in
1471 * DVMA space at boot up time. what happens if we run out of iopb's?
1472 * for i/o type [1], the buffers are queued at the "buff" layer and
1473 * picked up later by the interrupt routine. for case [2] the
1474 * programmed i/o driver is called with a special flag that says
1475 * return when one iopb is free. for case [3] the process can sleep
1476 * on the iorq free list until some iopbs are available.
1477 */
1478
1479
1480 int
1481 xdc_submit_iorq(xdcsc, iorqno, type)
1482 struct xdc_softc *xdcsc;
1483 int iorqno;
1484 int type;
1485
1486 {
1487 u_long iopbaddr;
1488 struct xd_iorq *iorq = &xdcsc->reqs[iorqno];
1489
1490 #ifdef XDC_DEBUG
1491 printf("xdc_submit_iorq(%s, no=%d, type=%d)\n", xdcsc->sc_dev.dv_xname,
1492 iorqno, type);
1493 #endif
1494
1495 /* first check and see if controller is busy */
1496 if (xdcsc->xdc->xdc_csr & XDC_ADDING) {
1497 #ifdef XDC_DEBUG
1498 printf("xdc_submit_iorq: XDC not ready (ADDING)\n");
1499 #endif
1500 if (type == XD_SUB_NOQ)
1501 return (XD_ERR_FAIL); /* failed */
1502 XDC_TWAIT(xdcsc, iorqno); /* put at end of waitq */
1503 switch (type) {
1504 case XD_SUB_NORM:
1505 return XD_ERR_AOK; /* success */
1506 case XD_SUB_WAIT:
1507 while (iorq->iopb->done == 0) {
1508 sleep(iorq, PRIBIO);
1509 }
1510 return (iorq->errno);
1511 case XD_SUB_POLL:
1512 return (xdc_piodriver(xdcsc, iorqno, 0));
1513 default:
1514 panic("xdc_submit_iorq adding");
1515 }
1516 }
1517 #ifdef XDC_DEBUG
1518 {
1519 u_char *rio = (u_char *) iorq->iopb;
1520 int sz = sizeof(struct xd_iopb), lcv;
1521 printf("%s: aio #%d [",
1522 xdcsc->sc_dev.dv_xname, iorq - xdcsc->reqs);
1523 for (lcv = 0; lcv < sz; lcv++)
1524 printf(" %02x", rio[lcv]);
1525 printf("]\n");
1526 }
1527 #endif /* XDC_DEBUG */
1528
1529 /* controller not busy, start command */
1530 iopbaddr = dvma_kvtopa(iorq->iopb, xdcsc->bustype);
1531 XDC_GO(xdcsc->xdc, iopbaddr); /* go! */
1532 xdcsc->nrun++;
1533 /* command now running, wrap it up */
1534 switch (type) {
1535 case XD_SUB_NORM:
1536 case XD_SUB_NOQ:
1537 return (XD_ERR_AOK); /* success */
1538 case XD_SUB_WAIT:
1539 while (iorq->iopb->done == 0) {
1540 sleep(iorq, PRIBIO);
1541 }
1542 return (iorq->errno);
1543 case XD_SUB_POLL:
1544 return (xdc_piodriver(xdcsc, iorqno, 0));
1545 default:
1546 panic("xdc_submit_iorq wrap up");
1547 }
1548 panic("xdc_submit_iorq");
1549 return 0; /* not reached */
1550 }
1551
1552
1553 /*
1554 * xdc_piodriver
1555 *
1556 * programmed i/o driver. this function takes over the computer
1557 * and drains off all i/o requests. it returns the status of the iorq
1558 * the caller is interested in. if freeone is true, then it returns
1559 * when there is a free iorq.
1560 */
1561 int
1562 xdc_piodriver(xdcsc, iorqno, freeone)
1563 struct xdc_softc *xdcsc;
1564 int iorqno;
1565 int freeone;
1566
1567 {
1568 int nreset = 0;
1569 int retval = 0;
1570 u_long count;
1571 struct xdc *xdc = xdcsc->xdc;
1572 #ifdef XDC_DEBUG
1573 printf("xdc_piodriver(%s, %d, freeone=%d)\n", xdcsc->sc_dev.dv_xname,
1574 iorqno, freeone);
1575 #endif
1576
1577 while (xdcsc->nwait || xdcsc->nrun) {
1578 #ifdef XDC_DEBUG
1579 printf("xdc_piodriver: wait=%d, run=%d\n",
1580 xdcsc->nwait, xdcsc->nrun);
1581 #endif
1582 XDC_WAIT(xdc, count, XDC_MAXTIME, (XDC_REMIOPB | XDC_F_ERROR));
1583 #ifdef XDC_DEBUG
1584 printf("xdc_piodriver: done wait with count = %d\n", count);
1585 #endif
1586 /* we expect some progress soon */
1587 if (count == 0 && nreset >= 2) {
1588 xdc_reset(xdcsc, 0, XD_RSET_ALL, XD_ERR_FAIL, 0);
1589 #ifdef XDC_DEBUG
1590 printf("xdc_piodriver: timeout\n");
1591 #endif
1592 return (XD_ERR_FAIL);
1593 }
1594 if (count == 0) {
1595 if (xdc_reset(xdcsc, 0,
1596 (nreset++ == 0) ? XD_RSET_NONE : iorqno,
1597 XD_ERR_FAIL,
1598 0) == XD_ERR_FAIL)
1599 return (XD_ERR_FAIL); /* flushes all but POLL
1600 * requests, resets */
1601 continue;
1602 }
1603 xdc_remove_iorq(xdcsc); /* could resubmit request */
1604 if (freeone) {
1605 if (xdcsc->nrun < XDC_MAXIOPB) {
1606 #ifdef XDC_DEBUG
1607 printf("xdc_piodriver: done: one free\n");
1608 #endif
1609 return (XD_ERR_AOK);
1610 }
1611 continue; /* don't xdc_start */
1612 }
1613 xdc_start(xdcsc, XDC_MAXIOPB);
1614 }
1615
1616 /* get return value */
1617
1618 retval = xdcsc->reqs[iorqno].errno;
1619
1620 #ifdef XDC_DEBUG
1621 printf("xdc_piodriver: done, retval = 0x%x (%s)\n",
1622 xdcsc->reqs[iorqno].errno, xdc_e2str(xdcsc->reqs[iorqno].errno));
1623 #endif
1624
1625 /* now that we've drained everything, start up any bufs that have
1626 * queued */
1627
1628 while (xdcsc->nfree > 0 && xdcsc->sc_wq.b_actf)
1629 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK)
1630 break;
1631
1632 return (retval);
1633 }
1634
1635 /*
1636 * xdc_xdreset: reset one drive. NOTE: assumes xdc was just reset.
1637 * we steal iopb[0] for this, but we put it back when we are done.
1638 */
1639 void
1640 xdc_xdreset(xdcsc, xdsc)
1641 struct xdc_softc *xdcsc;
1642 struct xd_softc *xdsc;
1643
1644 {
1645 struct xd_iopb tmpiopb;
1646 u_long addr;
1647 int del;
1648 bcopy(xdcsc->iopbase, &tmpiopb, sizeof(tmpiopb));
1649 bzero(xdcsc->iopbase, sizeof(tmpiopb));
1650 xdcsc->iopbase->comm = XDCMD_RST;
1651 xdcsc->iopbase->unit = xdsc->xd_drive;
1652 addr = (u_long) xdcsc->dvmaiopb;
1653 XDC_GO(xdcsc->xdc, addr); /* go! */
1654 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_REMIOPB);
1655 if (del <= 0 || xdcsc->iopbase->errs) {
1656 printf("%s: off-line: %s\n", xdcsc->sc_dev.dv_xname,
1657 xdc_e2str(xdcsc->iopbase->errno));
1658 xdcsc->xdc->xdc_csr = XDC_RESET;
1659 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET);
1660 if (del <= 0)
1661 panic("xdc_reset");
1662 } else {
1663 xdcsc->xdc->xdc_csr = XDC_CLRRIO; /* clear RIO */
1664 }
1665 bcopy(&tmpiopb, xdcsc->iopbase, sizeof(tmpiopb));
1666 }
1667
1668
1669 /*
1670 * xdc_reset: reset everything: requests are marked as errors except
1671 * a polled request (which is resubmitted)
1672 */
1673 int
1674 xdc_reset(xdcsc, quiet, blastmode, error, xdsc)
1675 struct xdc_softc *xdcsc;
1676 int quiet, blastmode, error;
1677 struct xd_softc *xdsc;
1678
1679 {
1680 int del = 0, lcv, retval = XD_ERR_AOK;
1681 int oldfree = xdcsc->nfree;
1682 struct xd_iorq *iorq;
1683
1684 /* soft reset hardware */
1685
1686 if (!quiet)
1687 printf("%s: soft reset\n", xdcsc->sc_dev.dv_xname);
1688 xdcsc->xdc->xdc_csr = XDC_RESET;
1689 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET);
1690 if (del <= 0) {
1691 blastmode = XD_RSET_ALL; /* dead, flush all requests */
1692 retval = XD_ERR_FAIL;
1693 }
1694 if (xdsc)
1695 xdc_xdreset(xdcsc, xdsc);
1696
1697 /* fix queues based on "blast-mode" */
1698
1699 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
1700 iorq = &xdcsc->reqs[lcv];
1701
1702 if (XD_STATE(iorq->mode) != XD_SUB_POLL &&
1703 XD_STATE(iorq->mode) != XD_SUB_WAIT &&
1704 XD_STATE(iorq->mode) != XD_SUB_NORM)
1705 /* is it active? */
1706 continue;
1707
1708 xdcsc->nrun--; /* it isn't running any more */
1709 if (blastmode == XD_RSET_ALL || blastmode != lcv) {
1710 /* failed */
1711 iorq->errno = error;
1712 xdcsc->iopbase[lcv].done = xdcsc->iopbase[lcv].errs = 1;
1713 switch (XD_STATE(iorq->mode)) {
1714 case XD_SUB_NORM:
1715 iorq->buf->b_error = EIO;
1716 iorq->buf->b_flags |= B_ERROR;
1717 iorq->buf->b_resid =
1718 iorq->sectcnt * XDFM_BPS;
1719 /* Sun3: map/unmap regardless of B_PHYS */
1720 dvma_mapout(iorq->dbufbase,
1721 iorq->buf->b_bcount);
1722 disk_unbusy(&iorq->xd->sc_dk,
1723 (iorq->buf->b_bcount - iorq->buf->b_resid));
1724 biodone(iorq->buf);
1725 XDC_FREE(xdcsc, lcv); /* add to free list */
1726 break;
1727 case XD_SUB_WAIT:
1728 wakeup(iorq);
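				/* FALLTHROUGH */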
1729 case XD_SUB_POLL:
1730 xdcsc->ndone++;
1731 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
1732 break;
1733 }
1734
1735 } else {
1736
1737 /* resubmit, put at front of wait queue */
1738 XDC_HWAIT(xdcsc, lcv);
1739 }
1740 }
1741
1742 /*
1743 * now, if stuff is waiting, start it.
1744 * since we just reset it should go
1745 */
1746 xdc_start(xdcsc, XDC_MAXIOPB);
1747
1748 /* ok, we did it */
1749 if (oldfree == 0 && xdcsc->nfree)
1750 wakeup(&xdcsc->nfree);
1751
1752 #ifdef XDC_DIAG
1753 del = xdcsc->nwait + xdcsc->nrun + xdcsc->nfree + xdcsc->ndone;
1754 if (del != XDC_MAXIOPB)
1755 printf("%s: diag: xdc_reset miscount (%d should be %d)!\n",
1756 xdcsc->sc_dev.dv_xname, del, XDC_MAXIOPB);
1757 else
1758 if (xdcsc->ndone > XDC_MAXIOPB - XDC_SUBWAITLIM)
1759 printf("%s: diag: lots of done jobs (%d)\n",
1760 xdcsc->sc_dev.dv_xname, xdcsc->ndone);
1761 #endif
1762 printf("RESET DONE\n");
1763 return (retval);
1764 }
1765
1766 /*
1767 * xdc_start: start all waiting buffers
1768 */
1769 void
1770 xdc_start(xdcsc, maxio)
1771 struct xdc_softc *xdcsc;
1772 int maxio;
1773
1774 {
1775 int rqno;
1776 while (maxio && xdcsc->nwait &&
1777 (xdcsc->xdc->xdc_csr & XDC_ADDING) == 0) {
1778 XDC_GET_WAITER(xdcsc, rqno); /* note: rqno is an "out"
1779 * param */
1780 if (xdc_submit_iorq(xdcsc, rqno, XD_SUB_NOQ) != XD_ERR_AOK)
1781 panic("xdc_start"); /* should never happen */
1782 maxio--;
1783 }
1784 }
1785
1786 /*
1787 * xdc_remove_iorq: remove "done" IOPB's.
1788 */
1789 int
1790 xdc_remove_iorq(xdcsc)
1791 struct xdc_softc *xdcsc;
1792
1793 {
1794 int errno, rqno, comm, errs;
1795 struct xdc *xdc = xdcsc->xdc;
1796 struct xd_iopb *iopb;
1797 struct xd_iorq *iorq;
1798 struct buf *bp;
1799
1800 if (xdc->xdc_csr & XDC_F_ERROR) {
1801 /*
1802 * FATAL ERROR: should never happen under normal use. This
1803 * error is so bad, you can't even tell which IOPB is bad, so
1804 * we dump them all.
1805 */
1806 errno = xdc->xdc_f_err;
1807 printf("%s: fatal error 0x%02x: %s\n", xdcsc->sc_dev.dv_xname,
1808 errno, xdc_e2str(errno));
1809 if (xdc_reset(xdcsc, 0, XD_RSET_ALL, errno, 0) != XD_ERR_AOK) {
1810 printf("%s: soft reset failed!\n",
1811 xdcsc->sc_dev.dv_xname);
1812 panic("xdc_remove_iorq: controller DEAD");
1813 }
1814 return (XD_ERR_AOK);
1815 }
1816
1817 /*
1818 * get iopb that is done
1819 *
1820 * hmm... I used to read the address of the done IOPB off the VME
1821 * registers and calculate the rqno directly from that. that worked
1822 * until I started putting a load on the controller. when loaded, I
1823 * would get interrupts but neither the REMIOPB nor F_ERROR bits would
1824 * be set, even after DELAY'ing a while! later on the timeout
1825 * routine would detect IOPBs that were marked "running" but their
1826 * "done" bit was set. rather than dealing directly with this
1827 * problem, it is just easier to look at all running IOPB's for the
1828 * done bit.
1829 */
1830 if (xdc->xdc_csr & XDC_REMIOPB) {
1831 xdc->xdc_csr = XDC_CLRRIO;
1832 }
1833
1834 for (rqno = 0; rqno < XDC_MAXIOPB; rqno++) {
1835 iorq = &xdcsc->reqs[rqno];
1836 if (iorq->mode == 0 || XD_STATE(iorq->mode) == XD_SUB_DONE)
1837 continue; /* free, or done */
1838 iopb = &xdcsc->iopbase[rqno];
1839 if (iopb->done == 0)
1840 continue; /* not done yet */
1841
1842 #ifdef XDC_DEBUG
1843 {
1844 u_char *rio = (u_char *) iopb;
1845 int sz = sizeof(struct xd_iopb), lcv;
1846 printf("%s: rio #%d [", xdcsc->sc_dev.dv_xname, rqno);
1847 for (lcv = 0; lcv < sz; lcv++)
1848 printf(" %02x", rio[lcv]);
1849 printf("]\n");
1850 }
1851 #endif /* XDC_DEBUG */
1852
1853 xdcsc->nrun--;
1854
1855 comm = iopb->comm;
1856 errs = iopb->errs;
1857
1858 if (errs)
1859 iorq->errno = iopb->errno;
1860 else
1861 iorq->errno = 0;
1862
1863 /* handle non-fatal errors */
1864
1865 if (errs &&
1866 xdc_error(xdcsc, iorq, iopb, rqno, comm) == XD_ERR_AOK)
1867 continue; /* AOK: we resubmitted it */
1868
1869
1870 /* this iorq is now done (hasn't been restarted or anything) */
1871
1872 if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror)
1873 xdc_perror(iorq, iopb, 0);
1874
1875 /* now, if read/write check to make sure we got all the data
1876 * we needed. (this may not be the case if we got an error in
1877 * the middle of a multisector request). */
1878
1879 if ((iorq->mode & XD_MODE_B144) != 0 && errs == 0 &&
1880 (comm == XDCMD_RD || comm == XDCMD_WR)) {
1881 /* we just successfully processed a bad144 sector
1882 * note: if we are in bad 144 mode, the pointers have
1883 * been advanced already (see above) and are pointing
1884 * at the bad144 sector. to exit bad144 mode, we
1885 * must advance the pointers 1 sector and issue a new
1886 * request if there are still sectors left to process
1887 *
1888 */
1889 XDC_ADVANCE(iorq, 1); /* advance 1 sector */
1890
1891 /* exit b144 mode */
1892 iorq->mode = iorq->mode & (~XD_MODE_B144);
1893
1894 if (iorq->sectcnt) { /* more to go! */
1895 iorq->lasterror = iorq->errno = iopb->errno = 0;
1896 iopb->errs = iopb->done = 0;
1897 iorq->tries = 0;
1898 iopb->sectcnt = iorq->sectcnt;
1899 iopb->cylno = iorq->blockno /
1900 iorq->xd->sectpercyl;
1901 iopb->headno =
1902 (iorq->blockno / iorq->xd->nsect) %
1903 iorq->xd->nhead;
1904 iopb->sectno = iorq->blockno % iorq->xd->nsect;
1905 iopb->daddr =
1906 dvma_kvtopa(iorq->dbuf, xdcsc->bustype);
1907 XDC_HWAIT(xdcsc, rqno);
1908 xdc_start(xdcsc, 1); /* resubmit */
1909 continue;
1910 }
1911 }
1912 /* final cleanup, totally done with this request */
1913
1914 switch (XD_STATE(iorq->mode)) {
1915 case XD_SUB_NORM:
1916 bp = iorq->buf;
1917 if (errs) {
1918 bp->b_error = EIO;
1919 bp->b_flags |= B_ERROR;
1920 bp->b_resid = iorq->sectcnt * XDFM_BPS;
1921 } else {
1922 bp->b_resid = 0; /* done */
1923 }
1924 /* Sun3: map/unmap regardless of B_PHYS */
1925 dvma_mapout(iorq->dbufbase,
1926 iorq->buf->b_bcount);
1927 disk_unbusy(&iorq->xd->sc_dk,
1928 (bp->b_bcount - bp->b_resid));
1929 XDC_FREE(xdcsc, rqno);
1930 biodone(bp);
1931 break;
1932 case XD_SUB_WAIT:
1933 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
1934 xdcsc->ndone++;
1935 wakeup(iorq);
1936 break;
1937 case XD_SUB_POLL:
1938 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
1939 xdcsc->ndone++;
1940 break;
1941 }
1942 }
1943
1944 return (XD_ERR_AOK);
1945 }
1946
1947 /*
1948 * xdc_perror: print error.
1949 * - if still_trying is true: we got an error, retried and got a
1950 * different error. in that case lasterror is the old error,
1951 * and errno is the new one.
1952 * - if still_trying is not true, then if we ever had an error it
1953 * is in lasterror. also, if iorq->errno == 0, then we recovered
1954 * from that error (otherwise iorq->errno == iorq->lasterror).
1955 */
1956 void
1957 xdc_perror(iorq, iopb, still_trying)
1958 struct xd_iorq *iorq;
1959 struct xd_iopb *iopb;
1960 int still_trying;
1961
1962 {
1963
1964 int error = iorq->lasterror;
1965
1966 printf("%s", (iorq->xd) ?
1967 iorq->xd->sc_dev.dv_xname :
1968 iorq->xdc->sc_dev.dv_xname);
1969 if (iorq->buf)
1970 printf("%c: ", 'a' + DISKPART(iorq->buf->b_dev));
1971 if (iopb->comm == XDCMD_RD || iopb->comm == XDCMD_WR)
1972 printf("%s %d/%d/%d: ",
1973 (iopb->comm == XDCMD_RD) ? "read" : "write",
1974 iopb->cylno, iopb->headno, iopb->sectno);
1975 printf("%s", xdc_e2str(error));
1976
1977 if (still_trying)
1978 printf(" [still trying, new error=%s]", xdc_e2str(iorq->errno));
1979 else
1980 if (iorq->errno == 0)
1981 printf(" [recovered in %d tries]", iorq->tries);
1982
1983 printf("\n");
1984 }
1985
1986 /*
1987 * xdc_error: non-fatal error encountered... recover.
1988 * return AOK if resubmitted, return FAIL if this iopb is done
1989 */
1990 int
1991 xdc_error(xdcsc, iorq, iopb, rqno, comm)
1992 struct xdc_softc *xdcsc;
1993 struct xd_iorq *iorq;
1994 struct xd_iopb *iopb;
1995 int rqno, comm;
1996
1997 {
1998 int errno = iorq->errno;
1999 int erract = errno & XD_ERA_MASK;
2000 int oldmode, advance, i;
2001
2002 if (erract == XD_ERA_RSET) { /* some errors require a reset */
2003 oldmode = iorq->mode;
2004 iorq->mode = XD_SUB_DONE | (~XD_SUB_MASK & oldmode);
2005 xdcsc->ndone++;
2006 /* make xdc_start ignore us */
2007 xdc_reset(xdcsc, 1, XD_RSET_NONE, errno, iorq->xd);
2008 iorq->mode = oldmode;
2009 xdcsc->ndone--;
2010 }
2011 	/* check for a read/write to a sector in the bad144 table; if it is
2012 	 * bad, redirect the request to the bad144 area */
2013
2014 if ((comm == XDCMD_RD || comm == XDCMD_WR) &&
2015 (iorq->mode & XD_MODE_B144) == 0) {
2016 advance = iorq->sectcnt - iopb->sectcnt;
2017 XDC_ADVANCE(iorq, advance);
2018 if ((i = isbad(&iorq->xd->dkb, iorq->blockno / iorq->xd->sectpercyl,
2019 (iorq->blockno / iorq->xd->nsect) % iorq->xd->nhead,
2020 iorq->blockno % iorq->xd->nsect)) != -1) {
2021 iorq->mode |= XD_MODE_B144; /* enter bad144 mode &
2022 * redirect */
2023 iopb->errno = iopb->done = iopb->errs = 0;
2024 iopb->sectcnt = 1;
2025 iopb->cylno = (iorq->xd->ncyl + iorq->xd->acyl) - 2;
2026 /* second to last acyl */
2027 i = iorq->xd->sectpercyl - 1 - i; /* follow bad144
2028 * standard */
2029 			iopb->headno = i / iorq->xd->nsect;
2030 			iopb->sectno = i % iorq->xd->nsect;
2031 XDC_HWAIT(xdcsc, rqno);
2032 xdc_start(xdcsc, 1); /* resubmit */
2033 return (XD_ERR_AOK); /* recovered! */
2034 }
2035 }
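	/*
	 * Worked example of the redirection above (hypothetical geometry:
	 * nhead = 19, nsect = 54, sectpercyl = 1026, ncyl = 815, acyl = 2):
	 * the replacement area is cylinder 815 + 2 - 2 = 815, and bad144
	 * hands out replacement sectors from the end of that cylinder
	 * backwards, so isbad() returning 0 maps to in-cylinder sector
	 * 1026 - 1 - 0 = 1025 (the very last one), 1 maps to 1024, and so on.
	 */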
2036
2037 	/*
2038 	 * it isn't a bad144 sector, so it must be a real error!   see if
2039 	 * we can retry it.
2040 	 */
2041 if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror)
2042 xdc_perror(iorq, iopb, 1); /* inform of error state
2043 * change */
2044 iorq->lasterror = errno;
2045
2046 if ((erract == XD_ERA_RSET || erract == XD_ERA_HARD)
2047 && iorq->tries < XDC_MAXTRIES) { /* retry? */
2048 iorq->tries++;
2049 iorq->errno = iopb->errno = iopb->done = iopb->errs = 0;
2050 XDC_HWAIT(xdcsc, rqno);
2051 xdc_start(xdcsc, 1); /* restart */
2052 return (XD_ERR_AOK); /* recovered! */
2053 }
2054
2055 /* failed to recover from this error */
2056 return (XD_ERR_FAIL);
2057 }
2058
2059 /*
2060 * xdc_tick: make sure xd is still alive and ticking (err, kicking).
2061 */
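/*
 * Illustrative arithmetic (not from the source): a fresh request starts
 * with some time-to-live TTL, and xdc_tick() reschedules itself every
 * XDC_TICKCNT clock ticks (see the timeout() call at the bottom), so a
 * request that never completes trips the watchdog reset after roughly
 * TTL * XDC_TICKCNT / hz seconds.
 */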
2062 void
2063 xdc_tick(arg)
2064 void *arg;
2065
2066 {
2067 struct xdc_softc *xdcsc = arg;
2068 int lcv, s, reset = 0;
2069 #ifdef XDC_DIAG
2070 int wait, run, free, done, whd = 0;
2071 u_char fqc[XDC_MAXIOPB], wqc[XDC_MAXIOPB], mark[XDC_MAXIOPB];
2072 s = splbio();
2073 wait = xdcsc->nwait;
2074 run = xdcsc->nrun;
2075 free = xdcsc->nfree;
2076 done = xdcsc->ndone;
2077 bcopy(xdcsc->waitq, wqc, sizeof(wqc));
2078 bcopy(xdcsc->freereq, fqc, sizeof(fqc));
2079 splx(s);
2080 if (wait + run + free + done != XDC_MAXIOPB) {
2081 printf("%s: diag: IOPB miscount (got w/f/r/d %d/%d/%d/%d, wanted %d)\n",
2082 xdcsc->sc_dev.dv_xname, wait, free, run, done, XDC_MAXIOPB);
2083 bzero(mark, sizeof(mark));
2084 printf("FREE: ");
2085 for (lcv = free; lcv > 0; lcv--) {
2086 printf("%d ", fqc[lcv - 1]);
2087 mark[fqc[lcv - 1]] = 1;
2088 }
2089 printf("\nWAIT: ");
2090 lcv = wait;
2091 while (lcv > 0) {
2092 printf("%d ", wqc[whd]);
2093 mark[wqc[whd]] = 1;
2094 whd = (whd + 1) % XDC_MAXIOPB;
2095 lcv--;
2096 }
2097 printf("\n");
2098 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
2099 if (mark[lcv] == 0)
2100 printf("MARK: running %d: mode %d done %d errs %d errno 0x%x ttl %d buf %p\n",
2101 lcv, xdcsc->reqs[lcv].mode,
2102 xdcsc->iopbase[lcv].done,
2103 xdcsc->iopbase[lcv].errs,
2104 xdcsc->iopbase[lcv].errno,
2105 xdcsc->reqs[lcv].ttl,
2106 xdcsc->reqs[lcv].buf);
2107 }
2108 } else
2109 if (done > XDC_MAXIOPB - XDC_SUBWAITLIM)
2110 printf("%s: diag: lots of done jobs (%d)\n",
2111 xdcsc->sc_dev.dv_xname, done);
2112
2113 #endif
2114 #ifdef XDC_DEBUG
2115 printf("%s: tick: csr 0x%x, w/f/r/d %d/%d/%d/%d\n",
2116 xdcsc->sc_dev.dv_xname,
2117 xdcsc->xdc->xdc_csr, xdcsc->nwait, xdcsc->nfree, xdcsc->nrun,
2118 xdcsc->ndone);
2119 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
2120 if (xdcsc->reqs[lcv].mode)
2121 printf("running %d: mode %d done %d errs %d errno 0x%x\n",
2122 lcv,
2123 xdcsc->reqs[lcv].mode, xdcsc->iopbase[lcv].done,
2124 xdcsc->iopbase[lcv].errs, xdcsc->iopbase[lcv].errno);
2125 }
2126 #endif
2127
2128 	/* reduce ttl for each request; if one reaches zero, reset the xdc */
2129 s = splbio();
2130 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
2131 if (xdcsc->reqs[lcv].mode == 0 ||
2132 XD_STATE(xdcsc->reqs[lcv].mode) == XD_SUB_DONE)
2133 continue;
2134 xdcsc->reqs[lcv].ttl--;
2135 if (xdcsc->reqs[lcv].ttl == 0)
2136 reset = 1;
2137 }
2138 if (reset) {
2139 printf("%s: watchdog timeout\n", xdcsc->sc_dev.dv_xname);
2140 xdc_reset(xdcsc, 0, XD_RSET_NONE, XD_ERR_FAIL, NULL);
2141 }
2142 splx(s);
2143
2144 /* until next time */
2145
2146 timeout(xdc_tick, xdcsc, XDC_TICKCNT);
2147 }
2148
2149 /*
2150 * xdc_ioctlcmd: this function provides a user level interface to the
2151 * controller via ioctl. this allows "format" programs to be written
2152 * in user code, and is also useful for some debugging. we return
2153 * an error code. called at user priority.
2154 */
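/*
 * Illustrative only (not part of the driver): a minimal userland sketch of
 * how a format-type program might read one sector through this interface.
 * The DIOSXDCMD ioctl name, the /dev/rxd0c path and the error handling are
 * assumptions for the example; the xd_iocmd fields match their use below.
 *
 *	struct xd_iocmd xio;
 *	char buf[XDFM_BPS];
 *	int fd = open("/dev/rxd0c", O_RDWR);
 *
 *	bzero(&xio, sizeof(xio));
 *	xio.cmd = XDCMD_RD;			(read sectors)
 *	xio.block = 0;				(starting block number)
 *	xio.sectcnt = 1;			(at most XD_IOCMD_MAXS)
 *	xio.dlen = 1 * XDFM_BPS;		(must be sectcnt * XDFM_BPS)
 *	xio.dptr = buf;
 *	if (ioctl(fd, DIOSXDCMD, &xio) < 0)
 *		err(1, "DIOSXDCMD");
 *	printf("errno=%d tries=%d\n", xio.errno, xio.tries);
 */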
2155 int
2156 xdc_ioctlcmd(xd, dev, xio)
2157 struct xd_softc *xd;
2158 dev_t dev;
2159 struct xd_iocmd *xio;
2160
2161 {
2162 int s, err, rqno;
2163 caddr_t dvmabuf = NULL;
2164 struct xdc_softc *xdcsc;
2165
2166 /* check sanity of requested command */
2167
2168 switch (xio->cmd) {
2169
2170 case XDCMD_NOP: /* no op: everything should be zero */
2171 if (xio->subfn || xio->dptr || xio->dlen ||
2172 xio->block || xio->sectcnt)
2173 return (EINVAL);
2174 break;
2175
2176 case XDCMD_RD: /* read / write sectors (up to XD_IOCMD_MAXS) */
2177 case XDCMD_WR:
2178 if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS ||
2179 xio->sectcnt * XDFM_BPS != xio->dlen || xio->dptr == NULL)
2180 return (EINVAL);
2181 break;
2182
2183 case XDCMD_SK: /* seek: doesn't seem useful to export this */
2184 return (EINVAL);
2185
2186 case XDCMD_WRP: /* write parameters */
2187 return (EINVAL);/* not useful, except maybe drive
2188 * parameters... but drive parameters should
2189 * go via disklabel changes */
2190
2191 case XDCMD_RDP: /* read parameters */
2192 if (xio->subfn != XDFUN_DRV ||
2193 xio->dlen || xio->block || xio->dptr)
2194 return (EINVAL); /* allow read drive params to
2195 * get hw_spt */
2196 		xio->sectcnt = xd->hw_spt;	/* we already know the answer */
2197 		return (0);
2199
2200 case XDCMD_XRD: /* extended read/write */
2201 case XDCMD_XWR:
2202
2203 switch (xio->subfn) {
2204
2205 case XDFUN_THD:/* track headers */
2206 if (xio->sectcnt != xd->hw_spt ||
2207 (xio->block % xd->nsect) != 0 ||
2208 xio->dlen != XD_IOCMD_HSZ * xd->hw_spt ||
2209 xio->dptr == NULL)
2210 return (EINVAL);
2211 xio->sectcnt = 0;
2212 break;
2213
2214 case XDFUN_FMT:/* NOTE: also XDFUN_VFY */
2215 if (xio->cmd == XDCMD_XRD)
2216 return (EINVAL); /* no XDFUN_VFY */
2217 if (xio->sectcnt || xio->dlen ||
2218 (xio->block % xd->nsect) != 0 || xio->dptr)
2219 return (EINVAL);
2220 break;
2221
2222 case XDFUN_HDR:/* header, header verify, data, data ECC */
2223 return (EINVAL); /* not yet */
2224
2225 case XDFUN_DM: /* defect map */
2226 case XDFUN_DMX:/* defect map (alternate location) */
2227 if (xio->sectcnt || xio->dlen != XD_IOCMD_DMSZ ||
2228 (xio->block % xd->nsect) != 0 || xio->dptr == NULL)
2229 return (EINVAL);
2230 break;
2231
2232 default:
2233 return (EINVAL);
2234 }
2235 break;
2236
2237 case XDCMD_TST: /* diagnostics */
2238 return (EINVAL);
2239
2240 default:
2241 return (EINVAL);/* ??? */
2242 }
2243
2244 /* create DVMA buffer for request if needed */
2245
2246 if (xio->dlen) {
2247 dvmabuf = dvma_malloc(xio->dlen);
2248 if (xio->cmd == XDCMD_WR || xio->cmd == XDCMD_XWR) {
2249 err = copyin(xio->dptr, dvmabuf, xio->dlen);
2250 if (err) {
2251 dvma_free(dvmabuf, xio->dlen);
2252 return (err);
2253 }
2254 }
2255 }
2256 /* do it! */
2257
2258 err = 0;
2259 xdcsc = xd->parent;
2260 s = splbio();
2261 rqno = xdc_cmd(xdcsc, xio->cmd, xio->subfn, xd->xd_drive, xio->block,
2262 xio->sectcnt, dvmabuf, XD_SUB_WAIT);
2263 if (rqno == XD_ERR_FAIL) {
2264 err = EIO;
2265 goto done;
2266 }
2267 xio->errno = xdcsc->reqs[rqno].errno;
2268 xio->tries = xdcsc->reqs[rqno].tries;
2269 XDC_DONE(xdcsc, rqno, err);
2270
2271 if (xio->cmd == XDCMD_RD || xio->cmd == XDCMD_XRD)
2272 err = copyout(dvmabuf, xio->dptr, xio->dlen);
2273
2274 done:
2275 splx(s);
2276 if (dvmabuf)
2277 dvma_free(dvmabuf, xio->dlen);
2278 return (err);
2279 }
2280
2281 /*
2282 * xdc_e2str: convert error code number into an error string
2283 */
2284 char *
2285 xdc_e2str(no)
2286 int no;
2287 {
2288 switch (no) {
2289 case XD_ERR_FAIL:
2290 return ("Software fatal error");
2291 case XD_ERR_AOK:
2292 return ("Successful completion");
2293 case XD_ERR_ICYL:
2294 return ("Illegal cylinder address");
2295 case XD_ERR_IHD:
2296 return ("Illegal head address");
2297 case XD_ERR_ISEC:
2298 return ("Illgal sector address");
2299 case XD_ERR_CZER:
2300 return ("Count zero");
2301 case XD_ERR_UIMP:
2302 return ("Unimplemented command");
2303 case XD_ERR_IF1:
2304 return ("Illegal field length 1");
2305 case XD_ERR_IF2:
2306 return ("Illegal field length 2");
2307 case XD_ERR_IF3:
2308 return ("Illegal field length 3");
2309 case XD_ERR_IF4:
2310 return ("Illegal field length 4");
2311 case XD_ERR_IF5:
2312 return ("Illegal field length 5");
2313 case XD_ERR_IF6:
2314 return ("Illegal field length 6");
2315 case XD_ERR_IF7:
2316 return ("Illegal field length 7");
2317 case XD_ERR_ISG:
2318 return ("Illegal scatter/gather length");
2319 case XD_ERR_ISPT:
2320 return ("Not enough sectors per track");
2321 case XD_ERR_ALGN:
2322 return ("Next IOPB address alignment error");
2323 case XD_ERR_SGAL:
2324 return ("Scatter/gather address alignment error");
2325 case XD_ERR_SGEC:
2326 return ("Scatter/gather with auto-ECC");
2327 case XD_ERR_SECC:
2328 return ("Soft ECC corrected");
2329 case XD_ERR_SIGN:
2330 return ("ECC ignored");
2331 case XD_ERR_ASEK:
2332 return ("Auto-seek retry recovered");
2333 case XD_ERR_RTRY:
2334 return ("Soft retry recovered");
2335 case XD_ERR_HECC:
2336 return ("Hard data ECC");
2337 case XD_ERR_NHDR:
2338 return ("Header not found");
2339 case XD_ERR_NRDY:
2340 return ("Drive not ready");
2341 case XD_ERR_TOUT:
2342 return ("Operation timeout");
2343 case XD_ERR_VTIM:
2344 return ("VMEDMA timeout");
2345 case XD_ERR_DSEQ:
2346 return ("Disk sequencer error");
2347 case XD_ERR_HDEC:
2348 return ("Header ECC error");
2349 case XD_ERR_RVFY:
2350 return ("Read verify");
2351 case XD_ERR_VFER:
2352 return ("Fatail VMEDMA error");
2353 case XD_ERR_VBUS:
2354 return ("VMEbus error");
2355 case XD_ERR_DFLT:
2356 return ("Drive faulted");
2357 case XD_ERR_HECY:
2358 return ("Header error/cyliner");
2359 case XD_ERR_HEHD:
2360 return ("Header error/head");
2361 case XD_ERR_NOCY:
2362 return ("Drive not on-cylinder");
2363 case XD_ERR_SEEK:
2364 return ("Seek error");
2365 case XD_ERR_ILSS:
2366 return ("Illegal sector size");
2367 case XD_ERR_SEC:
2368 return ("Soft ECC");
2369 case XD_ERR_WPER:
2370 return ("Write-protect error");
2371 case XD_ERR_IRAM:
2372 return ("IRAM self test failure");
2373 case XD_ERR_MT3:
2374 return ("Maintenance test 3 failure (DSKCEL RAM)");
2375 case XD_ERR_MT4:
2376 return ("Maintenance test 4 failure (header shift reg)");
2377 case XD_ERR_MT5:
2378 return ("Maintenance test 5 failure (VMEDMA regs)");
2379 case XD_ERR_MT6:
2380 return ("Maintenance test 6 failure (REGCEL chip)");
2381 case XD_ERR_MT7:
2382 return ("Maintenance test 7 failure (buffer parity)");
2383 case XD_ERR_MT8:
2384 return ("Maintenance test 8 failure (disk FIFO)");
2385 case XD_ERR_IOCK:
2386 return ("IOPB checksum miscompare");
2387 case XD_ERR_IODM:
2388 return ("IOPB DMA fatal");
2389 case XD_ERR_IOAL:
2390 return ("IOPB address alignment error");
2391 case XD_ERR_FIRM:
2392 return ("Firmware error");
2393 case XD_ERR_MMOD:
2394 return ("Illegal maintenance mode test number");
2395 case XD_ERR_ACFL:
2396 return ("ACFAIL asserted");
2397 default:
2398 return ("Unknown error");
2399 }
2400 }
2401