1 /*	$NetBSD: xd.c,v 1.13 1998/06/20 13:12:54 mrg Exp $	*/
2
3 /*
4 *
5 * Copyright (c) 1995 Charles D. Cranor
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Charles D. Cranor.
19 * 4. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 *
36 * x d . c x y l o g i c s 7 5 3 / 7 0 5 3 v m e / s m d d r i v e r
37 *
38 * author: Chuck Cranor <chuck (at) ccrc.wustl.edu>
39 * started: 27-Feb-95
40 * references: [1] Xylogics Model 753 User's Manual
41 * part number: 166-753-001, Revision B, May 21, 1988.
42 * "Your Partner For Performance"
43 * [2] other NetBSD disk device drivers
44 *
45 * Special thanks go to Scott E. Campbell of Xylogics, Inc. for taking
46 * the time to answer some of my questions about the 753/7053.
47 *
48 * note: the 753 and the 7053 are programmed the same way, but are
49 * different sizes. the 753 is a 6U VME card, while the 7053 is a 9U
50 * VME card (found in many VME based suns).
51 */
52
53 #undef XDC_DEBUG /* full debug */
54 #define XDC_DIAG /* extra sanity checks */
55 #if defined(DIAGNOSTIC) && !defined(XDC_DIAG)
56 #define XDC_DIAG /* link in with master DIAG option */
57 #endif
58
59 #include <sys/param.h>
60 #include <sys/proc.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/file.h>
64 #include <sys/stat.h>
65 #include <sys/ioctl.h>
66 #include <sys/buf.h>
67 #include <sys/uio.h>
68 #include <sys/malloc.h>
69 #include <sys/device.h>
70 #include <sys/disklabel.h>
71 #include <sys/disk.h>
72 #include <sys/syslog.h>
73 #include <sys/dkbad.h>
74 #include <sys/conf.h>
75
76 #include <vm/vm.h>
77 #include <vm/vm_kern.h>
78
79 #include <machine/autoconf.h>
80 #include <machine/bus.h>
81 #include <machine/conf.h>
82
83 #include <dev/sun/disklabel.h>
84
85 #include <dev/vme/vmevar.h>
86
87 #include <dev/vme/xdreg.h>
88 #include <dev/vme/xdvar.h>
89 #include <dev/vme/xio.h>
90
91 #include "locators.h"
92
93 /*
94 * macros
95 */
96
97 /*
98 * XDC_TWAIT: add iorq "N" to tail of SC's wait queue
99 */
100 #define XDC_TWAIT(SC, N) { \
101 (SC)->waitq[(SC)->waitend] = (N); \
102 (SC)->waitend = ((SC)->waitend + 1) % XDC_MAXIOPB; \
103 (SC)->nwait++; \
104 }
105
106 /*
107 * XDC_HWAIT: add iorq "N" to head of SC's wait queue
108 */
109 #define XDC_HWAIT(SC, N) { \
110 (SC)->waithead = ((SC)->waithead == 0) ? \
111 (XDC_MAXIOPB - 1) : ((SC)->waithead - 1); \
112 (SC)->waitq[(SC)->waithead] = (N); \
113 (SC)->nwait++; \
114 }
115
116 /*
117 * XDC_GET_WAITER: gets the first request waiting on the waitq
118 * and removes it (so it can be submitted)
119 */
120 #define XDC_GET_WAITER(XDCSC, RQ) { \
121 (RQ) = (XDCSC)->waitq[(XDCSC)->waithead]; \
122 (XDCSC)->waithead = ((XDCSC)->waithead + 1) % XDC_MAXIOPB; \
123 	(XDCSC)->nwait--; \
124 }
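
/*
 * note on the wait queue: it is a circular buffer of iorq numbers.
 * "waithead" indexes the next request to hand out, "waitend" the next
 * free slot, and "nwait" counts the entries; both indexes wrap modulo
 * XDC_MAXIOPB.  XDC_TWAIT appends at waitend, XDC_HWAIT prepends by
 * stepping waithead back one slot (wrapping to XDC_MAXIOPB - 1), and
 * XDC_GET_WAITER pops from waithead.
 */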
125
126 /*
127 * XDC_FREE: add iorq "N" to SC's free list
128 */
129 #define XDC_FREE(SC, N) { \
130 (SC)->freereq[(SC)->nfree++] = (N); \
131 (SC)->reqs[N].mode = 0; \
132 if ((SC)->nfree == 1) wakeup(&(SC)->nfree); \
133 }
134
135
136 /*
137 * XDC_RQALLOC: allocate an iorq off the free list (assume nfree > 0).
138 */
139 #define XDC_RQALLOC(XDCSC) (XDCSC)->freereq[--((XDCSC)->nfree)]
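
/*
 * the free list is a simple stack: XDC_FREE pushes a request number onto
 * freereq[] and bumps nfree, XDC_RQALLOC pops one off the top.  the
 * wakeup in XDC_FREE fires on the 0 -> 1 transition of nfree so that
 * anyone sleeping on &nfree (see xdc_cmd) gets a shot at the freed iorq.
 */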
140
141 /*
142 * XDC_GO: start iopb ADDR (DVMA addr in a u_long) on XDC
143 */
144 #define XDC_GO(XDC, ADDR) { \
145 (XDC)->xdc_iopbaddr0 = ((ADDR) & 0xff); \
146 (ADDR) = ((ADDR) >> 8); \
147 (XDC)->xdc_iopbaddr1 = ((ADDR) & 0xff); \
148 (ADDR) = ((ADDR) >> 8); \
149 (XDC)->xdc_iopbaddr2 = ((ADDR) & 0xff); \
150 (ADDR) = ((ADDR) >> 8); \
151 (XDC)->xdc_iopbaddr3 = (ADDR); \
152 (XDC)->xdc_iopbamod = XDC_ADDRMOD; \
153 (XDC)->xdc_csr = XDC_ADDIOPB; /* go! */ \
154 }
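
/*
 * XDC_GO is handed the DVMA address of an iopb in a u_long scratch
 * variable; the macro shifts that variable as it goes, so the caller's
 * copy is consumed.  typical use (see xdc_submit_iorq and xdc_xdreset):
 *
 *	u_long addr = (u_long) iorq->dmaiopb;
 *	XDC_GO(xdcsc->xdc, addr);
 *
 * the address is loaded low byte first into iopbaddr0..3, then the VME
 * address modifier and the "add iopb" command are written to the csr.
 */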
155
156 /*
157 * XDC_WAIT: wait for XDC's csr "BITS" to come on in "TIME".
158 * LCV is a counter. If it goes to zero then we timed out.
159 */
160 #define XDC_WAIT(XDC, LCV, TIME, BITS) { \
161 (LCV) = (TIME); \
162 while ((LCV) > 0) { \
163 if ((XDC)->xdc_csr & (BITS)) break; \
164 (LCV) = (LCV) - 1; \
165 DELAY(1); \
166 } \
167 }
168
169 /*
170 * XDC_DONE: don't need IORQ, get error code and free (done after xdc_cmd)
171 */
172 #define XDC_DONE(SC,RQ,ER) { \
173 if ((RQ) == XD_ERR_FAIL) { \
174 (ER) = (RQ); \
175 } else { \
176 if ((SC)->ndone-- == XDC_SUBWAITLIM) \
177 wakeup(&(SC)->ndone); \
178 (ER) = (SC)->reqs[RQ].errno; \
179 XDC_FREE((SC), (RQ)); \
180 } \
181 }
182
183 /*
184 * XDC_ADVANCE: advance iorq's pointers by a number of sectors
185 */
186 #define XDC_ADVANCE(IORQ, N) { \
187 if (N) { \
188 (IORQ)->sectcnt -= (N); \
189 (IORQ)->blockno += (N); \
190 (IORQ)->dbuf += ((N)*XDFM_BPS); \
191 } \
192 }
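
/*
 * XDC_ADVANCE is used after a partial transfer (or a bad144 redirect) to
 * step an iorq forward: sectcnt drops by N, blockno moves up by N, and
 * the DVMA data pointer advances by N * XDFM_BPS bytes.
 */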
193
194 /*
195 * note - addresses you can sleep on:
196 * [1] & of xd_softc's "state" (waiting for a chance to attach a drive)
197 * [2] & of xdc_softc's "nfree" (waiting for a free iorq/iopb)
198 * [3] & of xdc_softc's "ndone" (waiting for number of done iorq/iopb's
199 * to drop below XDC_SUBWAITLIM)
200 * [4] & an iorq (waiting for an XD_SUB_WAIT iorq to finish)
201 */
202
203
204 /*
205 * function prototypes
206 * "xdc_*" functions are internal, all others are external interfaces
207 */
208
209 extern int pil_to_vme[]; /* from obio.c */
210
211 /* internals */
212 int xdc_cmd __P((struct xdc_softc *, int, int, int, int, int, char *, int));
213 char *xdc_e2str __P((int));
214 int xdc_error __P((struct xdc_softc *, struct xd_iorq *,
215 struct xd_iopb *, int, int));
216 int xdc_ioctlcmd __P((struct xd_softc *, dev_t dev, struct xd_iocmd *));
217 void xdc_perror __P((struct xd_iorq *, struct xd_iopb *, int));
218 int xdc_piodriver __P((struct xdc_softc *, int, int));
219 int xdc_remove_iorq __P((struct xdc_softc *));
220 int xdc_reset __P((struct xdc_softc *, int, int, int, struct xd_softc *));
221 inline void xdc_rqinit __P((struct xd_iorq *, struct xdc_softc *,
222 struct xd_softc *, int, u_long, int,
223 caddr_t, struct buf *));
224 void xdc_rqtopb __P((struct xd_iorq *, struct xd_iopb *, int, int));
225 void xdc_start __P((struct xdc_softc *, int));
226 int xdc_startbuf __P((struct xdc_softc *, struct xd_softc *, struct buf *));
227 int xdc_submit_iorq __P((struct xdc_softc *, int, int));
228 void xdc_tick __P((void *));
229 void xdc_xdreset __P((struct xdc_softc *, struct xd_softc *));
230
231 /* machine interrupt hook */
232 int xdcintr __P((void *));
233
234 /* autoconf */
235 int xdcmatch __P((struct device *, struct cfdata *, void *));
236 void xdcattach __P((struct device *, struct device *, void *));
237 int xdmatch __P((struct device *, struct cfdata *, void *));
238 void xdattach __P((struct device *, struct device *, void *));
239 static int xdc_probe __P((void *, void *));
240
241 static void xddummystrat __P((struct buf *));
242 int xdgetdisklabel __P((struct xd_softc *, void *));
243
244 /* XXX - think about this more.. xd_machdep? */
245 void md_setup __P((void));
246 int XDC_DELAY;
247 #ifdef sparc
248 #include <sparc/sparc/vaddrs.h>
249 #include <sparc/sparc/cpuvar.h>
250 void md_setup()
251 {
252 if (CPU_ISSUN4 && cpuinfo.cpu_type == CPUTYP_4_300)
253 XDC_DELAY = XDC_DELAY_4_300;
254 else
255 XDC_DELAY = XDC_DELAY_SPARC;
256 }
257 #endif
258 #ifdef sun3
259 void md_setup()
260 {
261 XDC_DELAY = XDC_DELAY_SUN3;
262 }
263 #endif
264 /*
265 * cfattach's: device driver interface to autoconfig
266 */
267
268 struct cfattach xdc_ca = {
269 sizeof(struct xdc_softc), xdcmatch, xdcattach
270 };
271
272
273 struct cfattach xd_ca = {
274 sizeof(struct xd_softc), xdmatch, xdattach
275 };
276
277 extern struct cfdriver xd_cd;
278
279 struct xdc_attach_args { /* this is the "aux" args to xdattach */
280 int driveno; /* unit number */
281 int fullmode; /* submit mode */
282 int booting; /* are we booting or not? */
283 };
284
285 /*
286 * dkdriver
287 */
288
289 struct dkdriver xddkdriver = {xdstrategy};
290
291 /*
292 * start: disk label fix code (XXX)
293 */
294
295 static void *xd_labeldata;
296
297 static void
298 xddummystrat(bp)
299 struct buf *bp;
300 {
301 if (bp->b_bcount != XDFM_BPS)
302 panic("xddummystrat");
303 bcopy(xd_labeldata, bp->b_un.b_addr, XDFM_BPS);
304 bp->b_flags |= B_DONE;
305 bp->b_flags &= ~B_BUSY;
306 }
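
/*
 * the point of the dummy strategy: readdisklabel() wants a strategy
 * routine it can call to fetch the label sector, but xdattach has
 * already read that sector into memory.  xddummystrat just copies the
 * cached copy from xd_labeldata and marks the buf done, so the generic
 * label parsing code can be reused without another controller command.
 */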
307
308 int
309 xdgetdisklabel(xd, b)
310 struct xd_softc *xd;
311 void *b;
312 {
313 char *err;
314 struct sun_disklabel *sdl;
315
316 /* We already have the label data in `b'; setup for dummy strategy */
317 xd_labeldata = b;
318
319 /* Required parameter for readdisklabel() */
320 xd->sc_dk.dk_label->d_secsize = XDFM_BPS;
321
322 err = readdisklabel(MAKEDISKDEV(0, xd->sc_dev.dv_unit, RAW_PART),
323 xddummystrat,
324 xd->sc_dk.dk_label, xd->sc_dk.dk_cpulabel);
325 if (err) {
326 printf("%s: %s\n", xd->sc_dev.dv_xname, err);
327 return(XD_ERR_FAIL);
328 }
329
330 /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */
331 sdl = (struct sun_disklabel *)xd->sc_dk.dk_cpulabel->cd_block;
332 if (sdl->sl_magic == SUN_DKMAGIC)
333 xd->pcyl = sdl->sl_pcylinders;
334 else {
335 printf("%s: WARNING: no `pcyl' in disk label.\n",
336 xd->sc_dev.dv_xname);
337 xd->pcyl = xd->sc_dk.dk_label->d_ncylinders +
338 xd->sc_dk.dk_label->d_acylinders;
339 printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n",
340 xd->sc_dev.dv_xname, xd->pcyl);
341 }
342
343 xd->ncyl = xd->sc_dk.dk_label->d_ncylinders;
344 xd->acyl = xd->sc_dk.dk_label->d_acylinders;
345 xd->nhead = xd->sc_dk.dk_label->d_ntracks;
346 xd->nsect = xd->sc_dk.dk_label->d_nsectors;
347 xd->sectpercyl = xd->nhead * xd->nsect;
348 xd->sc_dk.dk_label->d_secsize = XDFM_BPS; /* not handled by
349 * sun->bsd */
350 return(XD_ERR_AOK);
351 }
352
353 /*
354 * end: disk label fix code (XXX)
355 */
356
357 /*
358 * a u t o c o n f i g f u n c t i o n s
359 */
360
361 /*
362 * xdcmatch: determine if xdc is present or not. we do a
363 * soft reset to detect the xdc.
364 */
365
366 int
367 xdc_probe(vaddr, arg)
368 void *vaddr;
369 void *arg;
370 {
371 struct xdc *xdc = vaddr;
372 int del = 0;
373
374 xdc->xdc_csr = XDC_RESET;
375 XDC_WAIT(xdc, del, XDC_RESETUSEC, XDC_RESET);
376 return (del > 0);
377 }
378
379 int xdcmatch(parent, cf, aux)
380 struct device *parent;
381 struct cfdata *cf;
382 void *aux;
383 {
384 struct vme_attach_args *va = aux;
385 vme_chipset_tag_t ct = va->vma_chipset_tag;
386 bus_space_tag_t bt = va->vma_bustag;
387 vme_mod_t mod;
388
389 mod = VMEMOD_A16 | VMEMOD_S | VMEMOD_D | VMEMOD_D32;
390 return (vme_bus_probe(ct, bt, va->vma_reg[0],
391 offsetof(struct xdc, xdc_csr), 1,
392 mod, xdc_probe, 0));
393 }
394
395 /*
396 * xdcattach: attach controller
397 */
398 void
399 xdcattach(parent, self, aux)
400 struct device *parent, *self;
401 void *aux;
402
403 {
404 struct vme_attach_args *va = aux;
405 vme_chipset_tag_t ct = va->vma_chipset_tag;
406 bus_space_tag_t bt = va->vma_bustag;
407 bus_space_handle_t bh;
408 vme_intr_handle_t ih;
409 vme_mod_t mod;
410 struct xdc_softc *xdc = (void *) self;
411 struct xdc_attach_args xa;
412 int lcv, rqno, error;
413 struct xd_iopb_ctrl *ctl;
414 bus_dma_segment_t seg;
415 int rseg;
416
417 md_setup();
418
419 /* get addressing and intr level stuff from autoconfig and load it
420 * into our xdc_softc. */
421
422 xdc->dmatag = va->vma_dmatag;
423 mod = VMEMOD_A16 | VMEMOD_S | VMEMOD_D | VMEMOD_D32;
424
425 if (vme_bus_map(ct, va->vma_reg[0], sizeof(struct xdc),
426 mod, bt, &bh) != 0)
427 panic("xdc: vme_map");
428
429 xdc->xdc = (struct xdc *) bh;
430 xdc->ipl = va->vma_pri;
431 xdc->vector = va->vma_vec;
432
433 for (lcv = 0; lcv < XDC_MAXDEV; lcv++)
434 xdc->sc_drives[lcv] = (struct xd_softc *) 0;
435
436 /* allocate and zero buffers
437 *
438 * note: we simplify the code by allocating the max number of iopbs and
439 * iorq's up front. thus, we avoid linked lists and the costs
440 * associated with them in exchange for wasting a little memory. */
441
442 error = bus_dmamem_alloc(xdc->dmatag,
443 XDC_MAXIOPB * sizeof(struct xd_iopb),
444 NBPG, 0,
445 &seg, 1, &rseg, BUS_DMA_NOWAIT);
446 if (error) {
447 printf("%s: DMA buffer alloc error %d\n",
448 xdc->sc_dev.dv_xname, error);
449 return;
450 }
451 xdc->dvmaiopb = (struct xd_iopb *)seg.ds_addr;
452
453 error = bus_dmamem_map(xdc->dmatag, &seg, rseg,
454 XDC_MAXIOPB * sizeof(struct xd_iopb),
455 (caddr_t *)&xdc->iopbase,
456 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
457 if (error) {
458 bus_dmamem_free(xdc->dmatag, &seg, rseg);
459 printf("%s: DMA buffer map error %d\n",
460 xdc->sc_dev.dv_xname, error);
461 return;
462 }
463 bzero(xdc->iopbase, XDC_MAXIOPB * sizeof(struct xd_iopb));
464
465 xdc->reqs = (struct xd_iorq *)
466 malloc(XDC_MAXIOPB * sizeof(struct xd_iorq), M_DEVBUF, M_NOWAIT);
467 if (xdc->reqs == NULL)
468 panic("xdc malloc");
469 bzero(xdc->reqs, XDC_MAXIOPB * sizeof(struct xd_iorq));
470
471 /* init free list, iorq to iopb pointers, and non-zero fields in the
472 * iopb which never change. */
473
474 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
475 xdc->reqs[lcv].iopb = &xdc->iopbase[lcv];
476 xdc->reqs[lcv].dmaiopb = &xdc->dvmaiopb[lcv];
477 xdc->freereq[lcv] = lcv;
478 xdc->iopbase[lcv].fixd = 1; /* always the same */
479 xdc->iopbase[lcv].naddrmod = XDC_ADDRMOD; /* always the same */
480 xdc->iopbase[lcv].intr_vec = xdc->vector; /* always the same */
481
482 error = bus_dmamap_create(
483 xdc->dmatag,
484 MAXPHYS, /* size */
485 1, /* nsegments */
486 MAXPHYS, /* maxsegsz */
487 0, /* boundary */
488 BUS_DMA_NOWAIT,
489 &xdc->reqs[lcv].dmamap);
490 if (error) {
491 printf("%s: DMA buffer map create error %d\n",
492 xdc->sc_dev.dv_xname, error);
493 return;
494 }
495 }
496 xdc->nfree = XDC_MAXIOPB;
497 xdc->nrun = 0;
498 xdc->waithead = xdc->waitend = xdc->nwait = 0;
499 xdc->ndone = 0;
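
	/*
	 * the four counters partition the iorq pool: nfree + nwait + nrun
	 * + ndone should always add up to XDC_MAXIOPB (the XDC_DIAG code
	 * in xdc_reset checks exactly this).
	 */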
500
501 /* init queue of waiting bufs */
502
503 xdc->sc_wq.b_active = 0;
504 xdc->sc_wq.b_actf = 0;
505 xdc->sc_wq.b_actb = &xdc->sc_wq.b_actf;
506
507 /*
508 * section 7 of the manual tells us how to init the controller:
509 * - read controller parameters (6/0)
510 * - write controller parameters (5/0)
511 */
512
513 	/* read controller parameters and ensure we have a 753/7053 */
514
515 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL);
516 if (rqno == XD_ERR_FAIL) {
517 printf(": couldn't read controller params\n");
518 return; /* shouldn't ever happen */
519 }
520 ctl = (struct xd_iopb_ctrl *) & xdc->iopbase[rqno];
521 if (ctl->ctype != XDCT_753) {
522 if (xdc->reqs[rqno].errno)
523 printf(": %s: ", xdc_e2str(xdc->reqs[rqno].errno));
524 printf(": doesn't identify as a 753/7053\n");
525 XDC_DONE(xdc, rqno, error);
526 return;
527 }
528 printf(": Xylogics 753/7053, PROM=0x%x.%02x.%02x\n",
529 ctl->eprom_partno, ctl->eprom_lvl, ctl->eprom_rev);
530 XDC_DONE(xdc, rqno, error);
531
532 /* now write controller parameters (xdc_cmd sets all params for us) */
533
534 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL);
535 XDC_DONE(xdc, rqno, error);
536 if (error) {
537 printf("%s: controller config error: %s\n",
538 xdc->sc_dev.dv_xname, xdc_e2str(error));
539 return;
540 }
541
542 /* link in interrupt with higher level software */
543 vme_intr_map(ct, va->vma_vec, va->vma_pri, &ih);
544 vme_intr_establish(ct, ih, xdcintr, xdc);
545 evcnt_attach(&xdc->sc_dev, "intr", &xdc->sc_intrcnt);
546 vme_bus_establish(ct, &xdc->sc_dev);
547
548
549 /* now we must look for disks using autoconfig */
550 xa.fullmode = XD_SUB_POLL;
551 xa.booting = 1;
552
553 for (xa.driveno = 0; xa.driveno < XDC_MAXDEV; xa.driveno++)
554 (void) config_found(self, (void *) &xa, NULL);
555
556 /* start the watchdog clock */
557 timeout(xdc_tick, xdc, XDC_TICKCNT);
558
559 }
560
561 /*
562 * xdmatch: probe for disk.
563 *
564 * note: we almost always say disk is present. this allows us to
565 * spin up and configure a disk after the system is booted (we can
566 * call xdattach!).
567 */
568 int
569 xdmatch(parent, cf, aux)
570 struct device *parent;
571 struct cfdata *cf;
572 void *aux;
573 {
574 struct xdc_attach_args *xa = aux;
575
576 /* looking for autoconf wildcard or exact match */
577
578 if (cf->cf_loc[XDCCF_DRIVE] != XDCCF_DRIVE_DEFAULT &&
579 cf->cf_loc[XDCCF_DRIVE] != xa->driveno)
580 return 0;
581
582 return 1;
583
584 }
585
586 /*
587 * xdattach: attach a disk. this can be called from autoconf and also
588 * from xdopen/xdstrategy.
589 */
590 void
591 xdattach(parent, self, aux)
592 struct device *parent, *self;
593 void *aux;
594
595 {
596 struct xd_softc *xd = (void *) self;
597 struct xdc_softc *xdc = (void *) parent;
598 struct xdc_attach_args *xa = aux;
599 int rqno, spt = 0, mb, blk, lcv, fmode, s = 0, newstate;
600 struct xd_iopb_drive *driopb;
601 struct dkbad *dkb;
602 int rseg, error;
603 bus_dma_segment_t seg;
604 caddr_t dmaddr;
605 caddr_t buf;
606
607 /*
608 * Always re-initialize the disk structure. We want statistics
609 * to start with a clean slate.
610 */
611 bzero(&xd->sc_dk, sizeof(xd->sc_dk));
612 xd->sc_dk.dk_driver = &xddkdriver;
613 xd->sc_dk.dk_name = xd->sc_dev.dv_xname;
614
615 /* if booting, init the xd_softc */
616
617 if (xa->booting) {
618 xd->state = XD_DRIVE_UNKNOWN; /* to start */
619 xd->flags = 0;
620 xd->parent = xdc;
621 }
622 xd->xd_drive = xa->driveno;
623 fmode = xa->fullmode;
624 xdc->sc_drives[xa->driveno] = xd;
625
626 /* if not booting, make sure we are the only process in the attach for
627 * this drive. if locked out, sleep on it. */
628
629 if (!xa->booting) {
630 s = splbio();
631 while (xd->state == XD_DRIVE_ATTACHING) {
632 if (tsleep(&xd->state, PRIBIO, "xdattach", 0)) {
633 splx(s);
634 return;
635 }
636 }
637 printf("%s at %s",
638 xd->sc_dev.dv_xname, xd->parent->sc_dev.dv_xname);
639 }
640
641 /* we now have control */
642 xd->state = XD_DRIVE_ATTACHING;
643 newstate = XD_DRIVE_UNKNOWN;
644
645 buf = NULL;
646 error = bus_dmamem_alloc(xdc->dmatag, XDFM_BPS, NBPG, 0,
647 &seg, 1, &rseg, BUS_DMA_NOWAIT);
648 if (error) {
649 printf("%s: DMA buffer alloc error %d\n",
650 xd->sc_dev.dv_xname, error);
651 goto done;
652 }
653 dmaddr = (caddr_t)seg.ds_addr;
654
655 error = bus_dmamem_map(xdc->dmatag, &seg, rseg, XDFM_BPS,
656 &buf,
657 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
658 if (error) {
659 		printf("%s: DMA buffer map error %d\n",
660 xd->sc_dev.dv_xname, error);
661 bus_dmamem_free(xdc->dmatag, &seg, rseg);
662 goto done;
663 }
664
665
666 /* first try and reset the drive */
667
668 rqno = xdc_cmd(xdc, XDCMD_RST, 0, xd->xd_drive, 0, 0, 0, fmode);
669 XDC_DONE(xdc, rqno, error);
670 if (error == XD_ERR_NRDY) {
671 printf(" drive %d: off-line\n", xa->driveno);
672 goto done;
673 }
674 if (error) {
675 printf(": ERROR 0x%02x (%s)\n", error, xdc_e2str(error));
676 goto done;
677 }
678 printf(" drive %d: ready\n", xa->driveno);
679
680 /* now set format parameters */
681
682 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_FMT, xd->xd_drive, 0, 0, 0, fmode);
683 XDC_DONE(xdc, rqno, error);
684 if (error) {
685 printf("%s: write format parameters failed: %s\n",
686 xd->sc_dev.dv_xname, xdc_e2str(error));
687 goto done;
688 }
689
690 /* get drive parameters */
691 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode);
692 if (rqno != XD_ERR_FAIL) {
693 driopb = (struct xd_iopb_drive *) & xdc->iopbase[rqno];
694 spt = driopb->sectpertrk;
695 }
696 XDC_DONE(xdc, rqno, error);
697 if (error) {
698 printf("%s: read drive parameters failed: %s\n",
699 xd->sc_dev.dv_xname, xdc_e2str(error));
700 goto done;
701 }
702
703 /*
704 * now set drive parameters (to semi-bogus values) so we can read the
705 * disk label.
706 */
707 xd->pcyl = xd->ncyl = 1;
708 xd->acyl = 0;
709 xd->nhead = 1;
710 xd->nsect = 1;
711 xd->sectpercyl = 1;
712 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */
713 xd->dkb.bt_bad[lcv].bt_cyl = xd->dkb.bt_bad[lcv].bt_trksec = 0xffff;
714 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode);
715 XDC_DONE(xdc, rqno, error);
716 if (error) {
717 printf("%s: write drive parameters failed: %s\n",
718 xd->sc_dev.dv_xname, xdc_e2str(error));
719 goto done;
720 }
721
722 /* read disk label */
723 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, 0, 1, dmaddr, fmode);
724 XDC_DONE(xdc, rqno, error);
725 if (error) {
726 printf("%s: reading disk label failed: %s\n",
727 xd->sc_dev.dv_xname, xdc_e2str(error));
728 goto done;
729 }
730 newstate = XD_DRIVE_NOLABEL;
731
732 xd->hw_spt = spt;
733 /* Attach the disk: must be before getdisklabel to malloc label */
734 disk_attach(&xd->sc_dk);
735
736 if (xdgetdisklabel(xd, buf) != XD_ERR_AOK)
737 goto done;
738
739 /* inform the user of what is up */
740 printf("%s: <%s>, pcyl %d, hw_spt %d\n", xd->sc_dev.dv_xname,
741 buf, xd->pcyl, spt);
742 mb = xd->ncyl * (xd->nhead * xd->nsect) / (1048576 / XDFM_BPS);
743 printf("%s: %dMB, %d cyl, %d head, %d sec, %d bytes/sec\n",
744 xd->sc_dev.dv_xname, mb, xd->ncyl, xd->nhead, xd->nsect,
745 XDFM_BPS);
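
	/*
	 * (the "mb" figure is just total sectors, ncyl * nhead * nsect,
	 * divided by the sectors in a megabyte; assuming the usual 512-byte
	 * XDFM_BPS, that divisor is 1048576 / 512 == 2048.)
	 */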
746
747 /* now set the real drive parameters! */
748
749 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode);
750 XDC_DONE(xdc, rqno, error);
751 if (error) {
752 printf("%s: write real drive parameters failed: %s\n",
753 xd->sc_dev.dv_xname, xdc_e2str(error));
754 goto done;
755 }
756 newstate = XD_DRIVE_ONLINE;
757
758 /*
759 * read bad144 table. this table resides on the first sector of the
760 * last track of the disk (i.e. second cyl of "acyl" area).
761 */
762
763 blk = (xd->ncyl + xd->acyl - 1) * (xd->nhead * xd->nsect) + /* last cyl */
764 (xd->nhead - 1) * xd->nsect; /* last head */
765 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, blk, 1, dmaddr, fmode);
766 XDC_DONE(xdc, rqno, error);
767 if (error) {
768 printf("%s: reading bad144 failed: %s\n",
769 xd->sc_dev.dv_xname, xdc_e2str(error));
770 goto done;
771 }
772
773 /* check dkbad for sanity */
774 dkb = (struct dkbad *) buf;
775 for (lcv = 0; lcv < 126; lcv++) {
776 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff ||
777 dkb->bt_bad[lcv].bt_cyl == 0) &&
778 dkb->bt_bad[lcv].bt_trksec == 0xffff)
779 continue; /* blank */
780 if (dkb->bt_bad[lcv].bt_cyl >= xd->ncyl)
781 break;
782 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xd->nhead)
783 break;
784 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xd->nsect)
785 break;
786 }
787 if (lcv != 126) {
788 printf("%s: warning: invalid bad144 sector!\n",
789 xd->sc_dev.dv_xname);
790 } else {
791 bcopy(buf, &xd->dkb, XDFM_BPS);
792 }
793
794 dk_establish(&xd->sc_dk, &xd->sc_dev); /* XXX */
795
796 done:
797 if (buf != NULL) {
798 bus_dmamem_unmap(xdc->dmatag, buf, XDFM_BPS);
799 bus_dmamem_free(xdc->dmatag, &seg, rseg);
800 }
801
802 xd->state = newstate;
803 if (!xa->booting) {
804 wakeup(&xd->state);
805 splx(s);
806 }
807 }
808
809 /*
810 * end of autoconfig functions
811 */
812
813 /*
814 * { b , c } d e v s w f u n c t i o n s
815 */
816
817 /*
818 * xdclose: close device
819 */
820 int
821 xdclose(dev, flag, fmt, p)
822 dev_t dev;
823 int flag, fmt;
824 struct proc *p;
825 {
826 struct xd_softc *xd = xd_cd.cd_devs[DISKUNIT(dev)];
827 int part = DISKPART(dev);
828
829 /* clear mask bits */
830
831 switch (fmt) {
832 case S_IFCHR:
833 xd->sc_dk.dk_copenmask &= ~(1 << part);
834 break;
835 case S_IFBLK:
836 xd->sc_dk.dk_bopenmask &= ~(1 << part);
837 break;
838 }
839 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
840
841 return 0;
842 }
843
844 /*
845 * xddump: crash dump system
846 */
847 int
848 xddump(dev, blkno, va, size)
849 dev_t dev;
850 daddr_t blkno;
851 caddr_t va;
852 size_t size;
853 {
854 int unit, part;
855 struct xd_softc *xd;
856
857 unit = DISKUNIT(dev);
858 if (unit >= xd_cd.cd_ndevs)
859 return ENXIO;
860 part = DISKPART(dev);
861
862 xd = xd_cd.cd_devs[unit];
863
864 printf("%s%c: crash dump not supported (yet)\n", xd->sc_dev.dv_xname,
865 'a' + part);
866
867 return ENXIO;
868
869 /* outline: globals: "dumplo" == sector number of partition to start
870 * dump at (convert to physical sector with partition table)
871 * "dumpsize" == size of dump in clicks "physmem" == size of physical
872 * memory (clicks, ctob() to get bytes) (normal case: dumpsize ==
873 * physmem)
874 *
875 * dump a copy of physical memory to the dump device starting at sector
876 * "dumplo" in the swap partition (make sure > 0). map in pages as
877 * we go. use polled I/O.
878 *
879 * XXX how to handle NON_CONTIG? */
880
881 }
882
883 /*
884 * xdioctl: ioctls on XD drives. based on ioctl's of other netbsd disks.
885 */
886 int
887 xdioctl(dev, command, addr, flag, p)
888 dev_t dev;
889 u_long command;
890 caddr_t addr;
891 int flag;
892 struct proc *p;
893
894 {
895 struct xd_softc *xd;
896 struct xd_iocmd *xio;
897 int error, s, unit;
898
899 unit = DISKUNIT(dev);
900
901 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL)
902 return (ENXIO);
903
904 /* switch on ioctl type */
905
906 switch (command) {
907 case DIOCSBAD: /* set bad144 info */
908 if ((flag & FWRITE) == 0)
909 return EBADF;
910 s = splbio();
911 bcopy(addr, &xd->dkb, sizeof(xd->dkb));
912 splx(s);
913 return 0;
914
915 case DIOCGDINFO: /* get disk label */
916 bcopy(xd->sc_dk.dk_label, addr, sizeof(struct disklabel));
917 return 0;
918
919 case DIOCGPART: /* get partition info */
920 ((struct partinfo *) addr)->disklab = xd->sc_dk.dk_label;
921 ((struct partinfo *) addr)->part =
922 &xd->sc_dk.dk_label->d_partitions[DISKPART(dev)];
923 return 0;
924
925 case DIOCSDINFO: /* set disk label */
926 if ((flag & FWRITE) == 0)
927 return EBADF;
928 error = setdisklabel(xd->sc_dk.dk_label,
929 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0,
930 xd->sc_dk.dk_cpulabel);
931 if (error == 0) {
932 if (xd->state == XD_DRIVE_NOLABEL)
933 xd->state = XD_DRIVE_ONLINE;
934 }
935 return error;
936
937 case DIOCWLABEL: /* change write status of disk label */
938 if ((flag & FWRITE) == 0)
939 return EBADF;
940 if (*(int *) addr)
941 xd->flags |= XD_WLABEL;
942 else
943 xd->flags &= ~XD_WLABEL;
944 return 0;
945
946 case DIOCWDINFO: /* write disk label */
947 if ((flag & FWRITE) == 0)
948 return EBADF;
949 error = setdisklabel(xd->sc_dk.dk_label,
950 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0,
951 xd->sc_dk.dk_cpulabel);
952 if (error == 0) {
953 if (xd->state == XD_DRIVE_NOLABEL)
954 xd->state = XD_DRIVE_ONLINE;
955
956 /* Simulate opening partition 0 so write succeeds. */
957 xd->sc_dk.dk_openmask |= (1 << 0);
958 error = writedisklabel(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART),
959 xdstrategy, xd->sc_dk.dk_label,
960 xd->sc_dk.dk_cpulabel);
961 xd->sc_dk.dk_openmask =
962 xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
963 }
964 return error;
965
966 case DIOSXDCMD:
967 xio = (struct xd_iocmd *) addr;
968 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
969 return (error);
970 return (xdc_ioctlcmd(xd, dev, xio));
971
972 default:
973 return ENOTTY;
974 }
975 }
976 /*
977 * xdopen: open drive
978 */
979
980 int
981 xdopen(dev, flag, fmt, p)
982 dev_t dev;
983 int flag, fmt;
984 struct proc *p;
985 {
986 int unit, part;
987 struct xd_softc *xd;
988 struct xdc_attach_args xa;
989
990 /* first, could it be a valid target? */
991
992 unit = DISKUNIT(dev);
993 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL)
994 return (ENXIO);
995 part = DISKPART(dev);
996
997 /* do we need to attach the drive? */
998
999 if (xd->state == XD_DRIVE_UNKNOWN) {
1000 xa.driveno = xd->xd_drive;
1001 xa.fullmode = XD_SUB_WAIT;
1002 xa.booting = 0;
1003 xdattach((struct device *) xd->parent, (struct device *) xd, &xa);
1004 if (xd->state == XD_DRIVE_UNKNOWN) {
1005 return (EIO);
1006 }
1007 }
1008 /* check for partition */
1009
1010 if (part != RAW_PART &&
1011 (part >= xd->sc_dk.dk_label->d_npartitions ||
1012 xd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
1013 return (ENXIO);
1014 }
1015 /* set open masks */
1016
1017 switch (fmt) {
1018 case S_IFCHR:
1019 xd->sc_dk.dk_copenmask |= (1 << part);
1020 break;
1021 case S_IFBLK:
1022 xd->sc_dk.dk_bopenmask |= (1 << part);
1023 break;
1024 }
1025 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
1026
1027 return 0;
1028 }
1029
1030 int
1031 xdread(dev, uio, flags)
1032 dev_t dev;
1033 struct uio *uio;
1034 int flags;
1035 {
1036
1037 return (physio(xdstrategy, NULL, dev, B_READ, minphys, uio));
1038 }
1039
1040 int
1041 xdwrite(dev, uio, flags)
1042 dev_t dev;
1043 struct uio *uio;
1044 int flags;
1045 {
1046
1047 return (physio(xdstrategy, NULL, dev, B_WRITE, minphys, uio));
1048 }
1049
1050
1051 /*
1052 * xdsize: return size of a partition for a dump
1053 */
1054
1055 int
1056 xdsize(dev)
1057 dev_t dev;
1058
1059 {
1060 struct xd_softc *xdsc;
1061 int unit, part, size, omask;
1062
1063 /* valid unit? */
1064 unit = DISKUNIT(dev);
1065 if (unit >= xd_cd.cd_ndevs || (xdsc = xd_cd.cd_devs[unit]) == NULL)
1066 return (-1);
1067
1068 part = DISKPART(dev);
1069 omask = xdsc->sc_dk.dk_openmask & (1 << part);
1070
1071 if (omask == 0 && xdopen(dev, 0, S_IFBLK, NULL) != 0)
1072 return (-1);
1073
1074 /* do it */
1075 if (xdsc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1076 size = -1; /* only give valid size for swap partitions */
1077 else
1078 size = xdsc->sc_dk.dk_label->d_partitions[part].p_size *
1079 (xdsc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1080 if (omask == 0 && xdclose(dev, 0, S_IFBLK, NULL) != 0)
1081 return (-1);
1082 return (size);
1083 }
1084 /*
1085 * xdstrategy: buffering system interface to xd.
1086 */
1087
1088 void
1089 xdstrategy(bp)
1090 struct buf *bp;
1091
1092 {
1093 struct xd_softc *xd;
1094 struct xdc_softc *parent;
1095 struct buf *wq;
1096 int s, unit;
1097 struct xdc_attach_args xa;
1098
1099 unit = DISKUNIT(bp->b_dev);
1100
1101 /* check for live device */
1102
1103 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == 0 ||
1104 bp->b_blkno < 0 ||
1105 (bp->b_bcount % xd->sc_dk.dk_label->d_secsize) != 0) {
1106 bp->b_error = EINVAL;
1107 goto bad;
1108 }
1109 /* do we need to attach the drive? */
1110
1111 if (xd->state == XD_DRIVE_UNKNOWN) {
1112 xa.driveno = xd->xd_drive;
1113 xa.fullmode = XD_SUB_WAIT;
1114 xa.booting = 0;
1115 xdattach((struct device *)xd->parent, (struct device *)xd, &xa);
1116 if (xd->state == XD_DRIVE_UNKNOWN) {
1117 bp->b_error = EIO;
1118 goto bad;
1119 }
1120 }
1121 if (xd->state != XD_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) {
1122 /* no I/O to unlabeled disks, unless raw partition */
1123 bp->b_error = EIO;
1124 goto bad;
1125 }
1126 /* short circuit zero length request */
1127
1128 if (bp->b_bcount == 0)
1129 goto done;
1130
1131 /* check bounds with label (disksubr.c). Determine the size of the
1132 * transfer, and make sure it is within the boundaries of the
1133 * partition. Adjust transfer if needed, and signal errors or early
1134 * completion. */
1135
1136 if (bounds_check_with_label(bp, xd->sc_dk.dk_label,
1137 (xd->flags & XD_WLABEL) != 0) <= 0)
1138 goto done;
1139
1140 /*
1141 * now we know we have a valid buf structure that we need to do I/O
1142 * on.
1143 *
1144 * note that we don't disksort because the controller has a sorting
1145 * algorithm built into the hardware.
1146 */
1147
1148 s = splbio(); /* protect the queues */
1149
1150 /* first, give jobs in front of us a chance */
1151 parent = xd->parent;
1152 while (parent->nfree > 0 && parent->sc_wq.b_actf)
1153 if (xdc_startbuf(parent, NULL, NULL) != XD_ERR_AOK)
1154 break;
1155
1156 /* if there are no free iorq's, then we just queue and return. the
1157 * buffs will get picked up later by xdcintr().
1158 */
1159
1160 if (parent->nfree == 0) {
1161 wq = &xd->parent->sc_wq;
1162 bp->b_actf = 0;
1163 bp->b_actb = wq->b_actb;
1164 *wq->b_actb = bp;
1165 wq->b_actb = &bp->b_actf;
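		/*
		 * (sc_wq is the old-style buf chain: b_actf links forward
		 * and b_actb holds the address of the previous buf's b_actf,
		 * so the append above just patches *wq->b_actb and moves the
		 * tail pointer; xdc_startbuf unlinks from the head the same
		 * way.)
		 */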
1166 splx(s);
1167 return;
1168 }
1169
1170 /* now we have free iopb's and we are at splbio... start 'em up */
1171 	if (xdc_startbuf(parent, xd, bp) != XD_ERR_AOK) {
1172 		splx(s);
1173 		return;
1174 	}
1175 /* done! */
1176
1177 splx(s);
1178 return;
1179
1180 bad: /* tells upper layers we have an error */
1181 bp->b_flags |= B_ERROR;
1182 done: /* tells upper layers we are done with this
1183 * buf */
1184 bp->b_resid = bp->b_bcount;
1185 biodone(bp);
1186 }
1187 /*
1188 * end of {b,c}devsw functions
1189 */
1190
1191 /*
1192 * i n t e r r u p t f u n c t i o n
1193 *
1194 * xdcintr: hardware interrupt.
1195 */
1196 int
1197 xdcintr(v)
1198 void *v;
1199
1200 {
1201 struct xdc_softc *xdcsc = v;
1202
1203 /* kick the event counter */
1204
1205 xdcsc->sc_intrcnt.ev_count++;
1206
1207 /* remove as many done IOPBs as possible */
1208
1209 xdc_remove_iorq(xdcsc);
1210
1211 /* start any iorq's already waiting */
1212
1213 xdc_start(xdcsc, XDC_MAXIOPB);
1214
1215 /* fill up any remaining iorq's with queue'd buffers */
1216
1217 while (xdcsc->nfree > 0 && xdcsc->sc_wq.b_actf)
1218 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK)
1219 break;
1220
1221 return (1);
1222 }
1223 /*
1224 * end of interrupt function
1225 */
1226
1227 /*
1228 * i n t e r n a l f u n c t i o n s
1229 */
1230
1231 /*
1232 * xdc_rqinit: fill out the fields of an I/O request
1233 */
1234
1235 inline void
1236 xdc_rqinit(rq, xdc, xd, md, blk, cnt, db, bp)
1237 struct xd_iorq *rq;
1238 struct xdc_softc *xdc;
1239 struct xd_softc *xd;
1240 int md;
1241 u_long blk;
1242 int cnt;
1243 caddr_t db;
1244 struct buf *bp;
1245 {
1246 rq->xdc = xdc;
1247 rq->xd = xd;
1248 rq->ttl = XDC_MAXTTL + 10;
1249 rq->mode = md;
1250 rq->tries = rq->errno = rq->lasterror = 0;
1251 rq->blockno = blk;
1252 rq->sectcnt = cnt;
1253 rq->dbuf = db;
1254 rq->buf = bp;
1255 }
1256 /*
1257 * xdc_rqtopb: load up an IOPB based on an iorq
1258 */
1259
1260 void
1261 xdc_rqtopb(iorq, iopb, cmd, subfun)
1262 struct xd_iorq *iorq;
1263 struct xd_iopb *iopb;
1264 int cmd, subfun;
1265
1266 {
1267 u_long block, dp;
1268
1269 /* standard stuff */
1270
1271 iopb->errs = iopb->done = 0;
1272 iopb->comm = cmd;
1273 iopb->errno = iopb->status = 0;
1274 iopb->subfun = subfun;
1275 if (iorq->xd)
1276 iopb->unit = iorq->xd->xd_drive;
1277 else
1278 iopb->unit = 0;
1279
1280 /* check for alternate IOPB format */
1281
1282 if (cmd == XDCMD_WRP) {
1283 switch (subfun) {
1284 case XDFUN_CTL:{
1285 struct xd_iopb_ctrl *ctrl =
1286 (struct xd_iopb_ctrl *) iopb;
1287 iopb->lll = 0;
1288 iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL)
1289 ? 0
1290 : iorq->xdc->ipl;
1291 ctrl->param_a = XDPA_TMOD | XDPA_DACF;
1292 ctrl->param_b = XDPB_ROR | XDPB_TDT_3_2USEC;
1293 ctrl->param_c = XDPC_OVS | XDPC_COP | XDPC_ASR |
1294 XDPC_RBC | XDPC_ECC2;
1295 ctrl->throttle = XDC_THROTTLE;
1296 ctrl->delay = XDC_DELAY;
1297 break;
1298 }
1299 case XDFUN_DRV:{
1300 struct xd_iopb_drive *drv =
1301 (struct xd_iopb_drive *)iopb;
1302 /* we assume that the disk label has the right
1303 * info */
1304 if (XD_STATE(iorq->mode) == XD_SUB_POLL)
1305 drv->dparam_ipl = (XDC_DPARAM << 3);
1306 else
1307 drv->dparam_ipl = (XDC_DPARAM << 3) |
1308 iorq->xdc->ipl;
1309 drv->maxsect = iorq->xd->nsect - 1;
1310 drv->maxsector = drv->maxsect;
1311 /* note: maxsector != maxsect only if you are
1312 * doing cyl sparing */
1313 drv->headoff = 0;
1314 drv->maxcyl = iorq->xd->pcyl - 1;
1315 drv->maxhead = iorq->xd->nhead - 1;
1316 break;
1317 }
1318 case XDFUN_FMT:{
1319 struct xd_iopb_format *form =
1320 (struct xd_iopb_format *) iopb;
1321 if (XD_STATE(iorq->mode) == XD_SUB_POLL)
1322 form->interleave_ipl = (XDC_INTERLEAVE << 3);
1323 else
1324 form->interleave_ipl = (XDC_INTERLEAVE << 3) |
1325 iorq->xdc->ipl;
1326 form->field1 = XDFM_FIELD1;
1327 form->field2 = XDFM_FIELD2;
1328 form->field3 = XDFM_FIELD3;
1329 form->field4 = XDFM_FIELD4;
1330 form->bytespersec = XDFM_BPS;
1331 form->field6 = XDFM_FIELD6;
1332 form->field7 = XDFM_FIELD7;
1333 break;
1334 }
1335 }
1336 } else {
1337
1338 /* normal IOPB case (harmless to RDP command) */
1339
1340 iopb->lll = 0;
1341 iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL)
1342 ? 0
1343 : iorq->xdc->ipl;
1344 iopb->sectcnt = iorq->sectcnt;
1345 block = iorq->blockno;
1346 if (iorq->xd == NULL || block == 0) {
1347 iopb->sectno = iopb->headno = iopb->cylno = 0;
1348 } else {
1349 iopb->sectno = block % iorq->xd->nsect;
1350 block = block / iorq->xd->nsect;
1351 iopb->headno = block % iorq->xd->nhead;
1352 block = block / iorq->xd->nhead;
1353 iopb->cylno = block;
1354 }
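		/*
		 * i.e. the absolute sector number is peeled apart innermost
		 * field first: sectno = block % nsect, headno =
		 * (block / nsect) % nhead, cylno = block / (nsect * nhead).
		 * for instance, a drive with, say, nsect == 64 and nhead
		 * == 16 would put block 100000 at cyl 97, head 10, sector 32.
		 */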
1355 dp = (u_long) iorq->dbuf;
1356 dp = iopb->daddr = (iorq->dbuf == NULL) ? 0 : dp;
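		/*
		 * the address modifier below is presumably the A24 vs. A32
		 * choice: if the end of the DVMA transfer (dp + XDFM_BPS *
		 * sectcnt) would run past the 16MB mark (0x1000000), the
		 * 32-bit modifier is used, otherwise the plain 24-bit one.
		 */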
1357 iopb->addrmod = ((dp + (XDFM_BPS * iorq->sectcnt)) > 0x1000000)
1358 ? XDC_ADDRMOD32
1359 : XDC_ADDRMOD;
1360 }
1361 }
1362
1363 /*
1364 * xdc_cmd: front end for POLL'd and WAIT'd commands. Returns rqno.
1365 * If you've already got an IORQ, you can call submit directly (currently
1366  * there is no need to do this). NORM requests are handled separately.
1367 */
1368 int
1369 xdc_cmd(xdcsc, cmd, subfn, unit, block, scnt, dptr, fullmode)
1370 struct xdc_softc *xdcsc;
1371 int cmd, subfn, unit, block, scnt;
1372 char *dptr;
1373 int fullmode;
1374
1375 {
1376 int rqno, submode = XD_STATE(fullmode), retry;
1377 struct xd_iorq *iorq;
1378 struct xd_iopb *iopb;
1379
1380 /* get iorq/iopb */
1381 switch (submode) {
1382 case XD_SUB_POLL:
1383 while (xdcsc->nfree == 0) {
1384 if (xdc_piodriver(xdcsc, 0, 1) != XD_ERR_AOK)
1385 return (XD_ERR_FAIL);
1386 }
1387 break;
1388 case XD_SUB_WAIT:
1389 retry = 1;
1390 while (retry) {
1391 while (xdcsc->nfree == 0) {
1392 if (tsleep(&xdcsc->nfree, PRIBIO, "xdnfree", 0))
1393 return (XD_ERR_FAIL);
1394 }
1395 while (xdcsc->ndone > XDC_SUBWAITLIM) {
1396 if (tsleep(&xdcsc->ndone, PRIBIO, "xdsubwait", 0))
1397 return (XD_ERR_FAIL);
1398 }
1399 if (xdcsc->nfree)
1400 retry = 0; /* got it */
1401 }
1402 break;
1403 default:
1404 return (XD_ERR_FAIL); /* illegal */
1405 }
1406 if (xdcsc->nfree == 0)
1407 panic("xdcmd nfree");
1408 rqno = XDC_RQALLOC(xdcsc);
1409 iorq = &xdcsc->reqs[rqno];
1410 iopb = iorq->iopb;
1411
1412
1413 /* init iorq/iopb */
1414
1415 xdc_rqinit(iorq, xdcsc,
1416 (unit == XDC_NOUNIT) ? NULL : xdcsc->sc_drives[unit],
1417 fullmode, block, scnt, dptr, NULL);
1418
1419 /* load IOPB from iorq */
1420
1421 xdc_rqtopb(iorq, iopb, cmd, subfn);
1422
1423 /* submit it for processing */
1424
1425 xdc_submit_iorq(xdcsc, rqno, fullmode); /* error code will be in iorq */
1426
1427 return (rqno);
1428 }
1429 /*
1430 * xdc_startbuf
1431 * start a buffer running, assumes nfree > 0
1432 */
1433
1434 int
1435 xdc_startbuf(xdcsc, xdsc, bp)
1436 struct xdc_softc *xdcsc;
1437 struct xd_softc *xdsc;
1438 struct buf *bp;
1439
1440 {
1441 int rqno, partno;
1442 struct xd_iorq *iorq;
1443 struct xd_iopb *iopb;
1444 struct buf *wq;
1445 u_long block;
1446 /* caddr_t dbuf;*/
1447 int error;
1448
1449 if (!xdcsc->nfree)
1450 panic("xdc_startbuf free");
1451 rqno = XDC_RQALLOC(xdcsc);
1452 iorq = &xdcsc->reqs[rqno];
1453 iopb = iorq->iopb;
1454
1455 /* get buf */
1456
1457 if (bp == NULL) {
1458 bp = xdcsc->sc_wq.b_actf;
1459 if (!bp)
1460 panic("xdc_startbuf bp");
1461 wq = bp->b_actf;
1462 if (wq)
1463 wq->b_actb = bp->b_actb;
1464 else
1465 xdcsc->sc_wq.b_actb = bp->b_actb;
1466 *bp->b_actb = wq;
1467 xdsc = xdcsc->sc_drives[DISKUNIT(bp->b_dev)];
1468 }
1469 partno = DISKPART(bp->b_dev);
1470 #ifdef XDC_DEBUG
1471 printf("xdc_startbuf: %s%c: %s block %d\n", xdsc->sc_dev.dv_xname,
1472 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno);
1473 printf("xdc_startbuf: b_bcount %d, b_data 0x%x\n",
1474 bp->b_bcount, bp->b_data);
1475 #endif
1476
1477 /*
1478 * load request. we have to calculate the correct block number based
1479 * on partition info.
1480 *
1481 * note that iorq points to the buffer as mapped into DVMA space,
1482  * whereas bp->b_data points to its non-DVMA mapping.
1483 */
1484
1485 block = bp->b_blkno + ((partno == RAW_PART) ? 0 :
1486 xdsc->sc_dk.dk_label->d_partitions[partno].p_offset);
1487
1488 error = bus_dmamap_load(xdcsc->dmatag, iorq->dmamap,
1489 bp->b_data, bp->b_bcount, 0, BUS_DMA_NOWAIT);
1490 if (error != 0) {
1491 printf("%s: warning: cannot load DMA map\n",
1492 xdcsc->sc_dev.dv_xname);
1493 XDC_FREE(xdcsc, rqno);
1494 wq = &xdcsc->sc_wq; /* put at end of queue */
1495 bp->b_actf = 0;
1496 bp->b_actb = wq->b_actb;
1497 *wq->b_actb = bp;
1498 wq->b_actb = &bp->b_actf;
1499 return (XD_ERR_FAIL); /* XXX: need some sort of
1500 * call-back scheme here? */
1501 }
1502 bus_dmamap_sync(xdcsc->dmatag, iorq->dmamap, 0,
1503 iorq->dmamap->dm_mapsize, (bp->b_flags & B_READ)
1504 ? BUS_DMASYNC_PREREAD
1505 : BUS_DMASYNC_PREWRITE);
1506
1507 /* init iorq and load iopb from it */
1508 xdc_rqinit(iorq, xdcsc, xdsc, XD_SUB_NORM | XD_MODE_VERBO, block,
1509 bp->b_bcount / XDFM_BPS,
1510 (caddr_t)iorq->dmamap->dm_segs[0].ds_addr,
1511 bp);
1512
1513 xdc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XDCMD_RD : XDCMD_WR, 0);
1514
1515 /* Instrumentation. */
1516 disk_busy(&xdsc->sc_dk);
1517
1518 /* now submit [note that xdc_submit_iorq can never fail on NORM reqs] */
1519
1520 xdc_submit_iorq(xdcsc, rqno, XD_SUB_NORM);
1521 return (XD_ERR_AOK);
1522 }
1523
1524
1525 /*
1526 * xdc_submit_iorq: submit an iorq for processing. returns XD_ERR_AOK
1527  * if ok. if it fails it returns an error code. type is XD_SUB_*.
1528 *
1529 * note: caller frees iorq in all cases except NORM
1530 *
1531 * return value:
1532 * NORM: XD_AOK (req pending), XD_FAIL (couldn't submit request)
1533 * WAIT: XD_AOK (success), <error-code> (failed)
1534 * POLL: <same as WAIT>
1535 * NOQ : <same as NORM>
1536 *
1537 * there are three sources for i/o requests:
1538 * [1] xdstrategy: normal block I/O, using "struct buf" system.
1539 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts.
1540 * [3] open/ioctl: these are I/O requests done in the context of a process,
1541 * and the process should block until they are done.
1542 *
1543 * software state is stored in the iorq structure. each iorq has an
1544 * iopb structure. the hardware understands the iopb structure.
1545 * every command must go through an iopb. a 7053 can only handle
1546 * XDC_MAXIOPB (31) active iopbs at one time. iopbs are allocated in
1547 * DVMA space at boot up time. what happens if we run out of iopb's?
1548 * for i/o type [1], the buffers are queued at the "buff" layer and
1549 * picked up later by the interrupt routine. for case [2] the
1550 * programmed i/o driver is called with a special flag that says
1551 * return when one iopb is free. for case [3] the process can sleep
1552  * on the iorq free list until some iopbs are available.
1553 */
1554
1555
1556 int
1557 xdc_submit_iorq(xdcsc, iorqno, type)
1558 struct xdc_softc *xdcsc;
1559 int iorqno;
1560 int type;
1561
1562 {
1563 u_long iopbaddr;
1564 struct xd_iorq *iorq = &xdcsc->reqs[iorqno];
1565
1566 #ifdef XDC_DEBUG
1567 printf("xdc_submit_iorq(%s, no=%d, type=%d)\n", xdcsc->sc_dev.dv_xname,
1568 iorqno, type);
1569 #endif
1570
1571 /* first check and see if controller is busy */
1572 if (xdcsc->xdc->xdc_csr & XDC_ADDING) {
1573 #ifdef XDC_DEBUG
1574 printf("xdc_submit_iorq: XDC not ready (ADDING)\n");
1575 #endif
1576 if (type == XD_SUB_NOQ)
1577 return (XD_ERR_FAIL); /* failed */
1578 XDC_TWAIT(xdcsc, iorqno); /* put at end of waitq */
1579 switch (type) {
1580 case XD_SUB_NORM:
1581 return XD_ERR_AOK; /* success */
1582 case XD_SUB_WAIT:
1583 while (iorq->iopb->done == 0) {
1584 sleep(iorq, PRIBIO);
1585 }
1586 return (iorq->errno);
1587 case XD_SUB_POLL:
1588 return (xdc_piodriver(xdcsc, iorqno, 0));
1589 default:
1590 panic("xdc_submit_iorq adding");
1591 }
1592 }
1593 #ifdef XDC_DEBUG
1594 {
1595 u_char *rio = (u_char *) iorq->iopb;
1596 int sz = sizeof(struct xd_iopb), lcv;
1597 printf("%s: aio #%d [",
1598 xdcsc->sc_dev.dv_xname, iorq - xdcsc->reqs);
1599 for (lcv = 0; lcv < sz; lcv++)
1600 printf(" %02x", rio[lcv]);
1601 printf("]\n");
1602 }
1603 #endif /* XDC_DEBUG */
1604
1605 /* controller not busy, start command */
1606 iopbaddr = (u_long) iorq->dmaiopb;
1607 XDC_GO(xdcsc->xdc, iopbaddr); /* go! */
1608 xdcsc->nrun++;
1609 /* command now running, wrap it up */
1610 switch (type) {
1611 case XD_SUB_NORM:
1612 case XD_SUB_NOQ:
1613 return (XD_ERR_AOK); /* success */
1614 case XD_SUB_WAIT:
1615 while (iorq->iopb->done == 0) {
1616 sleep(iorq, PRIBIO);
1617 }
1618 return (iorq->errno);
1619 case XD_SUB_POLL:
1620 return (xdc_piodriver(xdcsc, iorqno, 0));
1621 default:
1622 panic("xdc_submit_iorq wrap up");
1623 }
1624 panic("xdc_submit_iorq");
1625 return 0; /* not reached */
1626 }
1627
1628
1629 /*
1630 * xdc_piodriver
1631 *
1632 * programmed i/o driver. this function takes over the computer
1633 * and drains off all i/o requests. it returns the status of the iorq
1634  * the caller is interested in. if freeone is true, then it returns
1635 * when there is a free iorq.
1636 */
1637 int
1638 xdc_piodriver(xdcsc, iorqno, freeone)
1639 struct xdc_softc *xdcsc;
1640 int iorqno;
1641 int freeone;
1642
1643 {
1644 int nreset = 0;
1645 int retval = 0;
1646 u_long count;
1647 struct xdc *xdc = xdcsc->xdc;
1648 #ifdef XDC_DEBUG
1649 printf("xdc_piodriver(%s, %d, freeone=%d)\n", xdcsc->sc_dev.dv_xname,
1650 iorqno, freeone);
1651 #endif
1652
1653 while (xdcsc->nwait || xdcsc->nrun) {
1654 #ifdef XDC_DEBUG
1655 printf("xdc_piodriver: wait=%d, run=%d\n",
1656 xdcsc->nwait, xdcsc->nrun);
1657 #endif
1658 XDC_WAIT(xdc, count, XDC_MAXTIME, (XDC_REMIOPB | XDC_F_ERROR));
1659 #ifdef XDC_DEBUG
1660 printf("xdc_piodriver: done wait with count = %d\n", count);
1661 #endif
1662 /* we expect some progress soon */
1663 if (count == 0 && nreset >= 2) {
1664 xdc_reset(xdcsc, 0, XD_RSET_ALL, XD_ERR_FAIL, 0);
1665 #ifdef XDC_DEBUG
1666 printf("xdc_piodriver: timeout\n");
1667 #endif
1668 return (XD_ERR_FAIL);
1669 }
1670 if (count == 0) {
1671 if (xdc_reset(xdcsc, 0,
1672 (nreset++ == 0) ? XD_RSET_NONE : iorqno,
1673 XD_ERR_FAIL,
1674 0) == XD_ERR_FAIL)
1675 return (XD_ERR_FAIL); /* flushes all but POLL
1676 * requests, resets */
1677 continue;
1678 }
1679 xdc_remove_iorq(xdcsc); /* could resubmit request */
1680 if (freeone) {
1681 if (xdcsc->nrun < XDC_MAXIOPB) {
1682 #ifdef XDC_DEBUG
1683 printf("xdc_piodriver: done: one free\n");
1684 #endif
1685 return (XD_ERR_AOK);
1686 }
1687 continue; /* don't xdc_start */
1688 }
1689 xdc_start(xdcsc, XDC_MAXIOPB);
1690 }
1691
1692 /* get return value */
1693
1694 retval = xdcsc->reqs[iorqno].errno;
1695
1696 #ifdef XDC_DEBUG
1697 printf("xdc_piodriver: done, retval = 0x%x (%s)\n",
1698 xdcsc->reqs[iorqno].errno, xdc_e2str(xdcsc->reqs[iorqno].errno));
1699 #endif
1700
1701 /* now that we've drained everything, start up any bufs that have
1702 * queued */
1703
1704 while (xdcsc->nfree > 0 && xdcsc->sc_wq.b_actf)
1705 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK)
1706 break;
1707
1708 return (retval);
1709 }
1710
1711 /*
1712 * xdc_reset: reset one drive. NOTE: assumes xdc was just reset.
1713 * we steal iopb[0] for this, but we put it back when we are done.
1714 */
1715 void
1716 xdc_xdreset(xdcsc, xdsc)
1717 struct xdc_softc *xdcsc;
1718 struct xd_softc *xdsc;
1719
1720 {
1721 struct xd_iopb tmpiopb;
1722 u_long addr;
1723 int del;
1724 bcopy(xdcsc->iopbase, &tmpiopb, sizeof(tmpiopb));
1725 bzero(xdcsc->iopbase, sizeof(tmpiopb));
1726 xdcsc->iopbase->comm = XDCMD_RST;
1727 xdcsc->iopbase->unit = xdsc->xd_drive;
1728 addr = (u_long) xdcsc->dvmaiopb;
1729 XDC_GO(xdcsc->xdc, addr); /* go! */
1730 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_REMIOPB);
1731 if (del <= 0 || xdcsc->iopbase->errs) {
1732 printf("%s: off-line: %s\n", xdcsc->sc_dev.dv_xname,
1733 xdc_e2str(xdcsc->iopbase->errno));
1734 xdcsc->xdc->xdc_csr = XDC_RESET;
1735 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET);
1736 if (del <= 0)
1737 panic("xdc_reset");
1738 } else {
1739 xdcsc->xdc->xdc_csr = XDC_CLRRIO; /* clear RIO */
1740 }
1741 bcopy(&tmpiopb, xdcsc->iopbase, sizeof(tmpiopb));
1742 }
1743
1744
1745 /*
1746 * xdc_reset: reset everything: requests are marked as errors except
1747 * a polled request (which is resubmitted)
1748 */
1749 int
1750 xdc_reset(xdcsc, quiet, blastmode, error, xdsc)
1751 struct xdc_softc *xdcsc;
1752 int quiet, blastmode, error;
1753 struct xd_softc *xdsc;
1754
1755 {
1756 int del = 0, lcv, retval = XD_ERR_AOK;
1757 int oldfree = xdcsc->nfree;
1758
1759 /* soft reset hardware */
1760
1761 if (!quiet)
1762 printf("%s: soft reset\n", xdcsc->sc_dev.dv_xname);
1763 xdcsc->xdc->xdc_csr = XDC_RESET;
1764 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET);
1765 if (del <= 0) {
1766 blastmode = XD_RSET_ALL; /* dead, flush all requests */
1767 retval = XD_ERR_FAIL;
1768 }
1769 if (xdsc)
1770 xdc_xdreset(xdcsc, xdsc);
1771
1772 /* fix queues based on "blast-mode" */
1773
1774 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
1775 register struct xd_iorq *iorq = &xdcsc->reqs[lcv];
1776
1777 if (XD_STATE(iorq->mode) != XD_SUB_POLL &&
1778 XD_STATE(iorq->mode) != XD_SUB_WAIT &&
1779 XD_STATE(iorq->mode) != XD_SUB_NORM)
1780 /* is it active? */
1781 continue;
1782
1783 xdcsc->nrun--; /* it isn't running any more */
1784 if (blastmode == XD_RSET_ALL || blastmode != lcv) {
1785 /* failed */
1786 iorq->errno = error;
1787 xdcsc->iopbase[lcv].done = xdcsc->iopbase[lcv].errs = 1;
1788 switch (XD_STATE(xdcsc->reqs[lcv].mode)) {
1789 case XD_SUB_NORM:
1790 iorq->buf->b_error = EIO;
1791 iorq->buf->b_flags |= B_ERROR;
1792 iorq->buf->b_resid =
1793 iorq->sectcnt * XDFM_BPS;
1794
1795 bus_dmamap_sync(xdcsc->dmatag, iorq->dmamap, 0,
1796 iorq->dmamap->dm_mapsize,
1797 (iorq->buf->b_flags & B_READ)
1798 ? BUS_DMASYNC_POSTREAD
1799 : BUS_DMASYNC_POSTWRITE);
1800
1801 bus_dmamap_unload(xdcsc->dmatag, iorq->dmamap);
1802
1803 disk_unbusy(&xdcsc->reqs[lcv].xd->sc_dk,
1804 (xdcsc->reqs[lcv].buf->b_bcount -
1805 xdcsc->reqs[lcv].buf->b_resid));
1806 biodone(iorq->buf);
1807 XDC_FREE(xdcsc, lcv); /* add to free list */
1808 break;
1809 case XD_SUB_WAIT:
1810 wakeup(iorq);
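				/* FALLTHROUGH */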
1811 case XD_SUB_POLL:
1812 xdcsc->ndone++;
1813 iorq->mode =
1814 XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
1815 break;
1816 }
1817
1818 } else {
1819
1820 /* resubmit, put at front of wait queue */
1821 XDC_HWAIT(xdcsc, lcv);
1822 }
1823 }
1824
1825 /*
1826 * now, if stuff is waiting, start it.
1827 * since we just reset it should go
1828 */
1829 xdc_start(xdcsc, XDC_MAXIOPB);
1830
1831 /* ok, we did it */
1832 if (oldfree == 0 && xdcsc->nfree)
1833 wakeup(&xdcsc->nfree);
1834
1835 #ifdef XDC_DIAG
1836 del = xdcsc->nwait + xdcsc->nrun + xdcsc->nfree + xdcsc->ndone;
1837 if (del != XDC_MAXIOPB)
1838 printf("%s: diag: xdc_reset miscount (%d should be %d)!\n",
1839 xdcsc->sc_dev.dv_xname, del, XDC_MAXIOPB);
1840 else
1841 if (xdcsc->ndone > XDC_MAXIOPB - XDC_SUBWAITLIM)
1842 printf("%s: diag: lots of done jobs (%d)\n",
1843 xdcsc->sc_dev.dv_xname, xdcsc->ndone);
1844 #endif
1845 printf("RESET DONE\n");
1846 return (retval);
1847 }
1848 /*
1849 * xdc_start: start all waiting buffers
1850 */
1851
1852 void
1853 xdc_start(xdcsc, maxio)
1854 struct xdc_softc *xdcsc;
1855 int maxio;
1856
1857 {
1858 int rqno;
1859 while (maxio && xdcsc->nwait &&
1860 (xdcsc->xdc->xdc_csr & XDC_ADDING) == 0) {
1861 XDC_GET_WAITER(xdcsc, rqno); /* note: rqno is an "out"
1862 * param */
1863 if (xdc_submit_iorq(xdcsc, rqno, XD_SUB_NOQ) != XD_ERR_AOK)
1864 panic("xdc_start"); /* should never happen */
1865 maxio--;
1866 }
1867 }
1868 /*
1869 * xdc_remove_iorq: remove "done" IOPB's.
1870 */
1871
1872 int
1873 xdc_remove_iorq(xdcsc)
1874 struct xdc_softc *xdcsc;
1875
1876 {
1877 int errno, rqno, comm, errs;
1878 struct xdc *xdc = xdcsc->xdc;
1879 struct xd_iopb *iopb;
1880 struct xd_iorq *iorq;
1881 struct buf *bp;
1882
1883 if (xdc->xdc_csr & XDC_F_ERROR) {
1884 /*
1885 * FATAL ERROR: should never happen under normal use. This
1886 * error is so bad, you can't even tell which IOPB is bad, so
1887 * we dump them all.
1888 */
1889 errno = xdc->xdc_f_err;
1890 printf("%s: fatal error 0x%02x: %s\n", xdcsc->sc_dev.dv_xname,
1891 errno, xdc_e2str(errno));
1892 if (xdc_reset(xdcsc, 0, XD_RSET_ALL, errno, 0) != XD_ERR_AOK) {
1893 printf("%s: soft reset failed!\n",
1894 xdcsc->sc_dev.dv_xname);
1895 panic("xdc_remove_iorq: controller DEAD");
1896 }
1897 return (XD_ERR_AOK);
1898 }
1899
1900 /*
1901 * get iopb that is done
1902 *
1903 * hmm... I used to read the address of the done IOPB off the VME
1904 * registers and calculate the rqno directly from that. that worked
1905 * until I started putting a load on the controller. when loaded, i
1906  * would get interrupts but neither the REMIOPB nor the F_ERROR bit would
1907 * be set, even after DELAY'ing a while! later on the timeout
1908 * routine would detect IOPBs that were marked "running" but their
1909 * "done" bit was set. rather than dealing directly with this
1910 * problem, it is just easier to look at all running IOPB's for the
1911 * done bit.
1912 */
1913 if (xdc->xdc_csr & XDC_REMIOPB) {
1914 xdc->xdc_csr = XDC_CLRRIO;
1915 }
1916
1917 for (rqno = 0; rqno < XDC_MAXIOPB; rqno++) {
1918 iorq = &xdcsc->reqs[rqno];
1919 if (iorq->mode == 0 || XD_STATE(iorq->mode) == XD_SUB_DONE)
1920 continue; /* free, or done */
1921 iopb = &xdcsc->iopbase[rqno];
1922 if (iopb->done == 0)
1923 continue; /* not done yet */
1924
1925 #ifdef XDC_DEBUG
1926 {
1927 u_char *rio = (u_char *) iopb;
1928 int sz = sizeof(struct xd_iopb), lcv;
1929 printf("%s: rio #%d [", xdcsc->sc_dev.dv_xname, rqno);
1930 for (lcv = 0; lcv < sz; lcv++)
1931 printf(" %02x", rio[lcv]);
1932 printf("]\n");
1933 }
1934 #endif /* XDC_DEBUG */
1935
1936 xdcsc->nrun--;
1937
1938 comm = iopb->comm;
1939 errs = iopb->errs;
1940
1941 if (errs)
1942 iorq->errno = iopb->errno;
1943 else
1944 iorq->errno = 0;
1945
1946 /* handle non-fatal errors */
1947
1948 if (errs &&
1949 xdc_error(xdcsc, iorq, iopb, rqno, comm) == XD_ERR_AOK)
1950 continue; /* AOK: we resubmitted it */
1951
1952
1953 /* this iorq is now done (hasn't been restarted or anything) */
1954
1955 if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror)
1956 xdc_perror(iorq, iopb, 0);
1957
1958 /* now, if read/write check to make sure we got all the data
1959 * we needed. (this may not be the case if we got an error in
1960 * the middle of a multisector request). */
1961
1962 if ((iorq->mode & XD_MODE_B144) != 0 && errs == 0 &&
1963 (comm == XDCMD_RD || comm == XDCMD_WR)) {
1964 /* we just successfully processed a bad144 sector
1965 * note: if we are in bad 144 mode, the pointers have
1966 * been advanced already (see above) and are pointing
1967 * at the bad144 sector. to exit bad144 mode, we
1968 * must advance the pointers 1 sector and issue a new
1969 * request if there are still sectors left to process
1970 *
1971 */
1972 XDC_ADVANCE(iorq, 1); /* advance 1 sector */
1973
1974 /* exit b144 mode */
1975 iorq->mode = iorq->mode & (~XD_MODE_B144);
1976
1977 if (iorq->sectcnt) { /* more to go! */
1978 iorq->lasterror = iorq->errno = iopb->errno = 0;
1979 iopb->errs = iopb->done = 0;
1980 iorq->tries = 0;
1981 iopb->sectcnt = iorq->sectcnt;
1982 iopb->cylno = iorq->blockno /
1983 iorq->xd->sectpercyl;
1984 				iopb->headno =
1985 					(iorq->blockno / iorq->xd->nsect) %
1986 						iorq->xd->nhead;
1987 				iopb->sectno = iorq->blockno % iorq->xd->nsect;
1988 iopb->daddr = (u_long) iorq->dbuf;
1989 XDC_HWAIT(xdcsc, rqno);
1990 xdc_start(xdcsc, 1); /* resubmit */
1991 continue;
1992 }
1993 }
1994 /* final cleanup, totally done with this request */
1995
1996 switch (XD_STATE(iorq->mode)) {
1997 case XD_SUB_NORM:
1998 bp = iorq->buf;
1999 if (errs) {
2000 bp->b_error = EIO;
2001 bp->b_flags |= B_ERROR;
2002 bp->b_resid = iorq->sectcnt * XDFM_BPS;
2003 } else {
2004 bp->b_resid = 0; /* done */
2005 }
2006 bus_dmamap_sync(xdcsc->dmatag, iorq->dmamap, 0,
2007 iorq->dmamap->dm_mapsize,
2008 (bp->b_flags & B_READ)
2009 ? BUS_DMASYNC_POSTREAD
2010 : BUS_DMASYNC_POSTWRITE);
2011 bus_dmamap_unload(xdcsc->dmatag, iorq->dmamap);
2012
2013 disk_unbusy(&iorq->xd->sc_dk,
2014 (bp->b_bcount - bp->b_resid));
2015 XDC_FREE(xdcsc, rqno);
2016 biodone(bp);
2017 break;
2018 case XD_SUB_WAIT:
2019 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
2020 xdcsc->ndone++;
2021 wakeup(iorq);
2022 break;
2023 case XD_SUB_POLL:
2024 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
2025 xdcsc->ndone++;
2026 break;
2027 }
2028 }
2029
2030 return (XD_ERR_AOK);
2031 }
2032
2033 /*
2034 * xdc_perror: print error.
2035 * - if still_trying is true: we got an error, retried and got a
2036 * different error. in that case lasterror is the old error,
2037 * and errno is the new one.
2038 * - if still_trying is not true, then if we ever had an error it
2039 * is in lasterror. also, if iorq->errno == 0, then we recovered
2040 * from that error (otherwise iorq->errno == iorq->lasterror).
2041 */
2042 void
2043 xdc_perror(iorq, iopb, still_trying)
2044 struct xd_iorq *iorq;
2045 struct xd_iopb *iopb;
2046 int still_trying;
2047
2048 {
2049
2050 int error = iorq->lasterror;
2051
2052 printf("%s", (iorq->xd) ? iorq->xd->sc_dev.dv_xname
2053 : iorq->xdc->sc_dev.dv_xname);
2054 if (iorq->buf)
2055 printf("%c: ", 'a' + DISKPART(iorq->buf->b_dev));
2056 if (iopb->comm == XDCMD_RD || iopb->comm == XDCMD_WR)
2057 printf("%s %d/%d/%d: ",
2058 (iopb->comm == XDCMD_RD) ? "read" : "write",
2059 iopb->cylno, iopb->headno, iopb->sectno);
2060 printf("%s", xdc_e2str(error));
2061
2062 if (still_trying)
2063 printf(" [still trying, new error=%s]", xdc_e2str(iorq->errno));
2064 else
2065 if (iorq->errno == 0)
2066 printf(" [recovered in %d tries]", iorq->tries);
2067
2068 printf("\n");
2069 }
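
/*
 * for reference, a sketch of the console output xdc_perror() produces.
 * the drive name, partition, and disk address below are hypothetical;
 * the error strings come from xdc_e2str().
 *
 *   still_trying set (retried and got a different error):
 *	xd0a: read 211/4/17: Hard data ECC [still trying, new error=Seek error]
 *
 *   still_trying clear and iorq->errno == 0 (the error was recovered):
 *	xd0a: read 211/4/17: Hard data ECC [recovered in 2 tries]
 */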
2070
2071 /*
2072 * xdc_error: non-fatal error encountered... recover.
2073 * return AOK if resubmitted, return FAIL if this iopb is done
2074 */
2075 int
2076 xdc_error(xdcsc, iorq, iopb, rqno, comm)
2077 struct xdc_softc *xdcsc;
2078 struct xd_iorq *iorq;
2079 struct xd_iopb *iopb;
2080 int rqno, comm;
2081
2082 {
2083 int errno = iorq->errno;
2084 int erract = errno & XD_ERA_MASK;
2085 int oldmode, advance, i;
2086
2087 if (erract == XD_ERA_RSET) { /* some errors require a reset */
2088 oldmode = iorq->mode;
2089 iorq->mode = XD_SUB_DONE | (~XD_SUB_MASK & oldmode);
2090 xdcsc->ndone++;
2091 /* make xdc_start ignore us */
2092 xdc_reset(xdcsc, 1, XD_RSET_NONE, errno, iorq->xd);
2093 iorq->mode = oldmode;
2094 xdcsc->ndone--;
2095 }
2096 	/* check for a read/write to a sector in the bad144 table; if it is
2097 	 * bad, redirect the request to the bad144 replacement area */
2098
2099 if ((comm == XDCMD_RD || comm == XDCMD_WR) &&
2100 (iorq->mode & XD_MODE_B144) == 0) {
2101 advance = iorq->sectcnt - iopb->sectcnt;
2102 XDC_ADVANCE(iorq, advance);
2103 if ((i = isbad(&iorq->xd->dkb, iorq->blockno / iorq->xd->sectpercyl,
2104 (iorq->blockno / iorq->xd->nsect) % iorq->xd->nhead,
2105 iorq->blockno % iorq->xd->nsect)) != -1) {
2106 iorq->mode |= XD_MODE_B144; /* enter bad144 mode &
2107 * redirect */
2108 iopb->errno = iopb->done = iopb->errs = 0;
2109 iopb->sectcnt = 1;
2110 iopb->cylno = (iorq->xd->ncyl + iorq->xd->acyl) - 2;
2111 /* second to last acyl */
2112 i = iorq->xd->sectpercyl - 1 - i; /* follow bad144
2113 * standard */
2114 			iopb->headno = i / iorq->xd->nsect;
2115 			iopb->sectno = i % iorq->xd->nsect;
2116 XDC_HWAIT(xdcsc, rqno);
2117 xdc_start(xdcsc, 1); /* resubmit */
2118 return (XD_ERR_AOK); /* recovered! */
2119 }
2120 }
2121
2122 	/*
2123 	 * it isn't a bad144 sector, so it must be a real error. see if we
2124 	 * can retry it.
2125 	 */
2126 if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror)
2127 xdc_perror(iorq, iopb, 1); /* inform of error state
2128 * change */
2129 iorq->lasterror = errno;
2130
2131 if ((erract == XD_ERA_RSET || erract == XD_ERA_HARD)
2132 && iorq->tries < XDC_MAXTRIES) { /* retry? */
2133 iorq->tries++;
2134 iorq->errno = iopb->errno = iopb->done = iopb->errs = 0;
2135 XDC_HWAIT(xdcsc, rqno);
2136 xdc_start(xdcsc, 1); /* restart */
2137 return (XD_ERR_AOK); /* recovered! */
2138 }
2139
2140 /* failed to recover from this error */
2141 return (XD_ERR_FAIL);
2142 }
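
/*
 * a worked example of the bad144 redirection arithmetic above, assuming
 * a hypothetical geometry of 1024 data cylinders, 2 alternate cylinders,
 * 16 heads and 64 sectors per track (sectpercyl == 1024), with isbad()
 * matching the third entry in the bad-sector table (i == 2):
 *
 *	cylno  = (1024 + 2) - 2 = 1024		second-to-last cylinder
 *	i      = 1024 - 1 - 2   = 1021		count back from the end
 *	headno = 1021 / 64      = 15
 *	sectno = 1021 % 64      = 61
 *
 * i.e. replacement sectors are handed out backwards from the last
 * sector of the second-to-last cylinder, as the bad144 standard
 * specifies.
 */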
2143
2144 /*
2145 * xdc_tick: make sure xd is still alive and ticking (err, kicking).
2146 */
2147 void
2148 xdc_tick(arg)
2149 void *arg;
2150
2151 {
2152 struct xdc_softc *xdcsc = arg;
2153 int lcv, s, reset = 0;
2154 #ifdef XDC_DIAG
2155 int wait, run, free, done, whd = 0;
2156 u_char fqc[XDC_MAXIOPB], wqc[XDC_MAXIOPB], mark[XDC_MAXIOPB];
2157 s = splbio();
2158 wait = xdcsc->nwait;
2159 run = xdcsc->nrun;
2160 free = xdcsc->nfree;
2161 done = xdcsc->ndone;
2162 bcopy(xdcsc->waitq, wqc, sizeof(wqc));
2163 bcopy(xdcsc->freereq, fqc, sizeof(fqc));
2164 splx(s);
2165 if (wait + run + free + done != XDC_MAXIOPB) {
2166 printf("%s: diag: IOPB miscount (got w/f/r/d %d/%d/%d/%d, wanted %d)\n",
2167 xdcsc->sc_dev.dv_xname, wait, free, run, done, XDC_MAXIOPB);
2168 bzero(mark, sizeof(mark));
2169 printf("FREE: ");
2170 for (lcv = free; lcv > 0; lcv--) {
2171 printf("%d ", fqc[lcv - 1]);
2172 mark[fqc[lcv - 1]] = 1;
2173 }
2174 printf("\nWAIT: ");
2175 lcv = wait;
2176 while (lcv > 0) {
2177 printf("%d ", wqc[whd]);
2178 mark[wqc[whd]] = 1;
2179 whd = (whd + 1) % XDC_MAXIOPB;
2180 lcv--;
2181 }
2182 printf("\n");
2183 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
2184 if (mark[lcv] == 0)
2185 printf("MARK: running %d: mode %d done %d errs %d errno 0x%x ttl %d buf %p\n",
2186 lcv, xdcsc->reqs[lcv].mode,
2187 xdcsc->iopbase[lcv].done,
2188 xdcsc->iopbase[lcv].errs,
2189 xdcsc->iopbase[lcv].errno,
2190 xdcsc->reqs[lcv].ttl, xdcsc->reqs[lcv].buf);
2191 }
2192 } else
2193 if (done > XDC_MAXIOPB - XDC_SUBWAITLIM)
2194 printf("%s: diag: lots of done jobs (%d)\n",
2195 xdcsc->sc_dev.dv_xname, done);
2196
2197 #endif
2198 #ifdef XDC_DEBUG
2199 printf("%s: tick: csr 0x%x, w/f/r/d %d/%d/%d/%d\n",
2200 xdcsc->sc_dev.dv_xname,
2201 xdcsc->xdc->xdc_csr, xdcsc->nwait, xdcsc->nfree, xdcsc->nrun,
2202 xdcsc->ndone);
2203 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
2204 if (xdcsc->reqs[lcv].mode)
2205 printf("running %d: mode %d done %d errs %d errno 0x%x\n",
2206 lcv,
2207 xdcsc->reqs[lcv].mode, xdcsc->iopbase[lcv].done,
2208 xdcsc->iopbase[lcv].errs, xdcsc->iopbase[lcv].errno);
2209 }
2210 #endif
2211
2212 	/* reduce ttl for each request; if one goes to zero, reset the xdc */
2213 s = splbio();
2214 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
2215 if (xdcsc->reqs[lcv].mode == 0 ||
2216 XD_STATE(xdcsc->reqs[lcv].mode) == XD_SUB_DONE)
2217 continue;
2218 xdcsc->reqs[lcv].ttl--;
2219 if (xdcsc->reqs[lcv].ttl == 0)
2220 reset = 1;
2221 }
2222 if (reset) {
2223 printf("%s: watchdog timeout\n", xdcsc->sc_dev.dv_xname);
2224 xdc_reset(xdcsc, 0, XD_RSET_NONE, XD_ERR_FAIL, NULL);
2225 }
2226 splx(s);
2227
2228 /* until next time */
2229
2230 timeout(xdc_tick, xdcsc, XDC_TICKCNT);
2231 }
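
/*
 * a minimal sketch of the watchdog cycle (arming the first timeout is
 * assumed to happen at attach time):
 *
 *	timeout(xdc_tick, xdcsc, XDC_TICKCNT);	-- arm/re-arm the watchdog
 *
 * every XDC_TICKCNT ticks xdc_tick() decrements iorq->ttl for each
 * request that is neither free nor done; a ttl that reaches zero is
 * treated as a hung controller and forces an xdc_reset().
 */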
2232
2233 /*
2234 * xdc_ioctlcmd: this function provides a user level interface to the
2235 * controller via ioctl. this allows "format" programs to be written
2236 * in user code, and is also useful for some debugging. we return
2237 * an error code. called at user priority.
2238 */
2239 int
2240 xdc_ioctlcmd(xd, dev, xio)
2241 struct xd_softc *xd;
2242 dev_t dev;
2243 struct xd_iocmd *xio;
2244
2245 {
2246 int s, rqno, dummy;
2247 caddr_t dvmabuf = NULL, buf = NULL;
2248 struct xdc_softc *xdcsc;
2249 int rseg, error;
2250 bus_dma_segment_t seg;
2251
2252 /* check sanity of requested command */
2253
2254 switch (xio->cmd) {
2255
2256 case XDCMD_NOP: /* no op: everything should be zero */
2257 if (xio->subfn || xio->dptr || xio->dlen ||
2258 xio->block || xio->sectcnt)
2259 return (EINVAL);
2260 break;
2261
2262 case XDCMD_RD: /* read / write sectors (up to XD_IOCMD_MAXS) */
2263 case XDCMD_WR:
2264 if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS ||
2265 xio->sectcnt * XDFM_BPS != xio->dlen || xio->dptr == NULL)
2266 return (EINVAL);
2267 break;
2268
2269 case XDCMD_SK: /* seek: doesn't seem useful to export this */
2270 return (EINVAL);
2271
2272 case XDCMD_WRP: /* write parameters */
2273 return (EINVAL);/* not useful, except maybe drive
2274 * parameters... but drive parameters should
2275 * go via disklabel changes */
2276
2277 case XDCMD_RDP: /* read parameters */
2278 if (xio->subfn != XDFUN_DRV ||
2279 xio->dlen || xio->block || xio->dptr)
2280 return (EINVAL); /* allow read drive params to
2281 * get hw_spt */
2282 xio->sectcnt = xd->hw_spt; /* we already know the answer */
2283 		return (0);
2285
2286 case XDCMD_XRD: /* extended read/write */
2287 case XDCMD_XWR:
2288
2289 switch (xio->subfn) {
2290
2291 case XDFUN_THD:/* track headers */
2292 if (xio->sectcnt != xd->hw_spt ||
2293 (xio->block % xd->nsect) != 0 ||
2294 xio->dlen != XD_IOCMD_HSZ * xd->hw_spt ||
2295 xio->dptr == NULL)
2296 return (EINVAL);
2297 xio->sectcnt = 0;
2298 break;
2299
2300 case XDFUN_FMT:/* NOTE: also XDFUN_VFY */
2301 if (xio->cmd == XDCMD_XRD)
2302 return (EINVAL); /* no XDFUN_VFY */
2303 if (xio->sectcnt || xio->dlen ||
2304 (xio->block % xd->nsect) != 0 || xio->dptr)
2305 return (EINVAL);
2306 break;
2307
2308 case XDFUN_HDR:/* header, header verify, data, data ECC */
2309 return (EINVAL); /* not yet */
2310
2311 case XDFUN_DM: /* defect map */
2312 case XDFUN_DMX:/* defect map (alternate location) */
2313 if (xio->sectcnt || xio->dlen != XD_IOCMD_DMSZ ||
2314 (xio->block % xd->nsect) != 0 || xio->dptr == NULL)
2315 return (EINVAL);
2316 break;
2317
2318 default:
2319 return (EINVAL);
2320 }
2321 break;
2322
2323 case XDCMD_TST: /* diagnostics */
2324 return (EINVAL);
2325
2326 default:
2327 return (EINVAL);/* ??? */
2328 }
2329
2330 xdcsc = xd->parent;
2331
2332 /* create DVMA buffer for request if needed */
2333 if (xio->dlen) {
2334 error = bus_dmamem_alloc(xdcsc->dmatag, xio->dlen, NBPG, 0,
2335 &seg, 1, &rseg, BUS_DMA_WAITOK);
2336 if (error)
2337 return (error);
2338
2339 dvmabuf = (caddr_t)seg.ds_addr;
2340
2341 error = bus_dmamem_map(xdcsc->dmatag, &seg, rseg, xio->dlen,
2342 &buf,
2343 BUS_DMA_WAITOK|BUS_DMA_COHERENT);
2344 if (error) {
2345 bus_dmamem_free(xdcsc->dmatag, &seg, rseg);
2346 return (error);
2347 }
2348 if (xio->cmd == XDCMD_WR || xio->cmd == XDCMD_XWR) {
2349 if ((error = copyin(xio->dptr, buf, xio->dlen)) != 0) {
2350 bus_dmamem_unmap(xdcsc->dmatag, buf, xio->dlen);
2351 bus_dmamem_free(xdcsc->dmatag, &seg, rseg);
2352 return (error);
2353 }
2354 }
2355 }
2356
2357 /* do it! */
2358
2359 error = 0;
2360 s = splbio();
2361 rqno = xdc_cmd(xdcsc, xio->cmd, xio->subfn, xd->xd_drive, xio->block,
2362 xio->sectcnt, dvmabuf, XD_SUB_WAIT);
2363 if (rqno == XD_ERR_FAIL) {
2364 error = EIO;
2365 goto done;
2366 }
2367 xio->errno = xdcsc->reqs[rqno].errno;
2368 xio->tries = xdcsc->reqs[rqno].tries;
2369 XDC_DONE(xdcsc, rqno, dummy);
2370
2371 if (xio->cmd == XDCMD_RD || xio->cmd == XDCMD_XRD)
2372 error = copyout(buf, xio->dptr, xio->dlen);
2373
2374 done:
2375 splx(s);
2376 if (dvmabuf) {
2377 bus_dmamem_unmap(xdcsc->dmatag, buf, xio->dlen);
2378 bus_dmamem_free(xdcsc->dmatag, &seg, rseg);
2379 }
2380 return (error);
2381 }
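
/*
 * a sketch of how a user-level "format" or debug program might drive
 * the ioctl interface above.  the device path is hypothetical, and the
 * DIOSXDCMD pass-through ioctl is assumed to be the one declared in
 * <dev/vme/xio.h>.  this reads one sector (sector zero) from the raw
 * partition:
 *
 *	struct xd_iocmd xio;
 *	char secbuf[XDFM_BPS];
 *	int fd = open("/dev/rxd0c", O_RDWR);
 *
 *	bzero(&xio, sizeof(xio));
 *	xio.cmd = XDCMD_RD;			-- read ...
 *	xio.block = 0;				-- ... starting at sector 0
 *	xio.sectcnt = 1;
 *	xio.dlen = xio.sectcnt * XDFM_BPS;	-- must match sectcnt exactly
 *	xio.dptr = secbuf;			-- copied out on completion
 *	if (ioctl(fd, DIOSXDCMD, &xio) < 0)
 *		err(1, "DIOSXDCMD");
 *	printf("errno=%d tries=%d\n", xio.errno, xio.tries);
 */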
2382
2383 /*
2384 * xdc_e2str: convert error code number into an error string
2385 */
2386 char *
2387 xdc_e2str(no)
2388 int no;
2389 {
2390 switch (no) {
2391 case XD_ERR_FAIL:
2392 return ("Software fatal error");
2393 case XD_ERR_AOK:
2394 return ("Successful completion");
2395 case XD_ERR_ICYL:
2396 return ("Illegal cylinder address");
2397 case XD_ERR_IHD:
2398 return ("Illegal head address");
2399 case XD_ERR_ISEC:
2400 		return ("Illegal sector address");
2401 case XD_ERR_CZER:
2402 return ("Count zero");
2403 case XD_ERR_UIMP:
2404 return ("Unimplemented command");
2405 case XD_ERR_IF1:
2406 return ("Illegal field length 1");
2407 case XD_ERR_IF2:
2408 return ("Illegal field length 2");
2409 case XD_ERR_IF3:
2410 return ("Illegal field length 3");
2411 case XD_ERR_IF4:
2412 return ("Illegal field length 4");
2413 case XD_ERR_IF5:
2414 return ("Illegal field length 5");
2415 case XD_ERR_IF6:
2416 return ("Illegal field length 6");
2417 case XD_ERR_IF7:
2418 return ("Illegal field length 7");
2419 case XD_ERR_ISG:
2420 return ("Illegal scatter/gather length");
2421 case XD_ERR_ISPT:
2422 return ("Not enough sectors per track");
2423 case XD_ERR_ALGN:
2424 return ("Next IOPB address alignment error");
2425 case XD_ERR_SGAL:
2426 return ("Scatter/gather address alignment error");
2427 case XD_ERR_SGEC:
2428 return ("Scatter/gather with auto-ECC");
2429 case XD_ERR_SECC:
2430 return ("Soft ECC corrected");
2431 case XD_ERR_SIGN:
2432 return ("ECC ignored");
2433 case XD_ERR_ASEK:
2434 return ("Auto-seek retry recovered");
2435 case XD_ERR_RTRY:
2436 return ("Soft retry recovered");
2437 case XD_ERR_HECC:
2438 return ("Hard data ECC");
2439 case XD_ERR_NHDR:
2440 return ("Header not found");
2441 case XD_ERR_NRDY:
2442 return ("Drive not ready");
2443 case XD_ERR_TOUT:
2444 return ("Operation timeout");
2445 case XD_ERR_VTIM:
2446 return ("VMEDMA timeout");
2447 case XD_ERR_DSEQ:
2448 return ("Disk sequencer error");
2449 case XD_ERR_HDEC:
2450 return ("Header ECC error");
2451 case XD_ERR_RVFY:
2452 return ("Read verify");
2453 case XD_ERR_VFER:
2454 		return ("Fatal VMEDMA error");
2455 case XD_ERR_VBUS:
2456 return ("VMEbus error");
2457 case XD_ERR_DFLT:
2458 return ("Drive faulted");
2459 case XD_ERR_HECY:
2460 		return ("Header error/cylinder");
2461 case XD_ERR_HEHD:
2462 return ("Header error/head");
2463 case XD_ERR_NOCY:
2464 return ("Drive not on-cylinder");
2465 case XD_ERR_SEEK:
2466 return ("Seek error");
2467 case XD_ERR_ILSS:
2468 return ("Illegal sector size");
2469 case XD_ERR_SEC:
2470 return ("Soft ECC");
2471 case XD_ERR_WPER:
2472 return ("Write-protect error");
2473 case XD_ERR_IRAM:
2474 return ("IRAM self test failure");
2475 case XD_ERR_MT3:
2476 return ("Maintenance test 3 failure (DSKCEL RAM)");
2477 case XD_ERR_MT4:
2478 return ("Maintenance test 4 failure (header shift reg)");
2479 case XD_ERR_MT5:
2480 return ("Maintenance test 5 failure (VMEDMA regs)");
2481 case XD_ERR_MT6:
2482 return ("Maintenance test 6 failure (REGCEL chip)");
2483 case XD_ERR_MT7:
2484 return ("Maintenance test 7 failure (buffer parity)");
2485 case XD_ERR_MT8:
2486 return ("Maintenance test 8 failure (disk FIFO)");
2487 case XD_ERR_IOCK:
2488 return ("IOPB checksum miscompare");
2489 case XD_ERR_IODM:
2490 return ("IOPB DMA fatal");
2491 case XD_ERR_IOAL:
2492 return ("IOPB address alignment error");
2493 case XD_ERR_FIRM:
2494 return ("Firmware error");
2495 case XD_ERR_MMOD:
2496 return ("Illegal maintenance mode test number");
2497 case XD_ERR_ACFL:
2498 return ("ACFAIL asserted");
2499 default:
2500 return ("Unknown error");
2501 }
2502 }
2503