      1 /*	$NetBSD: xd.c,v 1.93 2014/07/25 08:10:39 dholland Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1995 Charles D. Cranor
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  *
     16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     26  */
     27 
     28 /*
     29  *
     30  * x d . c   x y l o g i c s   7 5 3 / 7 0 5 3   v m e / s m d   d r i v e r
     31  *
     32  * author: Chuck Cranor <chuck@netbsd>
     33  * started: 27-Feb-95
     34  * references: [1] Xylogics Model 753 User's Manual
     35  *                 part number: 166-753-001, Revision B, May 21, 1988.
     36  *                 "Your Partner For Performance"
     37  *             [2] other NetBSD disk device drivers
     38  *
     39  * Special thanks go to Scott E. Campbell of Xylogics, Inc. for taking
     40  * the time to answer some of my questions about the 753/7053.
     41  *
     42  * note: the 753 and the 7053 are programmed the same way, but are
     43  * different sizes.   the 753 is a 6U VME card, while the 7053 is a 9U
     44  * VME card (found in many VME based suns).
     45  */
     46 
     47 #include <sys/cdefs.h>
     48 __KERNEL_RCSID(0, "$NetBSD: xd.c,v 1.93 2014/07/25 08:10:39 dholland Exp $");
     49 
     50 #undef XDC_DEBUG		/* full debug */
     51 #define XDC_DIAG		/* extra sanity checks */
     52 #if defined(DIAGNOSTIC) && !defined(XDC_DIAG)
     53 #define XDC_DIAG		/* link in with master DIAG option */
     54 #endif
     55 
     56 #include <sys/param.h>
     57 #include <sys/proc.h>
     58 #include <sys/systm.h>
     59 #include <sys/kernel.h>
     60 #include <sys/file.h>
     61 #include <sys/stat.h>
     62 #include <sys/ioctl.h>
     63 #include <sys/buf.h>
     64 #include <sys/bufq.h>
     65 #include <sys/uio.h>
     66 #include <sys/malloc.h>
     67 #include <sys/device.h>
     68 #include <sys/disklabel.h>
     69 #include <sys/disk.h>
     70 #include <sys/syslog.h>
     71 #include <sys/dkbad.h>
     72 #include <sys/conf.h>
     73 #include <sys/kauth.h>
     74 
     75 #include <sys/bus.h>
     76 #include <sys/intr.h>
     77 
     78 #if defined(__sparc__) || defined(sun3)
     79 #include <dev/sun/disklabel.h>
     80 #endif
     81 
     82 #include <dev/vme/vmereg.h>
     83 #include <dev/vme/vmevar.h>
     84 
     85 #include <dev/vme/xdreg.h>
     86 #include <dev/vme/xdvar.h>
     87 #include <dev/vme/xio.h>
     88 
     89 #include "locators.h"
     90 
     91 /*
     92  * macros
     93  */
     94 
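         /*
          * note on the wait queue: "waitq" is a circular buffer of iorq
          * numbers, XDC_MAXIOPB entries long.  "waithead" indexes the first
          * waiting request, "waitend" the slot after the last one, and
          * "nwait" counts how many are queued.  the three macros below
          * maintain that invariant.
          */
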
     95 /*
     96  * XDC_TWAIT: add iorq "N" to tail of SC's wait queue
     97  */
     98 #define XDC_TWAIT(SC, N) { \
     99 	(SC)->waitq[(SC)->waitend] = (N); \
    100 	(SC)->waitend = ((SC)->waitend + 1) % XDC_MAXIOPB; \
    101 	(SC)->nwait++; \
    102 }
    103 
    104 /*
    105  * XDC_HWAIT: add iorq "N" to head of SC's wait queue
    106  */
    107 #define XDC_HWAIT(SC, N) { \
    108 	(SC)->waithead = ((SC)->waithead == 0) ? \
    109 		(XDC_MAXIOPB - 1) : ((SC)->waithead - 1); \
    110 	(SC)->waitq[(SC)->waithead] = (N); \
    111 	(SC)->nwait++; \
    112 }
    113 
    114 /*
    115  * XDC_GET_WAITER: gets the first request waiting on the waitq
    116  * and removes it (so it can be submitted)
    117  */
    118 #define XDC_GET_WAITER(XDCSC, RQ) { \
    119 	(RQ) = (XDCSC)->waitq[(XDCSC)->waithead]; \
    120 	(XDCSC)->waithead = ((XDCSC)->waithead + 1) % XDC_MAXIOPB; \
     121 	(XDCSC)->nwait--; \
    122 }
    123 
    124 /*
    125  * XDC_FREE: add iorq "N" to SC's free list
    126  */
    127 #define XDC_FREE(SC, N) { \
    128 	(SC)->freereq[(SC)->nfree++] = (N); \
    129 	(SC)->reqs[N].mode = 0; \
    130 	if ((SC)->nfree == 1) wakeup(&(SC)->nfree); \
    131 }
    132 
    133 
    134 /*
    135  * XDC_RQALLOC: allocate an iorq off the free list (assume nfree > 0).
    136  */
    137 #define XDC_RQALLOC(XDCSC) (XDCSC)->freereq[--((XDCSC)->nfree)]
    138 
    139 /*
    140  * XDC_GO: start iopb ADDR (DVMA addr in a u_long) on XDC
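          *   (the DVMA address is handed to the board a byte at a time,
          *   least significant byte first, followed by the VME address
          *   modifier; writing XDC_ADDIOPB to the csr then tells the
          *   controller to fetch and run the iopb)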
    141  */
    142 #define XDC_GO(XDC, ADDR) { \
    143 	(XDC)->xdc_iopbaddr0 = ((ADDR) & 0xff); \
    144 	(ADDR) = ((ADDR) >> 8); \
    145 	(XDC)->xdc_iopbaddr1 = ((ADDR) & 0xff); \
    146 	(ADDR) = ((ADDR) >> 8); \
    147 	(XDC)->xdc_iopbaddr2 = ((ADDR) & 0xff); \
    148 	(ADDR) = ((ADDR) >> 8); \
    149 	(XDC)->xdc_iopbaddr3 = (ADDR); \
    150 	(XDC)->xdc_iopbamod = XDC_ADDRMOD; \
    151 	(XDC)->xdc_csr = XDC_ADDIOPB; /* go! */ \
    152 }
    153 
    154 /*
    155  * XDC_WAIT: wait for XDC's csr "BITS" to come on in "TIME".
    156  *   LCV is a counter.  If it goes to zero then we timed out.
    157  */
    158 #define XDC_WAIT(XDC, LCV, TIME, BITS) { \
    159 	(LCV) = (TIME); \
    160 	while ((LCV) > 0) { \
    161 		if ((XDC)->xdc_csr & (BITS)) break; \
    162 		(LCV) = (LCV) - 1; \
    163 		DELAY(1); \
    164 	} \
    165 }
    166 
    167 /*
    168  * XDC_DONE: don't need IORQ, get error code and free (done after xdc_cmd)
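          *   (if RQ is XD_ERR_FAIL the command never got an iorq, so that
          *   code is passed through unchanged; otherwise grab the error from
          *   the iorq, free it, and wake anyone waiting for "ndone" to drop
          *   back below XDC_SUBWAITLIM)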
    169  */
    170 #define XDC_DONE(SC,RQ,ER) { \
    171 	if ((RQ) == XD_ERR_FAIL) { \
    172 		(ER) = (RQ); \
    173 	} else { \
    174 		if ((SC)->ndone-- == XDC_SUBWAITLIM) \
     175 		wakeup(&(SC)->ndone); \
    176 		(ER) = (SC)->reqs[RQ].errnum; \
    177 		XDC_FREE((SC), (RQ)); \
    178 	} \
    179 }
    180 
    181 /*
    182  * XDC_ADVANCE: advance iorq's pointers by a number of sectors
    183  */
    184 #define XDC_ADVANCE(IORQ, N) { \
    185 	if (N) { \
    186 		(IORQ)->sectcnt -= (N); \
    187 		(IORQ)->blockno += (N); \
    188 		(IORQ)->dbuf += ((N)*XDFM_BPS); \
    189 	} \
    190 }
    191 
    192 /*
    193  * note - addresses you can sleep on:
    194  *   [1] & of xd_softc's "state" (waiting for a chance to attach a drive)
    195  *   [2] & of xdc_softc's "nfree" (waiting for a free iorq/iopb)
    196  *   [3] & of xdc_softc's "ndone" (waiting for number of done iorq/iopb's
    197  *                                 to drop below XDC_SUBWAITLIM)
    198  *   [4] & an iorq (waiting for an XD_SUB_WAIT iorq to finish)
    199  */
    200 
    201 
    202 /*
    203  * function prototypes
    204  * "xdc_*" functions are internal, all others are external interfaces
    205  */
    206 
    207 extern int pil_to_vme[];	/* from obio.c */
    208 
    209 /* internals */
    210 int	xdc_cmd(struct xdc_softc *, int, int, int, int, int, char *, int);
    211 const char *xdc_e2str(int);
    212 int	xdc_error(struct xdc_softc *, struct xd_iorq *,
    213 		   struct xd_iopb *, int, int);
    214 int	xdc_ioctlcmd(struct xd_softc *, dev_t dev, struct xd_iocmd *);
    215 void	xdc_perror(struct xd_iorq *, struct xd_iopb *, int);
    216 int	xdc_piodriver(struct xdc_softc *, int, int);
    217 int	xdc_remove_iorq(struct xdc_softc *);
    218 int	xdc_reset(struct xdc_softc *, int, int, int, struct xd_softc *);
    219 inline void xdc_rqinit(struct xd_iorq *, struct xdc_softc *,
    220 			struct xd_softc *, int, u_long, int,
    221 			void *, struct buf *);
    222 void	xdc_rqtopb(struct xd_iorq *, struct xd_iopb *, int, int);
    223 void	xdc_start(struct xdc_softc *, int);
    224 int	xdc_startbuf(struct xdc_softc *, struct xd_softc *, struct buf *);
    225 int	xdc_submit_iorq(struct xdc_softc *, int, int);
    226 void	xdc_tick(void *);
    227 void	xdc_xdreset(struct xdc_softc *, struct xd_softc *);
    228 int	xd_dmamem_alloc(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
    229 			int *, bus_size_t, void **, bus_addr_t *);
    230 void	xd_dmamem_free(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
    231 			int, bus_size_t, void *);
    232 
    233 
    234 /* machine interrupt hook */
    235 int	xdcintr(void *);
    236 
    237 /* autoconf */
    238 int	xdcmatch(device_t, cfdata_t, void *);
    239 void	xdcattach(device_t, device_t, void *);
    240 int	xdmatch(device_t, cfdata_t, void *);
    241 void	xdattach(device_t, device_t, void *);
    242 static	int xdc_probe(void *, bus_space_tag_t, bus_space_handle_t);
    243 
    244 static	void xddummystrat(struct buf *);
    245 int	xdgetdisklabel(struct xd_softc *, void *);
    246 
    247 /* XXX - think about this more.. xd_machdep? */
    248 void xdc_md_setup(void);
    249 int	XDC_DELAY;
    250 
    251 #if defined(__sparc__)
    252 #include <sparc/sparc/vaddrs.h>
    253 #include <sparc/sparc/cpuvar.h>
    254 void xdc_md_setup(void)
    255 {
    256 	if (CPU_ISSUN4 && cpuinfo.cpu_type == CPUTYP_4_300)
    257 		XDC_DELAY = XDC_DELAY_4_300;
    258 	else
    259 		XDC_DELAY = XDC_DELAY_SPARC;
    260 }
    261 #elif defined(sun3)
    262 void xdc_md_setup(void)
    263 {
    264 	XDC_DELAY = XDC_DELAY_SUN3;
    265 }
    266 #else
    267 void xdc_md_setup(void)
    268 {
    269 	XDC_DELAY = 0;
    270 }
    271 #endif
    272 
    273 /*
    274  * cfattach's: device driver interface to autoconfig
    275  */
    276 
    277 CFATTACH_DECL_NEW(xdc, sizeof(struct xdc_softc),
    278     xdcmatch, xdcattach, NULL, NULL);
    279 
    280 CFATTACH_DECL_NEW(xd, sizeof(struct xd_softc),
    281     xdmatch, xdattach, NULL, NULL);
    282 
    283 extern struct cfdriver xd_cd;
    284 
    285 dev_type_open(xdopen);
    286 dev_type_close(xdclose);
    287 dev_type_read(xdread);
    288 dev_type_write(xdwrite);
    289 dev_type_ioctl(xdioctl);
    290 dev_type_strategy(xdstrategy);
    291 dev_type_dump(xddump);
    292 dev_type_size(xdsize);
    293 
    294 const struct bdevsw xd_bdevsw = {
    295 	.d_open = xdopen,
    296 	.d_close = xdclose,
    297 	.d_strategy = xdstrategy,
    298 	.d_ioctl = xdioctl,
    299 	.d_dump = xddump,
    300 	.d_psize = xdsize,
    301 	.d_discard = nodiscard,
    302 	.d_flag = D_DISK
    303 };
    304 
    305 const struct cdevsw xd_cdevsw = {
    306 	.d_open = xdopen,
    307 	.d_close = xdclose,
    308 	.d_read = xdread,
    309 	.d_write = xdwrite,
    310 	.d_ioctl = xdioctl,
    311 	.d_stop = nostop,
    312 	.d_tty = notty,
    313 	.d_poll = nopoll,
    314 	.d_mmap = nommap,
    315 	.d_kqfilter = nokqfilter,
    316 	.d_discard = nodiscard,
    317 	.d_flag = D_DISK
    318 };
    319 
    320 struct xdc_attach_args {	/* this is the "aux" args to xdattach */
    321 	int	driveno;	/* unit number */
    322 	int	fullmode;	/* submit mode */
    323 	int	booting;	/* are we booting or not? */
    324 };
    325 
    326 /*
    327  * dkdriver
    328  */
    329 
    330 struct dkdriver xddkdriver = {xdstrategy};
    331 
    332 /*
    333  * start: disk label fix code (XXX)
    334  */
    335 
    336 static void *xd_labeldata;
    337 
    338 static void
    339 xddummystrat(struct buf *bp)
    340 {
    341 	if (bp->b_bcount != XDFM_BPS)
    342 		panic("xddummystrat");
    343 	memcpy(bp->b_data, xd_labeldata, XDFM_BPS);
    344 	bp->b_oflags |= BO_DONE;
    345 	bp->b_cflags &= ~BC_BUSY;
    346 }
    347 
    348 int
    349 xdgetdisklabel(struct xd_softc *xd, void *b)
    350 {
    351 	const char *err;
    352 #if defined(__sparc__) || defined(sun3)
    353 	struct sun_disklabel *sdl;
    354 #endif
    355 
    356 	/* We already have the label data in `b'; setup for dummy strategy */
    357 	xd_labeldata = b;
    358 
    359 	/* Required parameter for readdisklabel() */
    360 	xd->sc_dk.dk_label->d_secsize = XDFM_BPS;
    361 
    362 	err = readdisklabel(MAKEDISKDEV(0, device_unit(xd->sc_dev), RAW_PART),
    363 			    xddummystrat,
    364 			    xd->sc_dk.dk_label, xd->sc_dk.dk_cpulabel);
    365 	if (err) {
    366 		aprint_error_dev(xd->sc_dev, "%s\n", err);
    367 		return(XD_ERR_FAIL);
    368 	}
    369 
    370 #if defined(__sparc__) || defined(sun3)
    371 	/* Ok, we have the label; fill in `pcyl' if there's SunOS magic */
    372 	sdl = (struct sun_disklabel *)xd->sc_dk.dk_cpulabel->cd_block;
    373 	if (sdl->sl_magic == SUN_DKMAGIC) {
    374 		xd->pcyl = sdl->sl_pcylinders;
    375 	} else
    376 #endif
    377 	{
    378 		printf("%s: WARNING: no `pcyl' in disk label.\n",
    379 			device_xname(xd->sc_dev));
    380 		xd->pcyl = xd->sc_dk.dk_label->d_ncylinders +
    381 			xd->sc_dk.dk_label->d_acylinders;
    382 		printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n",
    383 			device_xname(xd->sc_dev), xd->pcyl);
    384 	}
    385 
    386 	xd->ncyl = xd->sc_dk.dk_label->d_ncylinders;
    387 	xd->acyl = xd->sc_dk.dk_label->d_acylinders;
    388 	xd->nhead = xd->sc_dk.dk_label->d_ntracks;
    389 	xd->nsect = xd->sc_dk.dk_label->d_nsectors;
    390 	xd->sectpercyl = xd->nhead * xd->nsect;
    391 	xd->sc_dk.dk_label->d_secsize = XDFM_BPS; /* not handled by
    392 						  * sun->bsd */
    393 	return(XD_ERR_AOK);
    394 }
    395 
    396 /*
    397  * end: disk label fix code (XXX)
    398  */
    399 
    400 /*
    401  * Shorthand for allocating, mapping and loading a DMA buffer
    402  */
    403 int
    404 xd_dmamem_alloc(bus_dma_tag_t tag, bus_dmamap_t map, bus_dma_segment_t *seg, int *nsegp, bus_size_t len, void * *kvap, bus_addr_t *dmap)
    405 {
    406 	int nseg;
    407 	int error;
    408 
    409 	if ((error = bus_dmamem_alloc(tag, len, 0, 0,
    410 				      seg, 1, &nseg, BUS_DMA_NOWAIT)) != 0) {
    411 		return (error);
    412 	}
    413 
    414 	if ((error = bus_dmamem_map(tag, seg, nseg,
    415 				    len, kvap,
    416 				    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
    417 		bus_dmamem_free(tag, seg, nseg);
    418 		return (error);
    419 	}
    420 
    421 	if ((error = bus_dmamap_load(tag, map,
    422 				     *kvap, len, NULL,
    423 				     BUS_DMA_NOWAIT)) != 0) {
    424 		bus_dmamem_unmap(tag, *kvap, len);
    425 		bus_dmamem_free(tag, seg, nseg);
    426 		return (error);
    427 	}
    428 
    429 	*dmap = map->dm_segs[0].ds_addr;
    430 	*nsegp = nseg;
    431 	return (0);
    432 }
    433 
    434 void
    435 xd_dmamem_free(bus_dma_tag_t tag, bus_dmamap_t map, bus_dma_segment_t *seg, int nseg, bus_size_t len, void * kva)
    436 {
    437 
    438 	bus_dmamap_unload(tag, map);
    439 	bus_dmamem_unmap(tag, kva, len);
    440 	bus_dmamem_free(tag, seg, nseg);
    441 }
    442 
    443 
    444 /*
    445  * a u t o c o n f i g   f u n c t i o n s
    446  */
    447 
    448 /*
    449  * xdcmatch: determine if xdc is present or not.   we do a
    450  * soft reset to detect the xdc.
    451  */
    452 
    453 int
    454 xdc_probe(void *arg, bus_space_tag_t tag, bus_space_handle_t handle)
    455 {
    456 	struct xdc *xdc = (void *)handle; /* XXX */
    457 	int del = 0;
    458 
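         	/*
         	 * poke the soft-reset bit, then poll the csr for up to
         	 * XDC_RESETUSEC microseconds.  a controller that is present is
         	 * expected to show the reset bit before the counter runs out;
         	 * an empty slot never will, so we return EIO.
         	 */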
    459 	xdc->xdc_csr = XDC_RESET;
    460 	XDC_WAIT(xdc, del, XDC_RESETUSEC, XDC_RESET);
    461 	return (del > 0 ? 0 : EIO);
    462 }
    463 
    464 int
    465 xdcmatch(device_t parent, cfdata_t cf, void *aux)
    466 {
    467 	struct vme_attach_args	*va = aux;
    468 	vme_chipset_tag_t	ct = va->va_vct;
    469 	vme_am_t		mod;
    470 	int error;
    471 
    472 	mod = VME_AM_A16 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;
    473 	if (vme_space_alloc(ct, va->r[0].offset, sizeof(struct xdc), mod))
    474 		return (0);
    475 
    476 	error = vme_probe(ct, va->r[0].offset, sizeof(struct xdc),
    477 			  mod, VME_D32, xdc_probe, 0);
    478 	vme_space_free(va->va_vct, va->r[0].offset, sizeof(struct xdc), mod);
    479 
    480 	return (error == 0);
    481 }
    482 
    483 /*
    484  * xdcattach: attach controller
    485  */
    486 void
    487 xdcattach(device_t parent, device_t self, void *aux)
    488 {
    489 	struct vme_attach_args	*va = aux;
    490 	vme_chipset_tag_t	ct = va->va_vct;
    491 	bus_space_tag_t		bt;
    492 	bus_space_handle_t	bh;
    493 	vme_intr_handle_t	ih;
    494 	vme_am_t		mod;
    495 	struct xdc_softc	*xdc = device_private(self);
    496 	struct xdc_attach_args	xa;
    497 	int			lcv, rqno, error;
    498 	struct xd_iopb_ctrl	*ctl;
    499 	bus_dma_segment_t	seg;
    500 	int			rseg;
    501 	vme_mapresc_t resc;
    502 	bus_addr_t		busaddr;
    503 
    504 	xdc->sc_dev = self;
    505 	xdc_md_setup();
    506 
    507 	/* get addressing and intr level stuff from autoconfig and load it
    508 	 * into our xdc_softc. */
    509 
    510 	xdc->dmatag = va->va_bdt;
    511 	mod = VME_AM_A16 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;
    512 
    513 	if (vme_space_alloc(ct, va->r[0].offset, sizeof(struct xdc), mod))
    514 		panic("xdc: vme alloc");
    515 
    516 	if (vme_space_map(ct, va->r[0].offset, sizeof(struct xdc),
    517 			  mod, VME_D32, 0, &bt, &bh, &resc) != 0)
    518 		panic("xdc: vme_map");
    519 
    520 	xdc->xdc = (struct xdc *) bh; /* XXX */
    521 	xdc->ipl = va->ilevel;
    522 	xdc->vector = va->ivector;
    523 
    524 	for (lcv = 0; lcv < XDC_MAXDEV; lcv++)
    525 		xdc->sc_drives[lcv] = NULL;
    526 
    527 	/*
    528 	 * allocate and zero buffers
    529 	 *
    530 	 * note: we simplify the code by allocating the max number of iopbs and
    531 	 * iorq's up front.   thus, we avoid linked lists and the costs
    532 	 * associated with them in exchange for wasting a little memory.
    533 	 */
    534 
    535 	/* Get DMA handle for misc. transfers */
    536 	if ((error = vme_dmamap_create(
    537 				ct,		/* VME chip tag */
    538 				MAXPHYS,	/* size */
    539 				VME_AM_A24,	/* address modifier */
    540 				VME_D32,	/* data size */
    541 				0,		/* swap */
    542 				1,		/* nsegments */
    543 				MAXPHYS,	/* maxsegsz */
    544 				0,		/* boundary */
    545 				BUS_DMA_NOWAIT,
    546 				&xdc->auxmap)) != 0) {
    547 
    548 		aprint_error_dev(xdc->sc_dev, "DMA buffer map create error %d\n",
    549 			error);
    550 		return;
    551 	}
    552 
    553 
    554 	/* Get DMA handle for mapping iorq descriptors */
    555 	if ((error = vme_dmamap_create(
    556 				ct,		/* VME chip tag */
    557 				XDC_MAXIOPB * sizeof(struct xd_iopb),
    558 				VME_AM_A24,	/* address modifier */
    559 				VME_D32,	/* data size */
    560 				0,		/* swap */
    561 				1,		/* nsegments */
    562 				XDC_MAXIOPB * sizeof(struct xd_iopb),
    563 				0,		/* boundary */
    564 				BUS_DMA_NOWAIT,
    565 				&xdc->iopmap)) != 0) {
    566 
    567 		aprint_error_dev(xdc->sc_dev, "DMA buffer map create error %d\n",
    568 			error);
    569 		return;
    570 	}
    571 
    572 	/* Get DMA buffer for iorq descriptors */
    573 	if ((error = xd_dmamem_alloc(xdc->dmatag, xdc->iopmap, &seg, &rseg,
    574 				     XDC_MAXIOPB * sizeof(struct xd_iopb),
    575 				     (void **)&xdc->iopbase,
    576 				     &busaddr)) != 0) {
    577 		aprint_error_dev(xdc->sc_dev, "DMA buffer alloc error %d\n",
    578 			error);
    579 		return;
    580 	}
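         	/*
         	 * iopbase is the kernel's mapping of the iopb array; dvmaiopb
         	 * is the same memory at the bus (DVMA) address the controller
         	 * will use, which is what XDC_GO eventually gets.
         	 */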
    581 	xdc->dvmaiopb = (struct xd_iopb *)(u_long)BUS_ADDR_PADDR(busaddr);
    582 
    583 	memset(xdc->iopbase, 0, XDC_MAXIOPB * sizeof(struct xd_iopb));
    584 
    585 	xdc->reqs = (struct xd_iorq *)
    586 	    malloc(XDC_MAXIOPB * sizeof(struct xd_iorq),
    587 	    M_DEVBUF, M_NOWAIT|M_ZERO);
    588 	if (xdc->reqs == NULL)
    589 		panic("xdc malloc");
    590 
    591 	/* init free list, iorq to iopb pointers, and non-zero fields in the
    592 	 * iopb which never change. */
    593 
    594 	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
    595 		xdc->reqs[lcv].iopb = &xdc->iopbase[lcv];
    596 		xdc->reqs[lcv].dmaiopb = &xdc->dvmaiopb[lcv];
    597 		xdc->freereq[lcv] = lcv;
    598 		xdc->iopbase[lcv].fixd = 1;	/* always the same */
    599 		xdc->iopbase[lcv].naddrmod = XDC_ADDRMOD; /* always the same */
    600 		xdc->iopbase[lcv].intr_vec = xdc->vector; /* always the same */
    601 
    602 		if ((error = vme_dmamap_create(
    603 				ct,		/* VME chip tag */
    604 				MAXPHYS,	/* size */
    605 				VME_AM_A24,	/* address modifier */
    606 				VME_D32,	/* data size */
    607 				0,		/* swap */
    608 				1,		/* nsegments */
    609 				MAXPHYS,	/* maxsegsz */
    610 				0,		/* boundary */
    611 				BUS_DMA_NOWAIT,
    612 				&xdc->reqs[lcv].dmamap)) != 0) {
    613 
    614 			aprint_error_dev(xdc->sc_dev, "DMA buffer map create error %d\n",
    615 				error);
    616 			return;
    617 		}
    618 	}
    619 	xdc->nfree = XDC_MAXIOPB;
    620 	xdc->nrun = 0;
    621 	xdc->waithead = xdc->waitend = xdc->nwait = 0;
    622 	xdc->ndone = 0;
    623 
    624 	/* init queue of waiting bufs */
    625 
    626 	bufq_alloc(&xdc->sc_wq, "fcfs", 0);
    627 	callout_init(&xdc->sc_tick_ch, 0);
    628 
    629 	/*
    630 	 * section 7 of the manual tells us how to init the controller:
    631 	 * - read controller parameters (6/0)
    632 	 * - write controller parameters (5/0)
    633 	 */
    634 
     635 	/* read controller parameters and ensure we have a 753/7053 */
    636 
    637 	rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL);
    638 	if (rqno == XD_ERR_FAIL) {
    639 		printf(": couldn't read controller params\n");
    640 		return;		/* shouldn't ever happen */
    641 	}
    642 	ctl = (struct xd_iopb_ctrl *) &xdc->iopbase[rqno];
    643 	if (ctl->ctype != XDCT_753) {
    644 		if (xdc->reqs[rqno].errnum)
    645 			printf(": %s: ", xdc_e2str(xdc->reqs[rqno].errnum));
    646 		printf(": doesn't identify as a 753/7053\n");
    647 		XDC_DONE(xdc, rqno, error);
    648 		return;
    649 	}
    650 	printf(": Xylogics 753/7053, PROM=0x%x.%02x.%02x\n",
    651 	    ctl->eprom_partno, ctl->eprom_lvl, ctl->eprom_rev);
    652 	XDC_DONE(xdc, rqno, error);
    653 
    654 	/* now write controller parameters (xdc_cmd sets all params for us) */
    655 
    656 	rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL);
    657 	XDC_DONE(xdc, rqno, error);
    658 	if (error) {
    659 		aprint_error_dev(xdc->sc_dev, "controller config error: %s\n",
    660 			xdc_e2str(error));
    661 		return;
    662 	}
    663 
    664 	/* link in interrupt with higher level software */
    665 	vme_intr_map(ct, va->ilevel, va->ivector, &ih);
    666 	vme_intr_establish(ct, ih, IPL_BIO, xdcintr, xdc);
    667 	evcnt_attach_dynamic(&xdc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
    668 	    device_xname(xdc->sc_dev), "intr");
    669 
    670 
    671 	/* now we must look for disks using autoconfig */
    672 	xa.fullmode = XD_SUB_POLL;
    673 	xa.booting = 1;
    674 
    675 	for (xa.driveno = 0; xa.driveno < XDC_MAXDEV; xa.driveno++)
    676 		(void) config_found(self, (void *) &xa, NULL);
    677 
    678 	/* start the watchdog clock */
    679 	callout_reset(&xdc->sc_tick_ch, XDC_TICKCNT, xdc_tick, xdc);
    680 
    681 }
    682 
    683 /*
    684  * xdmatch: probe for disk.
    685  *
    686  * note: we almost always say disk is present.   this allows us to
    687  * spin up and configure a disk after the system is booted (we can
    688  * call xdattach!).
    689  */
    690 int
    691 xdmatch(device_t parent, cfdata_t cf, void *aux)
    692 {
    693 	struct xdc_attach_args *xa = aux;
    694 
    695 	/* looking for autoconf wildcard or exact match */
    696 
    697 	if (cf->cf_loc[XDCCF_DRIVE] != XDCCF_DRIVE_DEFAULT &&
    698 	    cf->cf_loc[XDCCF_DRIVE] != xa->driveno)
    699 		return 0;
    700 
    701 	return 1;
    702 
    703 }
    704 
    705 /*
    706  * xdattach: attach a disk.   this can be called from autoconf and also
    707  * from xdopen/xdstrategy.
    708  */
    709 void
    710 xdattach(device_t parent, device_t self, void *aux)
    711 {
    712 	struct xd_softc *xd = device_private(self);
    713 	struct xdc_softc *xdc = device_private(parent);
    714 	struct xdc_attach_args *xa = aux;
    715 	int     rqno, spt = 0, mb, blk, lcv, fmode, s = 0, newstate;
    716 	struct xd_iopb_drive *driopb;
    717 	struct dkbad *dkb;
    718 	int			rseg, error;
    719 	bus_dma_segment_t	seg;
    720 	bus_addr_t		busaddr;
    721 	void *			dmaddr;
    722 	char *			buf;
    723 
    724 	xd->sc_dev = self;
    725 
    726 	/*
    727 	 * Always re-initialize the disk structure.  We want statistics
    728 	 * to start with a clean slate.
    729 	 */
    730 	memset(&xd->sc_dk, 0, sizeof(xd->sc_dk));
    731 
    732 	/* if booting, init the xd_softc */
    733 
    734 	if (xa->booting) {
    735 		xd->state = XD_DRIVE_UNKNOWN;	/* to start */
    736 		xd->flags = 0;
    737 		xd->parent = xdc;
    738 	}
    739 	xd->xd_drive = xa->driveno;
    740 	fmode = xa->fullmode;
    741 	xdc->sc_drives[xa->driveno] = xd;
    742 
    743 	/* if not booting, make sure we are the only process in the attach for
    744 	 * this drive.   if locked out, sleep on it. */
    745 
    746 	if (!xa->booting) {
    747 		s = splbio();
    748 		while (xd->state == XD_DRIVE_ATTACHING) {
    749 			if (tsleep(&xd->state, PRIBIO, "xdattach", 0)) {
    750 				splx(s);
    751 				return;
    752 			}
    753 		}
    754 		printf("%s at %s",
    755 			device_xname(xd->sc_dev), device_xname(xd->parent->sc_dev));
    756 	}
    757 
    758 	/* we now have control */
    759 	xd->state = XD_DRIVE_ATTACHING;
    760 	newstate = XD_DRIVE_UNKNOWN;
    761 
    762 	buf = NULL;
    763 	if ((error = xd_dmamem_alloc(xdc->dmatag, xdc->auxmap, &seg, &rseg,
    764 				     XDFM_BPS,
    765 				     (void **)&buf,
    766 				     &busaddr)) != 0) {
    767 		aprint_error_dev(xdc->sc_dev, "DMA buffer alloc error %d\n",
    768 			error);
    769 		return;
    770 	}
    771 	dmaddr = (void *)(u_long)BUS_ADDR_PADDR(busaddr);
    772 
    773 	/* first try and reset the drive */
    774 
    775 	rqno = xdc_cmd(xdc, XDCMD_RST, 0, xd->xd_drive, 0, 0, 0, fmode);
    776 	XDC_DONE(xdc, rqno, error);
    777 	if (error == XD_ERR_NRDY) {
    778 		printf(" drive %d: off-line\n", xa->driveno);
    779 		goto done;
    780 	}
    781 	if (error) {
    782 		printf(": ERROR 0x%02x (%s)\n", error, xdc_e2str(error));
    783 		goto done;
    784 	}
    785 	printf(" drive %d: ready\n", xa->driveno);
    786 
    787 	/* now set format parameters */
    788 
    789 	rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_FMT, xd->xd_drive, 0, 0, 0, fmode);
    790 	XDC_DONE(xdc, rqno, error);
    791 	if (error) {
    792 		aprint_error_dev(xd->sc_dev, "write format parameters failed: %s\n",
    793 			xdc_e2str(error));
    794 		goto done;
    795 	}
    796 
    797 	/* get drive parameters */
    798 	rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode);
    799 	if (rqno != XD_ERR_FAIL) {
    800 		driopb = (struct xd_iopb_drive *) &xdc->iopbase[rqno];
    801 		spt = driopb->sectpertrk;
    802 	}
    803 	XDC_DONE(xdc, rqno, error);
    804 	if (error) {
    805 		aprint_error_dev(xd->sc_dev, "read drive parameters failed: %s\n",
    806 			xdc_e2str(error));
    807 		goto done;
    808 	}
    809 
    810 	/*
    811 	 * now set drive parameters (to semi-bogus values) so we can read the
    812 	 * disk label.
    813 	 */
    814 	xd->pcyl = xd->ncyl = 1;
    815 	xd->acyl = 0;
    816 	xd->nhead = 1;
    817 	xd->nsect = 1;
    818 	xd->sectpercyl = 1;
    819 	for (lcv = 0; lcv < 126; lcv++)	/* init empty bad144 table */
    820 		xd->dkb.bt_bad[lcv].bt_cyl = xd->dkb.bt_bad[lcv].bt_trksec = 0xffff;
    821 	rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode);
    822 	XDC_DONE(xdc, rqno, error);
    823 	if (error) {
    824 		aprint_error_dev(xd->sc_dev, "write drive parameters failed: %s\n",
    825 			xdc_e2str(error));
    826 		goto done;
    827 	}
    828 
    829 	/* read disk label */
    830 	rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, 0, 1, dmaddr, fmode);
    831 	XDC_DONE(xdc, rqno, error);
    832 	if (error) {
    833 		aprint_error_dev(xd->sc_dev, "reading disk label failed: %s\n",
    834 			xdc_e2str(error));
    835 		goto done;
    836 	}
    837 	newstate = XD_DRIVE_NOLABEL;
    838 
    839 	xd->hw_spt = spt;
    840 	/* Attach the disk: must be before getdisklabel to malloc label */
    841 	disk_init(&xd->sc_dk, device_xname(xd->sc_dev), &xddkdriver);
    842 	disk_attach(&xd->sc_dk);
    843 
    844 	if (xdgetdisklabel(xd, buf) != XD_ERR_AOK)
    845 		goto done;
    846 
    847 	/* inform the user of what is up */
    848 	printf("%s: <%s>, pcyl %d, hw_spt %d\n", device_xname(xd->sc_dev),
    849 		buf, xd->pcyl, spt);
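         	/* capacity in MB: data sectors / (1048576 / XDFM_BPS) sectors per MB */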
    850 	mb = xd->ncyl * (xd->nhead * xd->nsect) / (1048576 / XDFM_BPS);
     851 	printf("%s: %dMB, %d cyl, %d head, %d sec, %d bytes/sect\n",
    852 		device_xname(xd->sc_dev), mb, xd->ncyl, xd->nhead, xd->nsect,
    853 		XDFM_BPS);
    854 
    855 	/* now set the real drive parameters! */
    856 
    857 	rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode);
    858 	XDC_DONE(xdc, rqno, error);
    859 	if (error) {
    860 		aprint_error_dev(xd->sc_dev, "write real drive parameters failed: %s\n",
    861 			xdc_e2str(error));
    862 		goto done;
    863 	}
    864 	newstate = XD_DRIVE_ONLINE;
    865 
    866 	/*
    867 	 * read bad144 table. this table resides on the first sector of the
    868 	 * last track of the disk (i.e. second cyl of "acyl" area).
    869 	 */
    870 
    871 	blk = (xd->ncyl + xd->acyl - 1) * (xd->nhead * xd->nsect) + /* last cyl */
    872 	    (xd->nhead - 1) * xd->nsect;	/* last head */
    873 	rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, blk, 1, dmaddr, fmode);
    874 	XDC_DONE(xdc, rqno, error);
    875 	if (error) {
    876 		aprint_error_dev(xd->sc_dev, "reading bad144 failed: %s\n",
    877 			xdc_e2str(error));
    878 		goto done;
    879 	}
    880 
    881 	/* check dkbad for sanity */
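         	/*
         	 * each entry packs the head number in the high byte of
         	 * bt_trksec and the sector number in the low byte; a blank slot
         	 * has bt_trksec == 0xffff and bt_cyl of 0xffff (or 0).
         	 */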
    882 	dkb = (struct dkbad *) buf;
    883 	for (lcv = 0; lcv < 126; lcv++) {
    884 		if ((dkb->bt_bad[lcv].bt_cyl == 0xffff ||
    885 				dkb->bt_bad[lcv].bt_cyl == 0) &&
    886 		     dkb->bt_bad[lcv].bt_trksec == 0xffff)
    887 			continue;	/* blank */
    888 		if (dkb->bt_bad[lcv].bt_cyl >= xd->ncyl)
    889 			break;
    890 		if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xd->nhead)
    891 			break;
    892 		if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xd->nsect)
    893 			break;
    894 	}
    895 	if (lcv != 126) {
    896 		aprint_error_dev(xd->sc_dev, "warning: invalid bad144 sector!\n");
    897 	} else {
    898 		memcpy(&xd->dkb, buf, XDFM_BPS);
    899 	}
    900 
    901 done:
    902 	if (buf != NULL) {
    903 		xd_dmamem_free(xdc->dmatag, xdc->auxmap,
    904 				&seg, rseg, XDFM_BPS, buf);
    905 	}
    906 
    907 	xd->state = newstate;
    908 	if (!xa->booting) {
    909 		wakeup(&xd->state);
    910 		splx(s);
    911 	}
    912 }
    913 
    914 /*
    915  * end of autoconfig functions
    916  */
    917 
    918 /*
    919  * { b , c } d e v s w   f u n c t i o n s
    920  */
    921 
    922 /*
    923  * xdclose: close device
    924  */
    925 int
    926 xdclose(dev_t dev, int flag, int fmt, struct lwp *l)
    927 {
    928 	struct xd_softc *xd = device_lookup_private(&xd_cd, DISKUNIT(dev));
    929 	int     part = DISKPART(dev);
    930 
    931 	/* clear mask bits */
    932 
    933 	switch (fmt) {
    934 	case S_IFCHR:
    935 		xd->sc_dk.dk_copenmask &= ~(1 << part);
    936 		break;
    937 	case S_IFBLK:
    938 		xd->sc_dk.dk_bopenmask &= ~(1 << part);
    939 		break;
    940 	}
    941 	xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
    942 
    943 	return 0;
    944 }
    945 
    946 /*
    947  * xddump: crash dump system
    948  */
    949 int
    950 xddump(dev_t dev, daddr_t blkno, void *va, size_t size)
    951 {
    952 	int     unit, part;
    953 	struct xd_softc *xd;
    954 
    955 	unit = DISKUNIT(dev);
    956 	part = DISKPART(dev);
    957 
    958 	xd = device_lookup_private(&xd_cd, unit);
    959 	if (!xd)
    960 		return ENXIO;
    961 
    962 	printf("%s%c: crash dump not supported (yet)\n", device_xname(xd->sc_dev),
    963 	    'a' + part);
    964 
    965 	return ENXIO;
    966 
    967 	/* outline: globals: "dumplo" == sector number of partition to start
    968 	 * dump at (convert to physical sector with partition table)
    969 	 * "dumpsize" == size of dump in clicks "physmem" == size of physical
    970 	 * memory (clicks, ctob() to get bytes) (normal case: dumpsize ==
    971 	 * physmem)
    972 	 *
    973 	 * dump a copy of physical memory to the dump device starting at sector
    974 	 * "dumplo" in the swap partition (make sure > 0).   map in pages as
    975 	 * we go.   use polled I/O.
    976 	 *
    977 	 * XXX how to handle NON_CONTIG? */
    978 
    979 }
    980 
    981 static enum kauth_device_req
    982 xd_getkauthreq(u_char cmd)
    983 {
    984 	enum kauth_device_req req;
    985 
    986 	switch (cmd) {
    987 	case XDCMD_WR:
    988 	case XDCMD_XWR:
    989 		req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITE;
    990 		break;
    991 
    992 	case XDCMD_RD:
    993 		req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READ;
    994 		break;
    995 
    996 	case XDCMD_RDP:
    997 	case XDCMD_XRD:
    998 		req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READCONF;
    999 		break;
   1000 
   1001 	case XDCMD_WRP:
   1002 	case XDCMD_RST:
   1003 		req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITECONF;
   1004 		break;
   1005 
   1006 	case XDCMD_NOP:
   1007 	case XDCMD_SK:
   1008 	case XDCMD_TST:
   1009 	default:
   1010 		req = 0;
   1011 		break;
   1012 	}
   1013 
   1014 	return (req);
   1015 }
   1016 
   1017 /*
   1018  * xdioctl: ioctls on XD drives.   based on ioctl's of other netbsd disks.
   1019  */
   1020 int
   1021 xdioctl(dev_t dev, u_long command, void *addr, int flag, struct lwp *l)
   1022 
   1023 {
   1024 	struct xd_softc *xd;
   1025 	struct xd_iocmd *xio;
   1026 	int     error, s, unit;
   1027 #ifdef __HAVE_OLD_DISKLABEL
   1028 	struct disklabel newlabel;
   1029 #endif
   1030 	struct disklabel *lp;
   1031 
   1032 	unit = DISKUNIT(dev);
   1033 
   1034 	if ((xd = device_lookup_private(&xd_cd, unit)) == NULL)
   1035 		return (ENXIO);
   1036 
   1037 	/* switch on ioctl type */
   1038 
   1039 	switch (command) {
   1040 	case DIOCSBAD:		/* set bad144 info */
   1041 		if ((flag & FWRITE) == 0)
   1042 			return EBADF;
   1043 		s = splbio();
   1044 		memcpy(&xd->dkb, addr, sizeof(xd->dkb));
   1045 		splx(s);
   1046 		return 0;
   1047 
   1048 	case DIOCGDINFO:	/* get disk label */
   1049 		memcpy(addr, xd->sc_dk.dk_label, sizeof(struct disklabel));
   1050 		return 0;
   1051 #ifdef __HAVE_OLD_DISKLABEL
   1052 	case ODIOCGDINFO:
   1053 		newlabel = *(xd->sc_dk.dk_label);
   1054 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1055 			return ENOTTY;
   1056 		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
   1057 		return 0;
   1058 #endif
   1059 
   1060 	case DIOCGPART:	/* get partition info */
   1061 		((struct partinfo *) addr)->disklab = xd->sc_dk.dk_label;
   1062 		((struct partinfo *) addr)->part =
   1063 		    &xd->sc_dk.dk_label->d_partitions[DISKPART(dev)];
   1064 		return 0;
   1065 
   1066 	case DIOCSDINFO:	/* set disk label */
   1067 #ifdef __HAVE_OLD_DISKLABEL
   1068 	case ODIOCSDINFO:
   1069 		if (command == ODIOCSDINFO) {
   1070 			memset(&newlabel, 0, sizeof newlabel);
   1071 			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
   1072 			lp = &newlabel;
   1073 		} else
   1074 #endif
   1075 		lp = (struct disklabel *)addr;
   1076 
   1077 		if ((flag & FWRITE) == 0)
   1078 			return EBADF;
   1079 		error = setdisklabel(xd->sc_dk.dk_label,
   1080 		    lp, /* xd->sc_dk.dk_openmask : */ 0,
   1081 		    xd->sc_dk.dk_cpulabel);
   1082 		if (error == 0) {
   1083 			if (xd->state == XD_DRIVE_NOLABEL)
   1084 				xd->state = XD_DRIVE_ONLINE;
   1085 		}
   1086 		return error;
   1087 
   1088 	case DIOCWLABEL:	/* change write status of disk label */
   1089 		if ((flag & FWRITE) == 0)
   1090 			return EBADF;
   1091 		if (*(int *) addr)
   1092 			xd->flags |= XD_WLABEL;
   1093 		else
   1094 			xd->flags &= ~XD_WLABEL;
   1095 		return 0;
   1096 
   1097 	case DIOCWDINFO:	/* write disk label */
   1098 #ifdef __HAVE_OLD_DISKLABEL
   1099 	case ODIOCWDINFO:
   1100 		if (command == ODIOCWDINFO) {
   1101 			memset(&newlabel, 0, sizeof newlabel);
   1102 			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
   1103 			lp = &newlabel;
   1104 		} else
   1105 #endif
   1106 		lp = (struct disklabel *)addr;
   1107 
   1108 		if ((flag & FWRITE) == 0)
   1109 			return EBADF;
   1110 		error = setdisklabel(xd->sc_dk.dk_label,
   1111 		    lp, /* xd->sc_dk.dk_openmask : */ 0,
   1112 		    xd->sc_dk.dk_cpulabel);
   1113 		if (error == 0) {
   1114 			if (xd->state == XD_DRIVE_NOLABEL)
   1115 				xd->state = XD_DRIVE_ONLINE;
   1116 
   1117 			/* Simulate opening partition 0 so write succeeds. */
   1118 			xd->sc_dk.dk_openmask |= (1 << 0);
   1119 			error = writedisklabel(MAKEDISKDEV(major(dev),
   1120 			    DISKUNIT(dev), RAW_PART),
   1121 			    xdstrategy, xd->sc_dk.dk_label,
   1122 			    xd->sc_dk.dk_cpulabel);
   1123 			xd->sc_dk.dk_openmask =
   1124 			    xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
   1125 		}
   1126 		return error;
   1127 
   1128 	case DIOSXDCMD: {
   1129 		enum kauth_device_req req;
   1130 
   1131 		xio = (struct xd_iocmd *) addr;
   1132 		req = xd_getkauthreq(xio->cmd);
   1133 		if ((error = kauth_authorize_device_passthru(l->l_cred,
   1134 		    dev, req, xio)) != 0)
   1135 			return (error);
   1136 		return (xdc_ioctlcmd(xd, dev, xio));
   1137 		}
   1138 
   1139 	default:
   1140 		return ENOTTY;
   1141 	}
   1142 }
   1143 /*
   1144  * xdopen: open drive
   1145  */
   1146 
   1147 int
   1148 xdopen(dev_t dev, int flag, int fmt, struct lwp *l)
   1149 {
   1150 	int     unit, part;
   1151 	struct xd_softc *xd;
   1152 	struct xdc_attach_args xa;
   1153 
   1154 	/* first, could it be a valid target? */
   1155 
   1156 	unit = DISKUNIT(dev);
   1157 	if ((xd = device_lookup_private(&xd_cd, unit)) == NULL)
   1158 		return (ENXIO);
   1159 	part = DISKPART(dev);
   1160 
   1161 	/* do we need to attach the drive? */
   1162 
   1163 	if (xd->state == XD_DRIVE_UNKNOWN) {
   1164 		xa.driveno = xd->xd_drive;
   1165 		xa.fullmode = XD_SUB_WAIT;
   1166 		xa.booting = 0;
   1167 		xdattach(xd->parent->sc_dev, xd->sc_dev, &xa);
   1168 		if (xd->state == XD_DRIVE_UNKNOWN) {
   1169 			return (EIO);
   1170 		}
   1171 	}
   1172 	/* check for partition */
   1173 
   1174 	if (part != RAW_PART &&
   1175 	    (part >= xd->sc_dk.dk_label->d_npartitions ||
   1176 		xd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
   1177 		return (ENXIO);
   1178 	}
   1179 	/* set open masks */
   1180 
   1181 	switch (fmt) {
   1182 	case S_IFCHR:
   1183 		xd->sc_dk.dk_copenmask |= (1 << part);
   1184 		break;
   1185 	case S_IFBLK:
   1186 		xd->sc_dk.dk_bopenmask |= (1 << part);
   1187 		break;
   1188 	}
   1189 	xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
   1190 
   1191 	return 0;
   1192 }
   1193 
   1194 int
   1195 xdread(dev_t dev, struct uio *uio, int flags)
   1196 {
   1197 
   1198 	return (physio(xdstrategy, NULL, dev, B_READ, minphys, uio));
   1199 }
   1200 
   1201 int
   1202 xdwrite(dev_t dev, struct uio *uio, int flags)
   1203 {
   1204 
   1205 	return (physio(xdstrategy, NULL, dev, B_WRITE, minphys, uio));
   1206 }
   1207 
   1208 
   1209 /*
   1210  * xdsize: return size of a partition for a dump
   1211  */
   1212 
   1213 int
   1214 xdsize(dev_t dev)
   1215 {
   1216 	struct xd_softc *xdsc;
   1217 	int     unit, part, size, omask;
   1218 
   1219 	/* valid unit? */
   1220 	unit = DISKUNIT(dev);
   1221 	if ((xdsc = device_lookup_private(&xd_cd, unit)) == NULL)
   1222 		return (-1);
   1223 
   1224 	part = DISKPART(dev);
   1225 	omask = xdsc->sc_dk.dk_openmask & (1 << part);
   1226 
   1227 	if (omask == 0 && xdopen(dev, 0, S_IFBLK, NULL) != 0)
   1228 		return (-1);
   1229 
   1230 	/* do it */
   1231 	if (xdsc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
   1232 		size = -1;	/* only give valid size for swap partitions */
   1233 	else
   1234 		size = xdsc->sc_dk.dk_label->d_partitions[part].p_size *
   1235 		    (xdsc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
   1236 	if (omask == 0 && xdclose(dev, 0, S_IFBLK, NULL) != 0)
   1237 		return (-1);
   1238 	return (size);
   1239 }
   1240 /*
   1241  * xdstrategy: buffering system interface to xd.
   1242  */
   1243 
   1244 void
   1245 xdstrategy(struct buf *bp)
   1246 {
   1247 	struct xd_softc *xd;
   1248 	struct xdc_softc *parent;
   1249 	int     s, unit;
   1250 	struct xdc_attach_args xa;
   1251 
   1252 	unit = DISKUNIT(bp->b_dev);
   1253 
   1254 	/* check for live device */
   1255 
   1256 	if (!(xd = device_lookup_private(&xd_cd, unit)) ||
   1257 	    bp->b_blkno < 0 ||
   1258 	    (bp->b_bcount % xd->sc_dk.dk_label->d_secsize) != 0) {
   1259 		bp->b_error = EINVAL;
   1260 		goto done;
   1261 	}
   1262 	/* do we need to attach the drive? */
   1263 
   1264 	if (xd->state == XD_DRIVE_UNKNOWN) {
   1265 		xa.driveno = xd->xd_drive;
   1266 		xa.fullmode = XD_SUB_WAIT;
   1267 		xa.booting = 0;
   1268 		xdattach(xd->parent->sc_dev, xd->sc_dev, &xa);
   1269 		if (xd->state == XD_DRIVE_UNKNOWN) {
   1270 			bp->b_error = EIO;
   1271 			goto done;
   1272 		}
   1273 	}
   1274 	if (xd->state != XD_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) {
   1275 		/* no I/O to unlabeled disks, unless raw partition */
   1276 		bp->b_error = EIO;
   1277 		goto done;
   1278 	}
   1279 	/* short circuit zero length request */
   1280 
   1281 	if (bp->b_bcount == 0)
   1282 		goto done;
   1283 
   1284 	/* check bounds with label (disksubr.c).  Determine the size of the
   1285 	 * transfer, and make sure it is within the boundaries of the
   1286 	 * partition. Adjust transfer if needed, and signal errors or early
   1287 	 * completion. */
   1288 
   1289 	if (bounds_check_with_label(&xd->sc_dk, bp,
   1290 		(xd->flags & XD_WLABEL) != 0) <= 0)
   1291 		goto done;
   1292 
   1293 	/*
   1294 	 * now we know we have a valid buf structure that we need to do I/O
   1295 	 * on.
   1296 	 *
   1297 	 * note that we don't disksort because the controller has a sorting
   1298 	 * algorithm built into the hardware.
   1299 	 */
   1300 
   1301 	s = splbio();		/* protect the queues */
   1302 
   1303 	/* first, give jobs in front of us a chance */
   1304 	parent = xd->parent;
   1305 	while (parent->nfree > 0 && bufq_peek(parent->sc_wq) != NULL)
   1306 		if (xdc_startbuf(parent, NULL, NULL) != XD_ERR_AOK)
   1307 			break;
   1308 
   1309 	/* if there are no free iorq's, then we just queue and return. the
    1310 	 * bufs will get picked up later by xdcintr().
   1311 	 */
   1312 
   1313 	if (parent->nfree == 0) {
   1314 		bufq_put(parent->sc_wq, bp);
   1315 		splx(s);
   1316 		return;
   1317 	}
   1318 
   1319 	/* now we have free iopb's and we are at splbio... start 'em up */
    1320 	if (xdc_startbuf(parent, xd, bp) != XD_ERR_AOK) {
         		splx(s);	/* don't leak splbio on the error path */
    1321 		return;
    1322 	}
   1323 
   1324 	/* done! */
   1325 
   1326 	splx(s);
   1327 	return;
   1328 
   1329 done:				/* tells upper layers we are done with this
   1330 				 * buf */
   1331 	bp->b_resid = bp->b_bcount;
   1332 	biodone(bp);
   1333 }
   1334 /*
   1335  * end of {b,c}devsw functions
   1336  */
   1337 
   1338 /*
   1339  * i n t e r r u p t   f u n c t i o n
   1340  *
   1341  * xdcintr: hardware interrupt.
   1342  */
   1343 int
   1344 xdcintr(void *v)
   1345 {
   1346 	struct xdc_softc *xdcsc = v;
   1347 
   1348 	/* kick the event counter */
   1349 
   1350 	xdcsc->sc_intrcnt.ev_count++;
   1351 
   1352 	/* remove as many done IOPBs as possible */
   1353 
   1354 	xdc_remove_iorq(xdcsc);
   1355 
   1356 	/* start any iorq's already waiting */
   1357 
   1358 	xdc_start(xdcsc, XDC_MAXIOPB);
   1359 
    1360 	/* fill up any remaining iorq's with queued buffers */
   1361 
   1362 	while (xdcsc->nfree > 0 && bufq_peek(xdcsc->sc_wq) != NULL)
   1363 		if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK)
   1364 			break;
   1365 
   1366 	return (1);
   1367 }
   1368 /*
   1369  * end of interrupt function
   1370  */
   1371 
   1372 /*
   1373  * i n t e r n a l   f u n c t i o n s
   1374  */
   1375 
   1376 /*
   1377  * xdc_rqinit: fill out the fields of an I/O request
   1378  */
   1379 
   1380 inline void
   1381 xdc_rqinit(struct xd_iorq *rq, struct xdc_softc *xdc, struct xd_softc *xd, int md, u_long blk, int cnt, void *db, struct buf *bp)
   1382 {
   1383 	rq->xdc = xdc;
   1384 	rq->xd = xd;
   1385 	rq->ttl = XDC_MAXTTL + 10;
   1386 	rq->mode = md;
   1387 	rq->tries = rq->errnum = rq->lasterror = 0;
   1388 	rq->blockno = blk;
   1389 	rq->sectcnt = cnt;
   1390 	rq->dbuf = db;
   1391 	rq->buf = bp;
   1392 }
   1393 /*
   1394  * xdc_rqtopb: load up an IOPB based on an iorq
   1395  */
   1396 
   1397 void
   1398 xdc_rqtopb(struct xd_iorq *iorq, struct xd_iopb *iopb, int cmd, int subfun)
   1399 {
   1400 	u_long  block, dp;
   1401 
   1402 	/* standard stuff */
   1403 
   1404 	iopb->errs = iopb->done = 0;
   1405 	iopb->comm = cmd;
   1406 	iopb->errnum = iopb->status = 0;
   1407 	iopb->subfun = subfun;
   1408 	if (iorq->xd)
   1409 		iopb->unit = iorq->xd->xd_drive;
   1410 	else
   1411 		iopb->unit = 0;
   1412 
   1413 	/* check for alternate IOPB format */
   1414 
   1415 	if (cmd == XDCMD_WRP) {
   1416 		switch (subfun) {
   1417 		case XDFUN_CTL:{
   1418 			struct xd_iopb_ctrl *ctrl =
   1419 				(struct xd_iopb_ctrl *) iopb;
   1420 			iopb->lll = 0;
   1421 			iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL)
   1422 					? 0
   1423 					: iorq->xdc->ipl;
   1424 			ctrl->param_a = XDPA_TMOD | XDPA_DACF;
   1425 			ctrl->param_b = XDPB_ROR | XDPB_TDT_3_2USEC;
   1426 			ctrl->param_c = XDPC_OVS | XDPC_COP | XDPC_ASR |
   1427 					XDPC_RBC | XDPC_ECC2;
   1428 			ctrl->throttle = XDC_THROTTLE;
   1429 			ctrl->delay = XDC_DELAY;
   1430 			break;
   1431 			}
   1432 		case XDFUN_DRV:{
   1433 			struct xd_iopb_drive *drv =
   1434 				(struct xd_iopb_drive *)iopb;
   1435 			/* we assume that the disk label has the right
   1436 			 * info */
   1437 			if (XD_STATE(iorq->mode) == XD_SUB_POLL)
   1438 				drv->dparam_ipl = (XDC_DPARAM << 3);
   1439 			else
   1440 				drv->dparam_ipl = (XDC_DPARAM << 3) |
   1441 						  iorq->xdc->ipl;
   1442 			drv->maxsect = iorq->xd->nsect - 1;
   1443 			drv->maxsector = drv->maxsect;
   1444 			/* note: maxsector != maxsect only if you are
   1445 			 * doing cyl sparing */
   1446 			drv->headoff = 0;
   1447 			drv->maxcyl = iorq->xd->pcyl - 1;
   1448 			drv->maxhead = iorq->xd->nhead - 1;
   1449 			break;
   1450 			}
   1451 		case XDFUN_FMT:{
   1452 			struct xd_iopb_format *form =
   1453 					(struct xd_iopb_format *) iopb;
   1454 			if (XD_STATE(iorq->mode) == XD_SUB_POLL)
   1455 				form->interleave_ipl = (XDC_INTERLEAVE << 3);
   1456 			else
   1457 				form->interleave_ipl = (XDC_INTERLEAVE << 3) |
   1458 						       iorq->xdc->ipl;
   1459 			form->field1 = XDFM_FIELD1;
   1460 			form->field2 = XDFM_FIELD2;
   1461 			form->field3 = XDFM_FIELD3;
   1462 			form->field4 = XDFM_FIELD4;
   1463 			form->bytespersec = XDFM_BPS;
   1464 			form->field6 = XDFM_FIELD6;
   1465 			form->field7 = XDFM_FIELD7;
   1466 			break;
   1467 			}
   1468 		}
   1469 	} else {
   1470 
   1471 		/* normal IOPB case (harmless to RDP command) */
   1472 
   1473 		iopb->lll = 0;
   1474 		iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL)
   1475 				? 0
   1476 				: iorq->xdc->ipl;
   1477 		iopb->sectcnt = iorq->sectcnt;
   1478 		block = iorq->blockno;
   1479 		if (iorq->xd == NULL || block == 0) {
   1480 			iopb->sectno = iopb->headno = iopb->cylno = 0;
   1481 		} else {
   1482 			iopb->sectno = block % iorq->xd->nsect;
   1483 			block = block / iorq->xd->nsect;
   1484 			iopb->headno = block % iorq->xd->nhead;
   1485 			block = block / iorq->xd->nhead;
   1486 			iopb->cylno = block;
   1487 		}
   1488 		dp = (u_long) iorq->dbuf;
   1489 		dp = iopb->daddr = (iorq->dbuf == NULL) ? 0 : dp;
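         		/*
         		 * if the transfer would reach past the 16MB mark, use the
         		 * extended 32-bit address modifier; otherwise the normal
         		 * 24-bit one will do.
         		 */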
   1490 		iopb->addrmod = ((dp + (XDFM_BPS * iorq->sectcnt)) > 0x1000000)
   1491 					? XDC_ADDRMOD32
   1492 					: XDC_ADDRMOD;
   1493 	}
   1494 }
   1495 
   1496 /*
   1497  * xdc_cmd: front end for POLL'd and WAIT'd commands.  Returns rqno.
   1498  * If you've already got an IORQ, you can call submit directly (currently
   1499  * there is no need to do this).    NORM requests are handled separately.
   1500  */
   1501 int
   1502 xdc_cmd(struct xdc_softc *xdcsc, int cmd, int subfn, int unit, int block,
   1503 	int scnt, char *dptr, int fullmode)
   1504 {
   1505 	int     rqno, submode = XD_STATE(fullmode), retry;
   1506 	struct xd_iorq *iorq;
   1507 	struct xd_iopb *iopb;
   1508 
   1509 	/* get iorq/iopb */
   1510 	switch (submode) {
   1511 	case XD_SUB_POLL:
   1512 		while (xdcsc->nfree == 0) {
   1513 			if (xdc_piodriver(xdcsc, 0, 1) != XD_ERR_AOK)
   1514 				return (XD_ERR_FAIL);
   1515 		}
   1516 		break;
   1517 	case XD_SUB_WAIT:
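         		/*
         		 * sleep until there is a free iorq and the done count is
         		 * back under XDC_SUBWAITLIM; re-check after each sleep
         		 * since someone else may have taken the free iorq.
         		 */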
   1518 		retry = 1;
   1519 		while (retry) {
   1520 			while (xdcsc->nfree == 0) {
   1521 			    if (tsleep(&xdcsc->nfree, PRIBIO, "xdnfree", 0))
   1522 				return (XD_ERR_FAIL);
   1523 			}
   1524 			while (xdcsc->ndone > XDC_SUBWAITLIM) {
   1525 			    if (tsleep(&xdcsc->ndone, PRIBIO, "xdsubwait", 0))
   1526 				return (XD_ERR_FAIL);
   1527 			}
   1528 			if (xdcsc->nfree)
   1529 				retry = 0;	/* got it */
   1530 		}
   1531 		break;
   1532 	default:
   1533 		return (XD_ERR_FAIL);	/* illegal */
   1534 	}
   1535 	if (xdcsc->nfree == 0)
   1536 		panic("xdcmd nfree");
   1537 	rqno = XDC_RQALLOC(xdcsc);
   1538 	iorq = &xdcsc->reqs[rqno];
   1539 	iopb = iorq->iopb;
   1540 
   1541 
   1542 	/* init iorq/iopb */
   1543 
   1544 	xdc_rqinit(iorq, xdcsc,
   1545 	    (unit == XDC_NOUNIT) ? NULL : xdcsc->sc_drives[unit],
   1546 	    fullmode, block, scnt, dptr, NULL);
   1547 
   1548 	/* load IOPB from iorq */
   1549 
   1550 	xdc_rqtopb(iorq, iopb, cmd, subfn);
   1551 
   1552 	/* submit it for processing */
   1553 
   1554 	xdc_submit_iorq(xdcsc, rqno, fullmode);	/* error code will be in iorq */
   1555 
   1556 	return (rqno);
   1557 }
   1558 /*
   1559  * xdc_startbuf
   1560  * start a buffer running, assumes nfree > 0
   1561  */
   1562 
   1563 int
   1564 xdc_startbuf(struct xdc_softc *xdcsc, struct xd_softc *xdsc, struct buf *bp)
   1565 {
   1566 	int     rqno, partno;
   1567 	struct xd_iorq *iorq;
   1568 	struct xd_iopb *iopb;
   1569 	u_long  block;
   1570 /*	void *dbuf;*/
   1571 	int error;
   1572 
   1573 	if (!xdcsc->nfree)
   1574 		panic("xdc_startbuf free");
   1575 	rqno = XDC_RQALLOC(xdcsc);
   1576 	iorq = &xdcsc->reqs[rqno];
   1577 	iopb = iorq->iopb;
   1578 
   1579 	/* get buf */
   1580 
   1581 	if (bp == NULL) {
   1582 		bp = bufq_get(xdcsc->sc_wq);
   1583 		if (bp == NULL)
   1584 			panic("xdc_startbuf bp");
   1585 		xdsc = xdcsc->sc_drives[DISKUNIT(bp->b_dev)];
   1586 	}
   1587 	partno = DISKPART(bp->b_dev);
   1588 #ifdef XDC_DEBUG
   1589 	printf("xdc_startbuf: %s%c: %s block %d\n", device_xname(xdsc->sc_dev),
   1590 	    'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno);
   1591 	printf("xdc_startbuf: b_bcount %d, b_data 0x%x\n",
   1592 	    bp->b_bcount, bp->b_data);
   1593 #endif
   1594 
   1595 	/*
   1596 	 * load request.  we have to calculate the correct block number based
   1597 	 * on partition info.
   1598 	 *
   1599 	 * note that iorq points to the buffer as mapped into DVMA space,
    1600 	 * whereas bp->b_data points to its non-DVMA mapping.
   1601 	 */
   1602 
   1603 	block = bp->b_blkno + ((partno == RAW_PART) ? 0 :
   1604 	    xdsc->sc_dk.dk_label->d_partitions[partno].p_offset);
   1605 
   1606 	error = bus_dmamap_load(xdcsc->dmatag, iorq->dmamap,
   1607 			 bp->b_data, bp->b_bcount, 0, BUS_DMA_NOWAIT);
   1608 	if (error != 0) {
   1609 		aprint_error_dev(xdcsc->sc_dev, "warning: cannot load DMA map\n");
   1610 		XDC_FREE(xdcsc, rqno);
   1611 		bufq_put(xdcsc->sc_wq, bp);
   1612 		return (XD_ERR_FAIL);	/* XXX: need some sort of
   1613 					 * call-back scheme here? */
   1614 	}
   1615 	bus_dmamap_sync(xdcsc->dmatag, iorq->dmamap, 0,
   1616 			iorq->dmamap->dm_mapsize, (bp->b_flags & B_READ)
   1617 				? BUS_DMASYNC_PREREAD
   1618 				: BUS_DMASYNC_PREWRITE);
   1619 
   1620 	/* init iorq and load iopb from it */
   1621 	xdc_rqinit(iorq, xdcsc, xdsc, XD_SUB_NORM | XD_MODE_VERBO, block,
   1622 		   bp->b_bcount / XDFM_BPS,
   1623 		   (void *)(u_long)iorq->dmamap->dm_segs[0].ds_addr,
   1624 		   bp);
   1625 
   1626 	xdc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XDCMD_RD : XDCMD_WR, 0);
   1627 
   1628 	/* Instrumentation. */
   1629 	disk_busy(&xdsc->sc_dk);
   1630 
   1631 	/* now submit [note that xdc_submit_iorq can never fail on NORM reqs] */
   1632 
   1633 	xdc_submit_iorq(xdcsc, rqno, XD_SUB_NORM);
   1634 	return (XD_ERR_AOK);
   1635 }
   1636 
   1637 
   1638 /*
   1639  * xdc_submit_iorq: submit an iorq for processing.  returns XD_ERR_AOK
    1640  * if ok.  if it fails, it returns an error code.  type is XD_SUB_*.
   1641  *
   1642  * note: caller frees iorq in all cases except NORM
   1643  *
   1644  * return value:
   1645  *   NORM: XD_AOK (req pending), XD_FAIL (couldn't submit request)
   1646  *   WAIT: XD_AOK (success), <error-code> (failed)
   1647  *   POLL: <same as WAIT>
   1648  *   NOQ : <same as NORM>
   1649  *
   1650  * there are three sources for i/o requests:
   1651  * [1] xdstrategy: normal block I/O, using "struct buf" system.
   1652  * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts.
   1653  * [3] open/ioctl: these are I/O requests done in the context of a process,
   1654  *                 and the process should block until they are done.
   1655  *
   1656  * software state is stored in the iorq structure.  each iorq has an
   1657  * iopb structure.  the hardware understands the iopb structure.
   1658  * every command must go through an iopb.  a 7053 can only handle
   1659  * XDC_MAXIOPB (31) active iopbs at one time.  iopbs are allocated in
   1660  * DVMA space at boot up time.  what happens if we run out of iopb's?
   1661  * for i/o type [1], the buffers are queued at the "buff" layer and
   1662  * picked up later by the interrupt routine.  for case [2] the
   1663  * programmed i/o driver is called with a special flag that says
   1664  * return when one iopb is free.  for case [3] the process can sleep
   1665  * on the iorq free list until some iopbs are available.
   1666  */
   1667 
   1668 
   1669 int
   1670 xdc_submit_iorq(struct xdc_softc *xdcsc, int iorqno, int type)
   1671 {
   1672 	u_long  iopbaddr;
   1673 	struct xd_iorq *iorq = &xdcsc->reqs[iorqno];
   1674 
   1675 #ifdef XDC_DEBUG
   1676 	printf("xdc_submit_iorq(%s, no=%d, type=%d)\n", device_xname(xdcsc->sc_dev),
   1677 	    iorqno, type);
   1678 #endif
   1679 
   1680 	/* first check and see if controller is busy */
   1681 	if (xdcsc->xdc->xdc_csr & XDC_ADDING) {
   1682 #ifdef XDC_DEBUG
   1683 		printf("xdc_submit_iorq: XDC not ready (ADDING)\n");
   1684 #endif
   1685 		if (type == XD_SUB_NOQ)
   1686 			return (XD_ERR_FAIL);	/* failed */
   1687 		XDC_TWAIT(xdcsc, iorqno);	/* put at end of waitq */
   1688 		switch (type) {
   1689 		case XD_SUB_NORM:
   1690 			return XD_ERR_AOK;	/* success */
   1691 		case XD_SUB_WAIT:
   1692 			while (iorq->iopb->done == 0) {
   1693 				(void) tsleep(iorq, PRIBIO, "xdciorq", 0);
   1694 			}
   1695 			return (iorq->errnum);
   1696 		case XD_SUB_POLL:
   1697 			return (xdc_piodriver(xdcsc, iorqno, 0));
   1698 		default:
   1699 			panic("xdc_submit_iorq adding");
   1700 		}
   1701 	}
   1702 #ifdef XDC_DEBUG
   1703 	{
   1704 		u_char *rio = (u_char *) iorq->iopb;
   1705 		int     sz = sizeof(struct xd_iopb), lcv;
   1706 		printf("%s: aio #%d [",
   1707 			device_xname(xdcsc->sc_dev), iorq - xdcsc->reqs);
   1708 		for (lcv = 0; lcv < sz; lcv++)
   1709 			printf(" %02x", rio[lcv]);
   1710 		printf("]\n");
   1711 	}
   1712 #endif				/* XDC_DEBUG */
   1713 
   1714 	/* controller not busy, start command */
   1715 	iopbaddr = (u_long) iorq->dmaiopb;
   1716 	XDC_GO(xdcsc->xdc, iopbaddr);	/* go! */
   1717 	xdcsc->nrun++;
   1718 	/* command now running, wrap it up */
   1719 	switch (type) {
   1720 	case XD_SUB_NORM:
   1721 	case XD_SUB_NOQ:
   1722 		return (XD_ERR_AOK);	/* success */
   1723 	case XD_SUB_WAIT:
   1724 		while (iorq->iopb->done == 0) {
   1725 			(void) tsleep(iorq, PRIBIO, "xdciorq", 0);
   1726 		}
   1727 		return (iorq->errnum);
   1728 	case XD_SUB_POLL:
   1729 		return (xdc_piodriver(xdcsc, iorqno, 0));
   1730 	default:
   1731 		panic("xdc_submit_iorq wrap up");
   1732 	}
   1733 	panic("xdc_submit_iorq");
   1734 	return 0;	/* not reached */
   1735 }
   1736 
   1737 
   1738 /*
   1739  * xdc_piodriver
   1740  *
   1741  * programmed i/o driver.   this function takes over the computer
   1742  * and drains off all i/o requests.   it returns the status of the iorq
    1743  * the caller is interested in.   if freeone is true, then it returns
   1744  * when there is a free iorq.
   1745  */
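         /*
          * usage sketch (illustrative only): polled callers reach this
          * function through xdc_submit_iorq() with XD_SUB_POLL, e.g.
          *
          *	error = xdc_piodriver(xdcsc, iorqno, 0);
          *
          * while a caller that only needs an iopb slot to come free
          * (rather than a particular request to finish) passes a non-zero
          * freeone and ignores which request completed:
          *
          *	(void) xdc_piodriver(xdcsc, 0, 1);
          */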
   1746 int
   1747 xdc_piodriver(struct xdc_softc *xdcsc, int iorqno, int freeone)
   1748 {
   1749 	int	nreset = 0;
   1750 	int	retval = 0;
   1751 	u_long	count;
   1752 	struct	xdc *xdc = xdcsc->xdc;
   1753 #ifdef XDC_DEBUG
   1754 	printf("xdc_piodriver(%s, %d, freeone=%d)\n", device_xname(xdcsc->sc_dev),
   1755 	    iorqno, freeone);
   1756 #endif
   1757 
   1758 	while (xdcsc->nwait || xdcsc->nrun) {
   1759 #ifdef XDC_DEBUG
   1760 		printf("xdc_piodriver: wait=%d, run=%d\n",
   1761 			xdcsc->nwait, xdcsc->nrun);
   1762 #endif
   1763 		XDC_WAIT(xdc, count, XDC_MAXTIME, (XDC_REMIOPB | XDC_F_ERROR));
   1764 #ifdef XDC_DEBUG
   1765 		printf("xdc_piodriver: done wait with count = %d\n", count);
   1766 #endif
   1767 		/* we expect some progress soon */
   1768 		if (count == 0 && nreset >= 2) {
   1769 			xdc_reset(xdcsc, 0, XD_RSET_ALL, XD_ERR_FAIL, 0);
   1770 #ifdef XDC_DEBUG
   1771 			printf("xdc_piodriver: timeout\n");
   1772 #endif
   1773 			return (XD_ERR_FAIL);
   1774 		}
   1775 		if (count == 0) {
   1776 			if (xdc_reset(xdcsc, 0,
   1777 				      (nreset++ == 0) ? XD_RSET_NONE : iorqno,
   1778 				      XD_ERR_FAIL,
   1779 				      0) == XD_ERR_FAIL)
   1780 				return (XD_ERR_FAIL);	/* flushes all but POLL
   1781 							 * requests, resets */
   1782 			continue;
   1783 		}
   1784 		xdc_remove_iorq(xdcsc);	/* could resubmit request */
   1785 		if (freeone) {
   1786 			if (xdcsc->nrun < XDC_MAXIOPB) {
   1787 #ifdef XDC_DEBUG
   1788 				printf("xdc_piodriver: done: one free\n");
   1789 #endif
   1790 				return (XD_ERR_AOK);
   1791 			}
   1792 			continue;	/* don't xdc_start */
   1793 		}
   1794 		xdc_start(xdcsc, XDC_MAXIOPB);
   1795 	}
   1796 
   1797 	/* get return value */
   1798 
   1799 	retval = xdcsc->reqs[iorqno].errnum;
   1800 
   1801 #ifdef XDC_DEBUG
   1802 	printf("xdc_piodriver: done, retval = 0x%x (%s)\n",
   1803 	    xdcsc->reqs[iorqno].errnum, xdc_e2str(xdcsc->reqs[iorqno].errnum));
   1804 #endif
   1805 
   1806 	/* now that we've drained everything, start up any bufs that have
   1807 	 * queued */
   1808 
   1809 	while (xdcsc->nfree > 0 && bufq_peek(xdcsc->sc_wq) != NULL)
   1810 		if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK)
   1811 			break;
   1812 
   1813 	return (retval);
   1814 }
   1815 
   1816 /*
    1817  * xdc_xdreset: reset one drive.   NOTE: assumes xdc was just reset.
   1818  * we steal iopb[0] for this, but we put it back when we are done.
   1819  */
   1820 void
   1821 xdc_xdreset(struct xdc_softc *xdcsc, struct xd_softc *xdsc)
   1822 {
   1823 	struct xd_iopb tmpiopb;
   1824 	u_long  addr;
   1825 	int     del;
   1826 	memcpy(&tmpiopb, xdcsc->iopbase, sizeof(tmpiopb));
   1827 	memset(xdcsc->iopbase, 0, sizeof(tmpiopb));
   1828 	xdcsc->iopbase->comm = XDCMD_RST;
   1829 	xdcsc->iopbase->unit = xdsc->xd_drive;
   1830 	addr = (u_long) xdcsc->dvmaiopb;
   1831 	XDC_GO(xdcsc->xdc, addr);	/* go! */
   1832 	XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_REMIOPB);
   1833 	if (del <= 0 || xdcsc->iopbase->errs) {
   1834 		printf("%s: off-line: %s\n", device_xname(xdcsc->sc_dev),
   1835 		    xdc_e2str(xdcsc->iopbase->errnum));
   1836 		xdcsc->xdc->xdc_csr = XDC_RESET;
   1837 		XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET);
   1838 		if (del <= 0)
   1839 			panic("xdc_reset");
   1840 	} else {
   1841 		xdcsc->xdc->xdc_csr = XDC_CLRRIO;	/* clear RIO */
   1842 	}
   1843 	memcpy(xdcsc->iopbase, &tmpiopb, sizeof(tmpiopb));
   1844 }
   1845 
   1846 
   1847 /*
   1848  * xdc_reset: reset everything: requests are marked as errors except
   1849  * a polled request (which is resubmitted)
   1850  */
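         /*
          * call sketch: the two styles used elsewhere in this file are
          *
          *	xdc_reset(xdcsc, 0, XD_RSET_NONE, XD_ERR_FAIL, NULL);
          *		(watchdog timeout in xdc_tick)
          *	xdc_reset(xdcsc, 0, XD_RSET_ALL, errnum, 0);
          *		(fatal controller error in xdc_remove_iorq)
          *
          * "error" becomes the errnum of every request that gets blasted,
          * and a non-NULL xdsc additionally resets that one drive via
          * xdc_xdreset().
          */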
   1851 int
   1852 xdc_reset(struct xdc_softc *xdcsc, int quiet, int blastmode, int error,
   1853 	struct xd_softc *xdsc)
   1854 
   1855 {
   1856 	int     del = 0, lcv, retval = XD_ERR_AOK;
   1857 	int     oldfree = xdcsc->nfree;
   1858 
   1859 	/* soft reset hardware */
   1860 
   1861 	if (!quiet)
   1862 		printf("%s: soft reset\n", device_xname(xdcsc->sc_dev));
   1863 	xdcsc->xdc->xdc_csr = XDC_RESET;
   1864 	XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET);
   1865 	if (del <= 0) {
   1866 		blastmode = XD_RSET_ALL;	/* dead, flush all requests */
   1867 		retval = XD_ERR_FAIL;
   1868 	}
   1869 	if (xdsc)
   1870 		xdc_xdreset(xdcsc, xdsc);
   1871 
   1872 	/* fix queues based on "blast-mode" */
   1873 
   1874 	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
   1875 		register struct xd_iorq *iorq = &xdcsc->reqs[lcv];
   1876 
   1877 		if (XD_STATE(iorq->mode) != XD_SUB_POLL &&
   1878 		    XD_STATE(iorq->mode) != XD_SUB_WAIT &&
   1879 		    XD_STATE(iorq->mode) != XD_SUB_NORM)
   1880 			/* is it active? */
   1881 			continue;
   1882 
   1883 		xdcsc->nrun--;	/* it isn't running any more */
   1884 		if (blastmode == XD_RSET_ALL || blastmode != lcv) {
   1885 			/* failed */
   1886 			iorq->errnum = error;
   1887 			xdcsc->iopbase[lcv].done = xdcsc->iopbase[lcv].errs = 1;
   1888 			switch (XD_STATE(xdcsc->reqs[lcv].mode)) {
   1889 			case XD_SUB_NORM:
   1890 			    iorq->buf->b_error = EIO;
   1891 			    iorq->buf->b_resid =
   1892 			       iorq->sectcnt * XDFM_BPS;
   1893 
   1894 			    bus_dmamap_sync(xdcsc->dmatag, iorq->dmamap, 0,
   1895 					    iorq->dmamap->dm_mapsize,
   1896 					    (iorq->buf->b_flags & B_READ)
   1897 						? BUS_DMASYNC_POSTREAD
   1898 						: BUS_DMASYNC_POSTWRITE);
   1899 
   1900 			    bus_dmamap_unload(xdcsc->dmatag, iorq->dmamap);
   1901 
   1902 			    disk_unbusy(&xdcsc->reqs[lcv].xd->sc_dk,
   1903 				(xdcsc->reqs[lcv].buf->b_bcount -
   1904 				xdcsc->reqs[lcv].buf->b_resid),
   1905 				(iorq->buf->b_flags & B_READ));
   1906 			    biodone(iorq->buf);
   1907 			    XDC_FREE(xdcsc, lcv);	/* add to free list */
   1908 			    break;
   1909 			case XD_SUB_WAIT:
   1910 			    wakeup(iorq);
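         			    /* FALLTHROUGH */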
   1911 			case XD_SUB_POLL:
   1912 			    xdcsc->ndone++;
   1913 			    iorq->mode =
   1914 				XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
   1915 			    break;
   1916 			}
   1917 
   1918 		} else {
   1919 
   1920 			/* resubmit, put at front of wait queue */
   1921 			XDC_HWAIT(xdcsc, lcv);
   1922 		}
   1923 	}
   1924 
   1925 	/*
   1926 	 * now, if stuff is waiting, start it.
   1927 	 * since we just reset it should go
   1928 	 */
   1929 	xdc_start(xdcsc, XDC_MAXIOPB);
   1930 
   1931 	/* ok, we did it */
   1932 	if (oldfree == 0 && xdcsc->nfree)
   1933 		wakeup(&xdcsc->nfree);
   1934 
   1935 #ifdef XDC_DIAG
   1936 	del = xdcsc->nwait + xdcsc->nrun + xdcsc->nfree + xdcsc->ndone;
   1937 	if (del != XDC_MAXIOPB)
   1938 		printf("%s: diag: xdc_reset miscount (%d should be %d)!\n",
   1939 		    device_xname(xdcsc->sc_dev), del, XDC_MAXIOPB);
   1940 	else
   1941 		if (xdcsc->ndone > XDC_MAXIOPB - XDC_SUBWAITLIM)
   1942 			printf("%s: diag: lots of done jobs (%d)\n",
   1943 			    device_xname(xdcsc->sc_dev), xdcsc->ndone);
   1944 #endif
   1945 	printf("RESET DONE\n");
   1946 	return (retval);
   1947 }
   1948 /*
   1949  * xdc_start: start all waiting buffers
   1950  */
   1951 
   1952 void
   1953 xdc_start(struct xdc_softc *xdcsc, int maxio)
   1954 
   1955 {
   1956 	int     rqno;
   1957 	while (maxio && xdcsc->nwait &&
   1958 		(xdcsc->xdc->xdc_csr & XDC_ADDING) == 0) {
   1959 		XDC_GET_WAITER(xdcsc, rqno);	/* note: rqno is an "out"
   1960 						 * param */
   1961 		if (xdc_submit_iorq(xdcsc, rqno, XD_SUB_NOQ) != XD_ERR_AOK)
   1962 			panic("xdc_start");	/* should never happen */
   1963 		maxio--;
   1964 	}
   1965 }
   1966 /*
   1967  * xdc_remove_iorq: remove "done" IOPB's.
   1968  */
   1969 
   1970 int
   1971 xdc_remove_iorq(struct xdc_softc *xdcsc)
   1972 {
   1973 	int     errnum, rqno, comm, errs;
   1974 	struct xdc *xdc = xdcsc->xdc;
   1975 	struct xd_iopb *iopb;
   1976 	struct xd_iorq *iorq;
   1977 	struct buf *bp;
   1978 
   1979 	if (xdc->xdc_csr & XDC_F_ERROR) {
   1980 		/*
   1981 		 * FATAL ERROR: should never happen under normal use. This
   1982 		 * error is so bad, you can't even tell which IOPB is bad, so
   1983 		 * we dump them all.
   1984 		 */
   1985 		errnum = xdc->xdc_f_err;
   1986 		aprint_error_dev(xdcsc->sc_dev, "fatal error 0x%02x: %s\n",
   1987 		    errnum, xdc_e2str(errnum));
   1988 		if (xdc_reset(xdcsc, 0, XD_RSET_ALL, errnum, 0) != XD_ERR_AOK) {
   1989 			aprint_error_dev(xdcsc->sc_dev, "soft reset failed!\n");
   1990 			panic("xdc_remove_iorq: controller DEAD");
   1991 		}
   1992 		return (XD_ERR_AOK);
   1993 	}
   1994 
   1995 	/*
   1996 	 * get iopb that is done
   1997 	 *
   1998 	 * hmm... I used to read the address of the done IOPB off the VME
   1999 	 * registers and calculate the rqno directly from that.   that worked
   2000 	 * until I started putting a load on the controller.   when loaded, i
    2001  * would get interrupts but neither the REMIOPB nor F_ERROR bits would
   2002 	 * be set, even after DELAY'ing a while!   later on the timeout
   2003 	 * routine would detect IOPBs that were marked "running" but their
   2004 	 * "done" bit was set.   rather than dealing directly with this
   2005 	 * problem, it is just easier to look at all running IOPB's for the
   2006 	 * done bit.
   2007 	 */
   2008 	if (xdc->xdc_csr & XDC_REMIOPB) {
   2009 		xdc->xdc_csr = XDC_CLRRIO;
   2010 	}
   2011 
   2012 	for (rqno = 0; rqno < XDC_MAXIOPB; rqno++) {
   2013 		iorq = &xdcsc->reqs[rqno];
   2014 		if (iorq->mode == 0 || XD_STATE(iorq->mode) == XD_SUB_DONE)
   2015 			continue;	/* free, or done */
   2016 		iopb = &xdcsc->iopbase[rqno];
   2017 		if (iopb->done == 0)
   2018 			continue;	/* not done yet */
   2019 
   2020 #ifdef XDC_DEBUG
   2021 		{
   2022 			u_char *rio = (u_char *) iopb;
   2023 			int     sz = sizeof(struct xd_iopb), lcv;
   2024 			printf("%s: rio #%d [", device_xname(xdcsc->sc_dev), rqno);
   2025 			for (lcv = 0; lcv < sz; lcv++)
   2026 				printf(" %02x", rio[lcv]);
   2027 			printf("]\n");
   2028 		}
   2029 #endif				/* XDC_DEBUG */
   2030 
   2031 		xdcsc->nrun--;
   2032 
   2033 		comm = iopb->comm;
   2034 		errs = iopb->errs;
   2035 
   2036 		if (errs)
   2037 			iorq->errnum = iopb->errnum;
   2038 		else
   2039 			iorq->errnum = 0;
   2040 
   2041 		/* handle non-fatal errors */
   2042 
   2043 		if (errs &&
   2044 		    xdc_error(xdcsc, iorq, iopb, rqno, comm) == XD_ERR_AOK)
   2045 			continue;	/* AOK: we resubmitted it */
   2046 
   2047 
   2048 		/* this iorq is now done (hasn't been restarted or anything) */
   2049 
   2050 		if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror)
   2051 			xdc_perror(iorq, iopb, 0);
   2052 
    2053 		/* now, if this was a read or write, check to make sure we got
    2054 		 * all the data we needed. (this may not be the case if we got
    2055 		 * an error in the middle of a multisector request).   */
   2056 
   2057 		if ((iorq->mode & XD_MODE_B144) != 0 && errs == 0 &&
   2058 		    (comm == XDCMD_RD || comm == XDCMD_WR)) {
    2059 			/* we just successfully processed a bad144 sector.
    2060 			 * note: if we are in bad144 mode, the pointers have
    2061 			 * already been advanced (in xdc_error) and are
    2062 			 * pointing at the bad144 sector.   to exit bad144
    2063 			 * mode, we must advance the pointers one sector and
    2064 			 * issue a new request if there are still sectors
    2065 			 * left to process.
    2066 			 */
   2067 			XDC_ADVANCE(iorq, 1);	/* advance 1 sector */
   2068 
   2069 			/* exit b144 mode */
   2070 			iorq->mode = iorq->mode & (~XD_MODE_B144);
   2071 
   2072 			if (iorq->sectcnt) {	/* more to go! */
   2073 				iorq->lasterror = iorq->errnum = iopb->errnum = 0;
   2074 				iopb->errs = iopb->done = 0;
   2075 				iorq->tries = 0;
   2076 				iopb->sectcnt = iorq->sectcnt;
   2077 				iopb->cylno = iorq->blockno /
   2078 						iorq->xd->sectpercyl;
    2079 				iopb->headno =
    2080 					(iorq->blockno / iorq->xd->nsect) %
    2081 						iorq->xd->nhead;
    2082 				iopb->sectno = iorq->blockno % iorq->xd->nsect;
   2083 				iopb->daddr = (u_long) iorq->dbuf;
   2084 				XDC_HWAIT(xdcsc, rqno);
   2085 				xdc_start(xdcsc, 1);	/* resubmit */
   2086 				continue;
   2087 			}
   2088 		}
   2089 		/* final cleanup, totally done with this request */
   2090 
   2091 		switch (XD_STATE(iorq->mode)) {
   2092 		case XD_SUB_NORM:
   2093 			bp = iorq->buf;
   2094 			if (errs) {
   2095 				bp->b_error = EIO;
   2096 				bp->b_resid = iorq->sectcnt * XDFM_BPS;
   2097 			} else {
   2098 				bp->b_resid = 0;	/* done */
   2099 			}
   2100 			bus_dmamap_sync(xdcsc->dmatag, iorq->dmamap, 0,
   2101 					iorq->dmamap->dm_mapsize,
   2102 					(bp->b_flags & B_READ)
   2103 						? BUS_DMASYNC_POSTREAD
   2104 						: BUS_DMASYNC_POSTWRITE);
   2105 			bus_dmamap_unload(xdcsc->dmatag, iorq->dmamap);
   2106 
   2107 			disk_unbusy(&iorq->xd->sc_dk,
   2108 			    (bp->b_bcount - bp->b_resid),
   2109 			    (bp->b_flags & B_READ));
   2110 			XDC_FREE(xdcsc, rqno);
   2111 			biodone(bp);
   2112 			break;
   2113 		case XD_SUB_WAIT:
   2114 			iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
   2115 			xdcsc->ndone++;
   2116 			wakeup(iorq);
   2117 			break;
   2118 		case XD_SUB_POLL:
   2119 			iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
   2120 			xdcsc->ndone++;
   2121 			break;
   2122 		}
   2123 	}
   2124 
   2125 	return (XD_ERR_AOK);
   2126 }
   2127 
   2128 /*
   2129  * xdc_perror: print error.
   2130  * - if still_trying is true: we got an error, retried and got a
   2131  *   different error.  in that case lasterror is the old error,
   2132  *   and errnum is the new one.
   2133  * - if still_trying is not true, then if we ever had an error it
   2134  *   is in lasterror. also, if iorq->errnum == 0, then we recovered
   2135  *   from that error (otherwise iorq->errnum == iorq->lasterror).
   2136  */
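         /*
          * hypothetical sample of the message this builds (device name,
          * partition, command and cyl/head/sec come from the failing
          * request):
          *
          *	xd0a: write 1023/4/7: Header not found [recovered in 2 tries]
          */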
   2137 void
   2138 xdc_perror(struct xd_iorq *iorq, struct xd_iopb *iopb, int still_trying)
   2139 
   2140 {
   2141 
   2142 	int     error = iorq->lasterror;
   2143 
   2144 	printf("%s", (iorq->xd) ? device_xname(iorq->xd->sc_dev)
   2145 	    : device_xname(iorq->xdc->sc_dev));
   2146 	if (iorq->buf)
   2147 		printf("%c: ", 'a' + (char)DISKPART(iorq->buf->b_dev));
   2148 	if (iopb->comm == XDCMD_RD || iopb->comm == XDCMD_WR)
   2149 		printf("%s %d/%d/%d: ",
   2150 			(iopb->comm == XDCMD_RD) ? "read" : "write",
   2151 			iopb->cylno, iopb->headno, iopb->sectno);
   2152 	printf("%s", xdc_e2str(error));
   2153 
   2154 	if (still_trying)
   2155 		printf(" [still trying, new error=%s]", xdc_e2str(iorq->errnum));
   2156 	else
   2157 		if (iorq->errnum == 0)
   2158 			printf(" [recovered in %d tries]", iorq->tries);
   2159 
   2160 	printf("\n");
   2161 }
   2162 
   2163 /*
   2164  * xdc_error: non-fatal error encountered... recover.
   2165  * return AOK if resubmitted, return FAIL if this iopb is done
   2166  */
   2167 int
   2168 xdc_error(struct xdc_softc *xdcsc, struct xd_iorq *iorq, struct xd_iopb *iopb,
   2169 	int rqno, int comm)
   2170 
   2171 {
   2172 	int     errnum = iorq->errnum;
   2173 	int     erract = errnum & XD_ERA_MASK;
   2174 	int     oldmode, advance;
   2175 #ifdef __sparc__
   2176 	int i;
   2177 #endif
   2178 
   2179 	if (erract == XD_ERA_RSET) {	/* some errors require a reset */
   2180 		oldmode = iorq->mode;
   2181 		iorq->mode = XD_SUB_DONE | (~XD_SUB_MASK & oldmode);
   2182 		xdcsc->ndone++;
   2183 		/* make xdc_start ignore us */
   2184 		xdc_reset(xdcsc, 1, XD_RSET_NONE, errnum, iorq->xd);
   2185 		iorq->mode = oldmode;
   2186 		xdcsc->ndone--;
   2187 	}
   2188 	/* check for read/write to a sector in bad144 table if bad: redirect
   2189 	 * request to bad144 area */
   2190 
   2191 	if ((comm == XDCMD_RD || comm == XDCMD_WR) &&
   2192 	    (iorq->mode & XD_MODE_B144) == 0) {
   2193 		advance = iorq->sectcnt - iopb->sectcnt;
   2194 		XDC_ADVANCE(iorq, advance);
   2195 #ifdef __sparc__
   2196 		if ((i = isbad(&iorq->xd->dkb, iorq->blockno / iorq->xd->sectpercyl,
   2197 			    (iorq->blockno / iorq->xd->nsect) % iorq->xd->nhead,
   2198 			    iorq->blockno % iorq->xd->nsect)) != -1) {
   2199 			iorq->mode |= XD_MODE_B144;	/* enter bad144 mode &
   2200 							 * redirect */
   2201 			iopb->errnum = iopb->done = iopb->errs = 0;
   2202 			iopb->sectcnt = 1;
   2203 			iopb->cylno = (iorq->xd->ncyl + iorq->xd->acyl) - 2;
   2204 			/* second to last acyl */
   2205 			i = iorq->xd->sectpercyl - 1 - i;	/* follow bad144
   2206 								 * standard */
    2207 			iopb->headno = i / iorq->xd->nsect;
    2208 			iopb->sectno = i % iorq->xd->nsect;
   2209 			XDC_HWAIT(xdcsc, rqno);
   2210 			xdc_start(xdcsc, 1);	/* resubmit */
   2211 			return (XD_ERR_AOK);	/* recovered! */
   2212 		}
   2213 #endif
   2214 	}
   2215 
   2216 	/*
   2217 	 * it isn't a bad144 sector, must be real error! see if we can retry
   2218 	 * it?
   2219 	 */
   2220 	if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror)
   2221 		xdc_perror(iorq, iopb, 1);	/* inform of error state
   2222 						 * change */
   2223 	iorq->lasterror = errnum;
   2224 
   2225 	if ((erract == XD_ERA_RSET || erract == XD_ERA_HARD)
   2226 	    && iorq->tries < XDC_MAXTRIES) {	/* retry? */
   2227 		iorq->tries++;
   2228 		iorq->errnum = iopb->errnum = iopb->done = iopb->errs = 0;
   2229 		XDC_HWAIT(xdcsc, rqno);
   2230 		xdc_start(xdcsc, 1);	/* restart */
   2231 		return (XD_ERR_AOK);	/* recovered! */
   2232 	}
   2233 
   2234 	/* failed to recover from this error */
   2235 	return (XD_ERR_FAIL);
   2236 }
   2237 
   2238 /*
   2239  * xdc_tick: make sure xd is still alive and ticking (err, kicking).
   2240  */
   2241 void
   2242 xdc_tick(void *arg)
   2243 
   2244 {
   2245 	struct xdc_softc *xdcsc = arg;
   2246 	int     lcv, s, reset = 0;
   2247 #ifdef XDC_DIAG
   2248 	int     nwait, nrun, nfree, ndone, whd = 0;
   2249 	u_char  fqc[XDC_MAXIOPB], wqc[XDC_MAXIOPB], mark[XDC_MAXIOPB];
   2250 	s = splbio();
   2251 	nwait = xdcsc->nwait;
   2252 	nrun = xdcsc->nrun;
   2253 	nfree = xdcsc->nfree;
   2254 	ndone = xdcsc->ndone;
   2255 	memcpy(wqc, xdcsc->waitq, sizeof(wqc));
   2256 	memcpy(fqc, xdcsc->freereq, sizeof(fqc));
   2257 	splx(s);
   2258 	if (nwait + nrun + nfree + ndone != XDC_MAXIOPB) {
   2259 		printf("%s: diag: IOPB miscount (got w/f/r/d %d/%d/%d/%d, wanted %d)\n",
   2260 		    device_xname(xdcsc->sc_dev), nwait, nfree, nrun, ndone,
   2261 		    XDC_MAXIOPB);
   2262 		memset(mark, 0, sizeof(mark));
   2263 		printf("FREE: ");
   2264 		for (lcv = nfree; lcv > 0; lcv--) {
   2265 			printf("%d ", fqc[lcv - 1]);
   2266 			mark[fqc[lcv - 1]] = 1;
   2267 		}
   2268 		printf("\nWAIT: ");
   2269 		lcv = nwait;
   2270 		while (lcv > 0) {
   2271 			printf("%d ", wqc[whd]);
   2272 			mark[wqc[whd]] = 1;
   2273 			whd = (whd + 1) % XDC_MAXIOPB;
   2274 			lcv--;
   2275 		}
   2276 		printf("\n");
   2277 		for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
   2278 			if (mark[lcv] == 0)
   2279 				printf("MARK: running %d: mode %d done %d errs %d errnum 0x%x ttl %d buf %p\n",
   2280 				lcv, xdcsc->reqs[lcv].mode,
   2281 				xdcsc->iopbase[lcv].done,
   2282 				xdcsc->iopbase[lcv].errs,
   2283 				xdcsc->iopbase[lcv].errnum,
   2284 				xdcsc->reqs[lcv].ttl, xdcsc->reqs[lcv].buf);
   2285 		}
   2286 	} else
   2287 		if (ndone > XDC_MAXIOPB - XDC_SUBWAITLIM)
   2288 			printf("%s: diag: lots of done jobs (%d)\n",
   2289 				device_xname(xdcsc->sc_dev), ndone);
   2290 
   2291 #endif
   2292 #ifdef XDC_DEBUG
   2293 	printf("%s: tick: csr 0x%x, w/f/r/d %d/%d/%d/%d\n",
   2294 		device_xname(xdcsc->sc_dev),
   2295 		xdcsc->xdc->xdc_csr, xdcsc->nwait, xdcsc->nfree, xdcsc->nrun,
   2296 		xdcsc->ndone);
   2297 	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
   2298 		if (xdcsc->reqs[lcv].mode)
   2299 		  printf("running %d: mode %d done %d errs %d errnum 0x%x\n",
   2300 			 lcv,
   2301 			 xdcsc->reqs[lcv].mode, xdcsc->iopbase[lcv].done,
   2302 			 xdcsc->iopbase[lcv].errs, xdcsc->iopbase[lcv].errnum);
   2303 	}
   2304 #endif
   2305 
   2306 	/* reduce ttl for each request if one goes to zero, reset xdc */
   2307 	s = splbio();
   2308 	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
   2309 		if (xdcsc->reqs[lcv].mode == 0 ||
   2310 		    XD_STATE(xdcsc->reqs[lcv].mode) == XD_SUB_DONE)
   2311 			continue;
   2312 		xdcsc->reqs[lcv].ttl--;
   2313 		if (xdcsc->reqs[lcv].ttl == 0)
   2314 			reset = 1;
   2315 	}
   2316 	if (reset) {
   2317 		printf("%s: watchdog timeout\n", device_xname(xdcsc->sc_dev));
   2318 		xdc_reset(xdcsc, 0, XD_RSET_NONE, XD_ERR_FAIL, NULL);
   2319 	}
   2320 	splx(s);
   2321 
   2322 	/* until next time */
   2323 
   2324 	callout_reset(&xdcsc->sc_tick_ch, XDC_TICKCNT, xdc_tick, xdcsc);
   2325 }
   2326 
   2327 /*
   2328  * xdc_ioctlcmd: this function provides a user level interface to the
   2329  * controller via ioctl.   this allows "format" programs to be written
   2330  * in user code, and is also useful for some debugging.   we return
   2331  * an error code.   called at user priority.
   2332  */
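         /*
          * userland sketch (illustrative only): the ioctl request name and
          * the header that declares struct xd_iocmd are assumed here;
          * DIOSXDCMD is used below on that assumption, so check the
          * driver's xio header for the exact spelling.
          *
          *	struct xd_iocmd xio;
          *	char secbuf[XDFM_BPS];
          *	int fd = open("/dev/rxd0c", O_RDWR);	(example device node)
          *
          *	memset(&xio, 0, sizeof(xio));
          *	xio.cmd = XDCMD_RD;
          *	xio.block = 0;
          *	xio.sectcnt = 1;
          *	xio.dlen = XDFM_BPS;
          *	xio.dptr = secbuf;
          *	if (ioctl(fd, DIOSXDCMD, &xio) == -1)
          *		err(1, "DIOSXDCMD");
          *
          * on success the driver copies the sector back out to secbuf and
          * fills in xio.errnum / xio.tries from the completed request.
          */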
   2333 int
   2334 xdc_ioctlcmd(struct xd_softc *xd, dev_t dev, struct xd_iocmd *xio)
   2335 
   2336 {
   2337 	int     s, rqno, dummy;
   2338 	char *dvmabuf = NULL, *buf = NULL;
   2339 	struct xdc_softc *xdcsc;
   2340 	int			rseg, error;
   2341 	bus_dma_segment_t	seg;
   2342 
   2343 	/* check sanity of requested command */
   2344 
   2345 	switch (xio->cmd) {
   2346 
   2347 	case XDCMD_NOP:	/* no op: everything should be zero */
   2348 		if (xio->subfn || xio->dptr || xio->dlen ||
   2349 		    xio->block || xio->sectcnt)
   2350 			return (EINVAL);
   2351 		break;
   2352 
   2353 	case XDCMD_RD:		/* read / write sectors (up to XD_IOCMD_MAXS) */
   2354 	case XDCMD_WR:
   2355 		if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS ||
   2356 		    xio->sectcnt * XDFM_BPS != xio->dlen || xio->dptr == NULL)
   2357 			return (EINVAL);
   2358 		break;
   2359 
   2360 	case XDCMD_SK:		/* seek: doesn't seem useful to export this */
   2361 		return (EINVAL);
   2362 
   2363 	case XDCMD_WRP:	/* write parameters */
   2364 		return (EINVAL);/* not useful, except maybe drive
   2365 				 * parameters... but drive parameters should
   2366 				 * go via disklabel changes */
   2367 
   2368 	case XDCMD_RDP:	/* read parameters */
   2369 		if (xio->subfn != XDFUN_DRV ||
   2370 		    xio->dlen || xio->block || xio->dptr)
   2371 			return (EINVAL);	/* allow read drive params to
   2372 						 * get hw_spt */
   2373 		xio->sectcnt = xd->hw_spt;	/* we already know the answer */
    2374 		return (0);
   2376 
   2377 	case XDCMD_XRD:	/* extended read/write */
   2378 	case XDCMD_XWR:
   2379 
   2380 		switch (xio->subfn) {
   2381 
   2382 		case XDFUN_THD:/* track headers */
   2383 			if (xio->sectcnt != xd->hw_spt ||
   2384 			    (xio->block % xd->nsect) != 0 ||
   2385 			    xio->dlen != XD_IOCMD_HSZ * xd->hw_spt ||
   2386 			    xio->dptr == NULL)
   2387 				return (EINVAL);
   2388 			xio->sectcnt = 0;
   2389 			break;
   2390 
   2391 		case XDFUN_FMT:/* NOTE: also XDFUN_VFY */
   2392 			if (xio->cmd == XDCMD_XRD)
   2393 				return (EINVAL);	/* no XDFUN_VFY */
   2394 			if (xio->sectcnt || xio->dlen ||
   2395 			    (xio->block % xd->nsect) != 0 || xio->dptr)
   2396 				return (EINVAL);
   2397 			break;
   2398 
   2399 		case XDFUN_HDR:/* header, header verify, data, data ECC */
   2400 			return (EINVAL);	/* not yet */
   2401 
   2402 		case XDFUN_DM:	/* defect map */
   2403 		case XDFUN_DMX:/* defect map (alternate location) */
   2404 			if (xio->sectcnt || xio->dlen != XD_IOCMD_DMSZ ||
   2405 			    (xio->block % xd->nsect) != 0 || xio->dptr == NULL)
   2406 				return (EINVAL);
   2407 			break;
   2408 
   2409 		default:
   2410 			return (EINVAL);
   2411 		}
   2412 		break;
   2413 
   2414 	case XDCMD_TST:	/* diagnostics */
   2415 		return (EINVAL);
   2416 
   2417 	default:
   2418 		return (EINVAL);/* ??? */
   2419 	}
   2420 
   2421 	xdcsc = xd->parent;
   2422 
   2423 	/* create DVMA buffer for request if needed */
   2424 	if (xio->dlen) {
   2425 		bus_addr_t busbuf;
   2426 
   2427 		if ((error = xd_dmamem_alloc(xdcsc->dmatag, xdcsc->auxmap,
   2428 					     &seg, &rseg,
   2429 					     xio->dlen, (void **)&buf,
   2430 					     &busbuf)) != 0) {
   2431 			return (error);
   2432 		}
   2433 		dvmabuf = (void *)(u_long)BUS_ADDR_PADDR(busbuf);
   2434 
   2435 		if (xio->cmd == XDCMD_WR || xio->cmd == XDCMD_XWR) {
   2436 			if ((error = copyin(xio->dptr, buf, xio->dlen)) != 0) {
   2437 				bus_dmamem_unmap(xdcsc->dmatag, buf, xio->dlen);
   2438 				bus_dmamem_free(xdcsc->dmatag, &seg, rseg);
   2439 				return (error);
   2440 			}
   2441 		}
   2442 	}
   2443 
   2444 	/* do it! */
   2445 
   2446 	error = 0;
   2447 	s = splbio();
   2448 	rqno = xdc_cmd(xdcsc, xio->cmd, xio->subfn, xd->xd_drive, xio->block,
   2449 	    xio->sectcnt, dvmabuf, XD_SUB_WAIT);
   2450 	if (rqno == XD_ERR_FAIL) {
   2451 		error = EIO;
   2452 		goto done;
   2453 	}
   2454 	xio->errnum = xdcsc->reqs[rqno].errnum;
   2455 	xio->tries = xdcsc->reqs[rqno].tries;
   2456 	XDC_DONE(xdcsc, rqno, dummy);
   2457 	__USE(dummy);
   2458 
   2459 	if (xio->cmd == XDCMD_RD || xio->cmd == XDCMD_XRD)
   2460 		error = copyout(buf, xio->dptr, xio->dlen);
   2461 
   2462 done:
   2463 	splx(s);
   2464 	if (dvmabuf) {
   2465 		xd_dmamem_free(xdcsc->dmatag, xdcsc->auxmap, &seg, rseg,
   2466 				xio->dlen, buf);
   2467 	}
   2468 	return (error);
   2469 }
   2470 
   2471 /*
   2472  * xdc_e2str: convert error code number into an error string
   2473  */
   2474 const char *
   2475 xdc_e2str(int no)
   2476 {
   2477 	switch (no) {
   2478 	case XD_ERR_FAIL:
   2479 		return ("Software fatal error");
   2480 	case XD_ERR_AOK:
   2481 		return ("Successful completion");
   2482 	case XD_ERR_ICYL:
   2483 		return ("Illegal cylinder address");
   2484 	case XD_ERR_IHD:
   2485 		return ("Illegal head address");
   2486 	case XD_ERR_ISEC:
   2487 		return ("Illgal sector address");
   2488 	case XD_ERR_CZER:
   2489 		return ("Count zero");
   2490 	case XD_ERR_UIMP:
   2491 		return ("Unimplemented command");
   2492 	case XD_ERR_IF1:
   2493 		return ("Illegal field length 1");
   2494 	case XD_ERR_IF2:
   2495 		return ("Illegal field length 2");
   2496 	case XD_ERR_IF3:
   2497 		return ("Illegal field length 3");
   2498 	case XD_ERR_IF4:
   2499 		return ("Illegal field length 4");
   2500 	case XD_ERR_IF5:
   2501 		return ("Illegal field length 5");
   2502 	case XD_ERR_IF6:
   2503 		return ("Illegal field length 6");
   2504 	case XD_ERR_IF7:
   2505 		return ("Illegal field length 7");
   2506 	case XD_ERR_ISG:
   2507 		return ("Illegal scatter/gather length");
   2508 	case XD_ERR_ISPT:
   2509 		return ("Not enough sectors per track");
   2510 	case XD_ERR_ALGN:
   2511 		return ("Next IOPB address alignment error");
   2512 	case XD_ERR_SGAL:
   2513 		return ("Scatter/gather address alignment error");
   2514 	case XD_ERR_SGEC:
   2515 		return ("Scatter/gather with auto-ECC");
   2516 	case XD_ERR_SECC:
   2517 		return ("Soft ECC corrected");
   2518 	case XD_ERR_SIGN:
   2519 		return ("ECC ignored");
   2520 	case XD_ERR_ASEK:
   2521 		return ("Auto-seek retry recovered");
   2522 	case XD_ERR_RTRY:
   2523 		return ("Soft retry recovered");
   2524 	case XD_ERR_HECC:
   2525 		return ("Hard data ECC");
   2526 	case XD_ERR_NHDR:
   2527 		return ("Header not found");
   2528 	case XD_ERR_NRDY:
   2529 		return ("Drive not ready");
   2530 	case XD_ERR_TOUT:
   2531 		return ("Operation timeout");
   2532 	case XD_ERR_VTIM:
   2533 		return ("VMEDMA timeout");
   2534 	case XD_ERR_DSEQ:
   2535 		return ("Disk sequencer error");
   2536 	case XD_ERR_HDEC:
   2537 		return ("Header ECC error");
   2538 	case XD_ERR_RVFY:
   2539 		return ("Read verify");
   2540 	case XD_ERR_VFER:
   2541 		return ("Fatail VMEDMA error");
   2542 	case XD_ERR_VBUS:
   2543 		return ("VMEbus error");
   2544 	case XD_ERR_DFLT:
   2545 		return ("Drive faulted");
   2546 	case XD_ERR_HECY:
   2547 		return ("Header error/cyliner");
   2548 	case XD_ERR_HEHD:
   2549 		return ("Header error/head");
   2550 	case XD_ERR_NOCY:
   2551 		return ("Drive not on-cylinder");
   2552 	case XD_ERR_SEEK:
   2553 		return ("Seek error");
   2554 	case XD_ERR_ILSS:
   2555 		return ("Illegal sector size");
   2556 	case XD_ERR_SEC:
   2557 		return ("Soft ECC");
   2558 	case XD_ERR_WPER:
   2559 		return ("Write-protect error");
   2560 	case XD_ERR_IRAM:
   2561 		return ("IRAM self test failure");
   2562 	case XD_ERR_MT3:
   2563 		return ("Maintenance test 3 failure (DSKCEL RAM)");
   2564 	case XD_ERR_MT4:
   2565 		return ("Maintenance test 4 failure (header shift reg)");
   2566 	case XD_ERR_MT5:
   2567 		return ("Maintenance test 5 failure (VMEDMA regs)");
   2568 	case XD_ERR_MT6:
   2569 		return ("Maintenance test 6 failure (REGCEL chip)");
   2570 	case XD_ERR_MT7:
   2571 		return ("Maintenance test 7 failure (buffer parity)");
   2572 	case XD_ERR_MT8:
   2573 		return ("Maintenance test 8 failure (disk FIFO)");
   2574 	case XD_ERR_IOCK:
   2575 		return ("IOPB checksum miscompare");
   2576 	case XD_ERR_IODM:
   2577 		return ("IOPB DMA fatal");
   2578 	case XD_ERR_IOAL:
   2579 		return ("IOPB address alignment error");
   2580 	case XD_ERR_FIRM:
   2581 		return ("Firmware error");
   2582 	case XD_ERR_MMOD:
   2583 		return ("Illegal maintenance mode test number");
   2584 	case XD_ERR_ACFL:
   2585 		return ("ACFAIL asserted");
   2586 	default:
   2587 		return ("Unknown error");
   2588 	}
   2589 }
   2590