Home | History | Annotate | Line # | Download | only in dev
vme_machdep.c revision 1.1
      1 /*	$NetBSD: vme_machdep.c,v 1.1 1998/01/25 16:06:26 pk Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Paul Kranenburg.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *        This product includes software developed by the NetBSD
     21  *        Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 #include <sys/param.h>
     40 #include <sys/systm.h>
     41 #include <sys/device.h>
     42 #include <sys/malloc.h>
     43 
     44 #include <sys/proc.h>
     45 #include <sys/user.h>
     46 #include <sys/syslog.h>
     47 
     48 #include <vm/vm.h>
     49 
     50 #define _SPARC_BUS_DMA_PRIVATE
     51 #include <machine/bus.h>
     52 #include <machine/autoconf.h>
     53 #include <machine/pmap.h>
     54 #include <machine/oldmon.h>
     55 #include <machine/cpu.h>
     56 #include <machine/ctlreg.h>
     57 
     58 #include <dev/vme/vmevar.h>
     59 
     60 #include <sparc/sparc/asm.h>
     61 #include <sparc/sparc/vaddrs.h>
     62 #include <sparc/sparc/cpuvar.h>
     63 #include <sparc/dev/vmereg.h>
     64 
/*
 * Per-instance software state for the VME bus controller.
 */
     65 struct vmebus_softc {
     66 	struct device	 sc_dev;	/* base device */
     67 	struct vmebusreg *sc_reg; 	/* VME control registers */
     68 	struct vmebusvec *sc_vec;	/* VME interrupt vector */
     69 	struct rom_range *sc_range;	/* ROM range property */
     70 	int		 sc_nrange;	/* number of entries in sc_range */
     71 	volatile u_int32_t *sc_ioctags;	/* VME IO-cache tag registers */
     72 	volatile u_int32_t *sc_iocflush;/* VME IO-cache flush registers */
     73 	int 		 (*sc_vmeintr) __P((void *));	/* CPU-model specific interrupt dispatcher */
     74 	struct bootpath	 *sc_bp;	/* next bootpath component to match */
     75 };
     76 struct  vmebus_softc *vmebus_sc;/*XXX*/
     77 
     78 /* autoconfiguration driver */
     79 static int	vmematch    __P((struct device *, struct cfdata *, void *));
     80 static void	vmeattach   __P((struct device *, struct device *, void *));
     81 #if defined(SUN4)
     82 static void	vmeattach4  __P((struct device *, struct device *, void *));
     83 int 		vmeintr4  __P((void *));
     84 #endif
     85 #if defined(SUN4M)
     86 static void	vmeattach4m __P((struct device *, struct device *, void *));
     87 int 		vmeintr4m __P((void *));
     88 #endif
     89 
     90 
     91 static int	sparc_vme_probe __P((void *, bus_space_tag_t, vme_addr_t,
     92 			      vme_size_t, vme_mod_t));
     93 static int	sparc_vme_map __P((void *, vme_addr_t, vme_size_t, vme_mod_t,
     94 				   bus_space_tag_t, bus_space_handle_t *));
     95 static void	sparc_vme_unmap __P((void *));
     96 static int	sparc_vme_mmap_cookie __P((void *, vme_addr_t, vme_mod_t,
     97 				   bus_space_tag_t, int *));
     98 static int	sparc_vme_intr_map __P((void *, int, int, vme_intr_handle_t *));
     99 static void *	sparc_vme_intr_establish __P((void *, vme_intr_handle_t,
    100 					      int (*) __P((void *)), void *));
    101 static void	sparc_vme_intr_disestablish __P((void *, void *));
    102 
    103 static void	vmebus_translate __P((struct vmebus_softc *, vme_mod_t,
    104 				      struct rom_reg *));
    105 static void	sparc_vme_bus_establish __P((void *, struct device *));
    106 #if defined(SUN4M)
    107 static void	sparc_vme4m_barrier __P((void *));
    108 #endif
    109 
    110 /*
    111  * DMA functions.
    112  */
    113 #if defined(SUN4)
    114 static int	sparc_vme4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
    115 		    bus_size_t, struct proc *, int));
    116 static void	sparc_vme4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
    117 static void	sparc_vme4_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
    118 		    bus_dmasync_op_t));
    119 
    120 static int	sparc_vme4_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
    121 		    bus_size_t, bus_size_t, bus_dma_segment_t *,
    122 		    int, int *, int));
    123 static void	sparc_vme4_dmamem_free __P((bus_dma_tag_t,
    124 		    bus_dma_segment_t *, int));
    125 #endif
    126 
    127 #if defined(SUN4M)
    128 static int	sparc_vme4m_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
    129 		    bus_size_t, bus_size_t, int, bus_dmamap_t *));
    130 
    131 static int	sparc_vme4m_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
    132 		    bus_size_t, struct proc *, int));
    133 static void	sparc_vme4m_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
    134 static void	sparc_vme4m_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
    135 		    bus_dmasync_op_t));
    136 
    137 static int	sparc_vme4m_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
    138 		    bus_size_t, bus_size_t, bus_dma_segment_t *,
    139 		    int, int *, int));
    140 static void	sparc_vme4m_dmamem_free __P((bus_dma_tag_t,
    141 		    bus_dma_segment_t *, int));
    142 #endif
    143 
    144 #if 0
    145 static void	sparc_vme_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
    146 static int	sparc_vme_dmamem_map __P((bus_dma_tag_t, bus_dma_segment_t *,
    147 		    int, size_t, caddr_t *, int));
    148 static void	sparc_vme_dmamem_unmap __P((bus_dma_tag_t, caddr_t, size_t));
    149 static int	sparc_vme_dmamem_mmap __P((bus_dma_tag_t,
    150 		    bus_dma_segment_t *, int, int, int, int));
    151 #endif
    152 
/* Autoconfiguration glue: softc size, match and attach entry points */
    153 struct cfattach vme_ca = {
    154 	sizeof(struct vmebus_softc), vmematch, vmeattach
    155 };
    156 
/*
 * Bus space tag handed to children; the cookie and barrier slots
 * are filled in by the sun4m attach code (see vmeattach4m()).
 */
    157 struct sparc_bus_space_tag sparc_vme_bus_tag = {
    158 	NULL, /* cookie */
    159 	NULL, /* bus_map */
    160 	NULL, /* bus_unmap */
    161 	NULL, /* bus_subregion */
    162 	NULL  /* barrier */
    163 };
    164 
/*
 * VME chipset tag: entry points exported to the MI VME code.
 */
    165 struct vme_chipset_tag sparc_vme_chipset_tag = {
    166 	NULL,	/* cookie; set to our device at attach time */
    167 	sparc_vme_probe,
    168 	sparc_vme_map,
    169 	sparc_vme_unmap,
    170 	sparc_vme_mmap_cookie,
    171 	sparc_vme_intr_map,
    172 	sparc_vme_intr_establish,
    173 	sparc_vme_intr_disestablish,
    174 	sparc_vme_bus_establish
    175 };
    176 
    177 
    178 #if defined(SUN4)
/* sun4 DMA tag: generic bus_dma methods plus VME-specific overrides */
    179 struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
    180 	NULL,	/* cookie */
    181 	_bus_dmamap_create,
    182 	_bus_dmamap_destroy,
    183 	sparc_vme4_dmamap_load,
    184 	_bus_dmamap_load_mbuf,
    185 	_bus_dmamap_load_uio,
    186 	_bus_dmamap_load_raw,
    187 	sparc_vme4_dmamap_unload,
    188 	sparc_vme4_dmamap_sync,
    189 
    190 	sparc_vme4_dmamem_alloc,
    191 	sparc_vme4_dmamem_free,
    192 	_bus_dmamem_map,
    193 	_bus_dmamem_unmap,
    194 	_bus_dmamem_mmap
    195 };
    196 #endif
    197 
    198 #if defined(SUN4M)
/* sun4m DMA tag: generic bus_dma methods plus VME IO-cache aware overrides */
    199 struct sparc_bus_dma_tag sparc_vme4m_dma_tag = {
    200 	NULL,	/* cookie */
    201 	sparc_vme4m_dmamap_create,
    202 	_bus_dmamap_destroy,
    203 	sparc_vme4m_dmamap_load,
    204 	_bus_dmamap_load_mbuf,
    205 	_bus_dmamap_load_uio,
    206 	_bus_dmamap_load_raw,
    207 	sparc_vme4m_dmamap_unload,
    208 	sparc_vme4m_dmamap_sync,
    209 
    210 	sparc_vme4m_dmamem_alloc,
    211 	sparc_vme4m_dmamem_free,
    212 	_bus_dmamem_map,
    213 	_bus_dmamem_unmap,
    214 	_bus_dmamem_mmap
    215 };
    216 #endif
    217 
    218 
/*
 * Called when a device attaches on the VME bus: if the device matches
 * the current boot path component, record it and advance the boot
 * path to the next component.
 */
    219 void
    220 sparc_vme_bus_establish(cookie, dev)
    221 	void *cookie;
    222 	struct device *dev;
    223 {
    224 	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
    225 	struct bootpath *bp = sc->sc_bp;
    226 	char *name;
    227 
    228 	name = dev->dv_cfdata->cf_driver->cd_name;
    229 #ifdef DEBUG
    230 	printf("sparc_vme_bus_establish: %s%d\n", name, dev->dv_unit);
    231 #endif
	/* Match both the driver name and the unit number from the bootpath */
    232 	if (bp != NULL && strcmp(bp->name, name) == 0 &&
    233 	    dev->dv_unit == bp->val[1]) {
    234 		bp->dev = dev;
    235 #ifdef DEBUG
    236 printf("sparc_vme_bus_establish: on the boot path\n");
    237 #endif
    238 		sc->sc_bp++;
    239 		bootpath_store(1, sc->sc_bp);
    240 	}
    241 }
    242 
    243 
    244 int
    245 vmematch(parent, cf, aux)
    246 	struct device *parent;
    247 	struct cfdata *cf;
    248 	void *aux;
    249 {
    250 	register struct confargs *ca = aux;
    251 	register struct romaux *ra = &ca->ca_ra;
    252 
    253 	if (CPU_ISSUN4C)
    254 		return (0);
    255 
    256 	return (strcmp(cf->cf_driver->cd_name, ra->ra_name) == 0);
    257 }
    258 
/*
 * Attach the VME bus: record our position on the boot path, then
 * dispatch to the CPU-model specific attach routine.
 */
    259 void
    260 vmeattach(parent, self, aux)
    261 	struct device *parent, *self;
    262 	void *aux;
    263 {
    264 	struct vmebus_softc *sc = (struct vmebus_softc *)self;
    265 	struct confargs *ca = aux;
    266 	register struct romaux *ra = &ca->ca_ra;
    267 
	/*
	 * If we are on the boot path, advance the stored bootpath
	 * pointer to the next component for our children to match.
	 */
    268 	if (ra->ra_bp != NULL && strcmp(ra->ra_bp->name, "vme") == 0) {
    269 		sc->sc_bp = ra->ra_bp + 1;
    270 		bootpath_store(1, sc->sc_bp);
    271 	}
    272 
    273 #if defined(SUN4)
    274 	if (CPU_ISSUN4)
    275 		vmeattach4(parent, self, aux);
    276 #endif
    277 
    278 #if defined(SUN4M)
    279 	if (CPU_ISSUN4M)
    280 		vmeattach4m(parent, self, aux);
    281 #endif
    282 
	/* Done matching children against the boot path */
    283 	bootpath_store(1, NULL);
    284 }
    285 
    286 #if defined(SUN4)
/*
 * sun4 VME bus attach: set up the bus, chipset and DMA tags and
 * search for children.  Only one VME bus instance is supported.
 */
    287 void
    288 vmeattach4(parent, self, aux)
    289 	struct device *parent, *self;
    290 	void *aux;
    291 {
    292 	struct vmebus_softc *sc = (struct vmebus_softc *)self;
    293 	struct vme_busattach_args vba;
    294 
    295 	if (self->dv_unit > 0) {
    296 		printf(" unsupported\n");
    297 		return;
    298 	}
    299 
    300 	/* VME interrupt entry point */
    301 	sc->sc_vmeintr = vmeintr4;
    302 
    303 /*XXX*/	sparc_vme_chipset_tag.cookie = self;
    304 /*XXX*/	sparc_vme4_dma_tag._cookie = self;
    305 
    306 	vba.vba_bustag = &sparc_vme_bus_tag;
    307 	vba.vba_chipset_tag = &sparc_vme_chipset_tag;
    308 	vba.vba_dmatag = &sparc_vme4_dma_tag;
    309 
	/* Search for and attach children */
    310 	printf("\n");
    311 	(void)config_search(vmesearch, self, &vba);
    312 	return;
    313 }
    314 #endif
    315 
    316 #if defined(SUN4M)
    317 /* sun4m vmebus */
/*
 * sun4m VME bus attach: map the control registers, fetch the PROM
 * "ranges" property, invalidate and enable the VME IO-cache, then
 * search for children.  Only one VME bus instance is supported.
 */
    318 void
    319 vmeattach4m(parent, self, aux)
    320 	struct device *parent, *self;
    321 	void *aux;
    322 {
    323 	struct vmebus_softc *sc = (struct vmebus_softc *)self;
    324 	struct confargs *ca = aux;
    325 	register struct romaux *ra = &ca->ca_ra;
    326 	int node, rlen;
    327 	struct vme_busattach_args vba;
    328 	int cline;
    329 
    330 	if (self->dv_unit > 0) {
    331 		printf(" unsupported\n");
    332 		return;
    333 	}
    334 
    335 	/* VME interrupt entry point */
    336 	sc->sc_vmeintr = vmeintr4m;
    337 
    338 /*XXX*/	sparc_vme_chipset_tag.cookie = self;
    339 /*XXX*/	sparc_vme4m_dma_tag._cookie = self;
    340 	sparc_vme_bus_tag.sparc_barrier = sparc_vme4m_barrier;
    341 
    342 	vba.vba_bustag = &sparc_vme_bus_tag;
    343 	vba.vba_chipset_tag = &sparc_vme_chipset_tag;
    344 	vba.vba_dmatag = &sparc_vme4m_dma_tag;
    345 
    346 	node = ra->ra_node;
    347 
    348 	/* Map VME control space */
    349 	sc->sc_reg = (struct vmebusreg *)
    350 		mapdev(&ra->ra_reg[0], 0, 0, ra->ra_reg[0].rr_len);
    351 	sc->sc_vec = (struct vmebusvec *)
    352 		mapdev(&ra->ra_reg[1], 0, 0, ra->ra_reg[1].rr_len);
    353 	sc->sc_ioctags = (u_int32_t *)
    354 		mapdev(&ra->ra_reg[1], 0, VME_IOC_TAGOFFSET, VME_IOC_SIZE);
    355 	sc->sc_iocflush = (u_int32_t *)
    356 		mapdev(&ra->ra_reg[1], 0, VME_IOC_FLUSHOFFSET, VME_IOC_SIZE);
    357 
    358 /*XXX*/	sparc_vme_bus_tag.cookie = sc->sc_reg;
    359 
    360 	/*
    361 	 * Get "range" property.
    362 	 */
    363 	rlen = getproplen(node, "ranges");
    364 	if (rlen > 0) {
    365 		sc->sc_nrange = rlen / sizeof(struct rom_range);
    366 		sc->sc_range =
    367 			(struct rom_range *)malloc(rlen, M_DEVBUF, M_NOWAIT);
	/* NOTE(review): this is an allocation failure; the panic
	   message ("too large") is somewhat misleading */
    368 		if (sc->sc_range == 0)
    369 			panic("vme: PROM ranges too large: %d", rlen);
    370 		(void)getprop(node, "ranges", sc->sc_range, rlen);
    371 	}
    372 
    373 	vmebus_sc = sc;
    374 
    375 	/*
    376 	 * Invalidate all IO-cache entries.
    377 	 */
    378 	for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
    379 		sc->sc_ioctags[--cline] = 0;
    380 	}
    381 
    382 	/* Enable IO-cache */
    383 	sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;
    384 
    385 	printf(": version 0x%x\n",
    386 	       sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);
    387 
	/* Search for and attach children */
    388 	(void)config_search(vmesearch, self, &vba);
    389 }
    390 #endif
    391 
/*
 * Report an asynchronous VME bus fault (sun4m): print the fault
 * status and fault address registers.
 */
    392 void sparc_vme_async_fault __P((void));
    393 void
    394 sparc_vme_async_fault()
    395 {
    396 	struct vmebus_softc *sc = vmebus_sc;
    397 	u_int32_t addr;
    398 
	/* AFAR holds the address of the faulted VME transaction */
    399 	addr = sc->sc_reg->vmebus_afar;
    400 	printf("vme afsr: %x; addr %x\n", sc->sc_reg->vmebus_afsr, addr);
    401 }
    402 
/*
 * Probe for a device at the given VME address by temporarily mapping
 * one page and performing a fault-protected read.
 * Returns non-zero if a device responded.
 */
    403 int
    404 sparc_vme_probe(cookie, tag, addr, size, mod)
    405 	void *cookie;
    406 	bus_space_tag_t tag;
    407 	vme_addr_t addr;
    408 	vme_size_t size;
    409 	int mod;
    410 {
    411 	struct rom_reg reg;
    412 	caddr_t tmp;
    413 	int result;
    414 	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
    415 
    416 /* XXX - Use bus_space_[un]map() etc. */
    417 	reg.rr_paddr = (void *)addr;
    418 	vmebus_translate(sc, mod, &reg);
    419 	tmp = (caddr_t)mapdev(&reg, TMPMAP_VA, 0, NBPG);
	/* probeget() returns -1 if the access faulted (no device) */
    420 	result = probeget(tmp, size) != -1;
	/* Tear down the temporary mapping again */
    421 	pmap_remove(pmap_kernel(), TMPMAP_VA, TMPMAP_VA+NBPG);
    422 	return (result);
    423 }
    424 
/*
 * Map `size' bytes of VME space at `addr' (qualified by the address
 * modifier `mod') into kernel virtual memory; the resulting handle
 * is returned in *handlep.  Always succeeds (returns 0).
 */
    425 int
    426 sparc_vme_map(cookie, addr, size, mod, tag, handlep)
    427 	void *cookie;
    428 	vme_addr_t addr;
    429 	vme_size_t size;
    430 	int mod;
    431 	bus_space_tag_t tag;
    432 	bus_space_handle_t *handlep;
    433 {
    434 	struct rom_reg reg;
    435 	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
    436 
    437 	reg.rr_paddr = (void *)addr;
    438 	vmebus_translate(sc, mod, &reg);
    439 	*handlep = (bus_space_handle_t)mapdev(&reg, 0, 0, size);
    440 	return (0);
    441 }
    442 
/*
 * Build a cookie for a device mmap() entry point: the translated
 * physical address combined with the IO space encoding and the
 * no-cache flag.
 */
    443 int
    444 sparc_vme_mmap_cookie(cookie, addr, mod, tag, handlep)
    445 	void *cookie;
    446 	vme_addr_t addr;
    447 	int mod;
    448 	bus_space_tag_t tag;
    449 	int *handlep;
    450 {
    451 	struct rom_reg reg;
    452 	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
    453 
    454 	reg.rr_paddr = (void *)addr;
    455 	vmebus_translate(sc, mod, &reg);
    456 	*handlep = (int)reg.rr_paddr | PMAP_IOENC(reg.rr_iospace) | PMAP_NC;
    457 	return (0);
    458 }
    459 
/*
 * Translate a VME address (in rr->rr_paddr) plus address modifier
 * into a parent-bus physical address and IO space, in place.
 * On sun4 the VME address spaces live at fixed physical offsets;
 * on sun4m the PROM "ranges" property is consulted.
 * Panics on an unsupported modifier.
 */
    460 void
    461 vmebus_translate(sc, mod, rr)
    462 	struct vmebus_softc *sc;
    463 	vme_mod_t mod;
    464 	struct rom_reg *rr;
    465 {
    466 	register int j;
    467 
    468 	if (CPU_ISSUN4) {
	/* NOTE(review): cast-as-lvalue is a legacy compiler extension */
    469 		(int)rr->rr_iospace = (mod & VMEMOD_D32)
    470 			? PMAP_VME32
    471 			: PMAP_VME16;
    472 
    473 		switch (mod & ~VMEMOD_D32) {
    474 		case VMEMOD_A16|VMEMOD_D|VMEMOD_S:
    475 			rr->rr_paddr += 0xffff0000;
    476 			break;
    477 		case VMEMOD_A24|VMEMOD_D|VMEMOD_S:
    478 			rr->rr_paddr += 0xff000000;
    479 			break;
    480 		case VMEMOD_A32|VMEMOD_D|VMEMOD_S:
    481 			break;
    482 		default:
    483 			panic("vmebus_translate: unsupported VME modifier: %x",
    484 				mod);
    485 		}
    486 		return;
    487 	}
    488 
    489 
    490 	/* sun4m VME node: translate through "ranges" property */
    491 	if (sc->sc_nrange == 0)
    492 		panic("vmebus: no ranges");
    493 
    494 	/* Translate into parent address spaces */
    495 	for (j = 0; j < sc->sc_nrange; j++) {
    496 		if (sc->sc_range[j].cspace == mod) {
    497 			(int)rr->rr_paddr +=
    498 				sc->sc_range[j].poffset;
    499 			(int)rr->rr_iospace =
    500 				sc->sc_range[j].pspace;
    501 			return;
    502 		}
    503 	}
    504 	panic("sparc_vme_translate: modifier %x not supported", mod);
    505 }
    506 
    507 #if defined(SUN4M)
/*
 * Bus-space barrier for sun4m: a read of the asynchronous fault
 * status register flushes pending write-buffers to the VME bus.
 */
    508 void
    509 sparc_vme4m_barrier(cookie)
    510 	void *cookie;
    511 {
    512 	struct vmebusreg *vbp = (struct vmebusreg *)cookie;
    513 
    514 	/* Read async fault status to flush write-buffers */
    515 	(*(volatile int *)&vbp->vmebus_afsr);
    516 }
    517 #endif
    518 
    519 
    520 
    521 /*
    522  * VME Interrupt Priority Level to sparc Processor Interrupt Level.
    523  */
    524 static int vme_ipl_to_pil[] = {
    525 	0,	/* VME IPL 0: no interrupt */
    526 	2,
    527 	3,
    528 	5,
    529 	7,
    530 	9,
    531 	11,
    532 	13	/* VME IPL 7 */
    533 };
    534 
    535 
    536 /*
    537  * All VME device interrupts go through vmeintr(). This function reads
    538  * the VME vector from the bus, then dispatches the device interrupt
    539  * handler.  All handlers for devices that map to the same Processor
    540  * Interrupt Level (according to the table above) are on a linked list
    541  * of `sparc_vme_intr_handle' structures. The head of which is passed
    542  * down as the argument to `vmeintr(void *arg)'.
    543  */
/* Per-handler state; all handlers at one PIL are chained via `next' */
    544 struct sparc_vme_intr_handle {
    545 	struct intrhand ih;	/* handler function and argument */
    546 	struct sparc_vme_intr_handle *next;	/* next handler at same PIL */
    547 	int	vec;		/* VME interrupt vector */
    548 	int	pri;		/* VME interrupt priority */
    549 	struct vmebus_softc *sc;/*XXX*/
    550 };
    551 
    552 #if defined(SUN4)
/*
 * sun4 VME interrupt dispatcher: read the interrupt vector from the
 * bus, then run every handler registered for that vector.
 * Returns non-zero if any handler claimed the interrupt.
 */
    553 int
    554 vmeintr4(arg)
    555 	void *arg;
    556 {
    557 	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
    558 	int level, vec;
    559 	int i = 0;
    560 
    561 	level = (ihp->pri << 1) | 1;
    562 
	/* Fetch the vector via the `control' address space */
    563 	vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level));
    564 
    565 	if (vec == -1) {
    566 		printf("vme: spurious interrupt\n");
    567 		return 1; /* XXX - pretend we handled it, for now */
    568 	}
    569 
	/* Run all handlers registered on this vector */
    570 	for (; ihp; ihp = ihp->next)
    571 		if (ihp->vec == vec && ihp->ih.ih_fun)
    572 			i += (ihp->ih.ih_fun)(ihp->ih.ih_arg);
    573 	return (i);
    574 }
    575 #endif
    576 
    577 #if defined(SUN4M)
/*
 * sun4m VME interrupt dispatcher: fetch the interrupt vector from
 * the VME interface (guarding against bus faults while doing so)
 * and run every handler registered for that vector.
 * Returns non-zero if any handler claimed the interrupt.
 */
    578 int
    579 vmeintr4m(arg)
    580 	void *arg;
    581 {
    582 	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
    583 	int level, vec;
    584 	int i = 0;
    585 
    586 	level = (ihp->pri << 1) | 1;
    587 
    588 #if 0
    589 	int pending;
    590 
    591 	/* Flush VME <=> Sbus write buffers */
    592 	(*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);
    593 
    594 	pending = *((int*)ICR_SI_PEND);
    595 	if ((pending & SINTR_VME(ihp->pri)) == 0) {
    596 		printf("vmeintr: non pending at pri %x(p 0x%x)\n",
    597 			ihp->pri, pending);
    598 		return (0);
    599 	}
    600 #endif
    601 #if 0
    602 	/* Why gives this a bus timeout sometimes? */
    603 	vec = ihp->sc->sc_vec->vmebusvec[level];
    604 #else
    605 	/* so, arrange to catch the fault... */
    606 	{
    607 	extern struct user *proc0paddr;
    608 	extern int fkbyte __P((caddr_t, struct pcb *));
    609 	caddr_t addr = (caddr_t)&ihp->sc->sc_vec->vmebusvec[level];
    610 	struct pcb *xpcb;
    611 	u_long saveonfault;
    612 	int s;
    613 
    614 	s = splhigh();
	/* Use proc0's PCB when no process context is current */
    615 	if (curproc == NULL)
    616 		xpcb = (struct pcb *)proc0paddr;
    617 	else
    618 		xpcb = &curproc->p_addr->u_pcb;
    619 
	/* fkbyte() performs a fault-protected byte read; -1 on fault */
    620 	saveonfault = (u_long)xpcb->pcb_onfault;
    621 	vec = fkbyte(addr, xpcb);
    622 	xpcb->pcb_onfault = (caddr_t)saveonfault;
    623 
    624 	splx(s);
    625 	}
    626 #endif
    627 
    628 	if (vec == -1) {
    629 		printf("vme: spurious interrupt: ");
    630 		printf("SI: 0x%x, VME AFSR: 0x%x, VME AFAR 0x%x\n",
    631 			*((int*)ICR_SI_PEND),
    632 			ihp->sc->sc_reg->vmebus_afsr,
    633 			ihp->sc->sc_reg->vmebus_afar);
    634 		return 1; /* XXX - pretend we handled it, for now */
    635 	}
    636 
	/* Run all handlers registered on this vector */
    637 	for (; ihp; ihp = ihp->next)
    638 		if (ihp->vec == vec && ihp->ih.ih_fun)
    639 			i += (ihp->ih.ih_fun)(ihp->ih.ih_arg);
    640 	return (i);
    641 }
    642 #endif
    643 
    644 int
    645 sparc_vme_intr_map(cookie, vec, pri, ihp)
    646 	void *cookie;
    647 	int vec;
    648 	int pri;
    649 	vme_intr_handle_t *ihp;
    650 {
    651 	struct sparc_vme_intr_handle *ih;
    652 
    653 	ih = (vme_intr_handle_t)
    654 	    malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT);
    655 	ih->pri = pri;
    656 	ih->vec = vec;
    657 	ih->sc = cookie;/*XXX*/
    658 	*ihp = ih;
    659 	return (0);
    660 }
    661 
/*
 * Establish a VME device interrupt.
 * All VME handlers that map to the same processor level share one
 * `struct intrhand' whose ih_arg is the head of the list of
 * sparc_vme_intr_handles walked by vmeintr4/vmeintr4m.
 */
    662 void *
    663 sparc_vme_intr_establish(cookie, vih, func, arg)
    664 	void *cookie;
    665 	vme_intr_handle_t vih;
    666 	int (*func) __P((void *));
    667 	void *arg;
    668 {
    669 	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
    670 	struct sparc_vme_intr_handle *svih =
    671 			(struct sparc_vme_intr_handle *)vih;
    672 	struct intrhand *ih;
    673 	int level;
    674 
    675 	/* Translate VME priority to processor IPL */
    676 	level = vme_ipl_to_pil[svih->pri];
    677 
    678 	svih->ih.ih_fun = func;
    679 	svih->ih.ih_arg = arg;
    680 	svih->next = NULL;
    681 
    682 	/* ensure the interrupt subsystem will call us at this level */
    683 	for (ih = intrhand[level]; ih != NULL; ih = ih->ih_next)
    684 		if (ih->ih_fun == sc->sc_vmeintr)
    685 			break;
    686 
    687 	if (ih == NULL) {
	/* First VME handler at this level: register our dispatcher */
    688 		ih = (struct intrhand *)
    689 			malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
    690 		if (ih == NULL)
    691 			panic("vme_addirq");
    692 		bzero(ih, sizeof *ih);
    693 		ih->ih_fun = sc->sc_vmeintr;
    694 		ih->ih_arg = vih;
    695 		intr_establish(level, ih);
    696 	} else {
	/* Dispatcher already registered: prepend to its handler list */
    697 		svih->next = (vme_intr_handle_t)ih->ih_arg;
    698 		ih->ih_arg = vih;
    699 	}
    700 	return (NULL);
    701 }
    702 
/* Unmap previously mapped VME space; not implemented yet. */
    703 void
    704 sparc_vme_unmap(cookie)
    705 	void * cookie;
    706 {
    707 	/* Not implemented */
    708 	panic("sparc_vme_unmap");
    709 }
    710 
/* Remove a previously established VME interrupt; not implemented yet. */
    711 void
    712 sparc_vme_intr_disestablish(cookie, a)
    713 	void *cookie;
    714 	void *a;
    715 {
    716 	/* Not implemented */
    717 	panic("sparc_vme_intr_disestablish");
    718 }
    719 
    720 
    721 
    722 /*
    723  * VME DMA functions.
    724  */
    725 
    726 #if defined(SUN4)
/*
 * Load a DMA map, then adjust the DVMA address so it is valid as
 * seen from the VME bus.  Returns 0 on success.
 */
    727 int
    728 sparc_vme4_dmamap_load(t, map, buf, buflen, p, flags)
    729 	bus_dma_tag_t t;
    730 	bus_dmamap_t map;
    731 	void *buf;
    732 	bus_size_t buflen;
    733 	struct proc *p;
    734 	int flags;
    735 {
    736 	int error;
    737 
    738 	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
    739 	if (error != 0)
    740 		return (error);
    741 
    742 	/* Adjust DVMA address to VME view */
    743 	map->dm_segs[0].ds_addr -= DVMA_BASE;
    744 	return (0);
    745 }
    746 
/*
 * Undo the VME view adjustment made at load time, then do the
 * generic unload.
 */
    747 void
    748 sparc_vme4_dmamap_unload(t, map)
    749 	bus_dma_tag_t t;
    750 	bus_dmamap_t map;
    751 {
    752 	map->dm_segs[0].ds_addr += DVMA_BASE;
    753 	_bus_dmamap_unload(t, map);
    754 }
    755 
    756 int
    757 sparc_vme4_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
    758 	bus_dma_tag_t t;
    759 	bus_size_t size, alignment, boundary;
    760 	bus_dma_segment_t *segs;
    761 	int nsegs;
    762 	int *rsegs;
    763 	int flags;
    764 {
    765 	int error;
    766 
    767 	error = _bus_dmamem_alloc(t, size, alignment, boundary,
    768 				  segs, nsegs, rsegs, flags);
    769 	if (error != 0)
    770 		return (error);
    771 
    772 	segs[0].ds_addr -= DVMA_BASE;
    773 	return (0);
    774 }
    775 
/*
 * Undo the VME view adjustment made at allocation time, then free
 * the memory through the generic code.
 */
    776 void
    777 sparc_vme4_dmamem_free(t, segs, nsegs)
    778 	bus_dma_tag_t t;
    779 	bus_dma_segment_t *segs;
    780 	int nsegs;
    781 {
    782 	segs[0].ds_addr += DVMA_BASE;
    783 	_bus_dmamem_free(t, segs, nsegs);
    784 }
    785 
    786 void
    787 sparc_vme4_dmamap_sync(t, map, op)
    788 	bus_dma_tag_t t;
    789 	bus_dmamap_t map;
    790 	bus_dmasync_op_t op;
    791 {
    792 	switch (op) {
    793 	default:
    794 	}
    795 }
    796 #endif /* SUN4 */
    797 
    798 #if defined(SUN4M)
/*
 * Create a DMA map for VME DVMA on sun4m.
 */
    799 static int
    800 sparc_vme4m_dmamap_create (t, size, nsegments, maxsegsz, boundary, flags, dmamp)
    801 	bus_dma_tag_t t;
    802 	bus_size_t size;
    803 	int nsegments;
    804 	bus_size_t maxsegsz;
    805 	bus_size_t boundary;
    806 	int flags;
    807 	bus_dmamap_t *dmamp;
    808 {
    809 	int align;
    810 
    811 	/* VME DVMA addresses must always be 8K aligned */
    812 	align = 8192;
    813 
	/* NOTE(review): `align' is computed but not yet passed down;
	   see the commented-out argument below */
    814 	/* XXX - todo: allocate DVMA addresses from assigned ranges:
    815 		 upper 8MB for A32 space; upper 1MB for A24 space */
    816 	return (_bus_dmamap_create(t, size, nsegments, maxsegsz,
    817 				    boundary, /*align,*/ flags, dmamp));
    818 }
    819 
/*
 * Load a DMA map and claim VME IO-cache lines covering the
 * transfer range.  Returns 0 on success.
 */
    820 int
    821 sparc_vme4m_dmamap_load(t, map, buf, buflen, p, flags)
    822 	bus_dma_tag_t t;
    823 	bus_dmamap_t map;
    824 	void *buf;
    825 	bus_size_t buflen;
    826 	struct proc *p;
    827 	int flags;
    828 {
    829 	struct vmebus_softc	*sc = (struct vmebus_softc *)t->_cookie;
    830 	volatile u_int32_t	*ioctags;
    831 	int			error;
    832 
	/* Round the length up to whole IO-cache pages */
    833 	buflen = (buflen + VME_IOC_PAGESZ - 1) & ~(VME_IOC_PAGESZ - 1);
    834 	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
    835 	if (error != 0)
    836 		return (error);
    837 
    838 	/* allocate IO cache entries for this range */
    839 	ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
    840 	for (;buflen > 0;) {
	/* Set the IO-cache control bits for each line in the range */
    841 		*ioctags = VME_IOC_IC | VME_IOC_W;
    842 		ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
    843 		buflen -= VME_IOC_PAGESZ;
    844 	}
    845 	return (0);
    846 }
    847 
    848 
/*
 * Flush the VME IO-cache lines covering the mapped range, then
 * do the generic unload.
 */
    849 void
    850 sparc_vme4m_dmamap_unload(t, map)
    851 	bus_dma_tag_t t;
    852 	bus_dmamap_t map;
    853 {
    854 	struct vmebus_softc	*sc = (struct vmebus_softc *)t->_cookie;
    855 	volatile u_int32_t	*flushregs;
    856 	int			len;
    857 
    858 	/* Flush VME IO cache */
    859 	len = map->dm_segs[0].ds_len;
    860 	flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
    861 	for (;len > 0;) {
	/* A write to the flush register flushes one cache line */
    862 		*flushregs = 0;
    863 		flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
    864 		len -= VME_IOC_PAGESZ;
    865 	}
    866 	/* Read a tag to synchronize the IOC flushes */
    867 	(*sc->sc_ioctags);
    868 
    869 	_bus_dmamap_unload(t, map);
    870 }
    871 
    872 int
    873 sparc_vme4m_dmamem_alloc(t, size, alignmnt, boundary, segs, nsegs, rsegs, flags)
    874 	bus_dma_tag_t t;
    875 	bus_size_t size, alignmnt, boundary;
    876 	bus_dma_segment_t *segs;
    877 	int nsegs;
    878 	int *rsegs;
    879 	int flags;
    880 {
    881 	int error;
    882 
    883 	error = _bus_dmamem_alloc(t, size, alignmnt, boundary,
    884 				  segs, nsegs, rsegs, flags);
    885 	if (error != 0)
    886 		return (error);
    887 
    888 	return (0);
    889 }
    890 
/* Free DMA-safe memory; no VME-specific adjustment needed on sun4m. */
    891 void
    892 sparc_vme4m_dmamem_free(t, segs, nsegs)
    893 	bus_dma_tag_t t;
    894 	bus_dma_segment_t *segs;
    895 	int nsegs;
    896 {
    897 	_bus_dmamem_free(t, segs, nsegs);
    898 }
    899 
    900 void
    901 sparc_vme4m_dmamap_sync(t, map, op)
    902 	bus_dma_tag_t t;
    903 	bus_dmamap_t map;
    904 	bus_dmasync_op_t op;
    905 {
    906 	switch (op) {
    907 	default:
    908 	}
    909 }
    910 #endif /* SUN4M */
    911