Home | History | Annotate | Line # | Download | only in dev
vme_machdep.c revision 1.6
      1 /*	$NetBSD: vme_machdep.c,v 1.6 1998/03/21 20:30:49 pk Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Paul Kranenburg.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *        This product includes software developed by the NetBSD
     21  *        Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 #include <sys/param.h>
     40 #include <sys/systm.h>
     41 #include <sys/device.h>
     42 #include <sys/malloc.h>
     43 
     44 #include <sys/proc.h>
     45 #include <sys/user.h>
     46 #include <sys/syslog.h>
     47 
     48 #include <vm/vm.h>
     49 
     50 #define _SPARC_BUS_DMA_PRIVATE
     51 #include <machine/bus.h>
     52 #include <sparc/sparc/iommuvar.h>
     53 #include <machine/autoconf.h>
     54 #include <machine/pmap.h>
     55 #include <machine/oldmon.h>
     56 #include <machine/cpu.h>
     57 #include <machine/ctlreg.h>
     58 
     59 #include <dev/vme/vmevar.h>
     60 
     61 #include <sparc/sparc/asm.h>
     62 #include <sparc/sparc/vaddrs.h>
     63 #include <sparc/sparc/cpuvar.h>
     64 #include <sparc/dev/vmereg.h>
     65 
/*
 * Per-instance state for the VME bus controller (sun4 and sun4m).
 */
struct vmebus_softc {
	struct device	 sc_dev;	/* base device */
	struct vmebusreg *sc_reg; 	/* VME control registers */
	struct vmebusvec *sc_vec;	/* VME interrupt vector */
	struct rom_range *sc_range;	/* ROM range property */
	int		 sc_nrange;	/* number of entries in sc_range */
	volatile u_int32_t *sc_ioctags;	/* VME IO-cache tag registers */
	volatile u_int32_t *sc_iocflush;/* VME IO-cache flush registers */
	int 		 (*sc_vmeintr) __P((void *));	/* interrupt entry: vmeintr4 or vmeintr4m */
	struct bootpath	 *sc_bp;	/* current boot path component, if any */
};
struct  vmebus_softc *vmebus_sc;/*XXX*/
     78 
     79 /* autoconfiguration driver */
     80 static int	vmematch_iommu  __P((struct device *, struct cfdata *, void *));
     81 static void	vmeattach_iommu __P((struct device *, struct device *, void *));
     82 static int	vmematch_mainbus  __P((struct device *, struct cfdata *, void *));
     83 static void	vmeattach_mainbus __P((struct device *, struct device *, void *));
     84 #if defined(SUN4)
     85 int 		vmeintr4  __P((void *));
     86 #endif
     87 #if defined(SUN4M)
     88 int 		vmeintr4m __P((void *));
     89 #endif
     90 
     91 
     92 static int	sparc_vme_probe __P((void *, bus_space_tag_t, vme_addr_t,
     93 				     size_t, vme_size_t, vme_mod_t,
     94 				     int (*) __P((void *, void *)), void *));
     95 static int	sparc_vme_map __P((void *, vme_addr_t, vme_size_t, vme_mod_t,
     96 				   bus_space_tag_t, bus_space_handle_t *));
     97 static void	sparc_vme_unmap __P((void *));
     98 static int	sparc_vme_mmap_cookie __P((void *, vme_addr_t, vme_mod_t,
     99 				   bus_space_tag_t, int *));
    100 static int	sparc_vme_intr_map __P((void *, int, int, vme_intr_handle_t *));
    101 static void *	sparc_vme_intr_establish __P((void *, vme_intr_handle_t,
    102 					      int (*) __P((void *)), void *));
    103 static void	sparc_vme_intr_disestablish __P((void *, void *));
    104 
    105 static void	vmebus_translate __P((struct vmebus_softc *, vme_mod_t,
    106 				      struct rom_reg *));
    107 static void	sparc_vme_bus_establish __P((void *, struct device *));
    108 #if defined(SUN4M)
    109 static void	sparc_vme4m_barrier __P((void *));
    110 #endif
    111 
    112 /*
    113  * DMA functions.
    114  */
    115 #if defined(SUN4)
    116 static int	sparc_vme4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
    117 		    bus_size_t, struct proc *, int));
    118 static void	sparc_vme4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
    119 static void	sparc_vme4_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
    120 		    bus_addr_t, bus_size_t, int));
    121 
    122 static int	sparc_vme4_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
    123 		    bus_size_t, bus_size_t, bus_dma_segment_t *,
    124 		    int, int *, int));
    125 static void	sparc_vme4_dmamem_free __P((bus_dma_tag_t,
    126 		    bus_dma_segment_t *, int));
    127 #endif
    128 
    129 #if defined(SUN4M)
    130 static int	sparc_vme4m_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
    131 		    bus_size_t, bus_size_t, int, bus_dmamap_t *));
    132 
    133 static int	sparc_vme4m_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
    134 		    bus_size_t, struct proc *, int));
    135 static void	sparc_vme4m_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
    136 static void	sparc_vme4m_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
    137 		    bus_addr_t, bus_size_t, int));
    138 
    139 static int	sparc_vme4m_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
    140 		    bus_size_t, bus_size_t, bus_dma_segment_t *,
    141 		    int, int *, int));
    142 static void	sparc_vme4m_dmamem_free __P((bus_dma_tag_t,
    143 		    bus_dma_segment_t *, int));
    144 #endif
    145 
    146 #if 0
    147 static void	sparc_vme_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
    148 static int	sparc_vme_dmamem_map __P((bus_dma_tag_t, bus_dma_segment_t *,
    149 		    int, size_t, caddr_t *, int));
    150 static void	sparc_vme_dmamem_unmap __P((bus_dma_tag_t, caddr_t, size_t));
    151 static int	sparc_vme_dmamem_mmap __P((bus_dma_tag_t,
    152 		    bus_dma_segment_t *, int, int, int, int));
    153 #endif
    154 
/* Autoconfiguration glue: VME attached directly at mainbus (sun4) */
struct cfattach vme_mainbus_ca = {
	sizeof(struct vmebus_softc), vmematch_mainbus, vmeattach_mainbus
};

/* Autoconfiguration glue: VME attached below the IOMMU (sun4m) */
struct cfattach vme_iommu_ca = {
	sizeof(struct vmebus_softc), vmematch_iommu, vmeattach_iommu
};
    162 
/*
 * Bus space tag handed to VME child devices; the barrier slot is
 * filled in at attach time on sun4m (see vmeattach_iommu).
 */
struct sparc_bus_space_tag sparc_vme_bus_tag = {
	NULL, /* cookie */
	NULL, /* bus_map */
	NULL, /* bus_unmap */
	NULL, /* bus_subregion */
	NULL  /* barrier */
};

/*
 * VME chipset tag: the bus-independent VME interface implemented
 * by this file.  The cookie is set to the softc at attach time.
 */
struct vme_chipset_tag sparc_vme_chipset_tag = {
	NULL,				/* cookie */
	sparc_vme_probe,		/* probe for a device */
	sparc_vme_map,			/* map VME space */
	sparc_vme_unmap,		/* unmap (not implemented) */
	sparc_vme_mmap_cookie,		/* cookie for device mmap */
	sparc_vme_intr_map,		/* allocate interrupt handle */
	sparc_vme_intr_establish,	/* hook up interrupt handler */
	sparc_vme_intr_disestablish,	/* remove handler (not implemented) */
	sparc_vme_bus_establish		/* per-device attach hook (bootpath) */
};
    182 
    183 
#if defined(SUN4)
/*
 * sun4 VME DMA tag: mostly the generic _bus_* entry points, with
 * load/unload and alloc/free overridden to convert DVMA addresses
 * into the VME master's view (subtracting DVMA_BASE).
 */
struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
	NULL,	/* cookie */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4_dmamap_unload,
	sparc_vme4_dmamap_sync,

	sparc_vme4_dmamem_alloc,
	sparc_vme4_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif

#if defined(SUN4M)
/*
 * sun4m VME DMA tag: overrides create/load/unload to manage the
 * VME IO-cache lines that cover each DMA mapping.
 */
struct sparc_bus_dma_tag sparc_vme4m_dma_tag = {
	NULL,	/* cookie */
	sparc_vme4m_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4m_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4m_dmamap_unload,
	sparc_vme4m_dmamap_sync,

	sparc_vme4m_dmamem_alloc,
	sparc_vme4m_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif
    223 
    224 
/*
 * Called for each device attached on this VME bus.  If the device
 * matches the current boot path component (same driver name and the
 * unit number recorded in bp->val[1]), record it as the boot device
 * and advance the stored boot path so the device's children can
 * match the next component.
 */
void
sparc_vme_bus_establish(cookie, dev)
	void *cookie;
	struct device *dev;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
	struct bootpath *bp = sc->sc_bp;
	char *name;

	name = dev->dv_cfdata->cf_driver->cd_name;
#ifdef DEBUG
	printf("sparc_vme_bus_establish: %s%d\n", name, dev->dv_unit);
#endif
	if (bp != NULL && strcmp(bp->name, name) == 0 &&
	    dev->dv_unit == bp->val[1]) {
		bp->dev = dev;
#ifdef DEBUG
printf("sparc_vme_bus_establish: on the boot path\n");
#endif
		/* advance to the next boot path component */
		sc->sc_bp++;
		bootpath_store(1, sc->sc_bp);
	}
}
    248 
    249 
    250 int
    251 vmematch_mainbus(parent, cf, aux)
    252 	struct device *parent;
    253 	struct cfdata *cf;
    254 	void *aux;
    255 {
    256 
    257 	if (!CPU_ISSUN4)
    258 		return (0);
    259 
    260 	return (1);
    261 }
    262 
    263 int
    264 vmematch_iommu(parent, cf, aux)
    265 	struct device *parent;
    266 	struct cfdata *cf;
    267 	void *aux;
    268 {
    269 	struct mainbus_attach_args *ma = aux;
    270 
    271 	return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
    272 }
    273 
    274 
/*
 * Attach the sun4 VME bus: set up the interrupt entry point and the
 * bus/chipset/DMA tags, then search for and attach child devices.
 */
void
vmeattach_mainbus(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4)
	struct mainbus_attach_args *ma = aux;
	struct vmebus_softc *sc = (struct vmebus_softc *)self;
	struct vme_busattach_args vba;

	/* Only a single VME bus is supported */
	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	/* If we are on the boot path, advance past the "vme" component */
	if (ma->ma_bp != NULL && strcmp(ma->ma_bp->name, "vme") == 0) {
		sc->sc_bp = ma->ma_bp + 1;
		bootpath_store(1, sc->sc_bp);
	}

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme4_dma_tag._cookie = self;

	/* Tags handed down to child devices */
	vba.vba_bustag = &sparc_vme_bus_tag;
	vba.vba_chipset_tag = &sparc_vme_chipset_tag;
	vba.vba_dmatag = &sparc_vme4_dma_tag;

	printf("\n");
	(void)config_search(vmesearch, self, &vba);

	bootpath_store(1, NULL);
#endif
	return;
}
    312 
    313 /* sun4m vmebus */
/*
 * Attach the sun4m VME bus (below the IOMMU): map the bus control,
 * interrupt vector and IO-cache register sets from the PROM "reg"
 * property, fetch the "ranges" property for address translation,
 * invalidate and enable the IO-cache, then attach child devices.
 *
 * Fix: the "reg" property array allocated by getpropA() was leaked,
 * both on the nreg < 2 error path and after the register sets were
 * mapped; it is now released with free(9).
 */
void
vmeattach_iommu(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4M)
	struct vmebus_softc *sc = (struct vmebus_softc *)self;
	struct iommu_attach_args *ia = aux;
	struct vme_busattach_args vba;
	bus_space_handle_t bh;
	struct rom_reg *rr;
	int nreg;
	int node;
	int cline;

	/* Only a single VME bus is supported */
	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4m;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme4m_dma_tag._cookie = self;
	sparc_vme_bus_tag.sparc_barrier = sparc_vme4m_barrier;

	/* Tags handed down to child devices */
	vba.vba_bustag = &sparc_vme_bus_tag;
	vba.vba_chipset_tag = &sparc_vme_chipset_tag;
	vba.vba_dmatag = &sparc_vme4m_dma_tag;

	node = ia->iom_node;

	/*
	 * Map VME control space.  getpropA() allocates the register
	 * array; it must be freed once the mappings are established.
	 */
	rr = NULL;
	if (getpropA(node, "reg", sizeof(*rr), &nreg, (void**)&rr) != 0) {
		printf("%s: can't get register property\n", self->dv_xname);
		return;
	}
	if (nreg < 2) {
		printf("%s: only %d register sets\n", self->dv_xname, nreg);
		free(rr, M_DEVBUF);	/* was leaked on this error path */
		return;
	}

	/* Register set 0: VME bus control registers */
	if (sparc_bus_map(ia->iom_bustag,
			 (bus_type_t)rr[0].rr_iospace,
			 (bus_addr_t)rr[0].rr_paddr,
			 (bus_size_t)rr[0].rr_len,
			 BUS_SPACE_MAP_LINEAR,
			 0, &bh) != 0) {
		panic("%s: can't map vmebusreg", self->dv_xname);
	}
	sc->sc_reg = (struct vmebusreg *)bh;

	/* Register set 1: VME interrupt vector registers */
	if (sparc_bus_map(ia->iom_bustag,
			 (bus_type_t)rr[1].rr_iospace,
			 (bus_addr_t)rr[1].rr_paddr,
			 (bus_size_t)rr[1].rr_len,
			 BUS_SPACE_MAP_LINEAR,
			 0, &bh) != 0) {
		panic("%s: can't map vmebusvec", self->dv_xname);
	}
	sc->sc_vec = (struct vmebusvec *)bh;

	/* IO-cache tag registers, at an offset within register set 1 */
	if (sparc_bus_map(ia->iom_bustag,
			 (bus_type_t)rr[1].rr_iospace,
			 (bus_addr_t)rr[1].rr_paddr + VME_IOC_TAGOFFSET,
			 VME_IOC_SIZE,
			 BUS_SPACE_MAP_LINEAR,
			 0, &bh) != 0) {
		panic("%s: can't map IOC tags", self->dv_xname);
	}
	sc->sc_ioctags = (u_int32_t *)bh;

	/* IO-cache flush registers, at an offset within register set 1 */
	if (sparc_bus_map(ia->iom_bustag,
			 (bus_type_t)rr[1].rr_iospace,
			 (bus_addr_t)rr[1].rr_paddr + VME_IOC_FLUSHOFFSET,
			 VME_IOC_SIZE,
			 BUS_SPACE_MAP_LINEAR,
			 0, &bh) != 0) {
		panic("%s: can't map IOC flush registers", self->dv_xname);
	}
	sc->sc_iocflush = (u_int32_t *)bh;

	/* Done with the "reg" property array (was leaked before) */
	free(rr, M_DEVBUF);

/*XXX*/	sparc_vme_bus_tag.cookie = sc->sc_reg;

	/*
	 * Get "range" property.
	 */
	if (getpropA(node, "ranges", sizeof(struct rom_range),
		     &sc->sc_nrange, (void **)&sc->sc_range) != 0) {
		panic("%s: can't get ranges property", self->dv_xname);
	}

	vmebus_sc = sc;

	/*
	 * Invalidate all IO-cache entries.
	 */
	for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
		sc->sc_ioctags[--cline] = 0;
	}

	/* Enable IO-cache */
	sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;

	printf(": version 0x%x\n",
	       sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);

	(void)config_search(vmesearch, self, &vba);
#endif
}
    426 
void sparc_vme_async_fault __P((void));
/*
 * Report a VME asynchronous fault by printing the bus controller's
 * fault status (AFSR) and fault address (AFAR) registers.
 * NOTE: AFAR is read before AFSR is read inside printf(); the
 * register read order is preserved deliberately.
 */
void
sparc_vme_async_fault()
{
	struct vmebus_softc *sc = vmebus_sc;
	u_int32_t addr;

	addr = sc->sc_reg->vmebus_afar;
	printf("vme afsr: %x; addr %x\n", sc->sc_reg->vmebus_afsr, addr);
}
    437 
/*
 * Probe for a device at the given VME address: translate the address,
 * temporarily map one page at TMPMAP_VA and peek at it with probeget().
 * If the peek succeeds and a callback was supplied, the callback makes
 * the final decision.  The temporary mapping is removed before return.
 * Returns non-zero if a device appears to be present.
 */
int
sparc_vme_probe(cookie, tag, addr, offset, size, mod, callback, arg)
	void *cookie;
	bus_space_tag_t tag;
	vme_addr_t addr;
	size_t offset;
	vme_size_t size;
	int mod;
	int (*callback) __P((void *, void *));
	void *arg;
{
	struct rom_reg reg;
	caddr_t tmp;
	int result;
	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;

/* XXX - Use bus_space_[un]map() etc. */
	reg.rr_paddr = (u_int32_t)addr;
	vmebus_translate(sc, mod, &reg);
	tmp = (caddr_t)mapdev(&reg, TMPMAP_VA, 0, NBPG);
	/* probeget() returns -1 if the access faulted */
	result = probeget(tmp + offset, size) != -1;
	if (result && callback != NULL)
		result = (*callback)(tmp, arg);
	/* tear down the temporary mapping */
	pmap_remove(pmap_kernel(), TMPMAP_VA, TMPMAP_VA+NBPG);
	return (result);
}
    464 
    465 int
    466 sparc_vme_map(cookie, addr, size, mod, tag, handlep)
    467 	void *cookie;
    468 	vme_addr_t addr;
    469 	vme_size_t size;
    470 	int mod;
    471 	bus_space_tag_t tag;
    472 	bus_space_handle_t *handlep;
    473 {
    474 	struct rom_reg reg;
    475 	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
    476 
    477 	reg.rr_paddr = (u_int32_t)addr;
    478 	vmebus_translate(sc, mod, &reg);
    479 	*handlep = (bus_space_handle_t)mapdev(&reg, 0, 0, size);
    480 	return (0);
    481 }
    482 
    483 int
    484 sparc_vme_mmap_cookie(cookie, addr, mod, tag, handlep)
    485 	void *cookie;
    486 	vme_addr_t addr;
    487 	int mod;
    488 	bus_space_tag_t tag;
    489 	int *handlep;
    490 {
    491 	struct rom_reg reg;
    492 	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
    493 
    494 	reg.rr_paddr = (u_int32_t)addr;
    495 	vmebus_translate(sc, mod, &reg);
    496 	*handlep = (int)reg.rr_paddr | PMAP_IOENC(reg.rr_iospace) | PMAP_NC;
    497 	return (0);
    498 }
    499 
    500 void
    501 vmebus_translate(sc, mod, rr)
    502 	struct vmebus_softc *sc;
    503 	vme_mod_t mod;
    504 	struct rom_reg *rr;
    505 {
    506 
    507 	if (CPU_ISSUN4) {
    508 		rr->rr_iospace = (mod & VMEMOD_D32)
    509 			? PMAP_VME32
    510 			: PMAP_VME16;
    511 
    512 		switch (mod & ~VMEMOD_D32) {
    513 		case VMEMOD_A16|VMEMOD_D|VMEMOD_S:
    514 			rr->rr_paddr += 0xffff0000;
    515 			break;
    516 		case VMEMOD_A24|VMEMOD_D|VMEMOD_S:
    517 			rr->rr_paddr += 0xff000000;
    518 			break;
    519 		case VMEMOD_A32|VMEMOD_D|VMEMOD_S:
    520 			break;
    521 		default:
    522 			panic("vmebus_translate: unsupported VME modifier: %x",
    523 				mod);
    524 		}
    525 		return;
    526 	} else if (CPU_ISSUN4M) {
    527 		int j;
    528 
    529 		/* sun4m VME node: translate through "ranges" property */
    530 		if (sc->sc_nrange == 0)
    531 			panic("vmebus: no ranges");
    532 
    533 		/* Translate into parent address spaces */
    534 		for (j = 0; j < sc->sc_nrange; j++) {
    535 			if (sc->sc_range[j].cspace != mod)
    536 				continue;
    537 
    538 			rr->rr_paddr += sc->sc_range[j].poffset;
    539 			rr->rr_iospace = sc->sc_range[j].pspace;
    540 			return;
    541 		}
    542 		panic("sparc_vme_translate: modifier %x not supported", mod);
    543 	} else {
    544 		panic("sparc_vme_translate: inappropriate cpu arch");
    545 	}
    546 }
    547 
#if defined(SUN4M)
/*
 * Bus-space barrier for the sun4m VME bus.  The cookie is the VME
 * control register block (set in vmeattach_iommu).  Reading the
 * asynchronous fault status register forces any pending write
 * buffers between the CPU and the VME bus to drain.
 */
void
sparc_vme4m_barrier(cookie)
	void *cookie;
{
	struct vmebusreg *vbp = (struct vmebusreg *)cookie;

	/* Read async fault status to flush write-buffers */
	(*(volatile int *)&vbp->vmebus_afsr);
}
#endif
    559 
    560 
    561 
    562 /*
    563  * VME Interrupt Priority Level to sparc Processor Interrupt Level.
    564  */
/* Indexed by VME IPL 0-7; entry 0 is unused (IPL 0 means none). */
static int vme_ipl_to_pil[] = {
	0,
	2,
	3,
	5,
	7,
	9,
	11,
	13
};
    575 
    576 
    577 /*
    578  * All VME device interrupts go through vmeintr(). This function reads
    579  * the VME vector from the bus, then dispatches the device interrupt
    580  * handler.  All handlers for devices that map to the same Processor
    581  * Interrupt Level (according to the table above) are on a linked list
    582  * of `sparc_vme_intr_handle' structures. The head of which is passed
    583  * down as the argument to `vmeintr(void *arg)'.
    584  */
struct sparc_vme_intr_handle {
	struct intrhand ih;	/* embedded generic interrupt handler */
	struct sparc_vme_intr_handle *next;	/* next handler on same PIL */
	int	vec;		/* VME interrupt vector */
	int	pri;		/* VME interrupt priority */
	struct vmebus_softc *sc;/*XXX*/
};
    592 
#if defined(SUN4)
/*
 * sun4 VME interrupt handler.  Fetch the interrupt vector from the
 * bus via a control-space byte load, then run every handler on this
 * priority level's list whose vector matches.  Returns the number of
 * handlers that claimed the interrupt (non-zero = handled).
 */
int
vmeintr4(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int i = 0;

	/* Interrupt-acknowledge address: (VME priority << 1) | 1 */
	level = (ihp->pri << 1) | 1;

	vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level));

	/* ldcontrolb() yields -1 if the vector fetch faulted */
	if (vec == -1) {
		printf("vme: spurious interrupt\n");
		return 1; /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun)
			i += (ihp->ih.ih_fun)(ihp->ih.ih_arg);
	return (i);
}
#endif
    617 
#if defined(SUN4M)
/*
 * sun4m VME interrupt handler.  Read the interrupt vector from the
 * bus vector registers, then run every handler on this priority
 * level's list whose vector matches.  Returns the number of handlers
 * that claimed the interrupt.
 *
 * The vector read can fault (bus timeout), so it is done through
 * fkbyte() with the current pcb's onfault handler saved and restored,
 * at splhigh; a faulted read shows up as vec == -1.
 */
int
vmeintr4m(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int i = 0;

	/* Interrupt-acknowledge address: (VME priority << 1) | 1 */
	level = (ihp->pri << 1) | 1;

#if 0
	int pending;

	/* Flush VME <=> Sbus write buffers */
	(*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);

	pending = *((int*)ICR_SI_PEND);
	if ((pending & SINTR_VME(ihp->pri)) == 0) {
		printf("vmeintr: non pending at pri %x(p 0x%x)\n",
			ihp->pri, pending);
		return (0);
	}
#endif
#if 0
	/* Why gives this a bus timeout sometimes? */
	vec = ihp->sc->sc_vec->vmebusvec[level];
#else
	/* so, arrange to catch the fault... */
	{
	extern struct user *proc0paddr;
	extern int fkbyte __P((caddr_t, struct pcb *));
	caddr_t addr = (caddr_t)&ihp->sc->sc_vec->vmebusvec[level];
	struct pcb *xpcb;
	u_long saveonfault;
	int s;

	s = splhigh();
	/* use proc0's pcb when no process context is available */
	if (curproc == NULL)
		xpcb = (struct pcb *)proc0paddr;
	else
		xpcb = &curproc->p_addr->u_pcb;

	/* fkbyte() clobbers pcb_onfault; save and restore it */
	saveonfault = (u_long)xpcb->pcb_onfault;
	vec = fkbyte(addr, xpcb);
	xpcb->pcb_onfault = (caddr_t)saveonfault;

	splx(s);
	}
#endif

	if (vec == -1) {
		printf("vme: spurious interrupt: ");
		printf("SI: 0x%x, VME AFSR: 0x%x, VME AFAR 0x%x\n",
			*((int*)ICR_SI_PEND),
			ihp->sc->sc_reg->vmebus_afsr,
			ihp->sc->sc_reg->vmebus_afar);
		return 1; /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun)
			i += (ihp->ih.ih_fun)(ihp->ih.ih_arg);
	return (i);
}
#endif
    684 
    685 int
    686 sparc_vme_intr_map(cookie, vec, pri, ihp)
    687 	void *cookie;
    688 	int vec;
    689 	int pri;
    690 	vme_intr_handle_t *ihp;
    691 {
    692 	struct sparc_vme_intr_handle *ih;
    693 
    694 	ih = (vme_intr_handle_t)
    695 	    malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT);
    696 	ih->pri = pri;
    697 	ih->vec = vec;
    698 	ih->sc = cookie;/*XXX*/
    699 	*ihp = ih;
    700 	return (0);
    701 }
    702 
/*
 * Hook up a VME interrupt handler.  All VME handlers mapping to the
 * same processor interrupt level share one intrhand slot whose
 * argument is the head of a list of sparc_vme_intr_handle's; the
 * shared slot is created on first use and new handlers are prepended
 * to its list afterwards.  Always returns NULL (no disestablish
 * cookie is implemented; see sparc_vme_intr_disestablish).
 */
void *
sparc_vme_intr_establish(cookie, vih, func, arg)
	void *cookie;
	vme_intr_handle_t vih;
	int (*func) __P((void *));
	void *arg;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
	struct sparc_vme_intr_handle *svih =
			(struct sparc_vme_intr_handle *)vih;
	struct intrhand *ih;
	int level;

	/* Translate VME priority to processor IPL */
	level = vme_ipl_to_pil[svih->pri];

	svih->ih.ih_fun = func;
	svih->ih.ih_arg = arg;
	svih->next = NULL;

	/* ensure the interrupt subsystem will call us at this level */
	for (ih = intrhand[level]; ih != NULL; ih = ih->ih_next)
		if (ih->ih_fun == sc->sc_vmeintr)
			break;

	if (ih == NULL) {
		/* first VME handler at this level: create the shared slot */
		ih = (struct intrhand *)
			malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
		if (ih == NULL)
			panic("vme_addirq");
		bzero(ih, sizeof *ih);
		ih->ih_fun = sc->sc_vmeintr;
		ih->ih_arg = vih;
		intr_establish(level, ih);
	} else {
		/* prepend to the existing list of VME handlers */
		svih->next = (vme_intr_handle_t)ih->ih_arg;
		ih->ih_arg = vih;
	}
	return (NULL);
}
    743 
/*
 * Unmap previously mapped VME space.  Not implemented: mappings made
 * by sparc_vme_map() are permanent, so this always panics.
 */
void
sparc_vme_unmap(cookie)
	void * cookie;
{
	/* Not implemented */
	panic("sparc_vme_unmap");
}
    751 
/*
 * Remove a VME interrupt handler.  Not implemented (handlers are
 * kept on shared per-level lists with no removal support), so this
 * always panics.
 */
void
sparc_vme_intr_disestablish(cookie, a)
	void *cookie;
	void *a;
{
	/* Not implemented */
	panic("sparc_vme_intr_disestablish");
}
    760 
    761 
    762 
    763 /*
    764  * VME DMA functions.
    765  */
    766 
    767 #if defined(SUN4)
    768 int
    769 sparc_vme4_dmamap_load(t, map, buf, buflen, p, flags)
    770 	bus_dma_tag_t t;
    771 	bus_dmamap_t map;
    772 	void *buf;
    773 	bus_size_t buflen;
    774 	struct proc *p;
    775 	int flags;
    776 {
    777 	int error;
    778 
    779 	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
    780 	if (error != 0)
    781 		return (error);
    782 
    783 	/* Adjust DVMA address to VME view */
    784 	map->dm_segs[0].ds_addr -= DVMA_BASE;
    785 	return (0);
    786 }
    787 
/*
 * sun4: undo the VME-view address adjustment made at load time, then
 * tear down the mapping.  The adjustment must happen first so the
 * generic unloader sees the real DVMA address.
 */
void
sparc_vme4_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	map->dm_segs[0].ds_addr += DVMA_BASE;
	_bus_dmamap_unload(t, map);
}
    796 
    797 int
    798 sparc_vme4_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
    799 	bus_dma_tag_t t;
    800 	bus_size_t size, alignment, boundary;
    801 	bus_dma_segment_t *segs;
    802 	int nsegs;
    803 	int *rsegs;
    804 	int flags;
    805 {
    806 	int error;
    807 
    808 	error = _bus_dmamem_alloc(t, size, alignment, boundary,
    809 				  segs, nsegs, rsegs, flags);
    810 	if (error != 0)
    811 		return (error);
    812 
    813 	segs[0].ds_addr -= DVMA_BASE;
    814 	return (0);
    815 }
    816 
/*
 * sun4: undo the VME-view address adjustment made at alloc time, then
 * free the memory.  The adjustment must happen before the generic
 * free sees the segment.
 */
void
sparc_vme4_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	segs[0].ds_addr += DVMA_BASE;
	_bus_dmamem_free(t, segs, nsegs);
}
    826 
/*
 * sun4: DMA map synchronization.  Currently a no-op; see the XXX
 * note below about cache flushes on write-back caches.
 */
void
sparc_vme4_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B).
	 */
}
    840 #endif /* SUN4 */
    841 
    842 #if defined(SUN4M)
/*
 * sun4m: create a DMA map.  Currently just defers to the generic
 * creator; `align' is computed but unused because the generic
 * interface takes no alignment argument (see the commented-out
 * /*align,*/ slot below).
 */
static int
sparc_vme4m_dmamap_create (t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	int align;

	/* VME DVMA addresses must always be 8K aligned */
	align = 8192;

	/* XXX - todo: allocate DVMA addresses from assigned ranges:
		 upper 8MB for A32 space; upper 1MB for A24 space */
	return (_bus_dmamap_create(t, size, nsegments, maxsegsz,
				    boundary, /*align,*/ flags, dmamp));
}
    863 
/*
 * sun4m: load a DMA map, then allocate VME IO-cache lines covering
 * the mapped range (one tag per VME_IOC_PAGESZ, marked cacheable and
 * writable).  buflen is rounded up to whole IO-cache pages first.
 * NOTE(review): the rounded-up buflen is also what gets passed to
 * _bus_dmamap_load -- presumably intentional for IOC line coverage;
 * confirm against the generic loader's semantics.
 */
int
sparc_vme4m_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct vmebus_softc	*sc = (struct vmebus_softc *)t->_cookie;
	volatile u_int32_t	*ioctags;
	int			error;

	/* round up to a whole number of IO-cache pages */
	buflen = (buflen + VME_IOC_PAGESZ - 1) & ~(VME_IOC_PAGESZ - 1);
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error != 0)
		return (error);

	/* allocate IO cache entries for this range */
	ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	for (;buflen > 0;) {
		*ioctags = VME_IOC_IC | VME_IOC_W;
		ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
		buflen -= VME_IOC_PAGESZ;
	}
	return (0);
}
    891 
    892 
/*
 * sun4m: flush the VME IO-cache lines covering this map, synchronize
 * the flushes by reading a tag register, then tear down the mapping.
 * NOTE(review): only dm_segs[0] is considered -- assumes single-
 * segment maps, matching the load path; confirm.
 */
void
sparc_vme4m_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct vmebus_softc	*sc = (struct vmebus_softc *)t->_cookie;
	volatile u_int32_t	*flushregs;
	int			len;

	/* Flush VME IO cache */
	len = map->dm_segs[0].ds_len;
	flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	for (;len > 0;) {
		*flushregs = 0;
		flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
		len -= VME_IOC_PAGESZ;
	}
	/* Read a tag to synchronize the IOC flushes */
	(*sc->sc_ioctags);

	_bus_dmamap_unload(t, map);
}
    915 
    916 int
    917 sparc_vme4m_dmamem_alloc(t, size, alignmnt, boundary, segs, nsegs, rsegs, flags)
    918 	bus_dma_tag_t t;
    919 	bus_size_t size, alignmnt, boundary;
    920 	bus_dma_segment_t *segs;
    921 	int nsegs;
    922 	int *rsegs;
    923 	int flags;
    924 {
    925 	int error;
    926 
    927 	error = _bus_dmamem_alloc(t, size, alignmnt, boundary,
    928 				  segs, nsegs, rsegs, flags);
    929 	if (error != 0)
    930 		return (error);
    931 
    932 	return (0);
    933 }
    934 
/*
 * sun4m: free DMA-safe memory.  No address adjustment is needed
 * (cf. the sun4 variant), so this simply defers to the generic free.
 */
void
sparc_vme4m_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	_bus_dmamem_free(t, segs, nsegs);
}
    943 
/*
 * sun4m: DMA map synchronization.  Currently a no-op; see the XXX
 * note below.
 */
void
sparc_vme4m_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary.
	 */
}
    958