/*	$NetBSD: sgmap.c,v 1.18 2015/07/05 02:03:36 matt Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sgmap.c,v 1.18 2015/07/05 02:03:36 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/sgmap.h>

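/*
 * Common scatter/gather map ("sgmap") support for VAX bus DMA.  An sgmap
 * manages a window of bus virtual address space backed by a bus-visible
 * page table; vax_sgmap_load() fills that table with the physical pages
 * of a buffer so an I/O adapter can reach it.
 *
 * Illustrative call sequence for a bus front-end (the softc fields and
 * EXAMPLE_* constants below are hypothetical, not part of this file):
 *
 *	vax_sgmap_init(sc->sc_dmatag, &sc->sc_sgmap, "example_sgmap",
 *	    EXAMPLE_SGVA_BASE, EXAMPLE_SGVA_SIZE, sc->sc_pte, 0);
 *	...
 *	error = vax_sgmap_load(sc->sc_dmatag, map, buf, buflen,
 *	    NULL /* kernel buffer */, BUS_DMA_NOWAIT, &sc->sc_sgmap);
 *	...
 *	vax_sgmap_unload(sc->sc_dmatag, map, &sc->sc_sgmap);
 */

/*
 * vax_sgmap_init --
 *	Set up an sgmap: record the bus virtual window, use the caller's
 *	page table if one was supplied (it may live in bridge-resident
 *	SRAM), otherwise allocate one with bus_dmamem_alloc(), and create
 *	the extent map that parcels out the window.
 */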
void
vax_sgmap_init(bus_dma_tag_t t, struct vax_sgmap *sgmap, const char *name,
	bus_addr_t sgvabase, bus_size_t sgvasize, struct pte *ptva,
	bus_size_t minptalign)
{
	bus_dma_segment_t seg;
	size_t ptsize;
	int rseg;

	if (sgvasize & PGOFSET) {
		printf("size botch for sgmap `%s'\n", name);
		goto die;
	}

	sgmap->aps_sgvabase = sgvabase;
	sgmap->aps_sgvasize = sgvasize;

	if (ptva != NULL) {
		/*
		 * We already have a page table; this may be a system
		 * where the page table resides in bridge-resident SRAM.
		 */
		sgmap->aps_pt = ptva;
	} else {
		/*
		 * Compute the page table size and allocate it.  At minimum,
		 * this must be aligned to the page table size.  However,
		 * some platforms have stricter alignment requirements.
		 */
		ptsize = (sgvasize / VAX_NBPG) * sizeof(struct pte);
		if (minptalign != 0) {
			if (minptalign < ptsize)
				minptalign = ptsize;
		} else
			minptalign = ptsize;
		if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
		    BUS_DMA_NOWAIT)) {
			panic("unable to allocate page table for sgmap `%s'",
			    name);
		}
		sgmap->aps_pt = (struct pte *)(seg.ds_addr | KERNBASE);
	}

	/*
	 * Create the extent map used to manage the virtual address
	 * space.
	 */
	sgmap->aps_ex = extent_create(name, sgvabase, sgvasize - 1,
	    NULL, 0, EX_NOWAIT|EX_NOCOALESCE);
	if (sgmap->aps_ex == NULL) {
		printf("unable to create extent map for sgmap `%s'\n", name);
		goto die;
	}

	return;
 die:
	panic("vax_sgmap_init");
}

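/*
 * vax_sgmap_alloc --
 *	Reserve bus virtual address space for a DMA map from the sgmap's
 *	extent, rounded up to whole VAX pages.  When VAX_BUS_DMA_SPILLPAGE
 *	is requested, one extra page is included for the spill PTE.  On
 *	success, DMAMAP_HAS_SGMAP is set in the map's flags.
 */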
int
vax_sgmap_alloc(bus_dmamap_t map, bus_size_t origlen, struct vax_sgmap *sgmap,
	int flags)
{
	int error;
	bus_size_t len = origlen;

#ifdef DIAGNOSTIC
	if (map->_dm_flags & DMAMAP_HAS_SGMAP)
		panic("vax_sgmap_alloc: already have sgva space");
#endif

	/*
	 * If we need a spill page (for the VS4000 SCSI), make sure we
	 * allocate enough space for an extra page.
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		len += VAX_NBPG;
	}

	map->_dm_sgvalen = vax_round_page(len);
#define DEBUG_SGMAP 0
#if DEBUG_SGMAP
	printf("len %x -> %x, _dm_sgvalen %x _dm_boundary %x -> ",
	    (unsigned int)origlen, (unsigned int)len,
	    (unsigned int)map->_dm_sgvalen, (unsigned int)map->_dm_boundary);
#endif

	error = extent_alloc(sgmap->aps_ex, map->_dm_sgvalen, VAX_NBPG,
	    0, (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK,
	    &map->_dm_sgva);
#if DEBUG_SGMAP
	printf("error %d _dm_sgva %lx\n", error, map->_dm_sgva);
#endif

	if (error == 0)
		map->_dm_flags |= DMAMAP_HAS_SGMAP;
	else
		map->_dm_flags &= ~DMAMAP_HAS_SGMAP;

	return (error);
}

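/*
 * vax_sgmap_free --
 *	Return a DMA map's bus virtual address space to the sgmap's extent
 *	and clear DMAMAP_HAS_SGMAP.
 */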
void
vax_sgmap_free(bus_dmamap_t map, struct vax_sgmap *sgmap)
{

#ifdef DIAGNOSTIC
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0)
		panic("vax_sgmap_free: no sgva space to free");
#endif

	if (extent_free(sgmap->aps_ex, map->_dm_sgva, map->_dm_sgvalen,
	    EX_NOWAIT))
		panic("vax_sgmap_free");

	map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
}

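/*
 * vax_sgmap_reserve --
 *	Mark a specific region of the bus virtual window as allocated in
 *	the sgmap's extent so that vax_sgmap_alloc() never hands it out.
 */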
int
vax_sgmap_reserve(bus_addr_t ba, bus_size_t len, struct vax_sgmap *sgmap)
{
	return extent_alloc_region(sgmap->aps_ex, ba, len, EX_NOWAIT);
}

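/*
 * vax_sgmap_load --
 *	Load a linear buffer into a DMA map: allocate sgva space if the
 *	map does not already have some, then write one page-table entry
 *	((pa >> VAX_PGSHIFT) | PG_V) per VAX page of the buffer.  The
 *	resulting map has a single segment whose address is the sgva plus
 *	the buffer's offset within its first page.  With
 *	VAX_BUS_DMA_SPILLPAGE, the final PTE is duplicated into one extra
 *	slot to pacify the VS4000 SCSI prefetcher.
 */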
int
vax_sgmap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
	struct proc *p, int flags, struct vax_sgmap *sgmap)
{
	vaddr_t endva, va = (vaddr_t)buf;
	paddr_t pa;
	bus_addr_t dmaoffset;
	bus_size_t dmalen;
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int pteidx, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * Remember the offset into the first page and the total
	 * transfer length.
	 */
	dmaoffset = ((u_long)buf) & VAX_PGOFSET;
	dmalen = buflen;

	/*
	 * Allocate the necessary virtual address space for the
	 * mapping.  Round the size, since we deal with whole pages.
	 */
	endva = vax_round_page(va + buflen);
	va = vax_trunc_page(va);
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0) {
		error = vax_sgmap_alloc(map, (endva - va), sgmap, flags);
		if (error)
			return (error);
	}

	pteidx = map->_dm_sgva >> VAX_PGSHIFT;
	pte = &page_table[pteidx];

	/*
	 * Generate the DMA address.
	 */
	map->dm_segs[0].ds_addr = map->_dm_sgva + dmaoffset;
	map->dm_segs[0].ds_len = dmalen;

	map->_dm_pteidx = pteidx;
	map->_dm_ptecnt = 0;

	/*
	 * Create the bus-specific page table entries.
	 * This can be done much more efficiently than it is here.
	 */
	for (; va < endva; va += VAX_NBPG, pte++, map->_dm_ptecnt++) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			(void) pmap_extract(p->p_vmspace->vm_map.pmap, va, &pa);
		else
			pa = kvtophys(va);

		/*
		 * Load the current PTE with this page.
		 */
		*pte = (pa >> VAX_PGSHIFT) | PG_V;
	}
	/*
	 * The VS4000 SCSI prefetcher doesn't like to end on a page boundary,
	 * so add an extra page to quiet it down.
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		*pte = pte[-1];
		map->_dm_ptecnt++;
	}

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	return (0);
}

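/*
 * The mbuf, uio and raw variants of the load operation are not
 * implemented for VAX sgmaps; the stubs below panic if ever called.
 */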
int
vax_sgmap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
	int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_mbuf : not implemented");
}

int
vax_sgmap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
	int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_uio : not implemented");
}

int
vax_sgmap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
	int nsegs, bus_size_t size, int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_raw : not implemented");
}

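/*
 * vax_sgmap_unload --
 *	Zero the PTEs written by vax_sgmap_load(), release the sgva space
 *	unless the map was created with BUS_DMA_ALLOCNOW, and mark the map
 *	as having no valid mappings.
 */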
void
vax_sgmap_unload(bus_dma_tag_t t, bus_dmamap_t map, struct vax_sgmap *sgmap)
{
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int ptecnt;

	/*
	 * Invalidate the PTEs for the mapping.
	 */
	for (ptecnt = map->_dm_ptecnt, pte = &page_table[map->_dm_pteidx];
		ptecnt-- != 0; ) {
		*pte++ = 0;
	}

	/*
	 * Free the virtual address space used by the mapping
	 * if necessary.
	 */
	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		vax_sgmap_free(map, sgmap);

	/*
	 * Mark the mapping invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}