/* $NetBSD: sgmap.c,v 1.11 2003/07/15 02:15:05 lukem Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sgmap.c,v 1.11 2003/07/15 02:15:05 lukem Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/sgmap.h>

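/*
 * vax_sgmap_init --
 *	Initialize a scatter/gather map.  Use the page table at "ptva"
 *	if the caller supplies one (e.g. bridge-resident SRAM); otherwise
 *	allocate a suitably aligned page table with bus_dmamem_alloc().
 *	Finally, create the extent map that manages the SGVA space.
 */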
void
vax_sgmap_init(t, sgmap, name, sgvabase, sgvasize, ptva, minptalign)
	bus_dma_tag_t t;
	struct vax_sgmap *sgmap;
	const char *name;
	bus_addr_t sgvabase;
	bus_size_t sgvasize;
	struct pte *ptva;
	bus_size_t minptalign;
{
	bus_dma_segment_t seg;
	size_t ptsize;
	int rseg;

	if (sgvasize & PGOFSET) {
		printf("size botch for sgmap `%s'\n", name);
		goto die;
	}

	sgmap->aps_sgvabase = sgvabase;
	sgmap->aps_sgvasize = sgvasize;

	if (ptva != NULL) {
		/*
		 * We already have a page table; this may be a system
		 * where the page table resides in bridge-resident SRAM.
		 */
		sgmap->aps_pt = ptva;
	} else {
		/*
		 * Compute the page table size and allocate it.  At minimum,
		 * this must be aligned to the page table size.  However,
		 * some platforms have more strict alignment requirements.
		 */
		ptsize = (sgvasize / VAX_NBPG) * sizeof(struct pte);
		if (minptalign != 0) {
			if (minptalign < ptsize)
				minptalign = ptsize;
		} else
			minptalign = ptsize;
		if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
		    BUS_DMA_NOWAIT)) {
			panic("unable to allocate page table for sgmap `%s'",
			    name);
		}
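		/*
		 * The page table is physically contiguous; reference it
		 * through the kernel's direct mapping of physical memory
		 * by or'ing KERNBASE into its physical address.
		 */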
		sgmap->aps_pt = (struct pte *)(seg.ds_addr | KERNBASE);
	}

	/*
	 * Create the extent map used to manage the virtual address
	 * space.
	 */
	sgmap->aps_ex = extent_create((char *)name, sgvabase, sgvasize - 1,
	    M_DMAMAP, NULL, 0, EX_NOWAIT|EX_NOCOALESCE);
	if (sgmap->aps_ex == NULL) {
		printf("unable to create extent map for sgmap `%s'\n", name);
		goto die;
	}

	return;
 die:
	panic("vax_sgmap_init");
}

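/*
 * vax_sgmap_alloc --
 *	Allocate SGVA space for a DMA map from the sgmap's extent map.
 *	The length is rounded to whole VAX pages and recorded in the map
 *	so the space can later be released by vax_sgmap_free().
 */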
int
vax_sgmap_alloc(map, origlen, sgmap, flags)
	bus_dmamap_t map;
	bus_size_t origlen;
	struct vax_sgmap *sgmap;
	int flags;
{
	int error;
	bus_size_t len = origlen;

#ifdef DIAGNOSTIC
	if (map->_dm_flags & DMAMAP_HAS_SGMAP)
		panic("vax_sgmap_alloc: already have sgva space");
#endif

	/*
	 * If we need a spill page (for the VS4000 SCSI), make sure we
	 * allocate enough space for an extra page.
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		len += VAX_NBPG;
	}

	map->_dm_sgvalen = vax_round_page(len);
#if 0
	printf("len %x -> %x, _dm_sgvalen %x _dm_boundary %x boundary %x -> ",
	    origlen, len, map->_dm_sgvalen, map->_dm_boundary, boundary);
#endif

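	/*
	 * Carve the rounded length out of the extent map, aligned to a
	 * VAX page; block waiting for space only if the caller allows it.
	 */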
	error = extent_alloc(sgmap->aps_ex, map->_dm_sgvalen, VAX_NBPG,
	    0, (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK,
	    &map->_dm_sgva);
#if 0
	printf("error %d _dm_sgva %x\n", error, map->_dm_sgva);
#endif

	if (error == 0)
		map->_dm_flags |= DMAMAP_HAS_SGMAP;
	else
		map->_dm_flags &= ~DMAMAP_HAS_SGMAP;

	return (error);
}

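/*
 * vax_sgmap_free --
 *	Release the SGVA space previously allocated for a DMA map back
 *	to the sgmap's extent map.
 */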
void
vax_sgmap_free(map, sgmap)
	bus_dmamap_t map;
	struct vax_sgmap *sgmap;
{

#ifdef DIAGNOSTIC
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0)
		panic("vax_sgmap_free: no sgva space to free");
#endif

	if (extent_free(sgmap->aps_ex, map->_dm_sgva, map->_dm_sgvalen,
	    EX_NOWAIT))
		panic("vax_sgmap_free");

	map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
}

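/*
 * vax_sgmap_load --
 *	Load a contiguous kernel or user buffer into a DMA map:
 *	allocate SGVA space for it if needed, then fill in one bus
 *	page table entry per VAX page of the buffer.  The transfer is
 *	described by a single DMA segment.
 */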
int
vax_sgmap_load(t, map, buf, buflen, p, flags, sgmap)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	struct vax_sgmap *sgmap;
{
	vaddr_t endva, va = (vaddr_t)buf;
	paddr_t pa;
	bus_addr_t dmaoffset;
	bus_size_t dmalen;
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int pteidx, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * Remember the offset into the first page and the total
	 * transfer length.
	 */
	dmaoffset = ((u_long)buf) & VAX_PGOFSET;
	dmalen = buflen;

	/*
	 * Allocate the necessary virtual address space for the
	 * mapping.  Round the size, since we deal with whole pages.
	 */
	endva = vax_round_page(va + buflen);
	va = vax_trunc_page(va);
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0) {
		error = vax_sgmap_alloc(map, (endva - va), sgmap, flags);
		if (error)
			return (error);
	}

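	/*
	 * Each VAX page of SGVA space is described by one PTE, so the
	 * starting PTE index is simply the allocated SGVA divided by
	 * the VAX page size.
	 */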
	pteidx = map->_dm_sgva >> VAX_PGSHIFT;
	pte = &page_table[pteidx];

	/*
	 * Generate the DMA address.
	 */
	map->dm_segs[0].ds_addr = map->_dm_sgva + dmaoffset;
	map->dm_segs[0].ds_len = dmalen;

	map->_dm_pteidx = pteidx;
	map->_dm_ptecnt = 0;

	/*
	 * Create the bus-specific page tables.  This could be done
	 * much more efficiently than it is here.
	 */
	for (; va < endva; va += VAX_NBPG, pte++, map->_dm_ptecnt++) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			(void) pmap_extract(p->p_vmspace->vm_map.pmap, va, &pa);
		else
			pa = kvtophys(va);

		/*
		 * Load the current PTE with this page.
		 */
		*pte = (pa >> VAX_PGSHIFT) | PG_V;
	}

	/*
	 * The VS4000 SCSI prefetcher doesn't like to end on a page
	 * boundary, so add an extra page to quiet it down.
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		*pte = pte[-1];
		map->_dm_ptecnt++;
	}

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	return (0);
}

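/*
 * The mbuf, uio, and raw variants of the load operation are not
 * implemented for the VAX sgmap code; they panic if ever called.
 */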
int
vax_sgmap_load_mbuf(t, map, m, flags, sgmap)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m;
	int flags;
	struct vax_sgmap *sgmap;
{

	panic("vax_sgmap_load_mbuf: not implemented");
}

int
vax_sgmap_load_uio(t, map, uio, flags, sgmap)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
	struct vax_sgmap *sgmap;
{

	panic("vax_sgmap_load_uio: not implemented");
}

int
vax_sgmap_load_raw(t, map, segs, nsegs, size, flags, sgmap)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
	struct vax_sgmap *sgmap;
{

	panic("vax_sgmap_load_raw: not implemented");
}

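/*
 * vax_sgmap_unload --
 *	Tear down a mapping: invalidate the PTEs that describe it and,
 *	unless the SGVA space was allocated up front with
 *	BUS_DMA_ALLOCNOW, return that space to the sgmap's extent map.
 */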
void
vax_sgmap_unload(t, map, sgmap)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct vax_sgmap *sgmap;
{
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int ptecnt;

	/*
	 * Invalidate the PTEs for the mapping.
	 */
	for (ptecnt = map->_dm_ptecnt, pte = &page_table[map->_dm_pteidx];
	    ptecnt-- != 0; ) {
		*pte++ = 0;
	}

	/*
	 * Free the virtual address space used by the mapping
	 * if necessary.
	 */
	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		vax_sgmap_free(map, sgmap);

	/*
	 * Mark the mapping invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}
    337