/* $NetBSD: sgmap.c,v 1.20 2023/12/03 00:49:46 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sgmap.c,v 1.20 2023/12/03 00:49:46 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/sgmap.h>

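/*
 * vax_sgmap_init --
 *	Initialize a scatter/gather map.  Record the DMA-visible virtual
 *	address window, adopt the caller-supplied page table if one is
 *	given (on some systems it lives in bridge-resident SRAM), otherwise
 *	allocate one with bus_dmamem_alloc(), and create the vmem arena
 *	that manages the window.  Panics on failure.
 *
 *	A minimal call sketch (hypothetical names; a real bus front-end
 *	supplies its own tag, softc fields and window size):
 *
 *		vax_sgmap_init(sc->sc_dmat, &sc->sc_sgmap, "example_sgmap",
 *		    0, 8 * 1024 * 1024, NULL, 0);
 */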
void
vax_sgmap_init(bus_dma_tag_t t, struct vax_sgmap *sgmap, const char *name,
	bus_addr_t sgvabase, bus_size_t sgvasize, struct pte *ptva,
	bus_size_t minptalign)
{
	bus_dma_segment_t seg;
	size_t ptsize;
	int rseg;

	if (sgvasize & PGOFSET) {
		printf("size botch for sgmap `%s'\n", name);
		goto die;
	}

	sgmap->aps_sgvabase = sgvabase;
	sgmap->aps_sgvasize = sgvasize;

	if (ptva != NULL) {
		/*
		 * We already have a page table; this may be a system
		 * where the page table resides in bridge-resident SRAM.
		 */
		sgmap->aps_pt = ptva;
	} else {
		/*
		 * Compute the page table size and allocate it.  At minimum,
		 * this must be aligned to the page table size.  However,
		 * some platforms have more strict alignment requirements.
		 */
		ptsize = (sgvasize / VAX_NBPG) * sizeof(struct pte);
		if (minptalign != 0) {
			if (minptalign < ptsize)
				minptalign = ptsize;
		} else
			minptalign = ptsize;
		if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
		    BUS_DMA_NOWAIT)) {
			printf("unable to allocate page table for sgmap `%s'\n",
			    name);
			goto die;
		}
		sgmap->aps_pt = (struct pte *)(seg.ds_addr | KERNBASE);
	}

	/*
	 * Create the arena used to manage the virtual address
	 * space.
	 */
	sgmap->aps_arena = vmem_create(name, sgvabase, sgvasize,
				       VAX_NBPG,	/* quantum */
				       NULL,		/* importfn */
				       NULL,		/* releasefn */
				       NULL,		/* source */
				       0,		/* qcache_max */
				       VM_SLEEP,
				       IPL_VM);
	KASSERT(sgmap->aps_arena != NULL);
	return;
 die:
	panic("vax_sgmap_init");
}

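/*
 * vax_sgmap_alloc --
 *	Allocate page-rounded scatter/gather virtual address space for a
 *	DMA map from the sgmap's arena, honoring the map's boundary
 *	constraint.  VAX_BUS_DMA_SPILLPAGE adds one extra page for the
 *	VS4000 SCSI spill page (see vax_sgmap_load() below).
 */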
int
vax_sgmap_alloc(bus_dmamap_t map, bus_size_t origlen, struct vax_sgmap *sgmap,
	int flags)
{
	int error;
	bus_size_t len = origlen;

#ifdef DIAGNOSTIC
	if (map->_dm_flags & DMAMAP_HAS_SGMAP)
		panic("vax_sgmap_alloc: already have sgva space");
#endif

	/*
	 * If we need a spill page (for the VS4000 SCSI), make sure we
	 * allocate enough space for an extra page.
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		len += VAX_NBPG;
	}

	map->_dm_sgvalen = vax_round_page(len);
#define DEBUG_SGMAP 0
#if DEBUG_SGMAP
	printf("len %x -> %x, _dm_sgvalen %x _dm_boundary %x -> ",
	    (unsigned int)origlen, (unsigned int)len,
	    (unsigned int)map->_dm_sgvalen, (unsigned int)map->_dm_boundary);
#endif

	const vm_flag_t vmflags = VM_BESTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	error = vmem_xalloc(sgmap->aps_arena, map->_dm_sgvalen,
			    0,			/* alignment */
			    0,			/* phase */
			    map->_dm_boundary,	/* nocross */
			    VMEM_ADDR_MIN,	/* minaddr */
			    VMEM_ADDR_MAX,	/* maxaddr */
			    vmflags,
			    &map->_dm_sgva);

#if DEBUG_SGMAP
	printf("error %d _dm_sgva %lx\n", error, map->_dm_sgva);
#endif

	if (error == 0)
		map->_dm_flags |= DMAMAP_HAS_SGMAP;
	else
		map->_dm_flags &= ~DMAMAP_HAS_SGMAP;

	return (error);
}

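/*
 * vax_sgmap_free --
 *	Return a map's scatter/gather virtual address space to the
 *	sgmap arena.
 */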
void
vax_sgmap_free(bus_dmamap_t map, struct vax_sgmap *sgmap)
{

#ifdef DIAGNOSTIC
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0)
		panic("vax_sgmap_free: no sgva space to free");
#endif

	vmem_xfree(sgmap->aps_arena, map->_dm_sgva, map->_dm_sgvalen);

	map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
}

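/*
 * vax_sgmap_reserve --
 *	Reserve a fixed range of scatter/gather virtual address space
 *	so that vax_sgmap_alloc() never hands it out.
 */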
int
vax_sgmap_reserve(bus_addr_t ba, bus_size_t len, struct vax_sgmap *sgmap)
{
	return vmem_xalloc_addr(sgmap->aps_arena, ba, len, VM_NOSLEEP);
}

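/*
 * vax_sgmap_load --
 *	Load a buffer into a DMA map: allocate scatter/gather space for
 *	the map if it has none, then fill in one bus PTE per VAX page of
 *	the buffer.  The sgmap makes the buffer appear contiguous to the
 *	bus, so the resulting map always has exactly one segment.
 */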
int
vax_sgmap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
	struct proc *p, int flags, struct vax_sgmap *sgmap)
{
	vaddr_t endva, va = (vaddr_t)buf;
	paddr_t pa;
	bus_addr_t dmaoffset;
	bus_size_t dmalen;
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int pteidx, error;

	/*
	 * Make sure that on an error condition we return "no valid
	 * mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * Remember the offset into the first page and the total
	 * transfer length.
	 */
	dmaoffset = ((u_long)buf) & VAX_PGOFSET;
	dmalen = buflen;

	/*
	 * Allocate the necessary virtual address space for the
	 * mapping.  Round the size, since we deal with whole pages.
	 */
	endva = vax_round_page(va + buflen);
	va = vax_trunc_page(va);
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0) {
		error = vax_sgmap_alloc(map, (endva - va), sgmap, flags);
		if (error)
			return (error);
	}

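	/*
	 * The sgmap page table has one PTE per VAX page of scatter/gather
	 * space, so the first PTE for this mapping is indexed by the
	 * allocated sgva's page number.
	 */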
	pteidx = map->_dm_sgva >> VAX_PGSHIFT;
	pte = &page_table[pteidx];

	/*
	 * Generate the DMA address.
	 */
	map->dm_segs[0].ds_addr = map->_dm_sgva + dmaoffset;
	map->dm_segs[0].ds_len = dmalen;

	map->_dm_pteidx = pteidx;
	map->_dm_ptecnt = 0;

	/*
	 * Create the bus-specific page tables.  This could be done
	 * much more efficiently.
	 */
	for (; va < endva; va += VAX_NBPG, pte++, map->_dm_ptecnt++) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			(void) pmap_extract(p->p_vmspace->vm_map.pmap, va, &pa);
		else
			pa = kvtophys(va);

		/*
		 * Load the current PTE with this page.
		 */
		*pte = (pa >> VAX_PGSHIFT) | PG_V;
	}

	/*
	 * The VS4000 SCSI prefetcher doesn't like to end on a page
	 * boundary, so add an extra page to quiet it down.
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		*pte = pte[-1];
		map->_dm_ptecnt++;
	}

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	return (0);
}

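/*
 * A hypothetical load/unload pairing (map creation, DMA start-up and
 * error handling omitted; the "sc" fields are illustrative, not taken
 * from a real driver):
 *
 *	error = vax_sgmap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
 *	    NULL, BUS_DMA_NOWAIT, &sc->sc_sgmap);
 *	...
 *	vax_sgmap_unload(sc->sc_dmat, sc->sc_dmamap, &sc->sc_sgmap);
 *
 * The mbuf, uio and raw variants below are not implemented on VAX and
 * panic if called.
 */
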
int
vax_sgmap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
	int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_mbuf: not implemented");
}

int
vax_sgmap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
	int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_uio: not implemented");
}

int
vax_sgmap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
	int nsegs, bus_size_t size, int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_raw: not implemented");
}

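/*
 * vax_sgmap_unload --
 *	Invalidate the PTEs covering a mapping, release its scatter/gather
 *	space unless the map was created with BUS_DMA_ALLOCNOW, and mark
 *	the map as having no valid mappings.
 */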
void
vax_sgmap_unload(bus_dma_tag_t t, bus_dmamap_t map, struct vax_sgmap *sgmap)
{
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int ptecnt;

	/*
	 * Invalidate the PTEs for the mapping.
	 */
	for (ptecnt = map->_dm_ptecnt, pte = &page_table[map->_dm_pteidx];
	    ptecnt-- != 0; ) {
		*pte++ = 0;
	}

	/*
	 * Free the virtual address space used by the mapping
	 * if necessary.
	 */
	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		vax_sgmap_free(map, sgmap);

	/*
	 * Mark the mapping invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}