/* $NetBSD: sgmap.c,v 1.21 2023/12/20 15:34:45 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sgmap.c,v 1.21 2023/12/20 15:34:45 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/sgmap.h>
     45 
     46 void
     47 vax_sgmap_init(bus_dma_tag_t t, struct vax_sgmap *sgmap, const char *name,
     48 	bus_addr_t sgvabase, bus_size_t sgvasize, struct pte *ptva,
     49 	bus_size_t minptalign)
     50 {
     51 	bus_dma_segment_t seg;
     52 	size_t ptsize;
     53 	int rseg;
     54 
     55 	if (sgvasize & PGOFSET) {
     56 		printf("size botch for sgmap `%s'\n", name);
     57 		goto die;
     58 	}
     59 
     60 	sgmap->aps_sgvabase = sgvabase;
     61 	sgmap->aps_sgvasize = sgvasize;
     62 
	if (ptva != NULL) {
		/*
		 * We already have a page table; this may be a system
		 * where the page table resides in bridge-resident SRAM.
		 */
		sgmap->aps_pt = ptva;
	} else {
		/*
		 * Compute the page table size and allocate it.  At minimum,
		 * this must be aligned to the page table size.  However,
		 * some platforms have stricter alignment requirements.
		 */
		ptsize = (sgvasize / VAX_NBPG) * sizeof(struct pte);
		if (minptalign != 0) {
			if (minptalign < ptsize)
				minptalign = ptsize;
		} else
			minptalign = ptsize;
		if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
		    BUS_DMA_NOWAIT)) {
			panic("unable to allocate page table for sgmap `%s'",
			    name);
			/* NOTREACHED */
		}
		sgmap->aps_pt = (struct pte *)(seg.ds_addr | KERNBASE);
	}

	/*
	 * Create the arena used to manage the virtual address
	 * space.
	 */
	sgmap->aps_arena = vmem_create(name, sgvabase, sgvasize,
				       VAX_NBPG,	/* quantum */
				       NULL,		/* importfn */
				       NULL,		/* releasefn */
				       NULL,		/* source */
				       0,		/* qcache_max */
				       VM_SLEEP,
				       IPL_VM);
	KASSERT(sgmap->aps_arena != NULL);
	return;
 die:
	panic("vax_sgmap_init");
}
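
/*
 * Usage sketch (illustrative only; "example_bus_attach", the softc
 * type, its sc_dmat member, and the 8 MB window are hypothetical and
 * not taken from any particular port).  A bus front-end would
 * typically create its sgmap once at attach time and let this module
 * allocate the page table:
 *
 *	static struct vax_sgmap example_sgmap;
 *
 *	void
 *	example_bus_attach(struct example_softc *sc)
 *	{
 *		vax_sgmap_init(sc->sc_dmat, &example_sgmap, "example_sgmap",
 *		    0, 8 * 1024 * 1024, NULL, 0);
 *	}
 *
 * With an 8 MB SGVA window and 512-byte VAX pages, the page table
 * allocated above is (8 MB / VAX_NBPG) * sizeof(struct pte) =
 * 16384 * 4 = 64 KB, naturally aligned to its own size.
 */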

int
vax_sgmap_alloc(bus_dmamap_t map, bus_size_t origlen, struct vax_sgmap *sgmap,
	int flags)
{
	int error;
	bus_size_t len = origlen;

#ifdef DIAGNOSTIC
	if (map->_dm_flags & DMAMAP_HAS_SGMAP)
		panic("vax_sgmap_alloc: already have sgva space");
#endif

	/*
	 * If we need a spill page (for the VS4000 SCSI), make sure we
	 * allocate enough space for an extra page.
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		len += VAX_NBPG;
	}

	map->_dm_sgvalen = vax_round_page(len);
#define DEBUG_SGMAP 0
#if DEBUG_SGMAP
	printf("len %x -> %x, _dm_sgvalen %x _dm_boundary %x -> ",
	    (unsigned int)origlen, (unsigned int)len,
	    (unsigned int)map->_dm_sgvalen, (unsigned int)map->_dm_boundary);
#endif

	const vm_flag_t vmflags = VM_BESTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	error = vmem_xalloc(sgmap->aps_arena, map->_dm_sgvalen,
			    0,			/* alignment */
			    0,			/* phase */
			    map->_dm_boundary,	/* nocross */
			    VMEM_ADDR_MIN,	/* minaddr */
			    VMEM_ADDR_MAX,	/* maxaddr */
			    vmflags,
			    &map->_dm_sgva);

#if DEBUG_SGMAP
	printf("error %d _dm_sgva %lx\n", error, map->_dm_sgva);
#endif

	if (error == 0)
		map->_dm_flags |= DMAMAP_HAS_SGMAP;
	else
		map->_dm_flags &= ~DMAMAP_HAS_SGMAP;

	return (error);
}
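
/*
 * Worked example of the sizing above (numbers illustrative): a
 * 1000-byte transfer with VAX_BUS_DMA_SPILLPAGE set becomes
 * len = 1000 + VAX_NBPG = 1512, which vax_round_page() rounds up to
 * 1536, i.e. three 512-byte VAX pages of SGVA space in the arena.
 */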

void
vax_sgmap_free(bus_dmamap_t map, struct vax_sgmap *sgmap)
{

#ifdef DIAGNOSTIC
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0)
		panic("vax_sgmap_free: no sgva space to free");
#endif

	vmem_xfree(sgmap->aps_arena, map->_dm_sgva, map->_dm_sgvalen);

	map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
}

int
vax_sgmap_reserve(bus_addr_t ba, bus_size_t len, struct vax_sgmap *sgmap)
{
	return vmem_xalloc_addr(sgmap->aps_arena, ba, len, VM_NOSLEEP);
}
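
/*
 * Example (hypothetical range): a port whose console or firmware
 * already owns part of the DMA window could pin that range at attach
 * time so vax_sgmap_alloc() never hands it out:
 *
 *	if (vax_sgmap_reserve(0x0, 64 * 1024, &example_sgmap) != 0)
 *		printf("unable to reserve console DMA range\n");
 */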

int
vax_sgmap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
	struct proc *p, int flags, struct vax_sgmap *sgmap)
{
	vaddr_t endva, va = (vaddr_t)buf;
	paddr_t pa;
	bus_addr_t dmaoffset;
	bus_size_t dmalen;
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int pteidx, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * Remember the offset into the first page and the total
	 * transfer length.
	 */
	dmaoffset = ((u_long)buf) & VAX_PGOFSET;
	dmalen = buflen;

	/*
	 * Allocate the necessary virtual address space for the
	 * mapping.  Round the size, since we deal with whole pages.
	 */
	endva = vax_round_page(va + buflen);
	va = vax_trunc_page(va);
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0) {
		error = vax_sgmap_alloc(map, (endva - va), sgmap, flags);
		if (error)
			return (error);
	}

	pteidx = map->_dm_sgva >> VAX_PGSHIFT;
	pte = &page_table[pteidx];

	/*
	 * Generate the DMA address.
	 */
	map->dm_segs[0].ds_addr = map->_dm_sgva + dmaoffset;
	map->dm_segs[0].ds_len = dmalen;

	map->_dm_pteidx = pteidx;
	map->_dm_ptecnt = 0;
	/*
	 * Create the bus-specific page tables.  This could be done
	 * much more efficiently.
	 */
	for (; va < endva; va += VAX_NBPG, pte++, map->_dm_ptecnt++) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			(void) pmap_extract(p->p_vmspace->vm_map.pmap, va, &pa);
		else
			pa = kvtophys(va);

		/*
		 * Load the current PTE with this page.
		 */
		*pte = (pa >> VAX_PGSHIFT) | PG_V;
	}

	/*
	 * The VS4000 SCSI prefetcher doesn't like to end on a page
	 * boundary, so add an extra page to quiet it down.
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		*pte = pte[-1];
		map->_dm_ptecnt++;
	}

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	return (0);
}
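
/*
 * Typical call sequence (a sketch; "sc" and its members are
 * hypothetical, and error handling is elided):
 *
 *	error = vax_sgmap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
 *	    NULL, BUS_DMA_NOWAIT, &example_sgmap);
 *	if (error == 0) {
 *		... start the DMA transfer ...
 *		vax_sgmap_unload(sc->sc_dmat, sc->sc_dmamap, &example_sgmap);
 *	}
 *
 * The address returned in dm_segs[0].ds_addr is the allocated SGVA
 * plus the offset of buf within its first 512-byte page; each PTE
 * written above maps one VAX page of the buffer.
 */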

int
vax_sgmap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
	int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_mbuf: not implemented");
}

int
vax_sgmap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
	int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_uio: not implemented");
}

int
vax_sgmap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
	int nsegs, bus_size_t size, int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_raw: not implemented");
}

void
vax_sgmap_unload(bus_dma_tag_t t, bus_dmamap_t map, struct vax_sgmap *sgmap)
{
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int ptecnt;

	/*
	 * Invalidate the PTEs for the mapping.
	 */
	for (ptecnt = map->_dm_ptecnt, pte = &page_table[map->_dm_pteidx];
	    ptecnt-- != 0; ) {
		*pte++ = 0;
	}

	/*
	 * Free the virtual address space used by the mapping
	 * if necessary.
	 */
	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		vax_sgmap_free(map, sgmap);

	/*
	 * Mark the mapping invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}
    313