/* Source: NetBSD sys/arch/alpha/common/sgmap_common.c (viewer navigation header removed) */
      1 /* $NetBSD: sgmap_common.c,v 1.29 2021/07/18 05:12:27 thorpej Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
     34 
     35 __KERNEL_RCSID(0, "$NetBSD: sgmap_common.c,v 1.29 2021/07/18 05:12:27 thorpej Exp $");
     36 
     37 #include <sys/param.h>
     38 #include <sys/systm.h>
     39 #include <sys/kernel.h>
     40 #include <sys/proc.h>
     41 
     42 #include <uvm/uvm_extern.h>
     43 
     44 #define	_ALPHA_BUS_DMA_PRIVATE
     45 #include <sys/bus.h>
     46 
     47 #include <alpha/common/sgmapvar.h>
     48 
     49 /*
     50  * Some systems will prefetch the next page during a memory -> device DMA.
     51  * This can cause machine checks if there is not a spill page after the
     52  * last page of the DMA (thus avoiding hitting an invalid SGMAP PTE).
     53  */
     54 vaddr_t		alpha_sgmap_prefetch_spill_page_va;
     55 bus_addr_t	alpha_sgmap_prefetch_spill_page_pa;
     56 
     57 void
     58 alpha_sgmap_init(bus_dma_tag_t t, struct alpha_sgmap *sgmap, const char *name,
     59     bus_addr_t wbase, bus_addr_t sgvabase, bus_size_t sgvasize, size_t ptesize,
     60     void *ptva, bus_size_t minptalign)
     61 {
     62 	bus_dma_segment_t seg;
     63 	size_t ptsize;
     64 	int rseg;
     65 
     66 	if (sgvasize & PGOFSET) {
     67 		printf("size botch for sgmap `%s'\n", name);
     68 		goto die;
     69 	}
     70 
     71 	/*
     72 	 * If we don't yet have a minimum SGVA alignment, default
     73 	 * to the system page size.
     74 	 */
     75 	if (t->_sgmap_minalign < PAGE_SIZE) {
     76 		t->_sgmap_minalign = PAGE_SIZE;
     77 	}
     78 
     79 	sgmap->aps_wbase = wbase;
     80 	sgmap->aps_sgvabase = sgvabase;
     81 	sgmap->aps_sgvasize = sgvasize;
     82 
     83 	if (ptva != NULL) {
     84 		/*
     85 		 * We already have a page table; this may be a system
     86 		 * where the page table resides in bridge-resident SRAM.
     87 		 */
     88 		sgmap->aps_pt = ptva;
     89 		sgmap->aps_ptpa = 0;
     90 	} else {
     91 		/*
     92 		 * Compute the page table size and allocate it.  At minimum,
     93 		 * this must be aligned to the page table size.  However,
     94 		 * some platforms have more strict alignment reqirements.
     95 		 */
     96 		ptsize = (sgvasize / PAGE_SIZE) * ptesize;
     97 		if (minptalign != 0) {
     98 			if (minptalign < ptsize)
     99 				minptalign = ptsize;
    100 		} else
    101 			minptalign = ptsize;
    102 		if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
    103 		    BUS_DMA_NOWAIT)) {
    104 			panic("unable to allocate page table for sgmap `%s'",
    105 			    name);
    106 			goto die;
    107 		}
    108 		sgmap->aps_ptpa = seg.ds_addr;
    109 		sgmap->aps_pt = (void *)ALPHA_PHYS_TO_K0SEG(sgmap->aps_ptpa);
    110 	}
    111 
    112 	/*
    113 	 * Create the arena used to manage the virtual address
    114 	 * space.
    115 	 *
    116 	 * XXX Consider using a quantum cache up to MAXPHYS+PAGE_SIZE
    117 	 * XXX (extra page to handle the spill page).  For now, we don't,
    118 	 * XXX because we are using constrained allocations everywhere.
    119 	 */
    120 	sgmap->aps_arena = vmem_create(name, sgvabase, sgvasize,
    121 				       PAGE_SIZE,	/* quantum */
    122 				       NULL,		/* importfn */
    123 				       NULL,		/* releasefn */
    124 				       NULL,		/* source */
    125 				       0,		/* qcache_max */
    126 				       VM_SLEEP,
    127 				       IPL_VM);
    128 	KASSERT(sgmap->aps_arena != NULL);
    129 
    130 	/*
    131 	 * Allocate a spill page if that hasn't already been done.
    132 	 */
    133 	if (alpha_sgmap_prefetch_spill_page_va == 0) {
    134 		if (bus_dmamem_alloc(t, PAGE_SIZE, 0, 0, &seg, 1, &rseg,
    135 		    BUS_DMA_NOWAIT)) {
    136 			printf("unable to allocate spill page for sgmap `%s'\n",
    137 			    name);
    138 			goto die;
    139 		}
    140 		alpha_sgmap_prefetch_spill_page_pa = seg.ds_addr;
    141 		alpha_sgmap_prefetch_spill_page_va =
    142 		    ALPHA_PHYS_TO_K0SEG(alpha_sgmap_prefetch_spill_page_pa);
    143 		memset((void *)alpha_sgmap_prefetch_spill_page_va, 0,
    144 		    PAGE_SIZE);
    145 	}
    146 
    147 	return;
    148  die:
    149 	panic("alpha_sgmap_init");
    150 }
    151 
    152 int
    153 alpha_sgmap_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    154     bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
    155 {
    156 	bus_dmamap_t map;
    157 	int error;
    158 
    159 	error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
    160 	    boundary, flags, &map);
    161 	if (error)
    162 		return (error);
    163 
    164 	/* XXX BUS_DMA_ALLOCNOW */
    165 
    166 	if (error == 0)
    167 		*dmamp = map;
    168 	else
    169 		alpha_sgmap_dmamap_destroy(t, map);
    170 
    171 	return (error);
    172 }
    173 
/*
 * alpha_sgmap_dmamap_destroy:
 *
 *	Destroy a DMA map created with alpha_sgmap_dmamap_create().
 *	The map must be unloaded (dm_mapsize == 0) before it is
 *	destroyed; a loaded map would still hold SGVA space.
 */
void
alpha_sgmap_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	/* Catch callers destroying a map that is still loaded. */
	KASSERT(map->dm_mapsize == 0);

	_bus_dmamap_destroy(t, map);
}
    182