/*	$NetBSD: uba_dma.c,v 1.11 2010/12/14 23:38:30 matt Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uba_dma.c,v 1.11 2010/12/14 23:38:30 matt Exp $");

#define _VAX_BUS_DMA_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/sgmap.h>

#include <dev/qbus/ubavar.h>

#include <arch/vax/uba/uba_common.h>

int	uba_bus_dmamap_create_sgmap(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);

void	uba_bus_dmamap_destroy_sgmap(bus_dma_tag_t, bus_dmamap_t);

int	uba_bus_dmamap_load_sgmap(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);

int	uba_bus_dmamap_load_mbuf_sgmap(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);

int	uba_bus_dmamap_load_uio_sgmap(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);

int	uba_bus_dmamap_load_raw_sgmap(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);

void	uba_bus_dmamap_unload_sgmap(bus_dma_tag_t, bus_dmamap_t);

void	uba_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int);

void
uba_dma_init(struct uba_vsoftc *sc)
{
	bus_dma_tag_t t;
	struct pte *pte;

	/*
	 * Initialize the DMA tag used for sgmap-mapped DMA.
	 */
	t = &sc->uv_dmat;
	t->_cookie = sc;
	t->_wbase = 0;
	t->_wsize = sc->uv_size;
	t->_boundary = 0;
	t->_sgmap = &sc->uv_sgmap;
	t->_dmamap_create = uba_bus_dmamap_create_sgmap;
	t->_dmamap_destroy = uba_bus_dmamap_destroy_sgmap;
	t->_dmamap_load = uba_bus_dmamap_load_sgmap;
	t->_dmamap_load_mbuf = uba_bus_dmamap_load_mbuf_sgmap;
	t->_dmamap_load_uio = uba_bus_dmamap_load_uio_sgmap;
	t->_dmamap_load_raw = uba_bus_dmamap_load_raw_sgmap;
	t->_dmamap_unload = uba_bus_dmamap_unload_sgmap;
	t->_dmamap_sync = uba_bus_dmamap_sync;

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * Map in Unibus map registers, if not mapped in already.
	 */
	if (sc->uv_uba) {
		pte = sc->uv_uba->uba_map;
	} else {
		pte = (struct pte *)vax_map_physmem(sc->uv_addr,
		    vax_btoc(vax_btoc(sc->uv_size) * sizeof(struct pte)));
		if (pte == 0)
			panic("uba_dma_init");
	}
	/*
	 * Initialize the SGMAP.
	 */
	vax_sgmap_init(t, &sc->uv_sgmap, "uba_sgmap", 0, sc->uv_size, pte, 0);

}

/*
 * Create a UBA SGMAP-mapped DMA map.
 */
int
uba_bus_dmamap_create_sgmap(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags,
    bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	int error;

	error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
	    boundary, flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;

	if (flags & BUS_DMA_ALLOCNOW) {
		error = vax_sgmap_alloc(map, vax_round_page(size),
		    t->_sgmap, flags);
		if (error)
			uba_bus_dmamap_destroy_sgmap(t, map);
	}

	return (error);
}

/*
 * Destroy a UBA SGMAP-mapped DMA map.
 */
void
uba_bus_dmamap_destroy_sgmap(bus_dma_tag_t t, bus_dmamap_t map)
{

	if (map->_dm_flags & DMAMAP_HAS_SGMAP)
		vax_sgmap_free(map, t->_sgmap);

	_bus_dmamap_destroy(t, map);
}

/*
 * Load a UBA SGMAP-mapped DMA map with a linear buffer.
 */
int
uba_bus_dmamap_load_sgmap(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int error;

	error = vax_sgmap_load(t, map, buf, buflen, p, flags, t->_sgmap);
	/*
	 * XXX - Set up BDPs.
	 */

	return (error);
}

/*
 * Load a UBA SGMAP-mapped DMA map with an mbuf chain.
 */
int
uba_bus_dmamap_load_mbuf_sgmap(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m, int flags)
{
	int error;

	error = vax_sgmap_load_mbuf(t, map, m, flags, t->_sgmap);

	return (error);
}

/*
 * Load a UBA SGMAP-mapped DMA map with a uio.
 */
int
uba_bus_dmamap_load_uio_sgmap(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{
	int error;

	error = vax_sgmap_load_uio(t, map, uio, flags, t->_sgmap);

	return (error);
}

/*
 * Load a UBA SGMAP-mapped DMA map with raw memory.
 */
int
uba_bus_dmamap_load_raw_sgmap(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	int error;

	error = vax_sgmap_load_raw(t, map, segs, nsegs, size, flags,
	    t->_sgmap);

	return (error);
}

/*
 * Unload a UBA DMA map.
 */
void
uba_bus_dmamap_unload_sgmap(bus_dma_tag_t t, bus_dmamap_t map)
{
	/*
	 * Invalidate any SGMAP page table entries used by this
	 * mapping.
	 */
	vax_sgmap_unload(t, map, t->_sgmap);

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Sync the bus map.  This is only needed if BDPs are used.
 */
void
uba_bus_dmamap_sync(bus_dma_tag_t tag, bus_dmamap_t dmam, bus_addr_t offset,
    bus_size_t len, int ops)
{
	/* Only BDP handling, but not yet. */
}
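
/*
 * The sketch below is illustrative only and is not part of uba_dma.c proper:
 * it shows how a hypothetical Unibus device driver might exercise the
 * sgmap-backed tag set up by uba_dma_init() through the standard bus_dma(9)
 * entry points (bus_dmamap_create/load/sync/unload/destroy), which dispatch
 * to the uba_bus_dmamap_*_sgmap routines above.  The "dmat" parameter stands
 * in for whatever DMA tag a real driver would receive at attach time, and
 * the function name is made up for this example; it is kept under
 * "#ifdef notdef" so it is never compiled.
 */
#ifdef notdef
static int
example_uba_dma_xfer(bus_dma_tag_t dmat, void *buf, bus_size_t len)
{
	bus_dmamap_t map;
	int error;

	/* Create a map good for one contiguous transfer of up to "len". */
	error = bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT, &map);
	if (error)
		return (error);

	/*
	 * Load the (kernel) buffer; this allocates Unibus map registers
	 * through the sgmap and fills in map->dm_segs[].
	 */
	error = bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error) {
		bus_dmamap_destroy(dmat, map);
		return (error);
	}

	/* Flush the buffer toward the device before starting the transfer. */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);

	/*
	 * ... program the device with map->dm_segs[0].ds_addr, start the
	 * transfer, and wait for completion ...
	 */

	/* Tear down: release the map registers and then the map itself. */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dmat, map);
	bus_dmamap_destroy(dmat, map);
	return (0);
}
#endif /* notdef */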