gapspci_dma.c revision 1.21 1 1.21 thorpej /* $NetBSD: gapspci_dma.c,v 1.21 2023/12/02 22:42:02 thorpej Exp $ */
2 1.1 thorpej
3 1.1 thorpej /*-
4 1.1 thorpej * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 1.1 thorpej * All rights reserved.
6 1.1 thorpej *
7 1.1 thorpej * This code is derived from software contributed to The NetBSD Foundation
8 1.1 thorpej * by Jason R. Thorpe.
9 1.1 thorpej *
10 1.1 thorpej * Redistribution and use in source and binary forms, with or without
11 1.1 thorpej * modification, are permitted provided that the following conditions
12 1.1 thorpej * are met:
13 1.1 thorpej * 1. Redistributions of source code must retain the above copyright
14 1.1 thorpej * notice, this list of conditions and the following disclaimer.
15 1.1 thorpej * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 thorpej * notice, this list of conditions and the following disclaimer in the
17 1.1 thorpej * documentation and/or other materials provided with the distribution.
18 1.1 thorpej *
19 1.1 thorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 thorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 thorpej * POSSIBILITY OF SUCH DAMAGE.
30 1.1 thorpej */
31 1.1 thorpej
32 1.1 thorpej /*
33 1.1 thorpej * Bus DMA implementation for the SEGA GAPS PCI bridge.
34 1.1 thorpej *
35 1.21 thorpej * NOTE: We only implement a small subset of what the bus_dma(9)
36 1.1 thorpej * API specifies. Right now, the GAPS PCI bridge is only used for
 * the Dreamcast Broadband Adapter, so we only provide what the
38 1.1 thorpej * pci(4) and rtk(4) drivers need.
39 1.1 thorpej */
40 1.1 thorpej
41 1.1 thorpej #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
42 1.21 thorpej __KERNEL_RCSID(0, "$NetBSD: gapspci_dma.c,v 1.21 2023/12/02 22:42:02 thorpej Exp $");
43 1.1 thorpej
44 1.1 thorpej #include <sys/param.h>
45 1.13 tsutsui #include <sys/systm.h>
46 1.1 thorpej #include <sys/device.h>
47 1.1 thorpej #include <sys/mbuf.h>
48 1.21 thorpej #include <sys/vmem.h>
49 1.1 thorpej #include <sys/malloc.h>
50 1.19 dyoung #include <sys/bus.h>
51 1.1 thorpej
52 1.13 tsutsui #include <machine/cpu.h>
53 1.1 thorpej
54 1.1 thorpej #include <dev/pci/pcivar.h>
55 1.1 thorpej
56 1.1 thorpej #include <dreamcast/dev/g2/gapspcivar.h>
57 1.1 thorpej
58 1.18 uebayasi #include <uvm/uvm.h>
59 1.1 thorpej
60 1.1 thorpej int gaps_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
61 1.1 thorpej bus_size_t, int, bus_dmamap_t *);
62 1.1 thorpej void gaps_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
63 1.1 thorpej int gaps_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
64 1.1 thorpej struct proc *, int);
65 1.1 thorpej int gaps_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *, int);
66 1.1 thorpej int gaps_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int);
67 1.1 thorpej int gaps_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
68 1.1 thorpej int, bus_size_t, int);
69 1.1 thorpej void gaps_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
70 1.1 thorpej void gaps_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
71 1.1 thorpej bus_size_t, int);
72 1.1 thorpej
73 1.1 thorpej int gaps_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
74 1.1 thorpej bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
75 1.1 thorpej int nsegs, int *rsegs, int flags);
76 1.1 thorpej void gaps_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs);
77 1.1 thorpej int gaps_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
78 1.14 christos size_t size, void **kvap, int flags);
79 1.14 christos void gaps_dmamem_unmap(bus_dma_tag_t tag, void *kva, size_t size);
80 1.1 thorpej paddr_t gaps_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
81 1.1 thorpej off_t off, int prot, int flags);
82 1.1 thorpej
83 1.1 thorpej void
84 1.1 thorpej gaps_dma_init(struct gaps_softc *sc)
85 1.1 thorpej {
86 1.1 thorpej bus_dma_tag_t t = &sc->sc_dmat;
87 1.1 thorpej
88 1.1 thorpej memset(t, 0, sizeof(*t));
89 1.1 thorpej
90 1.1 thorpej t->_cookie = sc;
91 1.1 thorpej t->_dmamap_create = gaps_dmamap_create;
92 1.1 thorpej t->_dmamap_destroy = gaps_dmamap_destroy;
93 1.1 thorpej t->_dmamap_load = gaps_dmamap_load;
94 1.1 thorpej t->_dmamap_load_mbuf = gaps_dmamap_load_mbuf;
95 1.1 thorpej t->_dmamap_load_uio = gaps_dmamap_load_uio;
96 1.1 thorpej t->_dmamap_load_raw = gaps_dmamap_load_raw;
97 1.1 thorpej t->_dmamap_unload = gaps_dmamap_unload;
98 1.1 thorpej t->_dmamap_sync = gaps_dmamap_sync;
99 1.1 thorpej
100 1.1 thorpej t->_dmamem_alloc = gaps_dmamem_alloc;
101 1.1 thorpej t->_dmamem_free = gaps_dmamem_free;
102 1.1 thorpej t->_dmamem_map = gaps_dmamem_map;
103 1.1 thorpej t->_dmamem_unmap = gaps_dmamem_unmap;
104 1.1 thorpej t->_dmamem_mmap = gaps_dmamem_mmap;
105 1.1 thorpej
106 1.1 thorpej /*
107 1.1 thorpej * The GAPS PCI bridge has 32k of DMA memory. We manage it
108 1.21 thorpej * with a vmem arena.
109 1.1 thorpej */
110 1.21 thorpej sc->sc_dma_arena = vmem_create("gaps dma",
111 1.21 thorpej sc->sc_dmabase,
112 1.21 thorpej sc->sc_dmasize,
113 1.21 thorpej 1024 /* XXX */, /* quantum */
114 1.21 thorpej NULL, /* allocfn */
115 1.21 thorpej NULL, /* freefn */
116 1.21 thorpej NULL, /* arg */
117 1.21 thorpej 0, /* qcache_max */
118 1.21 thorpej VM_SLEEP,
119 1.21 thorpej IPL_VM);
120 1.1 thorpej
121 1.2 marcus if (bus_space_map(sc->sc_memt, sc->sc_dmabase, sc->sc_dmasize,
122 1.1 thorpej 0, &sc->sc_dma_memh) != 0)
123 1.1 thorpej panic("gaps_dma_init: can't map SRAM buffer");
124 1.1 thorpej }
125 1.1 thorpej
/*
 * A GAPS DMA map -- has the standard DMA map, plus some extra
 * housekeeping data.
 */
struct gaps_dmamap {
	struct dreamcast_bus_dmamap gd_dmamap;	/* base map; must be first so
						   casts to bus_dmamap_t work */
	void *gd_origbuf;	/* caller's buffer: KVA or struct mbuf *,
				   per gd_buftype */
	int gd_buftype;		/* GAPS_DMA_BUFTYPE_* tag for gd_origbuf */
};

#define	GAPS_DMA_BUFTYPE_INVALID	0	/* map not currently loaded */
#define	GAPS_DMA_BUFTYPE_LINEAR		1	/* gd_origbuf is a flat buffer */
#define	GAPS_DMA_BUFTYPE_MBUF		2	/* gd_origbuf is an mbuf chain */
139 1.1 thorpej
140 1.1 thorpej int
141 1.1 thorpej gaps_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
142 1.1 thorpej bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
143 1.1 thorpej {
144 1.1 thorpej struct gaps_softc *sc = t->_cookie;
145 1.1 thorpej struct gaps_dmamap *gmap;
146 1.1 thorpej bus_dmamap_t map;
147 1.1 thorpej
148 1.1 thorpej /*
149 1.1 thorpej * Allocate an initialize the DMA map. The end of the map is
150 1.1 thorpej * a variable-sized array of segments, so we allocate enough
151 1.1 thorpej * room for them in one shot. Since the DMA map always includes
152 1.1 thorpej * one segment, and we only support one segment, this is really
153 1.1 thorpej * easy.
154 1.1 thorpej *
155 1.1 thorpej * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
156 1.1 thorpej * of ALLOCNOW notifies others that we've reserved these resources
157 1.1 thorpej * and they are not to be freed.
158 1.1 thorpej */
159 1.1 thorpej
160 1.1 thorpej gmap = malloc(sizeof(*gmap), M_DMAMAP,
161 1.1 thorpej (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
162 1.1 thorpej if (gmap == NULL)
163 1.9 tsutsui return ENOMEM;
164 1.1 thorpej
165 1.1 thorpej memset(gmap, 0, sizeof(*gmap));
166 1.1 thorpej
167 1.1 thorpej gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;
168 1.1 thorpej
169 1.1 thorpej map = &gmap->gd_dmamap;
170 1.1 thorpej
171 1.1 thorpej map->_dm_size = size;
172 1.1 thorpej map->_dm_segcnt = 1;
173 1.10 matt map->_dm_maxmaxsegsz = maxsegsz;
174 1.1 thorpej map->_dm_boundary = boundary;
175 1.1 thorpej map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
176 1.10 matt map->dm_maxsegsz = maxsegsz;
177 1.1 thorpej
178 1.1 thorpej if (flags & BUS_DMA_ALLOCNOW) {
179 1.21 thorpej vmem_addr_t res;
180 1.1 thorpej int error;
181 1.1 thorpej
182 1.21 thorpej const vm_flag_t vmflags = VM_BESTFIT |
183 1.21 thorpej ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
184 1.21 thorpej
185 1.21 thorpej error = vmem_xalloc(sc->sc_dma_arena, size,
186 1.21 thorpej 0, /* alignment */
187 1.21 thorpej 0, /* phase */
188 1.21 thorpej 0, /* nocross */
189 1.21 thorpej VMEM_ADDR_MIN, /* minaddr */
190 1.21 thorpej VMEM_ADDR_MAX, /* maxaddr */
191 1.21 thorpej vmflags,
192 1.21 thorpej &res);
193 1.1 thorpej if (error) {
194 1.1 thorpej free(gmap, M_DEVBUF);
195 1.9 tsutsui return error;
196 1.1 thorpej }
197 1.1 thorpej
198 1.1 thorpej map->dm_segs[0].ds_addr = res;
199 1.1 thorpej map->dm_segs[0].ds_len = size;
200 1.13 tsutsui
201 1.1 thorpej map->dm_mapsize = size;
202 1.1 thorpej map->dm_nsegs = 1;
203 1.1 thorpej } else {
204 1.1 thorpej map->dm_mapsize = 0; /* no valid mappings */
205 1.1 thorpej map->dm_nsegs = 0;
206 1.1 thorpej }
207 1.1 thorpej
208 1.1 thorpej *dmamap = map;
209 1.1 thorpej
210 1.9 tsutsui return 0;
211 1.1 thorpej }
212 1.1 thorpej
213 1.1 thorpej void
214 1.1 thorpej gaps_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
215 1.1 thorpej {
216 1.1 thorpej struct gaps_softc *sc = t->_cookie;
217 1.1 thorpej
218 1.1 thorpej if (map->_dm_flags & BUS_DMA_ALLOCNOW) {
219 1.21 thorpej vmem_xfree(sc->sc_dma_arena, map->dm_segs[0].ds_addr,
220 1.21 thorpej map->dm_mapsize);
221 1.1 thorpej }
222 1.1 thorpej free(map, M_DMAMAP);
223 1.1 thorpej }
224 1.1 thorpej
225 1.1 thorpej int
226 1.1 thorpej gaps_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *addr,
227 1.1 thorpej bus_size_t size, struct proc *p, int flags)
228 1.1 thorpej {
229 1.1 thorpej struct gaps_softc *sc = t->_cookie;
230 1.1 thorpej struct gaps_dmamap *gmap = (void *) map;
231 1.21 thorpej vmem_addr_t res;
232 1.1 thorpej int error;
233 1.1 thorpej
234 1.1 thorpej if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
235 1.1 thorpej /*
236 1.1 thorpej * Make sure that on error condition we return
237 1.1 thorpej * "no valid mappings".
238 1.1 thorpej */
239 1.1 thorpej map->dm_mapsize = 0;
240 1.1 thorpej map->dm_nsegs = 0;
241 1.10 matt KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
242 1.1 thorpej }
243 1.1 thorpej
244 1.1 thorpej /* XXX Don't support DMA to process space right now. */
245 1.1 thorpej if (p != NULL)
246 1.9 tsutsui return EINVAL;
247 1.1 thorpej
248 1.1 thorpej if (size > map->_dm_size)
249 1.9 tsutsui return EINVAL;
250 1.1 thorpej
251 1.21 thorpej const vm_flag_t vmflags = VM_BESTFIT |
252 1.21 thorpej ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
253 1.21 thorpej
254 1.21 thorpej error = vmem_xalloc(sc->sc_dma_arena, size,
255 1.21 thorpej 0, /* alignment */
256 1.21 thorpej 0, /* phase */
257 1.21 thorpej map->_dm_boundary, /* nocross */
258 1.21 thorpej VMEM_ADDR_MIN, /* minaddr */
259 1.21 thorpej VMEM_ADDR_MAX, /* maxaddr */
260 1.21 thorpej vmflags,
261 1.21 thorpej &res);
262 1.1 thorpej if (error)
263 1.9 tsutsui return error;
264 1.1 thorpej
265 1.1 thorpej map->dm_segs[0].ds_addr = res;
266 1.1 thorpej map->dm_segs[0].ds_len = size;
267 1.1 thorpej
268 1.1 thorpej gmap->gd_origbuf = addr;
269 1.1 thorpej gmap->gd_buftype = GAPS_DMA_BUFTYPE_LINEAR;
270 1.1 thorpej
271 1.1 thorpej map->dm_mapsize = size;
272 1.1 thorpej map->dm_nsegs = 1;
273 1.1 thorpej
274 1.9 tsutsui return 0;
275 1.1 thorpej }
276 1.1 thorpej
277 1.1 thorpej int
278 1.1 thorpej gaps_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
279 1.1 thorpej int flags)
280 1.1 thorpej {
281 1.1 thorpej struct gaps_softc *sc = t->_cookie;
282 1.1 thorpej struct gaps_dmamap *gmap = (void *) map;
283 1.21 thorpej vmem_addr_t res;
284 1.1 thorpej int error;
285 1.1 thorpej
286 1.1 thorpej if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
287 1.1 thorpej /*
288 1.1 thorpej * Make sure that on error condition we return
289 1.1 thorpej * "no valid mappings".
290 1.1 thorpej */
291 1.1 thorpej map->dm_mapsize = 0;
292 1.1 thorpej map->dm_nsegs = 0;
293 1.10 matt KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
294 1.1 thorpej }
295 1.1 thorpej
296 1.1 thorpej #ifdef DIAGNOSTIC
297 1.1 thorpej if ((m0->m_flags & M_PKTHDR) == 0)
298 1.1 thorpej panic("gaps_dmamap_load_mbuf: no packet header");
299 1.1 thorpej #endif
300 1.1 thorpej
301 1.1 thorpej if (m0->m_pkthdr.len > map->_dm_size)
302 1.9 tsutsui return EINVAL;
303 1.1 thorpej
304 1.21 thorpej const vm_flag_t vmflags = VM_BESTFIT |
305 1.21 thorpej ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
306 1.21 thorpej
307 1.21 thorpej error = vmem_xalloc(sc->sc_dma_arena, m0->m_pkthdr.len,
308 1.21 thorpej 0, /* alignment */
309 1.21 thorpej 0, /* phase */
310 1.21 thorpej map->_dm_boundary, /* nocross */
311 1.21 thorpej VMEM_ADDR_MIN, /* minaddr */
312 1.21 thorpej VMEM_ADDR_MAX, /* maxaddr */
313 1.21 thorpej vmflags,
314 1.21 thorpej &res);
315 1.1 thorpej if (error)
316 1.9 tsutsui return error;
317 1.1 thorpej
318 1.1 thorpej map->dm_segs[0].ds_addr = res;
319 1.1 thorpej map->dm_segs[0].ds_len = m0->m_pkthdr.len;
320 1.1 thorpej
321 1.1 thorpej gmap->gd_origbuf = m0;
322 1.1 thorpej gmap->gd_buftype = GAPS_DMA_BUFTYPE_MBUF;
323 1.1 thorpej
324 1.1 thorpej map->dm_mapsize = m0->m_pkthdr.len;
325 1.1 thorpej map->dm_nsegs = 1;
326 1.1 thorpej
327 1.9 tsutsui return 0;
328 1.1 thorpej }
329 1.1 thorpej
330 1.1 thorpej int
331 1.1 thorpej gaps_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
332 1.1 thorpej int flags)
333 1.1 thorpej {
334 1.1 thorpej
335 1.1 thorpej printf("gaps_dmamap_load_uio: not implemented\n");
336 1.9 tsutsui return EINVAL;
337 1.1 thorpej }
338 1.1 thorpej
339 1.1 thorpej int
340 1.1 thorpej gaps_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
341 1.1 thorpej bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
342 1.1 thorpej {
343 1.1 thorpej
344 1.1 thorpej printf("gaps_dmamap_load_raw: not implemented\n");
345 1.9 tsutsui return EINVAL;
346 1.1 thorpej }
347 1.1 thorpej
348 1.1 thorpej void
349 1.1 thorpej gaps_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
350 1.1 thorpej {
351 1.1 thorpej struct gaps_softc *sc = t->_cookie;
352 1.1 thorpej struct gaps_dmamap *gmap = (void *) map;
353 1.1 thorpej
354 1.1 thorpej if (gmap->gd_buftype == GAPS_DMA_BUFTYPE_INVALID) {
355 1.1 thorpej printf("gaps_dmamap_unload: DMA map not loaded!\n");
356 1.1 thorpej return;
357 1.1 thorpej }
358 1.1 thorpej
359 1.1 thorpej if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
360 1.21 thorpej vmem_xfree(sc->sc_dma_arena, map->dm_segs[0].ds_addr,
361 1.21 thorpej map->dm_mapsize);
362 1.1 thorpej
363 1.10 matt map->dm_maxsegsz = map->_dm_maxmaxsegsz;
364 1.1 thorpej map->dm_mapsize = 0;
365 1.1 thorpej map->dm_nsegs = 0;
366 1.1 thorpej }
367 1.1 thorpej
368 1.1 thorpej gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;
369 1.1 thorpej }
370 1.1 thorpej
/*
 * gaps_dmamap_sync:
 *
 *	Synchronize the caller's buffer with the SRAM bounce buffer.
 *	The hardware can only DMA to/from the bridge SRAM, so PREWRITE
 *	copies the caller's data into the SRAM and POSTREAD copies it
 *	back out; PREREAD and POSTWRITE need no work.  `offset' and
 *	`len' are relative to the start of the loaded mapping.
 */
void
gaps_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	/* This map's SRAM region, as an offset from the SRAM base. */
	bus_addr_t dmaoff = map->dm_segs[0].ds_addr - sc->sc_dmabase;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("gaps_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	/* Range-check only the ops that actually move data. */
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize) {
			printf("offset 0x%lx mapsize 0x%lx\n",
			    offset, map->dm_mapsize);
			panic("gaps_dmamap_sync: bad offset");
		}
		if (len == 0 || (offset + len) > map->dm_mapsize) {
			printf("len 0x%lx offset 0x%lx mapsize 0x%lx\n",
			    len, offset, map->dm_mapsize);
			panic("gaps_dmamap_sync: bad length");
		}
	}
#endif

	switch (gmap->gd_buftype) {
	case GAPS_DMA_BUFTYPE_INVALID:
		printf("gaps_dmamap_sync: DMA map is not loaded!\n");
		return;

	case GAPS_DMA_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the SRAM buffer.
			 */
			bus_space_write_region_1(sc->sc_memt,
			    sc->sc_dma_memh,
			    dmaoff + offset,
			    (uint8_t *)gmap->gd_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the SRAM buffer to the caller's buffer.
			 */
			bus_space_read_region_1(sc->sc_memt,
			    sc->sc_dma_memh,
			    dmaoff + offset,
			    (uint8_t *)gmap->gd_origbuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case GAPS_DMA_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = gmap->gd_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer into the SRAM buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			    m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				/* `offset' tracks the SRAM position. */
				bus_space_write_region_1(sc->sc_memt,
				    sc->sc_dma_memh, dmaoff + offset,
				    mtod(m, uint8_t *) + moff, minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the SRAM buffer into the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			    m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				/* `offset' tracks the SRAM position. */
				bus_space_read_region_1(sc->sc_memt,
				    sc->sc_dma_memh, dmaoff + offset,
				    mtod(m, uint8_t *) + moff, minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	default:
		printf("unknown buffer type %d\n", gmap->gd_buftype);
		panic("gaps_dmamap_sync");
	}
}
516 1.1 thorpej
517 1.1 thorpej int
518 1.1 thorpej gaps_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
519 1.1 thorpej bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
520 1.1 thorpej int flags)
521 1.1 thorpej {
522 1.1 thorpej extern paddr_t avail_start, avail_end; /* from pmap.c */
523 1.1 thorpej
524 1.1 thorpej struct pglist mlist;
525 1.1 thorpej paddr_t curaddr, lastaddr;
526 1.5 chs struct vm_page *m;
527 1.1 thorpej int curseg, error;
528 1.1 thorpej
529 1.1 thorpej /* Always round the size. */
530 1.1 thorpej size = round_page(size);
531 1.1 thorpej
532 1.1 thorpej /*
533 1.1 thorpej * Allocate the pages from the VM system.
534 1.1 thorpej */
535 1.1 thorpej error = uvm_pglistalloc(size, avail_start, avail_end - PAGE_SIZE,
536 1.1 thorpej alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
537 1.1 thorpej if (error)
538 1.9 tsutsui return error;
539 1.1 thorpej
540 1.1 thorpej /*
541 1.1 thorpej * Compute the location, size, and number of segments actually
542 1.1 thorpej * returned by the VM code.
543 1.1 thorpej */
544 1.1 thorpej m = mlist.tqh_first;
545 1.1 thorpej curseg = 0;
546 1.1 thorpej lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
547 1.1 thorpej segs[curseg].ds_len = PAGE_SIZE;
548 1.16 ad m = TAILQ_NEXT(m, pageq.queue);
549 1.1 thorpej
550 1.16 ad for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
551 1.1 thorpej curaddr = VM_PAGE_TO_PHYS(m);
552 1.1 thorpej if (curaddr == (lastaddr + PAGE_SIZE))
553 1.1 thorpej segs[curseg].ds_len += PAGE_SIZE;
554 1.1 thorpej else {
555 1.1 thorpej curseg++;
556 1.1 thorpej segs[curseg].ds_addr = curaddr;
557 1.1 thorpej segs[curseg].ds_len = PAGE_SIZE;
558 1.1 thorpej }
559 1.1 thorpej lastaddr = curaddr;
560 1.1 thorpej }
561 1.1 thorpej
562 1.1 thorpej *rsegs = curseg + 1;
563 1.1 thorpej
564 1.9 tsutsui return 0;
565 1.1 thorpej }
566 1.1 thorpej
567 1.1 thorpej void
568 1.1 thorpej gaps_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
569 1.1 thorpej {
570 1.1 thorpej struct pglist mlist;
571 1.5 chs struct vm_page *m;
572 1.1 thorpej bus_addr_t addr;
573 1.1 thorpej int curseg;
574 1.1 thorpej
575 1.1 thorpej /*
576 1.1 thorpej * Build a list of pages to free back to the VM system.
577 1.1 thorpej */
578 1.1 thorpej TAILQ_INIT(&mlist);
579 1.1 thorpej for (curseg = 0; curseg < nsegs; curseg++) {
580 1.1 thorpej for (addr = segs[curseg].ds_addr;
581 1.1 thorpej addr < segs[curseg].ds_addr + segs[curseg].ds_len;
582 1.1 thorpej addr += PAGE_SIZE) {
583 1.1 thorpej m = PHYS_TO_VM_PAGE(addr);
584 1.16 ad TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
585 1.1 thorpej }
586 1.1 thorpej }
587 1.1 thorpej
588 1.1 thorpej uvm_pglistfree(&mlist);
589 1.1 thorpej }
590 1.1 thorpej
591 1.1 thorpej int
592 1.1 thorpej gaps_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
593 1.14 christos size_t size, void **kvap, int flags)
594 1.1 thorpej {
595 1.1 thorpej vaddr_t va;
596 1.1 thorpej bus_addr_t addr;
597 1.1 thorpej int curseg;
598 1.12 yamt const uvm_flag_t kmflags =
599 1.12 yamt (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
600 1.1 thorpej
601 1.1 thorpej /*
602 1.1 thorpej * If we're only mapping 1 segment, use P2SEG, to avoid
603 1.1 thorpej * TLB thrashing.
604 1.1 thorpej */
605 1.1 thorpej if (nsegs == 1) {
606 1.14 christos *kvap = (void *)SH3_PHYS_TO_P2SEG(segs[0].ds_addr);
607 1.9 tsutsui return 0;
608 1.1 thorpej }
609 1.1 thorpej
610 1.1 thorpej size = round_page(size);
611 1.1 thorpej
612 1.12 yamt va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
613 1.1 thorpej
614 1.1 thorpej if (va == 0)
615 1.9 tsutsui return ENOMEM;
616 1.1 thorpej
617 1.14 christos *kvap = (void *)va;
618 1.1 thorpej
619 1.1 thorpej for (curseg = 0; curseg < nsegs; curseg++) {
620 1.1 thorpej for (addr = segs[curseg].ds_addr;
621 1.1 thorpej addr < segs[curseg].ds_addr + segs[curseg].ds_len;
622 1.1 thorpej addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
623 1.1 thorpej if (size == 0)
624 1.1 thorpej panic("gaps_dmamem_map: size botch");
625 1.1 thorpej pmap_kenter_pa(va, addr,
626 1.17 cegger VM_PROT_READ | VM_PROT_WRITE, 0);
627 1.1 thorpej }
628 1.1 thorpej }
629 1.6 chris pmap_update(pmap_kernel());
630 1.1 thorpej
631 1.9 tsutsui return 0;
632 1.1 thorpej }
633 1.1 thorpej
634 1.1 thorpej void
635 1.14 christos gaps_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
636 1.1 thorpej {
637 1.1 thorpej
638 1.1 thorpej #ifdef DIAGNOSTIC
639 1.1 thorpej if ((u_long) kva & PAGE_MASK)
640 1.1 thorpej panic("gaps_dmamem_unmap");
641 1.1 thorpej #endif
642 1.13 tsutsui
643 1.1 thorpej /*
644 1.1 thorpej * Nothing to do if we mapped it with P2SEG.
645 1.1 thorpej */
646 1.14 christos if (kva >= (void *)SH3_P2SEG_BASE &&
647 1.14 christos kva <= (void *)SH3_P2SEG_END)
648 1.1 thorpej return;
649 1.1 thorpej
650 1.1 thorpej size = round_page(size);
651 1.1 thorpej pmap_kremove((vaddr_t) kva, size);
652 1.6 chris pmap_update(pmap_kernel());
653 1.11 yamt uvm_km_free(kernel_map, (vaddr_t) kva, size, UVM_KMF_VAONLY);
654 1.1 thorpej }
655 1.1 thorpej
656 1.1 thorpej paddr_t
657 1.1 thorpej gaps_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
658 1.1 thorpej off_t off, int prot, int flags)
659 1.1 thorpej {
660 1.1 thorpej
661 1.1 thorpej /* Not implemented. */
662 1.9 tsutsui return -1;
663 1.1 thorpej }
664