1 1.128 jmcneill /* $NetBSD: bus_dma.c,v 1.128 2020/12/20 10:34:33 jmcneill Exp $ */
2 1.1 chris
3 1.1 chris /*-
4 1.121 ad * Copyright (c) 1996, 1997, 1998, 2020 The NetBSD Foundation, Inc.
5 1.1 chris * All rights reserved.
6 1.1 chris *
7 1.1 chris * This code is derived from software contributed to The NetBSD Foundation
8 1.1 chris * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 1.1 chris * NASA Ames Research Center.
10 1.1 chris *
11 1.1 chris * Redistribution and use in source and binary forms, with or without
12 1.1 chris * modification, are permitted provided that the following conditions
13 1.1 chris * are met:
14 1.1 chris * 1. Redistributions of source code must retain the above copyright
15 1.1 chris * notice, this list of conditions and the following disclaimer.
16 1.1 chris * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 chris * notice, this list of conditions and the following disclaimer in the
18 1.1 chris * documentation and/or other materials provided with the distribution.
19 1.1 chris *
20 1.1 chris * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 1.1 chris * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 1.1 chris * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 1.1 chris * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 1.1 chris * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 1.1 chris * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 1.1 chris * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 1.1 chris * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 1.1 chris * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 1.1 chris * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 1.1 chris * POSSIBILITY OF SUCH DAMAGE.
31 1.1 chris */
32 1.33 lukem
33 1.35 rearnsha #define _ARM32_BUS_DMA_PRIVATE
34 1.35 rearnsha
35 1.81 matt #include "opt_arm_bus_space.h"
36 1.107 ryo #include "opt_cputypes.h"
37 1.81 matt
38 1.33 lukem #include <sys/cdefs.h>
39 1.128 jmcneill __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.128 2020/12/20 10:34:33 jmcneill Exp $");
40 1.1 chris
41 1.1 chris #include <sys/param.h>
42 1.122 skrll
43 1.84 matt #include <sys/bus.h>
44 1.84 matt #include <sys/cpu.h>
45 1.81 matt #include <sys/kmem.h>
46 1.1 chris #include <sys/mbuf.h>
47 1.1 chris
48 1.53 uebayasi #include <uvm/uvm.h>
49 1.1 chris
50 1.107 ryo #include <arm/cpuconf.h>
51 1.84 matt #include <arm/cpufunc.h>
52 1.4 thorpej
53 1.84 matt #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
54 1.84 matt #include <dev/mm.h>
55 1.84 matt #endif
56 1.1 chris
57 1.76 matt #ifdef BUSDMA_COUNTERS
58 1.58 matt static struct evcnt bus_dma_creates =
59 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "creates");
60 1.58 matt static struct evcnt bus_dma_bounced_creates =
61 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced creates");
62 1.58 matt static struct evcnt bus_dma_loads =
63 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "loads");
64 1.58 matt static struct evcnt bus_dma_bounced_loads =
65 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced loads");
66 1.81 matt static struct evcnt bus_dma_coherent_loads =
67 1.81 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "coherent loads");
68 1.58 matt static struct evcnt bus_dma_read_bounces =
69 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "read bounces");
70 1.58 matt static struct evcnt bus_dma_write_bounces =
71 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "write bounces");
72 1.58 matt static struct evcnt bus_dma_bounced_unloads =
73 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced unloads");
74 1.58 matt static struct evcnt bus_dma_unloads =
75 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "unloads");
76 1.58 matt static struct evcnt bus_dma_bounced_destroys =
77 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced destroys");
78 1.58 matt static struct evcnt bus_dma_destroys =
79 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "destroys");
80 1.95 skrll static struct evcnt bus_dma_sync_prereadwrite =
81 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prereadwrite");
82 1.76 matt static struct evcnt bus_dma_sync_preread_begin =
83 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread begin");
84 1.76 matt static struct evcnt bus_dma_sync_preread =
85 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread");
86 1.76 matt static struct evcnt bus_dma_sync_preread_tail =
87 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread tail");
88 1.95 skrll static struct evcnt bus_dma_sync_prewrite =
89 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prewrite");
90 1.95 skrll static struct evcnt bus_dma_sync_postread =
91 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postread");
92 1.95 skrll static struct evcnt bus_dma_sync_postreadwrite =
93 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postreadwrite");
94 1.95 skrll static struct evcnt bus_dma_sync_postwrite =
95 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postwrite");
96 1.58 matt
97 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_creates);
98 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_bounced_creates);
99 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_loads);
100 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_bounced_loads);
101 1.81 matt EVCNT_ATTACH_STATIC(bus_dma_coherent_loads);
102 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_read_bounces);
103 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_write_bounces);
104 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_unloads);
105 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_bounced_unloads);
106 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_destroys);
107 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_bounced_destroys);
108 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_prereadwrite);
109 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_preread_begin);
110 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_preread);
111 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_preread_tail);
112 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_prewrite);
113 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_postread);
114 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_postreadwrite);
115 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_postwrite);
116 1.58 matt
117 1.58 matt #define STAT_INCR(x) (bus_dma_ ## x.ev_count++)
118 1.76 matt #else
119 1.107 ryo #define STAT_INCR(x) __nothing
120 1.76 matt #endif
121 1.58 matt
122 1.7 thorpej int _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
123 1.48 yamt bus_size_t, struct vmspace *, int);
124 1.1 chris
125 1.1 chris /*
126 1.19 briggs * Check to see if the specified page is in an allowed DMA range.
127 1.19 briggs */
128 1.105 skrll static inline struct arm32_dma_range *
129 1.59 matt _bus_dma_paddr_inrange(struct arm32_dma_range *ranges, int nranges,
130 1.19 briggs bus_addr_t curaddr)
131 1.19 briggs {
132 1.19 briggs struct arm32_dma_range *dr;
133 1.19 briggs int i;
134 1.19 briggs
135 1.19 briggs for (i = 0, dr = ranges; i < nranges; i++, dr++) {
136 1.19 briggs if (curaddr >= dr->dr_sysbase &&
137 1.82 skrll curaddr < (dr->dr_sysbase + dr->dr_len))
138 1.100 skrll return dr;
139 1.19 briggs }
140 1.19 briggs
141 1.100 skrll return NULL;
142 1.19 briggs }
143 1.19 briggs
144 1.19 briggs /*
145 1.59 matt  * Convert the specified busaddr to a paddr; panics if it is not in an allowed DMA range.
146 1.59 matt */
147 1.59 matt static inline paddr_t
148 1.59 matt _bus_dma_busaddr_to_paddr(bus_dma_tag_t t, bus_addr_t curaddr)
149 1.59 matt {
150 1.59 matt struct arm32_dma_range *dr;
151 1.59 matt u_int i;
152 1.59 matt
153 1.59 matt if (t->_nranges == 0)
154 1.59 matt return curaddr;
155 1.59 matt
156 1.59 matt for (i = 0, dr = t->_ranges; i < t->_nranges; i++, dr++) {
157 1.59 matt if (dr->dr_busbase <= curaddr
158 1.82 skrll && curaddr < dr->dr_busbase + dr->dr_len)
159 1.59 matt return curaddr - dr->dr_busbase + dr->dr_sysbase;
160 1.59 matt }
161 1.59 matt panic("%s: curaddr %#lx not in range", __func__, curaddr);
162 1.59 matt }
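/*
 * Worked example of the translation above (illustrative values, not a
 * real platform): given a single range { dr_sysbase = 0x80000000,
 * dr_busbase = 0x0, dr_len = 0x40000000 }, the bus address 0x00001000
 * maps to the physical address 0x00001000 - 0x0 + 0x80000000 =
 * 0x80001000, while any bus address at or beyond 0x40000000 misses
 * every range and hits the panic.
 */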
163 1.59 matt
164 1.59 matt /*
165 1.41 thorpej * Common function to load the specified physical address into the
166 1.41 thorpej * DMA map, coalescing segments and boundary checking as necessary.
167 1.41 thorpej */
168 1.41 thorpej static int
169 1.41 thorpej _bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
170 1.61 matt bus_addr_t paddr, bus_size_t size, bool coherent)
171 1.41 thorpej {
172 1.41 thorpej bus_dma_segment_t * const segs = map->dm_segs;
173 1.41 thorpej int nseg = map->dm_nsegs;
174 1.58 matt bus_addr_t lastaddr;
175 1.41 thorpej bus_addr_t bmask = ~(map->_dm_boundary - 1);
176 1.41 thorpej bus_addr_t curaddr;
177 1.41 thorpej bus_size_t sgsize;
178 1.61 matt uint32_t _ds_flags = coherent ? _BUS_DMAMAP_COHERENT : 0;
179 1.41 thorpej
180 1.41 thorpej if (nseg > 0)
181 1.101 skrll lastaddr = segs[nseg - 1].ds_addr + segs[nseg - 1].ds_len;
182 1.58 matt else
183 1.58 matt 		lastaddr = 0xdead;	/* sentinel; never compared while nseg == 0 */
184 1.95 skrll
185 1.41 thorpej again:
186 1.41 thorpej sgsize = size;
187 1.41 thorpej
188 1.41 thorpej /* Make sure we're in an allowed DMA range. */
189 1.41 thorpej if (t->_ranges != NULL) {
190 1.41 thorpej /* XXX cache last result? */
191 1.41 thorpej const struct arm32_dma_range * const dr =
192 1.59 matt _bus_dma_paddr_inrange(t->_ranges, t->_nranges, paddr);
193 1.41 thorpej if (dr == NULL)
194 1.100 skrll return EINVAL;
195 1.61 matt
196 1.61 matt /*
197 1.61 matt * If this region is coherent, mark the segment as coherent.
198 1.61 matt */
199 1.61 matt _ds_flags |= dr->dr_flags & _BUS_DMAMAP_COHERENT;
200 1.72 skrll
201 1.41 thorpej /*
202 1.41 thorpej * In a valid DMA range. Translate the physical
203 1.41 thorpej * memory address to an address in the DMA window.
204 1.41 thorpej */
205 1.41 thorpej curaddr = (paddr - dr->dr_sysbase) + dr->dr_busbase;
206 1.72 skrll #if 0
207 1.72 skrll printf("%p: %#lx: range %#lx/%#lx/%#lx/%#x: %#x <-- %#lx\n",
208 1.72 skrll t, paddr, dr->dr_sysbase, dr->dr_busbase,
209 1.72 skrll dr->dr_len, dr->dr_flags, _ds_flags, curaddr);
210 1.72 skrll #endif
211 1.41 thorpej } else
212 1.41 thorpej curaddr = paddr;
213 1.41 thorpej
214 1.41 thorpej /*
215 1.41 thorpej * Make sure we don't cross any boundaries.
216 1.41 thorpej */
217 1.41 thorpej if (map->_dm_boundary > 0) {
218 1.41 thorpej bus_addr_t baddr; /* next boundary address */
219 1.41 thorpej
220 1.41 thorpej baddr = (curaddr + map->_dm_boundary) & bmask;
221 1.41 thorpej if (sgsize > (baddr - curaddr))
222 1.41 thorpej sgsize = (baddr - curaddr);
223 1.41 thorpej }
224 1.41 thorpej
225 1.41 thorpej /*
226 1.41 thorpej * Insert chunk into a segment, coalescing with the
227 1.41 thorpej * previous segment if possible.
228 1.41 thorpej */
229 1.41 thorpej if (nseg > 0 && curaddr == lastaddr &&
230 1.101 skrll segs[nseg - 1].ds_len + sgsize <= map->dm_maxsegsz &&
231 1.101 skrll ((segs[nseg - 1]._ds_flags ^ _ds_flags) & _BUS_DMAMAP_COHERENT) == 0 &&
232 1.41 thorpej (map->_dm_boundary == 0 ||
233 1.101 skrll (segs[nseg - 1].ds_addr & bmask) == (curaddr & bmask))) {
234 1.41 thorpej /* coalesce */
235 1.101 skrll segs[nseg - 1].ds_len += sgsize;
236 1.41 thorpej } else if (nseg >= map->_dm_segcnt) {
237 1.100 skrll return EFBIG;
238 1.41 thorpej } else {
239 1.41 thorpej /* new segment */
240 1.41 thorpej segs[nseg].ds_addr = curaddr;
241 1.41 thorpej segs[nseg].ds_len = sgsize;
242 1.61 matt segs[nseg]._ds_flags = _ds_flags;
243 1.41 thorpej nseg++;
244 1.41 thorpej }
245 1.41 thorpej
246 1.41 thorpej lastaddr = curaddr + sgsize;
247 1.41 thorpej
248 1.41 thorpej paddr += sgsize;
249 1.41 thorpej size -= sgsize;
250 1.41 thorpej if (size > 0)
251 1.41 thorpej goto again;
252 1.61 matt
253 1.61 matt 	map->_dm_flags &= (_ds_flags & _BUS_DMAMAP_COHERENT) | ~_BUS_DMAMAP_COHERENT;
254 1.41 thorpej map->dm_nsegs = nseg;
255 1.100 skrll return 0;
256 1.41 thorpej }
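/*
 * Worked example of the boundary clipping above (illustrative values):
 * with map->_dm_boundary = 0x10000, bmask is ~0xffff.  Loading 0x3000
 * bytes at curaddr = 0x1e000 yields baddr = (0x1e000 + 0x10000) &
 * ~0xffff = 0x20000, so sgsize is clipped to 0x2000; the remaining
 * 0x1000 bytes take another trip through the "again" loop starting at
 * the boundary.
 */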
257 1.41 thorpej
258 1.115 skrll static int _bus_dma_uiomove(void *buf, struct uio *uio, size_t n,
259 1.115 skrll int direction);
260 1.115 skrll
261 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
262 1.58 matt static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
263 1.58 matt bus_size_t size, int flags);
264 1.58 matt static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);
265 1.58 matt
266 1.58 matt static int
267 1.58 matt _bus_dma_load_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
268 1.58 matt size_t buflen, int buftype, int flags)
269 1.58 matt {
270 1.58 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
271 1.58 matt struct vmspace * const vm = vmspace_kernel();
272 1.58 matt int error;
273 1.58 matt
274 1.58 matt KASSERT(cookie != NULL);
275 1.58 matt KASSERT(cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE);
276 1.58 matt
277 1.58 matt /*
278 1.58 matt * Allocate bounce pages, if necessary.
279 1.58 matt */
280 1.58 matt if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
281 1.58 matt error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
282 1.58 matt if (error)
283 1.100 skrll return error;
284 1.58 matt }
285 1.58 matt
286 1.58 matt /*
287 1.58 matt * Cache a pointer to the caller's buffer and load the DMA map
288 1.58 matt * with the bounce buffer.
289 1.58 matt */
290 1.58 matt cookie->id_origbuf = buf;
291 1.58 matt cookie->id_origbuflen = buflen;
292 1.58 matt error = _bus_dmamap_load_buffer(t, map, cookie->id_bouncebuf,
293 1.58 matt buflen, vm, flags);
294 1.58 matt if (error)
295 1.100 skrll return error;
296 1.58 matt
297 1.58 matt STAT_INCR(bounced_loads);
298 1.58 matt map->dm_mapsize = buflen;
299 1.58 matt map->_dm_vmspace = vm;
300 1.58 matt map->_dm_buftype = buftype;
301 1.58 matt
302 1.58 matt /* ...so _bus_dmamap_sync() knows we're bouncing */
303 1.63 matt map->_dm_flags |= _BUS_DMAMAP_IS_BOUNCING;
304 1.58 matt cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
305 1.58 matt return 0;
306 1.58 matt }
307 1.58 matt #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
308 1.58 matt
309 1.41 thorpej /*
310 1.1 chris * Common function for DMA map creation. May be called by bus-specific
311 1.1 chris * DMA map creation functions.
312 1.1 chris */
313 1.1 chris int
314 1.7 thorpej _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
315 1.7 thorpej bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
316 1.1 chris {
317 1.1 chris struct arm32_bus_dmamap *map;
318 1.1 chris void *mapstore;
319 1.120 skrll int error = 0;
320 1.1 chris
321 1.1 chris #ifdef DEBUG_DMA
322 1.103 skrll printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx"
323 1.103 skrll " flags=%x\n", t, size, nsegments, maxsegsz, boundary, flags);
324 1.1 chris #endif /* DEBUG_DMA */
325 1.1 chris
326 1.1 chris /*
327 1.1 chris * Allocate and initialize the DMA map. The end of the map
328 1.1 chris * is a variable-sized array of segments, so we allocate enough
329 1.1 chris * room for them in one shot.
330 1.1 chris *
331 1.1 chris * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
332 1.1 chris * of ALLOCNOW notifies others that we've reserved these resources,
333 1.1 chris * and they are not to be freed.
334 1.1 chris *
335 1.1 chris * The bus_dmamap_t includes one bus_dma_segment_t, hence
336 1.1 chris * the (nsegments - 1).
337 1.1 chris */
338 1.81 matt const size_t mapsize = sizeof(struct arm32_bus_dmamap) +
339 1.1 chris (sizeof(bus_dma_segment_t) * (nsegments - 1));
340 1.81 matt const int zallocflags = (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
341 1.81 matt if ((mapstore = kmem_intr_zalloc(mapsize, zallocflags)) == NULL)
342 1.100 skrll return ENOMEM;
343 1.1 chris
344 1.1 chris map = (struct arm32_bus_dmamap *)mapstore;
345 1.1 chris map->_dm_size = size;
346 1.1 chris map->_dm_segcnt = nsegments;
347 1.43 matt map->_dm_maxmaxsegsz = maxsegsz;
348 1.1 chris map->_dm_boundary = boundary;
349 1.1 chris map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
350 1.14 thorpej map->_dm_origbuf = NULL;
351 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
352 1.48 yamt map->_dm_vmspace = vmspace_kernel();
353 1.58 matt map->_dm_cookie = NULL;
354 1.43 matt map->dm_maxsegsz = maxsegsz;
355 1.1 chris map->dm_mapsize = 0; /* no valid mappings */
356 1.1 chris map->dm_nsegs = 0;
357 1.1 chris
358 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
359 1.58 matt struct arm32_bus_dma_cookie *cookie;
360 1.58 matt int cookieflags;
361 1.58 matt void *cookiestore;
362 1.58 matt
363 1.58 matt cookieflags = 0;
364 1.58 matt
365 1.58 matt if (t->_may_bounce != NULL) {
366 1.58 matt error = (*t->_may_bounce)(t, map, flags, &cookieflags);
367 1.58 matt if (error != 0)
368 1.58 matt goto out;
369 1.58 matt }
370 1.58 matt
371 1.127 jmcneill if (t->_ranges != NULL) {
372 1.127 jmcneill /*
373 1.127 jmcneill * If ranges are defined, we may have to bounce. The only
374 1.127 jmcneill * exception is if there is exactly one range that covers
375 1.127 jmcneill * all of physical memory.
376 1.127 jmcneill */
377 1.127 jmcneill switch (t->_nranges) {
378 1.127 jmcneill case 1:
379 1.127 jmcneill if (t->_ranges[0].dr_sysbase == 0 &&
380 1.127 jmcneill t->_ranges[0].dr_len == UINTPTR_MAX) {
381 1.127 jmcneill break;
382 1.127 jmcneill }
383 1.127 jmcneill /* FALLTHROUGH */
384 1.127 jmcneill default:
385 1.127 jmcneill cookieflags |= _BUS_DMA_MIGHT_NEED_BOUNCE;
386 1.127 jmcneill }
387 1.127 jmcneill }
388 1.58 matt
389 1.58 matt if ((cookieflags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0) {
390 1.58 matt STAT_INCR(creates);
391 1.98 msaitoh *dmamp = map;
392 1.58 matt return 0;
393 1.58 matt }
394 1.58 matt
395 1.81 matt const size_t cookiesize = sizeof(struct arm32_bus_dma_cookie) +
396 1.58 matt (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
397 1.58 matt
398 1.58 matt /*
399 1.58 matt * Allocate our cookie.
400 1.58 matt */
401 1.81 matt if ((cookiestore = kmem_intr_zalloc(cookiesize, zallocflags)) == NULL) {
402 1.58 matt error = ENOMEM;
403 1.58 matt goto out;
404 1.58 matt }
405 1.58 matt cookie = (struct arm32_bus_dma_cookie *)cookiestore;
406 1.58 matt cookie->id_flags = cookieflags;
407 1.58 matt map->_dm_cookie = cookie;
408 1.58 matt STAT_INCR(bounced_creates);
409 1.58 matt
410 1.58 matt error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
411 1.58 matt out:
412 1.58 matt if (error)
413 1.58 matt _bus_dmamap_destroy(t, map);
414 1.98 msaitoh else
415 1.98 msaitoh *dmamp = map;
416 1.58 matt #else
417 1.98 msaitoh *dmamp = map;
418 1.58 matt STAT_INCR(creates);
419 1.58 matt #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
420 1.1 chris #ifdef DEBUG_DMA
421 1.1 chris printf("dmamap_create:map=%p\n", map);
422 1.1 chris #endif /* DEBUG_DMA */
423 1.119 maya return error;
424 1.1 chris }
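/*
 * A minimal usage sketch of this entry point as a driver sees it via
 * the documented bus_dmamap_create(9) wrapper; the softc fields and
 * transfer size here are hypothetical:
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_WAITOK, &sc->sc_dmamap);
 *	if (error)
 *		return error;
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
 */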
425 1.1 chris
426 1.1 chris /*
427 1.1 chris * Common function for DMA map destruction. May be called by bus-specific
428 1.1 chris * DMA map destruction functions.
429 1.1 chris */
430 1.1 chris void
431 1.7 thorpej _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
432 1.1 chris {
433 1.1 chris
434 1.1 chris #ifdef DEBUG_DMA
435 1.1 chris printf("dmamap_destroy: t=%p map=%p\n", t, map);
436 1.1 chris #endif /* DEBUG_DMA */
437 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
438 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
439 1.13 briggs
440 1.13 briggs /*
441 1.58 matt * Free any bounce pages this map might hold.
442 1.13 briggs */
443 1.58 matt if (cookie != NULL) {
444 1.81 matt const size_t cookiesize = sizeof(struct arm32_bus_dma_cookie) +
445 1.81 matt (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
446 1.81 matt
447 1.58 matt if (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
448 1.58 matt STAT_INCR(bounced_unloads);
449 1.58 matt map->dm_nsegs = 0;
450 1.58 matt if (cookie->id_flags & _BUS_DMA_HAS_BOUNCE)
451 1.58 matt _bus_dma_free_bouncebuf(t, map);
452 1.58 matt STAT_INCR(bounced_destroys);
453 1.81 matt kmem_intr_free(cookie, cookiesize);
454 1.58 matt } else
455 1.58 matt #endif
456 1.58 matt STAT_INCR(destroys);
457 1.58 matt
458 1.58 matt if (map->dm_nsegs > 0)
459 1.58 matt STAT_INCR(unloads);
460 1.13 briggs
461 1.81 matt const size_t mapsize = sizeof(struct arm32_bus_dmamap) +
462 1.81 matt (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
463 1.81 matt kmem_intr_free(map, mapsize);
464 1.1 chris }
465 1.1 chris
466 1.1 chris /*
467 1.1 chris * Common function for loading a DMA map with a linear buffer. May
468 1.1 chris * be called by bus-specific DMA map load functions.
469 1.1 chris */
470 1.1 chris int
471 1.7 thorpej _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
472 1.7 thorpej bus_size_t buflen, struct proc *p, int flags)
473 1.1 chris {
474 1.58 matt struct vmspace *vm;
475 1.41 thorpej int error;
476 1.1 chris
477 1.1 chris #ifdef DEBUG_DMA
478 1.1 chris printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
479 1.1 chris t, map, buf, buflen, p, flags);
480 1.1 chris #endif /* DEBUG_DMA */
481 1.1 chris
482 1.58 matt if (map->dm_nsegs > 0) {
483 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
484 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
485 1.58 matt if (cookie != NULL) {
486 1.58 matt if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
487 1.58 matt STAT_INCR(bounced_unloads);
488 1.58 matt cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
489 1.63 matt map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
490 1.58 matt }
491 1.58 matt } else
492 1.58 matt #endif
493 1.58 matt STAT_INCR(unloads);
494 1.58 matt }
495 1.58 matt
496 1.1 chris /*
497 1.1 chris * Make sure that on error condition we return "no valid mappings".
498 1.1 chris */
499 1.1 chris map->dm_mapsize = 0;
500 1.1 chris map->dm_nsegs = 0;
501 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
502 1.74 matt KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
503 1.74 matt "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
504 1.74 matt map->dm_maxsegsz, map->_dm_maxmaxsegsz);
505 1.1 chris
506 1.1 chris if (buflen > map->_dm_size)
507 1.100 skrll return EINVAL;
508 1.1 chris
509 1.48 yamt if (p != NULL) {
510 1.48 yamt vm = p->p_vmspace;
511 1.48 yamt } else {
512 1.48 yamt vm = vmspace_kernel();
513 1.48 yamt }
514 1.48 yamt
515 1.17 thorpej /* _bus_dmamap_load_buffer() clears this if we're not... */
516 1.58 matt map->_dm_flags |= _BUS_DMAMAP_COHERENT;
517 1.17 thorpej
518 1.48 yamt error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
519 1.1 chris if (error == 0) {
520 1.1 chris map->dm_mapsize = buflen;
521 1.58 matt map->_dm_vmspace = vm;
522 1.14 thorpej map->_dm_origbuf = buf;
523 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_LINEAR;
524 1.81 matt if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
525 1.81 matt STAT_INCR(coherent_loads);
526 1.81 matt } else {
527 1.81 matt STAT_INCR(loads);
528 1.81 matt }
529 1.58 matt return 0;
530 1.1 chris }
531 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
532 1.58 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
533 1.58 matt if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
534 1.58 matt error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
535 1.58 matt _BUS_DMA_BUFTYPE_LINEAR, flags);
536 1.95 skrll }
537 1.95 skrll #endif
538 1.100 skrll return error;
539 1.1 chris }
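/*
 * A minimal load/sync/unload sketch for a device write (memory to
 * device); sc_dmat, sc_dmamap, buf and len are hypothetical driver
 * state:
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
 *	    NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
 *	if (error == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *		    BUS_DMASYNC_PREWRITE);
 *		... start the transfer ...
 *		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *		    BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 *	}
 */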
540 1.1 chris
541 1.1 chris /*
542 1.1 chris * Like _bus_dmamap_load(), but for mbufs.
543 1.1 chris */
544 1.1 chris int
545 1.7 thorpej _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
546 1.7 thorpej int flags)
547 1.1 chris {
548 1.105 skrll struct mbuf *m;
549 1.41 thorpej int error;
550 1.1 chris
551 1.1 chris #ifdef DEBUG_DMA
552 1.1 chris printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
553 1.1 chris t, map, m0, flags);
554 1.1 chris #endif /* DEBUG_DMA */
555 1.1 chris
556 1.58 matt if (map->dm_nsegs > 0) {
557 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
558 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
559 1.58 matt if (cookie != NULL) {
560 1.58 matt if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
561 1.58 matt STAT_INCR(bounced_unloads);
562 1.58 matt cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
563 1.63 matt map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
564 1.58 matt }
565 1.58 matt } else
566 1.58 matt #endif
567 1.58 matt STAT_INCR(unloads);
568 1.58 matt }
569 1.58 matt
570 1.1 chris /*
571 1.1 chris * Make sure that on error condition we return "no valid mappings."
572 1.1 chris */
573 1.1 chris map->dm_mapsize = 0;
574 1.1 chris map->dm_nsegs = 0;
575 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
576 1.74 matt KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
577 1.74 matt "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
578 1.74 matt map->dm_maxsegsz, map->_dm_maxmaxsegsz);
579 1.1 chris
580 1.79 matt KASSERT(m0->m_flags & M_PKTHDR);
581 1.1 chris
582 1.1 chris if (m0->m_pkthdr.len > map->_dm_size)
583 1.100 skrll return EINVAL;
584 1.1 chris
585 1.61 matt /* _bus_dmamap_load_paddr() clears this if we're not... */
586 1.61 matt map->_dm_flags |= _BUS_DMAMAP_COHERENT;
587 1.17 thorpej
588 1.1 chris error = 0;
589 1.1 chris for (m = m0; m != NULL && error == 0; m = m->m_next) {
590 1.41 thorpej int offset;
591 1.41 thorpej int remainbytes;
592 1.41 thorpej const struct vm_page * const *pgs;
593 1.41 thorpej paddr_t paddr;
594 1.41 thorpej int size;
595 1.41 thorpej
596 1.28 thorpej if (m->m_len == 0)
597 1.28 thorpej continue;
598 1.57 matt /*
599 1.57 matt 		 * Don't allow DMA reads into read-only mbufs.
600 1.57 matt */
601 1.57 matt if (M_ROMAP(m) && (flags & BUS_DMA_READ)) {
602 1.57 matt error = EFAULT;
603 1.57 matt break;
604 1.57 matt }
605 1.108 maxv switch (m->m_flags & (M_EXT|M_EXT_CLUSTER|M_EXT_PAGES)) {
606 1.108 maxv case M_EXT|M_EXT_CLUSTER:
607 1.28 thorpej /* XXX KDASSERT */
608 1.28 thorpej KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
609 1.41 thorpej paddr = m->m_ext.ext_paddr +
610 1.28 thorpej (m->m_data - m->m_ext.ext_buf);
611 1.41 thorpej size = m->m_len;
612 1.61 matt error = _bus_dmamap_load_paddr(t, map, paddr, size,
613 1.61 matt false);
614 1.41 thorpej break;
615 1.95 skrll
616 1.41 thorpej case M_EXT|M_EXT_PAGES:
617 1.41 thorpej KASSERT(m->m_ext.ext_buf <= m->m_data);
618 1.41 thorpej KASSERT(m->m_data <=
619 1.41 thorpej m->m_ext.ext_buf + m->m_ext.ext_size);
620 1.95 skrll
621 1.41 thorpej offset = (vaddr_t)m->m_data -
622 1.41 thorpej trunc_page((vaddr_t)m->m_ext.ext_buf);
623 1.41 thorpej remainbytes = m->m_len;
624 1.41 thorpej
625 1.41 thorpej /* skip uninteresting pages */
626 1.41 thorpej pgs = (const struct vm_page * const *)
627 1.41 thorpej m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);
628 1.95 skrll
629 1.41 thorpej offset &= PAGE_MASK; /* offset in the first page */
630 1.41 thorpej
631 1.41 thorpej /* load each page */
632 1.41 thorpej while (remainbytes > 0) {
633 1.41 thorpej const struct vm_page *pg;
634 1.41 thorpej
635 1.41 thorpej size = MIN(remainbytes, PAGE_SIZE - offset);
636 1.41 thorpej
637 1.41 thorpej pg = *pgs++;
638 1.41 thorpej KASSERT(pg);
639 1.41 thorpej paddr = VM_PAGE_TO_PHYS(pg) + offset;
640 1.41 thorpej
641 1.41 thorpej error = _bus_dmamap_load_paddr(t, map,
642 1.61 matt paddr, size, false);
643 1.41 thorpej if (error)
644 1.28 thorpej break;
645 1.41 thorpej offset = 0;
646 1.41 thorpej remainbytes -= size;
647 1.28 thorpej }
648 1.28 thorpej break;
649 1.28 thorpej
650 1.28 thorpej case 0:
651 1.41 thorpej paddr = m->m_paddr + M_BUFOFFSET(m) +
652 1.28 thorpej (m->m_data - M_BUFADDR(m));
653 1.41 thorpej size = m->m_len;
654 1.61 matt error = _bus_dmamap_load_paddr(t, map, paddr, size,
655 1.61 matt false);
656 1.41 thorpej break;
657 1.28 thorpej
658 1.28 thorpej default:
659 1.28 thorpej error = _bus_dmamap_load_buffer(t, map, m->m_data,
660 1.48 yamt m->m_len, vmspace_kernel(), flags);
661 1.28 thorpej }
662 1.1 chris }
663 1.1 chris if (error == 0) {
664 1.1 chris map->dm_mapsize = m0->m_pkthdr.len;
665 1.14 thorpej map->_dm_origbuf = m0;
666 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_MBUF;
667 1.48 yamt map->_dm_vmspace = vmspace_kernel(); /* always kernel */
668 1.81 matt if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
669 1.81 matt STAT_INCR(coherent_loads);
670 1.81 matt } else {
671 1.81 matt STAT_INCR(loads);
672 1.81 matt }
673 1.58 matt return 0;
674 1.1 chris }
675 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
676 1.58 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
677 1.58 matt if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
678 1.58 matt error = _bus_dma_load_bouncebuf(t, map, m0, m0->m_pkthdr.len,
679 1.58 matt _BUS_DMA_BUFTYPE_MBUF, flags);
680 1.95 skrll }
681 1.95 skrll #endif
682 1.100 skrll return error;
683 1.1 chris }
684 1.1 chris
685 1.1 chris /*
686 1.1 chris * Like _bus_dmamap_load(), but for uios.
687 1.1 chris */
688 1.1 chris int
689 1.7 thorpej _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
690 1.7 thorpej int flags)
691 1.1 chris {
692 1.1 chris bus_size_t minlen, resid;
693 1.1 chris struct iovec *iov;
694 1.50 christos void *addr;
695 1.105 skrll int i, error;
696 1.1 chris
697 1.1 chris /*
698 1.1 chris * Make sure that on error condition we return "no valid mappings."
699 1.1 chris */
700 1.1 chris map->dm_mapsize = 0;
701 1.1 chris map->dm_nsegs = 0;
702 1.74 matt KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
703 1.74 matt "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
704 1.74 matt map->dm_maxsegsz, map->_dm_maxmaxsegsz);
705 1.1 chris
706 1.1 chris resid = uio->uio_resid;
707 1.1 chris iov = uio->uio_iov;
708 1.1 chris
709 1.17 thorpej /* _bus_dmamap_load_buffer() clears this if we're not... */
710 1.58 matt map->_dm_flags |= _BUS_DMAMAP_COHERENT;
711 1.17 thorpej
712 1.1 chris error = 0;
713 1.1 chris for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
714 1.1 chris /*
715 1.1 chris * Now at the first iovec to load. Load each iovec
716 1.1 chris * until we have exhausted the residual count.
717 1.1 chris */
718 1.1 chris minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
719 1.50 christos addr = (void *)iov[i].iov_base;
720 1.1 chris
721 1.1 chris error = _bus_dmamap_load_buffer(t, map, addr, minlen,
722 1.48 yamt uio->uio_vmspace, flags);
723 1.1 chris
724 1.1 chris resid -= minlen;
725 1.1 chris }
726 1.1 chris if (error == 0) {
727 1.1 chris map->dm_mapsize = uio->uio_resid;
728 1.14 thorpej map->_dm_origbuf = uio;
729 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_UIO;
730 1.48 yamt map->_dm_vmspace = uio->uio_vmspace;
731 1.81 matt if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
732 1.81 matt STAT_INCR(coherent_loads);
733 1.81 matt } else {
734 1.81 matt STAT_INCR(loads);
735 1.81 matt }
736 1.1 chris }
737 1.100 skrll return error;
738 1.1 chris }
739 1.1 chris
740 1.1 chris /*
741 1.1 chris * Like _bus_dmamap_load(), but for raw memory allocated with
742 1.1 chris * bus_dmamem_alloc().
743 1.1 chris */
744 1.1 chris int
745 1.7 thorpej _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
746 1.94 jmcneill bus_dma_segment_t *segs, int nsegs, bus_size_t size0, int flags)
747 1.1 chris {
748 1.1 chris
749 1.94 jmcneill bus_size_t size;
750 1.94 jmcneill int i, error = 0;
751 1.94 jmcneill
752 1.94 jmcneill /*
753 1.94 jmcneill * Make sure that on error conditions we return "no valid mappings."
754 1.94 jmcneill */
755 1.94 jmcneill map->dm_mapsize = 0;
756 1.94 jmcneill map->dm_nsegs = 0;
757 1.94 jmcneill KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
758 1.94 jmcneill
759 1.94 jmcneill if (size0 > map->_dm_size)
760 1.94 jmcneill return EINVAL;
761 1.94 jmcneill
762 1.94 jmcneill for (i = 0, size = size0; i < nsegs && size > 0; i++) {
763 1.94 jmcneill bus_dma_segment_t *ds = &segs[i];
764 1.94 jmcneill bus_size_t sgsize;
765 1.94 jmcneill
766 1.94 jmcneill sgsize = MIN(ds->ds_len, size);
767 1.94 jmcneill if (sgsize == 0)
768 1.94 jmcneill continue;
769 1.116 jmcneill const bool coherent =
770 1.116 jmcneill (ds->_ds_flags & _BUS_DMAMAP_COHERENT) != 0;
771 1.94 jmcneill error = _bus_dmamap_load_paddr(t, map, ds->ds_addr,
772 1.116 jmcneill sgsize, coherent);
773 1.94 jmcneill if (error != 0)
774 1.94 jmcneill break;
775 1.94 jmcneill size -= sgsize;
776 1.94 jmcneill }
777 1.94 jmcneill
778 1.94 jmcneill if (error != 0) {
779 1.94 jmcneill map->dm_mapsize = 0;
780 1.94 jmcneill map->dm_nsegs = 0;
781 1.94 jmcneill return error;
782 1.94 jmcneill }
783 1.94 jmcneill
784 1.94 jmcneill /* XXX TBD bounce */
785 1.94 jmcneill
786 1.94 jmcneill map->dm_mapsize = size0;
787 1.116 jmcneill map->_dm_origbuf = NULL;
788 1.116 jmcneill map->_dm_buftype = _BUS_DMA_BUFTYPE_RAW;
789 1.116 jmcneill map->_dm_vmspace = NULL;
790 1.94 jmcneill return 0;
791 1.1 chris }
792 1.1 chris
793 1.1 chris /*
794 1.1 chris * Common function for unloading a DMA map. May be called by
795 1.1 chris * bus-specific DMA map unload functions.
796 1.1 chris */
797 1.1 chris void
798 1.7 thorpej _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
799 1.1 chris {
800 1.1 chris
801 1.1 chris #ifdef DEBUG_DMA
802 1.1 chris printf("dmamap_unload: t=%p map=%p\n", t, map);
803 1.1 chris #endif /* DEBUG_DMA */
804 1.1 chris
805 1.1 chris /*
806 1.1 chris * No resources to free; just mark the mappings as
807 1.1 chris * invalid.
808 1.1 chris */
809 1.1 chris map->dm_mapsize = 0;
810 1.1 chris map->dm_nsegs = 0;
811 1.14 thorpej map->_dm_origbuf = NULL;
812 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
813 1.48 yamt map->_dm_vmspace = NULL;
814 1.1 chris }
815 1.1 chris
816 1.57 matt static void
817 1.103 skrll _bus_dmamap_sync_segment(vaddr_t va, paddr_t pa, vsize_t len, int ops,
818 1.103 skrll bool readonly_p)
819 1.14 thorpej {
820 1.106 skrll
821 1.115 skrll #if defined(ARM_MMU_EXTENDED)
822 1.106 skrll /*
823 1.106 skrll * No optimisations are available for readonly mbufs on armv6+, so
824 1.106 skrll * assume it's not readonly from here on.
825 1.106 skrll *
826 1.106 skrll * See the comment in _bus_dmamap_sync_mbuf
827 1.106 skrll */
828 1.106 skrll readonly_p = false;
829 1.106 skrll #endif
830 1.106 skrll
831 1.86 matt KASSERTMSG((va & PAGE_MASK) == (pa & PAGE_MASK),
832 1.86 matt "va %#lx pa %#lx", va, pa);
833 1.62 matt #if 0
834 1.62 matt printf("sync_segment: va=%#lx pa=%#lx len=%#lx ops=%#x ro=%d\n",
835 1.62 matt va, pa, len, ops, readonly_p);
836 1.62 matt #endif
837 1.14 thorpej
838 1.14 thorpej switch (ops) {
839 1.14 thorpej case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
840 1.57 matt if (!readonly_p) {
841 1.76 matt STAT_INCR(sync_prereadwrite);
842 1.57 matt cpu_dcache_wbinv_range(va, len);
843 1.57 matt cpu_sdcache_wbinv_range(va, pa, len);
844 1.57 matt break;
845 1.57 matt }
846 1.57 matt /* FALLTHROUGH */
847 1.14 thorpej
848 1.57 matt case BUS_DMASYNC_PREREAD: {
849 1.59 matt const size_t line_size = arm_dcache_align;
850 1.59 matt const size_t line_mask = arm_dcache_align_mask;
851 1.59 matt vsize_t misalignment = va & line_mask;
852 1.57 matt if (misalignment) {
853 1.59 matt va -= misalignment;
854 1.59 matt pa -= misalignment;
855 1.59 matt len += misalignment;
856 1.77 matt STAT_INCR(sync_preread_begin);
857 1.59 matt cpu_dcache_wbinv_range(va, line_size);
858 1.59 matt cpu_sdcache_wbinv_range(va, pa, line_size);
859 1.59 matt if (len <= line_size)
860 1.57 matt break;
861 1.59 matt va += line_size;
862 1.59 matt pa += line_size;
863 1.59 matt len -= line_size;
864 1.57 matt }
865 1.59 matt misalignment = len & line_mask;
866 1.57 matt len -= misalignment;
867 1.65 matt if (len > 0) {
868 1.77 matt STAT_INCR(sync_preread);
869 1.65 matt cpu_dcache_inv_range(va, len);
870 1.65 matt cpu_sdcache_inv_range(va, pa, len);
871 1.65 matt }
872 1.57 matt if (misalignment) {
873 1.57 matt va += len;
874 1.57 matt pa += len;
875 1.77 matt STAT_INCR(sync_preread_tail);
876 1.59 matt cpu_dcache_wbinv_range(va, line_size);
877 1.59 matt cpu_sdcache_wbinv_range(va, pa, line_size);
878 1.57 matt }
879 1.14 thorpej break;
880 1.57 matt }
881 1.14 thorpej
882 1.14 thorpej case BUS_DMASYNC_PREWRITE:
883 1.76 matt STAT_INCR(sync_prewrite);
884 1.57 matt cpu_dcache_wb_range(va, len);
885 1.57 matt cpu_sdcache_wb_range(va, pa, len);
886 1.14 thorpej break;
887 1.67 matt
888 1.115 skrll #if defined(CPU_CORTEX) || defined(CPU_ARMV8)
889 1.115 skrll
890 1.67 matt /*
891 1.67 matt * Cortex CPUs can do speculative loads so we need to clean the cache
892 1.67 matt * after a DMA read to deal with any speculatively loaded cache lines.
893 1.67 matt * Since these can't be dirty, we can just invalidate them and don't
894 1.67 matt * have to worry about having to write back their contents.
895 1.67 matt */
896 1.67 matt case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
897 1.76 matt STAT_INCR(sync_postreadwrite);
898 1.76 matt cpu_dcache_inv_range(va, len);
899 1.76 matt cpu_sdcache_inv_range(va, pa, len);
900 1.76 matt break;
901 1.126 skrll
902 1.67 matt case BUS_DMASYNC_POSTREAD:
903 1.76 matt STAT_INCR(sync_postread);
904 1.67 matt cpu_dcache_inv_range(va, len);
905 1.67 matt cpu_sdcache_inv_range(va, pa, len);
906 1.67 matt break;
907 1.67 matt #endif
908 1.14 thorpej }
909 1.14 thorpej }
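/*
 * Example of the PREREAD alignment handling above (illustrative
 * numbers): with 32-byte cache lines, a sync of va = 0x1008,
 * len = 0x50 first write-back-invalidates the partial leading line at
 * 0x1000 (leaving va = 0x1020, len = 0x38), purely invalidates the
 * aligned 0x20-byte middle, then write-back-invalidates one more line
 * for the trailing 0x18 bytes, so data unrelated to the DMA that
 * shares the partial lines is not destroyed.
 */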
910 1.14 thorpej
911 1.47 perry static inline void
912 1.57 matt _bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
913 1.14 thorpej bus_size_t len, int ops)
914 1.14 thorpej {
915 1.57 matt bus_dma_segment_t *ds = map->dm_segs;
916 1.57 matt vaddr_t va = (vaddr_t) map->_dm_origbuf;
917 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
918 1.63 matt if (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING) {
919 1.63 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
920 1.58 matt va = (vaddr_t) cookie->id_bouncebuf;
921 1.58 matt }
922 1.58 matt #endif
923 1.57 matt
924 1.57 matt while (len > 0) {
925 1.57 matt while (offset >= ds->ds_len) {
926 1.57 matt offset -= ds->ds_len;
927 1.57 matt va += ds->ds_len;
928 1.57 matt ds++;
929 1.57 matt }
930 1.57 matt
931 1.59 matt paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + offset);
932 1.112 riastrad size_t seglen = uimin(len, ds->ds_len - offset);
933 1.57 matt
934 1.61 matt if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
935 1.61 matt _bus_dmamap_sync_segment(va + offset, pa, seglen, ops,
936 1.67 matt false);
937 1.57 matt
938 1.57 matt offset += seglen;
939 1.57 matt len -= seglen;
940 1.57 matt }
941 1.57 matt }
942 1.57 matt
943 1.57 matt static inline void
944 1.57 matt _bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t offset,
945 1.57 matt bus_size_t len, int ops)
946 1.57 matt {
947 1.57 matt bus_dma_segment_t *ds = map->dm_segs;
948 1.57 matt struct mbuf *m = map->_dm_origbuf;
949 1.57 matt bus_size_t voff = offset;
950 1.57 matt bus_size_t ds_off = offset;
951 1.57 matt
952 1.57 matt while (len > 0) {
953 1.57 matt /* Find the current dma segment */
954 1.57 matt while (ds_off >= ds->ds_len) {
955 1.57 matt ds_off -= ds->ds_len;
956 1.57 matt ds++;
957 1.57 matt }
958 1.57 matt /* Find the current mbuf. */
959 1.57 matt while (voff >= m->m_len) {
960 1.57 matt voff -= m->m_len;
961 1.57 matt m = m->m_next;
962 1.14 thorpej }
963 1.14 thorpej
964 1.14 thorpej /*
965 1.14 thorpej * Now at the first mbuf to sync; nail each one until
966 1.14 thorpej * we have exhausted the length.
967 1.14 thorpej */
968 1.112 riastrad vsize_t seglen = uimin(len, uimin(m->m_len - voff, ds->ds_len - ds_off));
969 1.57 matt vaddr_t va = mtod(m, vaddr_t) + voff;
970 1.59 matt paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);
971 1.14 thorpej
972 1.28 thorpej /*
973 1.28 thorpej * We can save a lot of work here if we know the mapping
974 1.93 matt * is read-only at the MMU and we aren't using the armv6+
975 1.93 matt * MMU:
976 1.28 thorpej *
977 1.28 thorpej * If a mapping is read-only, no dirty cache blocks will
978 1.28 thorpej * exist for it. If a writable mapping was made read-only,
979 1.28 thorpej * we know any dirty cache lines for the range will have
980 1.28 thorpej * been cleaned for us already. Therefore, if the upper
981 1.28 thorpej * layer can tell us we have a read-only mapping, we can
982 1.28 thorpej * skip all cache cleaning.
983 1.28 thorpej *
984 1.28 thorpej * NOTE: This only works if we know the pmap cleans pages
985 1.28 thorpej * before making a read-write -> read-only transition. If
986 1.28 thorpej * this ever becomes non-true (e.g. Physically Indexed
987 1.28 thorpej * cache), this will have to be revisited.
988 1.28 thorpej */
989 1.14 thorpej
990 1.92 matt if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0) {
991 1.92 matt /*
992 1.92 matt * If we are doing preread (DMAing into the mbuf),
993 1.95 skrll 			 * this mbuf had better not be read-only.
994 1.92 matt */
995 1.92 matt KASSERT(!(ops & BUS_DMASYNC_PREREAD) || !M_ROMAP(m));
996 1.61 matt _bus_dmamap_sync_segment(va, pa, seglen, ops,
997 1.61 matt M_ROMAP(m));
998 1.92 matt }
999 1.57 matt voff += seglen;
1000 1.57 matt ds_off += seglen;
1001 1.57 matt len -= seglen;
1002 1.14 thorpej }
1003 1.14 thorpej }
1004 1.14 thorpej
1005 1.47 perry static inline void
1006 1.14 thorpej _bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
1007 1.14 thorpej bus_size_t len, int ops)
1008 1.14 thorpej {
1009 1.57 matt bus_dma_segment_t *ds = map->dm_segs;
1010 1.14 thorpej struct uio *uio = map->_dm_origbuf;
1011 1.57 matt struct iovec *iov = uio->uio_iov;
1012 1.57 matt bus_size_t voff = offset;
1013 1.57 matt bus_size_t ds_off = offset;
1014 1.57 matt
1015 1.57 matt while (len > 0) {
1016 1.57 matt /* Find the current dma segment */
1017 1.57 matt while (ds_off >= ds->ds_len) {
1018 1.57 matt ds_off -= ds->ds_len;
1019 1.57 matt ds++;
1020 1.57 matt }
1021 1.14 thorpej
1022 1.57 matt /* Find the current iovec. */
1023 1.57 matt while (voff >= iov->iov_len) {
1024 1.57 matt voff -= iov->iov_len;
1025 1.57 matt iov++;
1026 1.14 thorpej }
1027 1.14 thorpej
1028 1.14 thorpej /*
1029 1.14 thorpej * Now at the first iovec to sync; nail each one until
1030 1.14 thorpej * we have exhausted the length.
1031 1.14 thorpej */
1032 1.112 riastrad vsize_t seglen = uimin(len, uimin(iov->iov_len - voff, ds->ds_len - ds_off));
1033 1.57 matt vaddr_t va = (vaddr_t) iov->iov_base + voff;
1034 1.59 matt paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);
1035 1.57 matt
1036 1.61 matt if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
1037 1.61 matt _bus_dmamap_sync_segment(va, pa, seglen, ops, false);
1038 1.57 matt
1039 1.57 matt voff += seglen;
1040 1.57 matt ds_off += seglen;
1041 1.57 matt len -= seglen;
1042 1.14 thorpej }
1043 1.14 thorpej }
1044 1.14 thorpej
1045 1.1 chris /*
1046 1.1 chris * Common function for DMA map synchronization. May be called
1047 1.1 chris * by bus-specific DMA map synchronization functions.
1048 1.8 thorpej *
1049 1.8 thorpej * XXX Should have separate versions for write-through vs.
1050 1.8 thorpej * XXX write-back caches. We currently assume write-back
1051 1.8 thorpej * XXX here, which is not as efficient as it could be for
1052 1.8 thorpej * XXX the write-through case.
1053 1.1 chris */
1054 1.1 chris void
1055 1.7 thorpej _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
1056 1.7 thorpej bus_size_t len, int ops)
1057 1.1 chris {
1058 1.1 chris #ifdef DEBUG_DMA
1059 1.1 chris printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
1060 1.1 chris t, map, offset, len, ops);
1061 1.1 chris #endif /* DEBUG_DMA */
1062 1.1 chris
1063 1.8 thorpej /*
1064 1.8 thorpej * Mixing of PRE and POST operations is not allowed.
1065 1.8 thorpej */
1066 1.8 thorpej if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
1067 1.8 thorpej (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
1068 1.126 skrll panic("%s: mix PRE and POST", __func__);
1069 1.8 thorpej
1070 1.79 matt KASSERTMSG(offset < map->dm_mapsize,
1071 1.79 matt "offset %lu mapsize %lu",
1072 1.79 matt offset, map->dm_mapsize);
1073 1.79 matt KASSERTMSG(len > 0 && offset + len <= map->dm_mapsize,
1074 1.79 matt "len %lu offset %lu mapsize %lu",
1075 1.79 matt len, offset, map->dm_mapsize);
1076 1.8 thorpej
1077 1.8 thorpej /*
1078 1.8 thorpej * For a virtually-indexed write-back cache, we need
1079 1.8 thorpej * to do the following things:
1080 1.8 thorpej *
1081 1.8 thorpej * PREREAD -- Invalidate the D-cache. We do this
1082 1.8 thorpej * here in case a write-back is required by the back-end.
1083 1.8 thorpej *
1084 1.8 thorpej * PREWRITE -- Write-back the D-cache. Note that if
1085 1.8 thorpej * we are doing a PREREAD|PREWRITE, we can collapse
1086 1.8 thorpej * the whole thing into a single Wb-Inv.
1087 1.8 thorpej *
1088 1.67 matt * POSTREAD -- Re-invalidate the D-cache in case speculative
1089 1.67 matt * memory accesses caused cachelines to become valid with now
1090 1.67 matt * invalid data.
1091 1.8 thorpej *
1092 1.8 thorpej * POSTWRITE -- Nothing.
1093 1.8 thorpej */
1094 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1095 1.74 matt const bool bouncing = (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING);
1096 1.63 matt #else
1097 1.63 matt const bool bouncing = false;
1098 1.58 matt #endif
1099 1.8 thorpej
1100 1.58 matt const int pre_ops = ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1101 1.115 skrll #if defined(CPU_CORTEX) || defined(CPU_ARMV8)
1102 1.67 matt const int post_ops = ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1103 1.67 matt #else
1104 1.67 matt const int post_ops = 0;
1105 1.67 matt #endif
1106 1.115 skrll if (pre_ops == 0 && post_ops == 0)
1107 1.115 skrll return;
1108 1.115 skrll
1109 1.115 skrll if (post_ops == BUS_DMASYNC_POSTWRITE) {
1110 1.115 skrll KASSERT(pre_ops == 0);
1111 1.115 skrll STAT_INCR(sync_postwrite);
1112 1.115 skrll return;
1113 1.61 matt }
1114 1.115 skrll
1115 1.74 matt KASSERTMSG(bouncing || pre_ops != 0 || (post_ops & BUS_DMASYNC_POSTREAD),
1116 1.74 matt "pre_ops %#x post_ops %#x", pre_ops, post_ops);
1117 1.115 skrll
1118 1.58 matt if (bouncing && (ops & BUS_DMASYNC_PREWRITE)) {
1119 1.63 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
1120 1.58 matt STAT_INCR(write_bounces);
1121 1.58 matt char * const dataptr = (char *)cookie->id_bouncebuf + offset;
1122 1.58 matt /*
1123 1.58 matt * Copy the caller's buffer to the bounce buffer.
1124 1.58 matt */
1125 1.58 matt switch (map->_dm_buftype) {
1126 1.58 matt case _BUS_DMA_BUFTYPE_LINEAR:
1127 1.58 matt memcpy(dataptr, cookie->id_origlinearbuf + offset, len);
1128 1.58 matt break;
1129 1.126 skrll
1130 1.58 matt case _BUS_DMA_BUFTYPE_MBUF:
1131 1.58 matt m_copydata(cookie->id_origmbuf, offset, len, dataptr);
1132 1.58 matt break;
1133 1.126 skrll
1134 1.58 matt case _BUS_DMA_BUFTYPE_UIO:
1135 1.126 skrll _bus_dma_uiomove(dataptr, cookie->id_origuio, len,
1136 1.126 skrll UIO_WRITE);
1137 1.58 matt break;
1138 1.126 skrll
1139 1.58 matt #ifdef DIAGNOSTIC
1140 1.58 matt case _BUS_DMA_BUFTYPE_RAW:
1141 1.126 skrll panic("%s:(pre): _BUS_DMA_BUFTYPE_RAW", __func__);
1142 1.58 matt break;
1143 1.58 matt
1144 1.58 matt case _BUS_DMA_BUFTYPE_INVALID:
1145 1.126 skrll panic("%s(pre): _BUS_DMA_BUFTYPE_INVALID", __func__);
1146 1.58 matt break;
1147 1.58 matt
1148 1.58 matt default:
1149 1.126 skrll panic("%s(pre): map %p: unknown buffer type %d\n",
1150 1.126 skrll __func__, map, map->_dm_buftype);
1151 1.58 matt break;
1152 1.58 matt #endif /* DIAGNOSTIC */
1153 1.58 matt }
1154 1.58 matt }
1155 1.58 matt
1156 1.115 skrll /* Skip cache frobbing if mapping was COHERENT */
1157 1.115 skrll if ((map->_dm_flags & _BUS_DMAMAP_COHERENT)) {
1158 1.125 skrll switch (ops) {
1159 1.125 skrll case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
1160 1.125 skrll STAT_INCR(sync_prereadwrite);
1161 1.125 skrll break;
1162 1.125 skrll
1163 1.125 skrll case BUS_DMASYNC_PREREAD:
1164 1.125 skrll STAT_INCR(sync_preread);
1165 1.125 skrll break;
1166 1.125 skrll
1167 1.125 skrll case BUS_DMASYNC_PREWRITE:
1168 1.125 skrll STAT_INCR(sync_prewrite);
1169 1.125 skrll break;
1170 1.125 skrll
1171 1.125 skrll case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
1172 1.125 skrll STAT_INCR(sync_postreadwrite);
1173 1.125 skrll break;
1174 1.125 skrll
1175 1.125 skrll case BUS_DMASYNC_POSTREAD:
1176 1.125 skrll STAT_INCR(sync_postread);
1177 1.125 skrll break;
1178 1.125 skrll
1179 1.125 skrll 		/* BUS_DMASYNC_POSTWRITE was already handled as a fastpath */
1180 1.125 skrll }
1181 1.115 skrll /*
1182 1.115 skrll 		 * Drain the write buffer for DMA operations:
1183 1.115 skrll * 1) when cpu->device (prewrite)
1184 1.115 skrll * 2) when device->cpu (postread)
1185 1.115 skrll */
1186 1.115 skrll if ((pre_ops & BUS_DMASYNC_PREWRITE) || (post_ops & BUS_DMASYNC_POSTREAD))
1187 1.75 matt cpu_drain_writebuf();
1188 1.115 skrll
1189 1.115 skrll /*
1190 1.115 skrll 		 * The only thing left to do for a COHERENT mapping is to
1191 1.115 skrll 		 * copy from the bounce buffer in the POSTREAD case.
1192 1.115 skrll */
1193 1.115 skrll if (bouncing && (post_ops & BUS_DMASYNC_POSTREAD))
1194 1.115 skrll goto bounce_it;
1195 1.115 skrll
1196 1.17 thorpej return;
1197 1.17 thorpej }
1198 1.8 thorpej
1199 1.128 jmcneill #if !defined(ARM_MMU_EXTENDED)
1200 1.8 thorpej /*
1201 1.38 scw * If the mapping belongs to a non-kernel vmspace, and the
1202 1.38 scw * vmspace has not been active since the last time a full
1203 1.38 scw * cache flush was performed, we don't need to do anything.
1204 1.8 thorpej */
1205 1.48 yamt if (__predict_false(!VMSPACE_IS_KERNEL_P(map->_dm_vmspace) &&
1206 1.48 yamt vm_map_pmap(&map->_dm_vmspace->vm_map)->pm_cstate.cs_cache_d == 0))
1207 1.8 thorpej return;
1208 1.80 matt #endif
1209 1.8 thorpej
1210 1.58 matt int buftype = map->_dm_buftype;
1211 1.58 matt if (bouncing) {
1212 1.58 matt buftype = _BUS_DMA_BUFTYPE_LINEAR;
1213 1.58 matt }
1214 1.58 matt
1215 1.58 matt switch (buftype) {
1216 1.58 matt case _BUS_DMA_BUFTYPE_LINEAR:
1217 1.116 jmcneill case _BUS_DMA_BUFTYPE_RAW:
1218 1.14 thorpej _bus_dmamap_sync_linear(t, map, offset, len, ops);
1219 1.14 thorpej break;
1220 1.14 thorpej
1221 1.58 matt case _BUS_DMA_BUFTYPE_MBUF:
1222 1.14 thorpej _bus_dmamap_sync_mbuf(t, map, offset, len, ops);
1223 1.14 thorpej break;
1224 1.14 thorpej
1225 1.58 matt case _BUS_DMA_BUFTYPE_UIO:
1226 1.14 thorpej _bus_dmamap_sync_uio(t, map, offset, len, ops);
1227 1.14 thorpej break;
1228 1.14 thorpej
1229 1.58 matt case _BUS_DMA_BUFTYPE_INVALID:
1230 1.126 skrll panic("%s: _BUS_DMA_BUFTYPE_INVALID", __func__);
1231 1.14 thorpej break;
1232 1.14 thorpej
1233 1.14 thorpej default:
1234 1.126 skrll panic("%s: map %p: unknown buffer type %d\n", __func__, map,
1235 1.126 skrll map->_dm_buftype);
1236 1.8 thorpej }
1237 1.1 chris
1238 1.8 thorpej /* Drain the write buffer. */
1239 1.8 thorpej cpu_drain_writebuf();
1240 1.58 matt
1241 1.76 matt if (!bouncing || (ops & BUS_DMASYNC_POSTREAD) == 0)
1242 1.58 matt return;
1243 1.58 matt
1244 1.115 skrll bounce_it:
1245 1.115 skrll STAT_INCR(read_bounces);
1246 1.115 skrll
1247 1.63 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
1248 1.58 matt char * const dataptr = (char *)cookie->id_bouncebuf + offset;
1249 1.58 matt /*
1250 1.58 matt * Copy the bounce buffer to the caller's buffer.
1251 1.58 matt */
1252 1.58 matt switch (map->_dm_buftype) {
1253 1.58 matt case _BUS_DMA_BUFTYPE_LINEAR:
1254 1.58 matt memcpy(cookie->id_origlinearbuf + offset, dataptr, len);
1255 1.58 matt break;
1256 1.58 matt
1257 1.58 matt case _BUS_DMA_BUFTYPE_MBUF:
1258 1.58 matt m_copyback(cookie->id_origmbuf, offset, len, dataptr);
1259 1.58 matt break;
1260 1.58 matt
1261 1.58 matt case _BUS_DMA_BUFTYPE_UIO:
1262 1.58 matt _bus_dma_uiomove(dataptr, cookie->id_origuio, len, UIO_READ);
1263 1.58 matt break;
1264 1.126 skrll
1265 1.58 matt #ifdef DIAGNOSTIC
1266 1.58 matt case _BUS_DMA_BUFTYPE_RAW:
1267 1.126 skrll panic("%s(post): _BUS_DMA_BUFTYPE_RAW", __func__);
1268 1.58 matt break;
1269 1.58 matt
1270 1.58 matt case _BUS_DMA_BUFTYPE_INVALID:
1271 1.126 skrll panic("%s(post): _BUS_DMA_BUFTYPE_INVALID", __func__);
1272 1.58 matt break;
1273 1.58 matt
1274 1.58 matt default:
1275 1.126 skrll panic("%s(post): map %p: unknown buffer type %d\n", __func__,
1276 1.58 matt map, map->_dm_buftype);
1277 1.58 matt break;
1278 1.58 matt #endif
1279 1.58 matt }
1280 1.1 chris }
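/*
 * The PRE/POST pairing this function expects, as a minimal sketch
 * (names are hypothetical):
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... device DMAs into the buffer ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 *
 * Mixing PRE and POST ops in a single call panics above, so the two
 * calls must bracket the transfer separately.
 */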
1281 1.1 chris
1282 1.1 chris /*
1283 1.1 chris * Common function for DMA-safe memory allocation. May be called
1284 1.1 chris * by bus-specific DMA memory allocation functions.
1285 1.1 chris */
1286 1.1 chris
1287 1.11 thorpej extern paddr_t physical_start;
1288 1.11 thorpej extern paddr_t physical_end;
1289 1.1 chris
1290 1.1 chris int
1291 1.7 thorpej _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
1292 1.7 thorpej bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
1293 1.7 thorpej int flags)
1294 1.1 chris {
1295 1.15 thorpej struct arm32_dma_range *dr;
1296 1.37 mycroft int error, i;
1297 1.15 thorpej
1298 1.1 chris #ifdef DEBUG_DMA
1299 1.15 thorpej printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
1300 1.15 thorpej "segs=%p nsegs=%x rsegs=%p flags=%x\n", t, size, alignment,
1301 1.15 thorpej boundary, segs, nsegs, rsegs, flags);
1302 1.15 thorpej #endif
1303 1.15 thorpej
1304 1.15 thorpej if ((dr = t->_ranges) != NULL) {
1305 1.37 mycroft error = ENOMEM;
1306 1.15 thorpej for (i = 0; i < t->_nranges; i++, dr++) {
1307 1.70 matt if (dr->dr_len == 0
1308 1.70 matt || (dr->dr_flags & _BUS_DMAMAP_NOALLOC))
1309 1.15 thorpej continue;
1310 1.15 thorpej error = _bus_dmamem_alloc_range(t, size, alignment,
1311 1.15 thorpej boundary, segs, nsegs, rsegs, flags,
1312 1.15 thorpej trunc_page(dr->dr_sysbase),
1313 1.15 thorpej trunc_page(dr->dr_sysbase + dr->dr_len));
1314 1.15 thorpej if (error == 0)
1315 1.15 thorpej break;
1316 1.15 thorpej }
1317 1.15 thorpej } else {
1318 1.15 thorpej error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
1319 1.15 thorpej segs, nsegs, rsegs, flags, trunc_page(physical_start),
1320 1.15 thorpej trunc_page(physical_end));
1321 1.15 thorpej }
1322 1.15 thorpej
1323 1.1 chris #ifdef DEBUG_DMA
1324 1.1 chris printf("dmamem_alloc: =%d\n", error);
1325 1.15 thorpej #endif
1326 1.15 thorpej
1327 1.100 skrll return error;
1328 1.1 chris }
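/*
 * A minimal sketch pairing this allocator with _bus_dmamem_map()
 * below, via the documented bus_dmamem_alloc(9)/bus_dmamem_map(9)
 * wrappers; seg, rseg and kva are hypothetical locals:
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	error = bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
 *	    BUS_DMA_WAITOK);
 *	if (error == 0)
 *		error = bus_dmamem_map(t, &seg, rseg, size, &kva,
 *		    BUS_DMA_WAITOK | BUS_DMA_COHERENT);
 */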
1329 1.1 chris
1330 1.1 chris /*
1331 1.1 chris * Common function for freeing DMA-safe memory. May be called by
1332 1.1 chris * bus-specific DMA memory free functions.
1333 1.1 chris */
1334 1.1 chris void
1335 1.7 thorpej _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
1336 1.1 chris {
1337 1.1 chris struct vm_page *m;
1338 1.1 chris bus_addr_t addr;
1339 1.1 chris struct pglist mlist;
1340 1.1 chris int curseg;
1341 1.1 chris
1342 1.1 chris #ifdef DEBUG_DMA
1343 1.1 chris printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
1344 1.1 chris #endif /* DEBUG_DMA */
1345 1.1 chris
1346 1.1 chris /*
1347 1.1 chris * Build a list of pages to free back to the VM system.
1348 1.1 chris */
1349 1.1 chris TAILQ_INIT(&mlist);
1350 1.1 chris for (curseg = 0; curseg < nsegs; curseg++) {
1351 1.1 chris for (addr = segs[curseg].ds_addr;
1352 1.1 chris addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
1353 1.1 chris addr += PAGE_SIZE) {
1354 1.1 chris m = PHYS_TO_VM_PAGE(addr);
1355 1.52 ad TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
1356 1.1 chris }
1357 1.1 chris }
1358 1.1 chris uvm_pglistfree(&mlist);
1359 1.1 chris }
1360 1.1 chris
1361 1.1 chris /*
1362 1.1 chris * Common function for mapping DMA-safe memory. May be called by
1363 1.1 chris * bus-specific DMA memory map functions.
1364 1.1 chris */
1365 1.1 chris int
1366 1.7 thorpej _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1367 1.50 christos size_t size, void **kvap, int flags)
1368 1.1 chris {
1369 1.11 thorpej vaddr_t va;
1370 1.57 matt paddr_t pa;
1371 1.1 chris int curseg;
1372 1.65 matt const uvm_flag_t kmflags = UVM_KMF_VAONLY
1373 1.65 matt | ((flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0);
1374 1.65 matt vsize_t align = 0;
1375 1.1 chris
1376 1.1 chris #ifdef DEBUG_DMA
1377 1.3 rearnsha printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
1378 1.3 rearnsha segs, nsegs, (unsigned long)size, flags);
1379 1.1 chris #endif /* DEBUG_DMA */
1380 1.1 chris
1381 1.62 matt #ifdef PMAP_MAP_POOLPAGE
1382 1.62 matt /*
1383 1.62 matt * If all of memory is mapped, and we are mapping a single physically
1384 1.62 matt * contiguous area then this area is already mapped. Let's see if we
1385 1.62 matt 	 * can avoid having a separate mapping for it.
1386 1.62 matt */
1387 1.118 jmcneill if (nsegs == 1 && (flags & BUS_DMA_PREFETCHABLE) == 0) {
1388 1.62 matt /*
1389 1.62 matt * If this is a non-COHERENT mapping, then the existing kernel
1390 1.62 matt * mapping is already compatible with it.
1391 1.62 matt */
1392 1.68 matt bool direct_mapable = (flags & BUS_DMA_COHERENT) == 0;
1393 1.68 matt pa = segs[0].ds_addr;
1394 1.68 matt
1395 1.62 matt /*
1396 1.68 matt * This is a COHERENT mapping which, unless this address is in
1397 1.62 matt * a COHERENT dma range, will not be compatible.
1398 1.62 matt */
1399 1.62 matt if (t->_ranges != NULL) {
1400 1.62 matt const struct arm32_dma_range * const dr =
1401 1.68 matt _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
1402 1.71 matt if (dr != NULL
1403 1.71 matt && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
1404 1.71 matt direct_mapable = true;
1405 1.68 matt }
1406 1.68 matt }
1407 1.68 matt
1408 1.87 matt #ifdef PMAP_NEED_ALLOC_POOLPAGE
1409 1.87 matt /*
1410 1.87 matt 		 * The page can only be direct mapped if it was allocated out
1411 1.95 skrll * of the arm poolpage vm freelist.
1412 1.87 matt */
1413 1.97 cherry uvm_physseg_t upm = uvm_physseg_find(atop(pa), NULL);
1414 1.97 cherry KASSERT(uvm_physseg_valid_p(upm));
1415 1.87 matt if (direct_mapable) {
1416 1.87 matt direct_mapable =
1417 1.97 cherry (arm_poolpage_vmfreelist == uvm_physseg_get_free_list(upm));
1418 1.87 matt }
1419 1.87 matt #endif
1420 1.87 matt
1421 1.68 matt if (direct_mapable) {
1422 1.68 matt *kvap = (void *)PMAP_MAP_POOLPAGE(pa);
1423 1.64 matt #ifdef DEBUG_DMA
1424 1.68 matt printf("dmamem_map: =%p\n", *kvap);
1425 1.64 matt #endif /* DEBUG_DMA */
1426 1.68 matt return 0;
1427 1.62 matt }
1428 1.62 matt }
1429 1.62 matt #endif
1430 1.62 matt
1431 1.1 chris size = round_page(size);
1432 1.107 ryo
1433 1.107 ryo #ifdef PMAP_MAPSIZE1
1434 1.107 ryo if (size >= PMAP_MAPSIZE1)
1435 1.107 ryo align = PMAP_MAPSIZE1;
1436 1.107 ryo
1437 1.107 ryo #ifdef PMAP_MAPSIZE2
1438 1.107 ryo
1439 1.107 ryo #if PMAP_MAPSIZE1 > PMAP_MAPSIZE2
1440 1.107 ryo #error PMAP_MAPSIZE1 must be smaller than PMAP_MAPSIZE2
1441 1.107 ryo #endif
1442 1.107 ryo
1443 1.107 ryo if (size >= PMAP_MAPSIZE2)
1444 1.107 ryo align = PMAP_MAPSIZE2;
1445 1.107 ryo
1446 1.107 ryo #ifdef PMAP_MAPSIZE3
1447 1.107 ryo
1448 1.107 ryo #if PMAP_MAPSIZE2 > PMAP_MAPSIZE3
1449 1.107 ryo #error PMAP_MAPSIZE2 must be smaller than PMAP_MAPSIZE3
1450 1.107 ryo #endif
1451 1.107 ryo
1452 1.107 ryo if (size >= PMAP_MAPSIZE3)
1453 1.107 ryo align = PMAP_MAPSIZE3;
1454 1.107 ryo #endif
1455 1.107 ryo #endif
1456 1.107 ryo #endif
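
	/*
	 * Illustration (sizes hypothetical): with PMAP_MAPSIZE1 = 0x10000,
	 * PMAP_MAPSIZE2 = 0x100000 and PMAP_MAPSIZE3 = 0x1000000, the
	 * cascade above leaves `align' at the largest map size not
	 * exceeding the request:
	 *
	 *	size = 0x08000	-> align = 0		(ordinary pages)
	 *	size = 0x20000	-> align = 0x10000
	 *	size = 0x300000	-> align = 0x100000
	 *
	 * so uvm_km_alloc() can hand back a va eligible for any larger
	 * page sizes the pmap supports.
	 */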
1457 1.65 matt
1458 1.65 matt va = uvm_km_alloc(kernel_map, size, align, kmflags);
1459 1.65 matt if (__predict_false(va == 0 && align > 0)) {
1460 1.65 matt align = 0;
1461 1.65 matt va = uvm_km_alloc(kernel_map, size, 0, kmflags);
1462 1.65 matt }
1463 1.1 chris
1464 1.1 chris if (va == 0)
1465 1.100 skrll return ENOMEM;
1466 1.1 chris
1467 1.50 christos *kvap = (void *)va;
1468 1.1 chris
1469 1.1 chris for (curseg = 0; curseg < nsegs; curseg++) {
1470 1.57 matt for (pa = segs[curseg].ds_addr;
1471 1.57 matt pa < (segs[curseg].ds_addr + segs[curseg].ds_len);
1472 1.57 matt pa += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
1473 1.68 matt 			bool uncached = (flags & BUS_DMA_COHERENT) != 0;
1474 1.117 jmcneill 			bool prefetchable = (flags & BUS_DMA_PREFETCHABLE) != 0;
1475 1.1 chris #ifdef DEBUG_DMA
1476 1.57 matt printf("wiring p%lx to v%lx", pa, va);
1477 1.1 chris #endif /* DEBUG_DMA */
1478 1.1 chris if (size == 0)
1479 1.1 chris panic("_bus_dmamem_map: size botch");
1480 1.68 matt
1481 1.68 matt const struct arm32_dma_range * const dr =
1482 1.68 matt _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
1483 1.68 matt /*
1484 1.68 matt * If this dma region is coherent then there is
1485 1.68 matt * no need for an uncached mapping.
1486 1.68 matt */
1487 1.71 matt if (dr != NULL
1488 1.71 matt && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
1489 1.71 matt uncached = false;
1490 1.68 matt }
1491 1.71 matt
1492 1.117 jmcneill u_int pmap_flags = PMAP_WIRED;
1493 1.117 jmcneill if (prefetchable)
1494 1.117 jmcneill pmap_flags |= PMAP_WRITE_COMBINE;
1495 1.117 jmcneill else if (uncached)
1496 1.117 jmcneill pmap_flags |= PMAP_NOCACHE;
1497 1.117 jmcneill
1498 1.81 matt pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE,
1499 1.117 jmcneill pmap_flags);
1500 1.1 chris }
1501 1.1 chris }
1502 1.2 chris pmap_update(pmap_kernel());
1503 1.1 chris #ifdef DEBUG_DMA
1504 1.1 chris printf("dmamem_map: =%p\n", *kvap);
1505 1.1 chris #endif /* DEBUG_DMA */
1506 1.100 skrll return 0;
1507 1.1 chris }
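
/*
 * Illustrative sketch (not part of this file): the usual driver-side
 * pairing of the allocator and this mapper, per bus_dma(9).  Error
 * handling is elided and the single-segment request is an assumption.
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	error = bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
 *	    BUS_DMA_WAITOK);
 *	error = bus_dmamem_map(t, &seg, rseg, size, &kva,
 *	    BUS_DMA_WAITOK | BUS_DMA_COHERENT);
 *
 * With BUS_DMA_COHERENT the code above picks an uncached (or, with
 * BUS_DMA_PREFETCHABLE, write-combining) mapping unless the backing
 * range is itself marked _BUS_DMAMAP_COHERENT.
 */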
1508 1.1 chris
1509 1.1 chris /*
1510 1.1 chris * Common function for unmapping DMA-safe memory. May be called by
1511 1.1 chris * bus-specific DMA memory unmapping functions.
1512 1.1 chris */
1513 1.1 chris void
1514 1.50 christos _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
1515 1.1 chris {
1516 1.1 chris
1517 1.1 chris #ifdef DEBUG_DMA
1518 1.65 matt printf("dmamem_unmap: t=%p kva=%p size=%zx\n", t, kva, size);
1519 1.1 chris #endif /* DEBUG_DMA */
1520 1.79 matt KASSERTMSG(((uintptr_t)kva & PAGE_MASK) == 0,
1521 1.83 christos "kva %p (%#"PRIxPTR")", kva, ((uintptr_t)kva & PAGE_MASK));
1522 1.1 chris
1523 1.84 matt #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
1524 1.84 matt /*
1525 1.88 snj 	 * Check to see if this used direct-mapped memory.  Get the kva's
1526 1.84 matt 	 * physical address and derive its direct-mapped virtual address; if
1527 1.99 skrll 	 * that matches the kva, it was direct mapped and there is nothing to free.
1528 1.84 matt */
1529 1.84 matt paddr_t pa;
1530 1.84 matt vaddr_t va;
1531 1.84 matt (void)pmap_extract(pmap_kernel(), (vaddr_t)kva, &pa);
1532 1.84 matt if (mm_md_direct_mapped_phys(pa, &va) && va == (vaddr_t)kva)
1533 1.84 matt return;
1534 1.84 matt #endif
1535 1.84 matt
1536 1.1 chris size = round_page(size);
1537 1.65 matt pmap_kremove((vaddr_t)kva, size);
1538 1.44 yamt pmap_update(pmap_kernel());
1539 1.44 yamt uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
1540 1.1 chris }
1541 1.1 chris
1542 1.1 chris /*
1543 1.1 chris  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
1544 1.1 chris * bus-specific DMA mmap(2)'ing functions.
1545 1.1 chris */
1546 1.1 chris paddr_t
1547 1.7 thorpej _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1548 1.7 thorpej off_t off, int prot, int flags)
1549 1.1 chris {
1550 1.73 macallan paddr_t map_flags;
1551 1.1 chris int i;
1552 1.1 chris
1553 1.1 chris for (i = 0; i < nsegs; i++) {
1554 1.79 matt KASSERTMSG((off & PAGE_MASK) == 0,
1555 1.111 christos "off %#jx (%#x)", (uintmax_t)off, (int)off & PAGE_MASK);
1556 1.79 matt KASSERTMSG((segs[i].ds_addr & PAGE_MASK) == 0,
1557 1.79 matt "ds_addr %#lx (%#x)", segs[i].ds_addr,
1558 1.79 matt (int)segs[i].ds_addr & PAGE_MASK);
1559 1.79 matt KASSERTMSG((segs[i].ds_len & PAGE_MASK) == 0,
1560 1.79 matt 		    "ds_len %#lx (%#x)", segs[i].ds_len,
1561 1.79 matt 		    (int)segs[i].ds_len & PAGE_MASK);
1562 1.1 chris if (off >= segs[i].ds_len) {
1563 1.1 chris off -= segs[i].ds_len;
1564 1.1 chris continue;
1565 1.1 chris }
1566 1.1 chris
1567 1.73 macallan map_flags = 0;
1568 1.73 macallan if (flags & BUS_DMA_PREFETCHABLE)
1569 1.107 ryo map_flags |= ARM_MMAP_WRITECOMBINE;
1570 1.73 macallan
1571 1.100 skrll return arm_btop((u_long)segs[i].ds_addr + off) | map_flags;
1573 1.1 chris 	}
1574 1.1 chris
1575 1.1 chris /* Page not found. */
1576 1.100 skrll return -1;
1577 1.1 chris }
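
/*
 * Illustrative note (not part of this file): the device pager calls
 * this once per faulting page, with `off' measured from the start of
 * the segment list.  For segs = { {0x40000000, 0x2000},
 * {0x80000000, 0x1000} } and off = 0x2000, the loop skips the first
 * segment and returns arm_btop(0x80000000).
 */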
1578 1.1 chris
1579 1.1 chris /**********************************************************************
1580 1.1 chris * DMA utility functions
1581 1.1 chris **********************************************************************/
1582 1.1 chris
1583 1.1 chris /*
1584 1.1 chris  * Utility function to load a linear buffer.  The segment list in the
1585 1.1 chris  * map holds the state between invocations (for multiple-buffer loads),
1586 1.1 chris  * so this function may be called repeatedly to append to one map.
1588 1.1 chris */
1589 1.1 chris int
1590 1.7 thorpej _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
1591 1.48 yamt bus_size_t buflen, struct vmspace *vm, int flags)
1592 1.1 chris {
1593 1.1 chris bus_size_t sgsize;
1594 1.41 thorpej bus_addr_t curaddr;
1595 1.11 thorpej vaddr_t vaddr = (vaddr_t)buf;
1596 1.41 thorpej int error;
1597 1.1 chris pmap_t pmap;
1598 1.1 chris
1599 1.1 chris #ifdef DEBUG_DMA
1600 1.40 scw 	printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d)\n",
1601 1.40 scw buf, buflen, flags);
1602 1.1 chris #endif /* DEBUG_DMA */
1603 1.1 chris
1604 1.48 yamt pmap = vm_map_pmap(&vm->vm_map);
1605 1.1 chris
1606 1.41 thorpej while (buflen > 0) {
1607 1.1 chris /*
1608 1.1 chris * Get the physical address for this segment.
1610 1.1 chris 		 */
1611 1.61 matt bool coherent;
1612 1.107 ryo pmap_extract_coherency(pmap, vaddr, &curaddr, &coherent);
1613 1.107 ryo
1614 1.86 matt KASSERTMSG((vaddr & PAGE_MASK) == (curaddr & PAGE_MASK),
1615 1.86 matt "va %#lx curaddr %#lx", vaddr, curaddr);
1616 1.1 chris
1617 1.1 chris /*
1618 1.1 chris * Compute the segment size, and adjust counts.
1619 1.1 chris */
1620 1.27 thorpej sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
1621 1.1 chris if (buflen < sgsize)
1622 1.1 chris sgsize = buflen;
1623 1.1 chris
1624 1.61 matt error = _bus_dmamap_load_paddr(t, map, curaddr, sgsize,
1625 1.61 matt coherent);
1626 1.41 thorpej if (error)
1627 1.100 skrll return error;
1628 1.1 chris
1629 1.1 chris vaddr += sgsize;
1630 1.1 chris buflen -= sgsize;
1631 1.1 chris }
1632 1.1 chris
1633 1.100 skrll return 0;
1634 1.1 chris }
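
/*
 * Illustrative sketch (not part of this file): loading a plain kernel
 * buffer through bus_dmamap_load() (see bus_dma(9)) ends up here, one
 * physical chunk of at most a page at a time:
 *
 *	error = bus_dmamap_load(t, map, buf, buflen, NULL,
 *	    BUS_DMA_WAITOK);
 *
 * A NULL proc pointer means the buffer lives in kernel space, so the
 * virtual addresses above are translated via pmap_kernel().
 */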
1635 1.1 chris
1636 1.1 chris /*
1637 1.1 chris * Allocate physical memory from the given physical address range.
1638 1.1 chris * Called by DMA-safe memory allocation methods.
1639 1.1 chris */
1640 1.1 chris int
1641 1.7 thorpej _bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
1642 1.7 thorpej bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
1643 1.11 thorpej int flags, paddr_t low, paddr_t high)
1644 1.1 chris {
1645 1.11 thorpej paddr_t curaddr, lastaddr;
1646 1.1 chris struct vm_page *m;
1647 1.1 chris struct pglist mlist;
1648 1.1 chris int curseg, error;
1649 1.1 chris
1650 1.101 skrll KASSERTMSG(boundary == 0 || (boundary & (boundary - 1)) == 0,
1651 1.76 matt "invalid boundary %#lx", boundary);
1652 1.76 matt
1653 1.1 chris #ifdef DEBUG_DMA
1654 1.1 chris printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
1655 1.1 chris t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
1656 1.1 chris #endif /* DEBUG_DMA */
1657 1.1 chris
1658 1.1 chris /* Always round the size. */
1659 1.1 chris size = round_page(size);
1660 1.1 chris
1661 1.1 chris /*
1662 1.76 matt 	 * We accept boundaries < size, splitting into multiple segments
1663 1.76 matt 	 * if needed.  uvm_pglistalloc does not, so compute an appropriate
1664 1.76 matt 	 * boundary for it: the next power of 2 >= size.
1665 1.76 matt */
1666 1.76 matt bus_size_t uboundary = boundary;
1667 1.76 matt if (uboundary <= PAGE_SIZE) {
1668 1.76 matt uboundary = 0;
1669 1.76 matt } else {
1670 1.76 matt while (uboundary < size) {
1671 1.76 matt uboundary <<= 1;
1672 1.76 matt }
1673 1.76 matt }
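
	/*
	 * Example: boundary = 0x2000 and size = 0x5000 yields
	 * uboundary = 0x8000 (0x2000 doubled until >= size).  The
	 * relaxed boundary lets uvm_pglistalloc() satisfy the request;
	 * pages that cross the caller's original 0x2000 boundary are
	 * then split into separate segments by the loop below.
	 */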
1674 1.76 matt
1675 1.76 matt /*
1676 1.1 chris * Allocate pages from the VM system.
1677 1.1 chris */
1678 1.78 matt error = uvm_pglistalloc(size, low, high, alignment, uboundary,
1679 1.1 chris &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
1680 1.1 chris if (error)
1681 1.100 skrll return error;
1682 1.1 chris
1683 1.1 chris /*
1684 1.1 chris * Compute the location, size, and number of segments actually
1685 1.1 chris * returned by the VM code.
1686 1.1 chris */
1687 1.42 chris m = TAILQ_FIRST(&mlist);
1688 1.1 chris curseg = 0;
1689 1.1 chris lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
1690 1.1 chris segs[curseg].ds_len = PAGE_SIZE;
1691 1.1 chris #ifdef DEBUG_DMA
1692 1.1 chris printf("alloc: page %lx\n", lastaddr);
1693 1.1 chris #endif /* DEBUG_DMA */
1694 1.52 ad m = TAILQ_NEXT(m, pageq.queue);
1695 1.1 chris
1696 1.52 ad for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
1697 1.1 chris curaddr = VM_PAGE_TO_PHYS(m);
1698 1.76 matt KASSERTMSG(low <= curaddr && curaddr < high,
1699 1.76 matt 		    "uvm_pglistalloc returned nonsensical address %#lx "
1700 1.76 matt 		    "(low=%#lx, high=%#lx)", curaddr, low, high);
1701 1.1 chris #ifdef DEBUG_DMA
1702 1.1 chris printf("alloc: page %lx\n", curaddr);
1703 1.1 chris #endif /* DEBUG_DMA */
1704 1.76 matt if (curaddr == lastaddr + PAGE_SIZE
1705 1.76 matt && (lastaddr & boundary) == (curaddr & boundary))
1706 1.1 chris segs[curseg].ds_len += PAGE_SIZE;
1707 1.1 chris else {
1708 1.1 chris curseg++;
1709 1.76 matt if (curseg >= nsegs) {
1710 1.76 matt uvm_pglistfree(&mlist);
1711 1.76 matt return EFBIG;
1712 1.76 matt }
1713 1.1 chris segs[curseg].ds_addr = curaddr;
1714 1.1 chris segs[curseg].ds_len = PAGE_SIZE;
1715 1.1 chris }
1716 1.1 chris lastaddr = curaddr;
1717 1.1 chris }
1718 1.1 chris
1719 1.1 chris *rsegs = curseg + 1;
1720 1.1 chris
1721 1.100 skrll return 0;
1722 1.15 thorpej }
1723 1.15 thorpej
1724 1.15 thorpej /*
1725 1.15 thorpej * Check if a memory region intersects with a DMA range, and return the
1726 1.15 thorpej * page-rounded intersection if it does.
1727 1.15 thorpej */
1728 1.15 thorpej int
1729 1.15 thorpej arm32_dma_range_intersect(struct arm32_dma_range *ranges, int nranges,
1730 1.15 thorpej paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
1731 1.15 thorpej {
1732 1.15 thorpej struct arm32_dma_range *dr;
1733 1.15 thorpej int i;
1734 1.15 thorpej
1735 1.15 thorpej if (ranges == NULL)
1736 1.100 skrll return 0;
1737 1.15 thorpej
1738 1.15 thorpej for (i = 0, dr = ranges; i < nranges; i++, dr++) {
1739 1.15 thorpej if (dr->dr_sysbase <= pa &&
1740 1.15 thorpej pa < (dr->dr_sysbase + dr->dr_len)) {
1741 1.15 thorpej /*
1742 1.15 thorpej * Beginning of region intersects with this range.
1743 1.15 thorpej */
1744 1.15 thorpej *pap = trunc_page(pa);
1745 1.112 riastrad *sizep = round_page(uimin(pa + size,
1746 1.15 thorpej dr->dr_sysbase + dr->dr_len) - pa);
1747 1.100 skrll return 1;
1748 1.15 thorpej }
1749 1.15 thorpej if (pa < dr->dr_sysbase && dr->dr_sysbase < (pa + size)) {
1750 1.15 thorpej /*
1751 1.15 thorpej * End of region intersects with this range.
1752 1.15 thorpej */
1753 1.15 thorpej *pap = trunc_page(dr->dr_sysbase);
1754 1.112 riastrad *sizep = round_page(uimin((pa + size) - dr->dr_sysbase,
1755 1.15 thorpej dr->dr_len));
1756 1.100 skrll return 1;
1757 1.15 thorpej }
1758 1.15 thorpej }
1759 1.15 thorpej
1760 1.15 thorpej /* No intersection found. */
1761 1.100 skrll return 0;
1762 1.1 chris }
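
/*
 * Worked example (hypothetical numbers): for a single range
 * { .dr_sysbase = 0x10000000, .dr_len = 0x10000000 } and a region
 * pa = 0x0ffff000, size = 0x3000, only the tail of the region lies
 * inside the range, so the second test above fires and yields
 * *pap = 0x10000000, *sizep = round_page(0x2000) = 0x2000.
 */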
1763 1.58 matt
1764 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1765 1.58 matt static int
1766 1.58 matt _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
1767 1.58 matt bus_size_t size, int flags)
1768 1.58 matt {
1769 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
1770 1.58 matt int error = 0;
1771 1.58 matt
1772 1.79 matt KASSERT(cookie != NULL);
1773 1.58 matt
1774 1.58 matt cookie->id_bouncebuflen = round_page(size);
1775 1.58 matt error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
1776 1.58 matt PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
1777 1.58 matt map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
1778 1.76 matt if (error == 0) {
1779 1.76 matt error = _bus_dmamem_map(t, cookie->id_bouncesegs,
1780 1.76 matt cookie->id_nbouncesegs, cookie->id_bouncebuflen,
1781 1.76 matt (void **)&cookie->id_bouncebuf, flags);
1782 1.76 matt if (error) {
1783 1.76 matt _bus_dmamem_free(t, cookie->id_bouncesegs,
1784 1.76 matt cookie->id_nbouncesegs);
1785 1.76 matt cookie->id_bouncebuflen = 0;
1786 1.76 matt cookie->id_nbouncesegs = 0;
1787 1.76 matt } else {
1788 1.76 matt cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;
1789 1.76 matt }
1790 1.76 matt } else {
1791 1.58 matt cookie->id_bouncebuflen = 0;
1792 1.58 matt cookie->id_nbouncesegs = 0;
1793 1.58 matt }
1794 1.58 matt
1795 1.100 skrll return error;
1796 1.58 matt }
1797 1.58 matt
1798 1.58 matt static void
1799 1.58 matt _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
1800 1.58 matt {
1801 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
1802 1.58 matt
1803 1.79 matt KASSERT(cookie != NULL);
1804 1.58 matt
1805 1.58 matt _bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
1806 1.79 matt _bus_dmamem_free(t, cookie->id_bouncesegs, cookie->id_nbouncesegs);
1807 1.58 matt cookie->id_bouncebuflen = 0;
1808 1.58 matt cookie->id_nbouncesegs = 0;
1809 1.58 matt cookie->id_flags &= ~_BUS_DMA_HAS_BOUNCE;
1810 1.58 matt }
1811 1.115 skrll #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
1812 1.58 matt
1813 1.58 matt /*
1814 1.58 matt * This function does the same as uiomove, but takes an explicit
1815 1.58 matt * direction, and does not update the uio structure.
1816 1.58 matt */
1817 1.58 matt static int
1818 1.58 matt _bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
1819 1.58 matt {
1820 1.58 matt struct iovec *iov;
1821 1.58 matt int error;
1822 1.58 matt struct vmspace *vm;
1823 1.58 matt char *cp;
1824 1.58 matt size_t resid, cnt;
1825 1.58 matt int i;
1826 1.58 matt
1827 1.58 matt iov = uio->uio_iov;
1828 1.58 matt vm = uio->uio_vmspace;
1829 1.58 matt cp = buf;
1830 1.58 matt resid = n;
1831 1.58 matt
1832 1.58 matt for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
1833 1.58 matt iov = &uio->uio_iov[i];
1834 1.58 matt if (iov->iov_len == 0)
1835 1.58 matt continue;
1836 1.58 matt cnt = MIN(resid, iov->iov_len);
1837 1.58 matt
1838 1.121 ad if (!VMSPACE_IS_KERNEL_P(vm)) {
1839 1.121 ad preempt_point();
1840 1.58 matt }
1841 1.58 matt if (direction == UIO_READ) {
1842 1.58 matt error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
1843 1.58 matt } else {
1844 1.58 matt error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
1845 1.58 matt }
1846 1.58 matt if (error)
1847 1.100 skrll return error;
1848 1.58 matt cp += cnt;
1849 1.58 matt resid -= cnt;
1850 1.58 matt }
1851 1.100 skrll return 0;
1852 1.58 matt }
1853 1.58 matt
1854 1.58 matt int
1855 1.58 matt _bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
1856 1.58 matt bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
1857 1.58 matt {
1858 1.123 skrll if (min_addr >= max_addr)
1859 1.123 skrll return EOPNOTSUPP;
1860 1.58 matt
1861 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1862 1.58 matt struct arm32_dma_range *dr;
1863 1.124 skrll bool psubset = true;
1864 1.58 matt size_t nranges = 0;
1865 1.58 matt size_t i;
1866 1.58 matt for (i = 0, dr = tag->_ranges; i < tag->_nranges; i++, dr++) {
1867 1.123 skrll /*
1868 1.124 skrll 		 * If the new {min,max}_addr limits narrow any of the
1869 1.124 skrll 		 * ranges in the parent tag, then we need a new tag;
1870 1.124 skrll 		 * otherwise every parent range already lies within the
1871 1.124 skrll 		 * new limits and the parent tag can continue to be used.
1872 1.123 skrll */
1873 1.124 skrll if (min_addr > dr->dr_sysbase
1874 1.124 skrll || max_addr < dr->dr_sysbase + dr->dr_len - 1) {
1875 1.124 skrll psubset = false;
1876 1.58 matt }
1877 1.58 matt if (min_addr <= dr->dr_sysbase + dr->dr_len
1878 1.58 matt && max_addr >= dr->dr_sysbase) {
1879 1.58 matt nranges++;
1880 1.58 matt }
1881 1.58 matt }
1882 1.124 skrll if (nranges == 0) {
1883 1.124 skrll nranges = 1;
1884 1.124 skrll psubset = false;
1885 1.124 skrll }
1886 1.124 skrll if (psubset) {
1887 1.58 matt *newtag = tag;
1888 1.58 matt /* if the tag must be freed, add a reference */
1889 1.58 matt if (tag->_tag_needs_free)
1890 1.58 matt (tag->_tag_needs_free)++;
1891 1.58 matt return 0;
1892 1.58 matt }
1893 1.58 matt
1894 1.81 matt const size_t tagsize = sizeof(*tag) + nranges * sizeof(*dr);
1895 1.81 matt if ((*newtag = kmem_intr_zalloc(tagsize,
1896 1.81 matt (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
1897 1.58 matt return ENOMEM;
1898 1.58 matt
1899 1.58 matt dr = (void *)(*newtag + 1);
1900 1.58 matt **newtag = *tag;
1901 1.58 matt (*newtag)->_tag_needs_free = 1;
1902 1.58 matt (*newtag)->_ranges = dr;
1903 1.58 matt (*newtag)->_nranges = nranges;
1904 1.58 matt
1905 1.58 matt if (tag->_ranges == NULL) {
1906 1.58 matt dr->dr_sysbase = min_addr;
1907 1.58 matt dr->dr_busbase = min_addr;
1908 1.58 matt dr->dr_len = max_addr + 1 - min_addr;
1909 1.58 matt } else {
1910 1.123 skrll struct arm32_dma_range *pdr;
1911 1.123 skrll
1912 1.123 skrll for (i = 0, pdr = tag->_ranges; i < tag->_nranges; i++, pdr++) {
1913 1.123 skrll KASSERT(nranges != 0);
1914 1.123 skrll
1915 1.123 skrll if (min_addr > pdr->dr_sysbase + pdr->dr_len
1916 1.123 skrll || max_addr < pdr->dr_sysbase) {
1917 1.123 skrll /*
1918 1.123 skrll * this range doesn't overlap with new limits,
1919 1.123 skrll * so skip.
1920 1.123 skrll */
1921 1.58 matt continue;
1922 1.123 skrll }
1923 1.123 skrll /*
1924 1.123 skrll * Copy the range and adjust to fit within the new
1925 1.123 skrll * limits
1926 1.123 skrll */
1927 1.123 skrll dr[0] = pdr[0];
1928 1.58 matt if (dr->dr_sysbase < min_addr) {
1929 1.58 matt psize_t diff = min_addr - dr->dr_sysbase;
1930 1.58 matt dr->dr_busbase += diff;
1931 1.58 matt dr->dr_len -= diff;
1932 1.58 matt dr->dr_sysbase += diff;
1933 1.58 matt }
1934 1.123 skrll if (max_addr <= dr->dr_sysbase + dr->dr_len - 1) {
1935 1.58 matt dr->dr_len = max_addr + 1 - dr->dr_sysbase;
1936 1.58 matt }
1937 1.58 matt dr++;
1938 1.123 skrll nranges--;
1939 1.58 matt }
1940 1.58 matt }
1941 1.58 matt
1942 1.58 matt return 0;
1943 1.58 matt #else
1944 1.58 matt return EOPNOTSUPP;
1945 1.58 matt #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
1946 1.58 matt }
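
/*
 * Illustrative sketch (not part of this file): a driver whose device
 * can only address the low 1GB might narrow its parent tag with
 *
 *	bus_dma_tag_t dmat;
 *
 *	error = bus_dmatag_subregion(parent_tag, 0, 0x3fffffffUL,
 *	    &dmat, BUS_DMA_WAITOK);
 *
 * If every parent range already fits within the limits, the parent tag
 * itself is returned with an extra reference; either way the tag is
 * later released with bus_dmatag_destroy(dmat).
 */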
1947 1.58 matt
1948 1.58 matt void
1949 1.58 matt _bus_dmatag_destroy(bus_dma_tag_t tag)
1950 1.58 matt {
1951 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1952 1.58 matt switch (tag->_tag_needs_free) {
1953 1.58 matt case 0:
1954 1.81 matt break; /* not allocated with kmem */
1955 1.81 matt case 1: {
1956 1.81 matt const size_t tagsize = sizeof(*tag)
1957 1.81 matt + tag->_nranges * sizeof(*tag->_ranges);
1958 1.81 matt kmem_intr_free(tag, tagsize); /* last reference to tag */
1959 1.58 matt break;
1960 1.81 matt }
1961 1.58 matt default:
1962 1.58 matt (tag->_tag_needs_free)--; /* one less reference */
1963 1.58 matt }
1964 1.58 matt #endif
1965 1.58 matt }
1966