      1 1.139  skrll /*	$NetBSD: bus_dma.c,v 1.139 2023/02/25 08:05:46 skrll Exp $	*/
2 1.1 chris
3 1.1 chris /*-
4 1.121 ad * Copyright (c) 1996, 1997, 1998, 2020 The NetBSD Foundation, Inc.
5 1.1 chris * All rights reserved.
6 1.1 chris *
7 1.1 chris * This code is derived from software contributed to The NetBSD Foundation
8 1.1 chris * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 1.1 chris * NASA Ames Research Center.
10 1.1 chris *
11 1.1 chris * Redistribution and use in source and binary forms, with or without
12 1.1 chris * modification, are permitted provided that the following conditions
13 1.1 chris * are met:
14 1.1 chris * 1. Redistributions of source code must retain the above copyright
15 1.1 chris * notice, this list of conditions and the following disclaimer.
16 1.1 chris * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 chris * notice, this list of conditions and the following disclaimer in the
18 1.1 chris * documentation and/or other materials provided with the distribution.
19 1.1 chris *
20 1.1 chris * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 1.1 chris * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 1.1 chris * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 1.1 chris * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 1.1 chris * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 1.1 chris * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 1.1 chris * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 1.1 chris * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 1.1 chris * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 1.1 chris * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 1.1 chris * POSSIBILITY OF SUCH DAMAGE.
31 1.1 chris */
32 1.33 lukem
33 1.35 rearnsha #define _ARM32_BUS_DMA_PRIVATE
34 1.35 rearnsha
35 1.81 matt #include "opt_arm_bus_space.h"
36 1.107 ryo #include "opt_cputypes.h"
37 1.81 matt
38 1.33 lukem #include <sys/cdefs.h>
39 1.139 skrll __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.139 2023/02/25 08:05:46 skrll Exp $");
40 1.1 chris
41 1.1 chris #include <sys/param.h>
42 1.122 skrll
43 1.84 matt #include <sys/bus.h>
44 1.84 matt #include <sys/cpu.h>
45 1.81 matt #include <sys/kmem.h>
46 1.1 chris #include <sys/mbuf.h>
47 1.1 chris
48 1.53 uebayasi #include <uvm/uvm.h>
49 1.1 chris
50 1.107 ryo #include <arm/cpuconf.h>
51 1.84 matt #include <arm/cpufunc.h>
52 1.4 thorpej
53 1.84 matt #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
54 1.84 matt #include <dev/mm.h>
55 1.84 matt #endif
56 1.1 chris
57 1.76 matt #ifdef BUSDMA_COUNTERS
58 1.58 matt static struct evcnt bus_dma_creates =
59 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "creates");
60 1.58 matt static struct evcnt bus_dma_bounced_creates =
61 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced creates");
62 1.58 matt static struct evcnt bus_dma_loads =
63 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "loads");
64 1.58 matt static struct evcnt bus_dma_bounced_loads =
65 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced loads");
66 1.81 matt static struct evcnt bus_dma_coherent_loads =
67 1.81 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "coherent loads");
68 1.58 matt static struct evcnt bus_dma_read_bounces =
69 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "read bounces");
70 1.58 matt static struct evcnt bus_dma_write_bounces =
71 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "write bounces");
72 1.58 matt static struct evcnt bus_dma_bounced_unloads =
73 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced unloads");
74 1.135 mrg static struct evcnt bus_dma_bounced_mbuf_loads =
75 1.135 mrg EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced mbuf loads");
76 1.58 matt static struct evcnt bus_dma_unloads =
77 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "unloads");
78 1.58 matt static struct evcnt bus_dma_bounced_destroys =
79 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced destroys");
80 1.58 matt static struct evcnt bus_dma_destroys =
81 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "destroys");
82 1.95 skrll static struct evcnt bus_dma_sync_prereadwrite =
83 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prereadwrite");
84 1.76 matt static struct evcnt bus_dma_sync_preread_begin =
85 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread begin");
86 1.76 matt static struct evcnt bus_dma_sync_preread =
87 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread");
88 1.76 matt static struct evcnt bus_dma_sync_preread_tail =
89 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread tail");
90 1.95 skrll static struct evcnt bus_dma_sync_prewrite =
91 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prewrite");
92 1.95 skrll static struct evcnt bus_dma_sync_postread =
93 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postread");
94 1.95 skrll static struct evcnt bus_dma_sync_postreadwrite =
95 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postreadwrite");
96 1.95 skrll static struct evcnt bus_dma_sync_postwrite =
97 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postwrite");
98 1.135 mrg static struct evcnt bus_dma_inrange_fail =
99 1.135 mrg EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "inrange check failed");
100 1.58 matt
101 1.129 skrll static struct evcnt bus_dma_sync_coherent_prereadwrite =
102 1.129 skrll EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync coherent prereadwrite");
103 1.129 skrll static struct evcnt bus_dma_sync_coherent_preread =
104 1.129 skrll EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync coherent preread");
105 1.129 skrll static struct evcnt bus_dma_sync_coherent_prewrite =
106 1.129 skrll EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync coherent prewrite");
107 1.129 skrll static struct evcnt bus_dma_sync_coherent_postread =
108 1.129 skrll EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync coherent postread");
109 1.129 skrll static struct evcnt bus_dma_sync_coherent_postreadwrite =
110 1.129 skrll EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync coherent postreadwrite");
111 1.129 skrll static struct evcnt bus_dma_sync_coherent_postwrite =
112 1.129 skrll EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync coherent postwrite");
113 1.129 skrll
114 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_creates);
115 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_bounced_creates);
116 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_loads);
117 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_bounced_loads);
118 1.81 matt EVCNT_ATTACH_STATIC(bus_dma_coherent_loads);
119 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_read_bounces);
120 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_write_bounces);
121 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_unloads);
122 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_bounced_unloads);
123 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_destroys);
124 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_bounced_destroys);
125 1.135 mrg EVCNT_ATTACH_STATIC(bus_dma_bounced_mbuf_loads);
126 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_prereadwrite);
127 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_preread_begin);
128 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_preread);
129 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_preread_tail);
130 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_prewrite);
131 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_postread);
132 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_postreadwrite);
133 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_postwrite);
134 1.135 mrg EVCNT_ATTACH_STATIC(bus_dma_inrange_fail);
135 1.58 matt
136 1.129 skrll EVCNT_ATTACH_STATIC(bus_dma_sync_coherent_prereadwrite);
137 1.129 skrll EVCNT_ATTACH_STATIC(bus_dma_sync_coherent_preread);
138 1.129 skrll EVCNT_ATTACH_STATIC(bus_dma_sync_coherent_prewrite);
139 1.129 skrll EVCNT_ATTACH_STATIC(bus_dma_sync_coherent_postread);
140 1.129 skrll EVCNT_ATTACH_STATIC(bus_dma_sync_coherent_postreadwrite);
141 1.129 skrll EVCNT_ATTACH_STATIC(bus_dma_sync_coherent_postwrite);
142 1.129 skrll
143 1.58 matt #define STAT_INCR(x) (bus_dma_ ## x.ev_count++)
144 1.76 matt #else
145 1.107 ryo #define STAT_INCR(x) __nothing
146 1.76 matt #endif
147 1.58 matt
148 1.7 thorpej int _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
149 1.48 yamt bus_size_t, struct vmspace *, int);
150 1.1 chris
151 1.1 chris /*
    152 1.19  briggs  * Check whether the specified physical address is in an allowed DMA
    152 1.19  briggs  * range; return the matching range, or NULL if there is none.
153 1.19 briggs */
154 1.105 skrll static inline struct arm32_dma_range *
155 1.59 matt _bus_dma_paddr_inrange(struct arm32_dma_range *ranges, int nranges,
156 1.19 briggs bus_addr_t curaddr)
157 1.19 briggs {
158 1.19 briggs struct arm32_dma_range *dr;
159 1.19 briggs int i;
160 1.19 briggs
161 1.19 briggs for (i = 0, dr = ranges; i < nranges; i++, dr++) {
162 1.19 briggs if (curaddr >= dr->dr_sysbase &&
163 1.82 skrll curaddr < (dr->dr_sysbase + dr->dr_len))
164 1.100 skrll return dr;
165 1.19 briggs }
166 1.19 briggs
167 1.100 skrll return NULL;
168 1.19 briggs }
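/*
 * Illustrative sketch (hypothetical values, not part of this file): a
 * platform whose 512MB of RAM at CPU physical 0x80000000 appears to
 * devices at bus address 0x40000000 would describe that window as
 * follows.
 */
#if 0
static struct arm32_dma_range example_ranges[] = {
	{
		.dr_sysbase = 0x80000000,	/* CPU physical base */
		.dr_busbase = 0x40000000,	/* device-visible base */
		.dr_len	    = 0x20000000,	/* 512MB window */
	},
};

/* paddr 0x80001000 lies inside the window; 0x7ffff000 does not. */
KASSERT(_bus_dma_paddr_inrange(example_ranges, 1, 0x80001000) != NULL);
KASSERT(_bus_dma_paddr_inrange(example_ranges, 1, 0x7ffff000) == NULL);
#endif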
169 1.19 briggs
170 1.19 briggs /*
    171 1.59  matt  * Translate the specified busaddr back to a CPU physical address via
    171 1.59  matt  * the tag's DMA ranges; panics if the busaddr is in no range.
172 1.59 matt */
173 1.59 matt static inline paddr_t
174 1.59 matt _bus_dma_busaddr_to_paddr(bus_dma_tag_t t, bus_addr_t curaddr)
175 1.59 matt {
176 1.59 matt struct arm32_dma_range *dr;
177 1.59 matt u_int i;
178 1.59 matt
179 1.59 matt if (t->_nranges == 0)
180 1.59 matt return curaddr;
181 1.59 matt
182 1.59 matt for (i = 0, dr = t->_ranges; i < t->_nranges; i++, dr++) {
183 1.59 matt if (dr->dr_busbase <= curaddr
184 1.82 skrll && curaddr < dr->dr_busbase + dr->dr_len)
185 1.59 matt return curaddr - dr->dr_busbase + dr->dr_sysbase;
186 1.59 matt }
187 1.59 matt panic("%s: curaddr %#lx not in range", __func__, curaddr);
188 1.59 matt }
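/*
 * Worked example (using the hypothetical window sketched above): a
 * busaddr of 0x40001000 translates to
 * 0x40001000 - dr_busbase + dr_sysbase = 0x80001000, i.e. this is the
 * inverse of the paddr-to-busaddr translation performed by
 * _bus_dmamap_load_paddr() below.
 */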
189 1.59 matt
190 1.59 matt /*
191 1.41 thorpej * Common function to load the specified physical address into the
192 1.41 thorpej * DMA map, coalescing segments and boundary checking as necessary.
193 1.41 thorpej */
194 1.41 thorpej static int
195 1.41 thorpej _bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
196 1.61 matt bus_addr_t paddr, bus_size_t size, bool coherent)
197 1.41 thorpej {
198 1.41 thorpej bus_dma_segment_t * const segs = map->dm_segs;
199 1.41 thorpej int nseg = map->dm_nsegs;
200 1.58 matt bus_addr_t lastaddr;
201 1.41 thorpej bus_addr_t bmask = ~(map->_dm_boundary - 1);
202 1.41 thorpej bus_addr_t curaddr;
203 1.41 thorpej bus_size_t sgsize;
204 1.61 matt uint32_t _ds_flags = coherent ? _BUS_DMAMAP_COHERENT : 0;
205 1.41 thorpej
206 1.41 thorpej if (nseg > 0)
207 1.101 skrll lastaddr = segs[nseg - 1].ds_addr + segs[nseg - 1].ds_len;
208 1.58 matt else
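		/*
		 * No previous segment to coalesce with; the dummy value
		 * only quiets uninitialized-variable warnings, since the
		 * coalesce test below checks nseg > 0 before looking at
		 * lastaddr.
		 */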
209 1.58 matt lastaddr = 0xdead;
210 1.95 skrll
211 1.41 thorpej again:
212 1.41 thorpej sgsize = size;
213 1.41 thorpej
214 1.41 thorpej /* Make sure we're in an allowed DMA range. */
215 1.41 thorpej if (t->_ranges != NULL) {
216 1.41 thorpej /* XXX cache last result? */
217 1.41 thorpej const struct arm32_dma_range * const dr =
218 1.59 matt _bus_dma_paddr_inrange(t->_ranges, t->_nranges, paddr);
219 1.135 mrg if (dr == NULL) {
220 1.135 mrg STAT_INCR(inrange_fail);
221 1.100 skrll return EINVAL;
222 1.135 mrg }
223 1.61 matt
224 1.61 matt /*
225 1.61 matt * If this region is coherent, mark the segment as coherent.
226 1.61 matt */
227 1.61 matt _ds_flags |= dr->dr_flags & _BUS_DMAMAP_COHERENT;
228 1.72 skrll
229 1.41 thorpej /*
230 1.41 thorpej * In a valid DMA range. Translate the physical
231 1.41 thorpej * memory address to an address in the DMA window.
232 1.41 thorpej */
233 1.41 thorpej curaddr = (paddr - dr->dr_sysbase) + dr->dr_busbase;
234 1.72 skrll #if 0
235 1.72 skrll printf("%p: %#lx: range %#lx/%#lx/%#lx/%#x: %#x <-- %#lx\n",
236 1.72 skrll t, paddr, dr->dr_sysbase, dr->dr_busbase,
237 1.72 skrll dr->dr_len, dr->dr_flags, _ds_flags, curaddr);
238 1.72 skrll #endif
239 1.41 thorpej } else
240 1.41 thorpej curaddr = paddr;
241 1.41 thorpej
242 1.41 thorpej /*
243 1.41 thorpej * Make sure we don't cross any boundaries.
244 1.41 thorpej */
245 1.41 thorpej if (map->_dm_boundary > 0) {
246 1.41 thorpej bus_addr_t baddr; /* next boundary address */
247 1.41 thorpej
248 1.41 thorpej baddr = (curaddr + map->_dm_boundary) & bmask;
249 1.41 thorpej if (sgsize > (baddr - curaddr))
250 1.41 thorpej sgsize = (baddr - curaddr);
251 1.41 thorpej }
252 1.41 thorpej
253 1.41 thorpej /*
254 1.41 thorpej * Insert chunk into a segment, coalescing with the
255 1.41 thorpej * previous segment if possible.
256 1.41 thorpej */
257 1.41 thorpej if (nseg > 0 && curaddr == lastaddr &&
258 1.101 skrll segs[nseg - 1].ds_len + sgsize <= map->dm_maxsegsz &&
259 1.101 skrll ((segs[nseg - 1]._ds_flags ^ _ds_flags) & _BUS_DMAMAP_COHERENT) == 0 &&
260 1.41 thorpej (map->_dm_boundary == 0 ||
261 1.101 skrll (segs[nseg - 1].ds_addr & bmask) == (curaddr & bmask))) {
262 1.41 thorpej /* coalesce */
263 1.101 skrll segs[nseg - 1].ds_len += sgsize;
264 1.41 thorpej } else if (nseg >= map->_dm_segcnt) {
265 1.100 skrll return EFBIG;
266 1.41 thorpej } else {
267 1.41 thorpej /* new segment */
268 1.41 thorpej segs[nseg].ds_addr = curaddr;
269 1.41 thorpej segs[nseg].ds_len = sgsize;
270 1.133 jmcneill segs[nseg]._ds_paddr = curaddr;
271 1.61 matt segs[nseg]._ds_flags = _ds_flags;
272 1.41 thorpej nseg++;
273 1.41 thorpej }
274 1.41 thorpej
275 1.41 thorpej lastaddr = curaddr + sgsize;
276 1.41 thorpej
277 1.41 thorpej paddr += sgsize;
278 1.41 thorpej size -= sgsize;
279 1.41 thorpej if (size > 0)
280 1.41 thorpej goto again;
281 1.61 matt
282 1.61 matt map->_dm_flags &= (_ds_flags & _BUS_DMAMAP_COHERENT);
283 1.41 thorpej map->dm_nsegs = nseg;
284 1.100 skrll return 0;
285 1.41 thorpej }
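/*
 * Worked example of the boundary clipping above (illustrative values):
 * with _dm_boundary = 0x10000, curaddr = 0x1fe00 and size = 0x400,
 * bmask = ~0xffff, so baddr = (0x1fe00 + 0x10000) & ~0xffff = 0x20000
 * and the chunk is clipped to sgsize = 0x20000 - 0x1fe00 = 0x200.  The
 * "goto again" then loads the remaining 0x200 bytes as a new segment
 * starting exactly on the boundary.
 */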
286 1.41 thorpej
287 1.115 skrll static int _bus_dma_uiomove(void *buf, struct uio *uio, size_t n,
288 1.115 skrll int direction);
289 1.115 skrll
290 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
291 1.58 matt static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
292 1.58 matt bus_size_t size, int flags);
293 1.58 matt static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);
294 1.58 matt
295 1.58 matt static int
296 1.58 matt _bus_dma_load_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
297 1.58 matt size_t buflen, int buftype, int flags)
298 1.58 matt {
299 1.58 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
300 1.58 matt struct vmspace * const vm = vmspace_kernel();
301 1.58 matt int error;
302 1.58 matt
303 1.58 matt KASSERT(cookie != NULL);
304 1.58 matt KASSERT(cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE);
305 1.58 matt
306 1.58 matt /*
307 1.58 matt * Allocate bounce pages, if necessary.
308 1.58 matt */
309 1.58 matt if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
310 1.58 matt error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
311 1.58 matt if (error)
312 1.100 skrll return error;
313 1.58 matt }
314 1.58 matt
315 1.58 matt /*
316 1.135 mrg * Since we're trying again, clear the previous attempt.
317 1.135 mrg */
318 1.135 mrg map->dm_mapsize = 0;
319 1.135 mrg map->dm_nsegs = 0;
320 1.135 mrg map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
321 1.135 mrg /* _bus_dmamap_load_buffer() clears this if we're not... */
322 1.135 mrg map->_dm_flags |= _BUS_DMAMAP_COHERENT;
323 1.135 mrg
324 1.135 mrg /*
325 1.58 matt * Cache a pointer to the caller's buffer and load the DMA map
326 1.58 matt * with the bounce buffer.
327 1.58 matt */
328 1.58 matt cookie->id_origbuf = buf;
329 1.58 matt cookie->id_origbuflen = buflen;
330 1.58 matt error = _bus_dmamap_load_buffer(t, map, cookie->id_bouncebuf,
331 1.58 matt buflen, vm, flags);
332 1.58 matt if (error)
333 1.100 skrll return error;
334 1.58 matt
335 1.58 matt STAT_INCR(bounced_loads);
336 1.58 matt map->dm_mapsize = buflen;
337 1.58 matt map->_dm_vmspace = vm;
338 1.58 matt map->_dm_buftype = buftype;
339 1.58 matt
340 1.58 matt /* ...so _bus_dmamap_sync() knows we're bouncing */
341 1.63 matt map->_dm_flags |= _BUS_DMAMAP_IS_BOUNCING;
342 1.58 matt cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
343 1.58 matt return 0;
344 1.58 matt }
345 1.58 matt #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
346 1.58 matt
347 1.41 thorpej /*
348 1.1 chris * Common function for DMA map creation. May be called by bus-specific
349 1.1 chris * DMA map creation functions.
350 1.1 chris */
351 1.1 chris int
352 1.7 thorpej _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
353 1.7 thorpej bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
354 1.1 chris {
355 1.1 chris struct arm32_bus_dmamap *map;
356 1.1 chris void *mapstore;
357 1.120 skrll int error = 0;
358 1.1 chris
359 1.1 chris #ifdef DEBUG_DMA
360 1.130 skrll printf("dmamap_create: t=%p size=%#lx nseg=%#x msegsz=%#lx boundary=%#lx"
361 1.130 skrll " flags=%#x\n", t, size, nsegments, maxsegsz, boundary, flags);
362 1.1 chris #endif /* DEBUG_DMA */
363 1.1 chris
364 1.1 chris /*
365 1.1 chris * Allocate and initialize the DMA map. The end of the map
366 1.1 chris * is a variable-sized array of segments, so we allocate enough
367 1.1 chris * room for them in one shot.
368 1.1 chris *
369 1.1 chris * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
370 1.1 chris * of ALLOCNOW notifies others that we've reserved these resources,
371 1.1 chris * and they are not to be freed.
372 1.1 chris *
373 1.1 chris * The bus_dmamap_t includes one bus_dma_segment_t, hence
374 1.1 chris * the (nsegments - 1).
375 1.1 chris */
376 1.81 matt const size_t mapsize = sizeof(struct arm32_bus_dmamap) +
377 1.1 chris (sizeof(bus_dma_segment_t) * (nsegments - 1));
378 1.81 matt const int zallocflags = (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
379 1.81 matt if ((mapstore = kmem_intr_zalloc(mapsize, zallocflags)) == NULL)
380 1.100 skrll return ENOMEM;
381 1.1 chris
382 1.1 chris map = (struct arm32_bus_dmamap *)mapstore;
383 1.1 chris map->_dm_size = size;
384 1.1 chris map->_dm_segcnt = nsegments;
385 1.43 matt map->_dm_maxmaxsegsz = maxsegsz;
386 1.1 chris map->_dm_boundary = boundary;
387 1.1 chris map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
388 1.14 thorpej map->_dm_origbuf = NULL;
389 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
390 1.48 yamt map->_dm_vmspace = vmspace_kernel();
391 1.58 matt map->_dm_cookie = NULL;
392 1.43 matt map->dm_maxsegsz = maxsegsz;
393 1.1 chris map->dm_mapsize = 0; /* no valid mappings */
394 1.1 chris map->dm_nsegs = 0;
395 1.1 chris
396 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
397 1.58 matt struct arm32_bus_dma_cookie *cookie;
398 1.58 matt int cookieflags;
399 1.58 matt void *cookiestore;
400 1.58 matt
401 1.58 matt cookieflags = 0;
402 1.58 matt
403 1.58 matt if (t->_may_bounce != NULL) {
404 1.58 matt error = (*t->_may_bounce)(t, map, flags, &cookieflags);
405 1.58 matt if (error != 0)
406 1.58 matt goto out;
407 1.58 matt }
408 1.58 matt
409 1.127 jmcneill if (t->_ranges != NULL) {
410 1.127 jmcneill /*
411 1.127 jmcneill * If ranges are defined, we may have to bounce. The only
412 1.127 jmcneill * exception is if there is exactly one range that covers
413 1.127 jmcneill * all of physical memory.
414 1.127 jmcneill */
415 1.127 jmcneill switch (t->_nranges) {
416 1.127 jmcneill case 1:
417 1.127 jmcneill if (t->_ranges[0].dr_sysbase == 0 &&
418 1.127 jmcneill t->_ranges[0].dr_len == UINTPTR_MAX) {
419 1.127 jmcneill break;
420 1.127 jmcneill }
421 1.127 jmcneill /* FALLTHROUGH */
422 1.127 jmcneill default:
423 1.127 jmcneill cookieflags |= _BUS_DMA_MIGHT_NEED_BOUNCE;
424 1.127 jmcneill }
425 1.127 jmcneill }
426 1.58 matt
427 1.58 matt if ((cookieflags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0) {
428 1.58 matt STAT_INCR(creates);
429 1.98 msaitoh *dmamp = map;
430 1.58 matt return 0;
431 1.58 matt }
432 1.58 matt
433 1.81 matt const size_t cookiesize = sizeof(struct arm32_bus_dma_cookie) +
434 1.58 matt (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
435 1.58 matt
436 1.58 matt /*
437 1.58 matt * Allocate our cookie.
438 1.58 matt */
439 1.81 matt if ((cookiestore = kmem_intr_zalloc(cookiesize, zallocflags)) == NULL) {
440 1.58 matt error = ENOMEM;
441 1.58 matt goto out;
442 1.58 matt }
443 1.58 matt cookie = (struct arm32_bus_dma_cookie *)cookiestore;
444 1.58 matt cookie->id_flags = cookieflags;
445 1.58 matt map->_dm_cookie = cookie;
446 1.58 matt STAT_INCR(bounced_creates);
447 1.58 matt
448 1.58 matt error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
449 1.58 matt out:
450 1.58 matt if (error)
451 1.58 matt _bus_dmamap_destroy(t, map);
452 1.98 msaitoh else
453 1.98 msaitoh *dmamp = map;
454 1.58 matt #else
455 1.98 msaitoh *dmamp = map;
456 1.58 matt STAT_INCR(creates);
457 1.58 matt #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
458 1.1 chris #ifdef DEBUG_DMA
459 1.1 chris printf("dmamap_create:map=%p\n", map);
460 1.1 chris #endif /* DEBUG_DMA */
461 1.119 maya return error;
462 1.1 chris }
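/*
 * Illustrative sketch, not part of this file: how a driver typically
 * reaches this function through the MI bus_dmamap_create(9) entry
 * point.  The softc and its members are hypothetical.
 */
#if 0
static int
example_attach_dma(struct example_softc *sc)
{
	/* up to 64KB per transfer, at most 16 segments, no boundary */
	return bus_dmamap_create(sc->sc_dmat, 65536, 16, 65536, 0,
	    BUS_DMA_WAITOK, &sc->sc_dmamap);
}
#endif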
463 1.1 chris
464 1.1 chris /*
465 1.1 chris * Common function for DMA map destruction. May be called by bus-specific
466 1.1 chris * DMA map destruction functions.
467 1.1 chris */
468 1.1 chris void
469 1.7 thorpej _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
470 1.1 chris {
471 1.1 chris
472 1.1 chris #ifdef DEBUG_DMA
473 1.1 chris printf("dmamap_destroy: t=%p map=%p\n", t, map);
474 1.1 chris #endif /* DEBUG_DMA */
475 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
476 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
477 1.13 briggs
478 1.13 briggs /*
479 1.58 matt * Free any bounce pages this map might hold.
480 1.13 briggs */
481 1.58 matt if (cookie != NULL) {
482 1.81 matt const size_t cookiesize = sizeof(struct arm32_bus_dma_cookie) +
483 1.81 matt (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
484 1.81 matt
485 1.58 matt if (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
486 1.58 matt STAT_INCR(bounced_unloads);
487 1.58 matt map->dm_nsegs = 0;
488 1.58 matt if (cookie->id_flags & _BUS_DMA_HAS_BOUNCE)
489 1.58 matt _bus_dma_free_bouncebuf(t, map);
490 1.58 matt STAT_INCR(bounced_destroys);
491 1.81 matt kmem_intr_free(cookie, cookiesize);
492 1.58 matt } else
493 1.58 matt #endif
494 1.58 matt STAT_INCR(destroys);
495 1.58 matt
496 1.58 matt if (map->dm_nsegs > 0)
497 1.58 matt STAT_INCR(unloads);
498 1.13 briggs
499 1.81 matt const size_t mapsize = sizeof(struct arm32_bus_dmamap) +
500 1.81 matt (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
501 1.81 matt kmem_intr_free(map, mapsize);
502 1.1 chris }
503 1.1 chris
504 1.1 chris /*
505 1.1 chris * Common function for loading a DMA map with a linear buffer. May
506 1.1 chris * be called by bus-specific DMA map load functions.
507 1.1 chris */
508 1.1 chris int
509 1.7 thorpej _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
510 1.7 thorpej bus_size_t buflen, struct proc *p, int flags)
511 1.1 chris {
512 1.58 matt struct vmspace *vm;
513 1.41 thorpej int error;
514 1.1 chris
515 1.1 chris #ifdef DEBUG_DMA
516 1.130 skrll printf("dmamap_load: t=%p map=%p buf=%p len=%#lx p=%p f=%#x\n",
517 1.1 chris t, map, buf, buflen, p, flags);
518 1.1 chris #endif /* DEBUG_DMA */
519 1.1 chris
520 1.58 matt if (map->dm_nsegs > 0) {
521 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
522 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
523 1.58 matt if (cookie != NULL) {
524 1.58 matt if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
525 1.58 matt STAT_INCR(bounced_unloads);
526 1.58 matt cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
527 1.63 matt map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
528 1.58 matt }
529 1.58 matt } else
530 1.58 matt #endif
531 1.58 matt STAT_INCR(unloads);
532 1.58 matt }
533 1.58 matt
534 1.1 chris /*
535 1.1 chris * Make sure that on error condition we return "no valid mappings".
536 1.1 chris */
537 1.1 chris map->dm_mapsize = 0;
538 1.1 chris map->dm_nsegs = 0;
539 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
540 1.74 matt KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
541 1.74 matt "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
542 1.74 matt map->dm_maxsegsz, map->_dm_maxmaxsegsz);
543 1.1 chris
544 1.1 chris if (buflen > map->_dm_size)
545 1.100 skrll return EINVAL;
546 1.1 chris
547 1.48 yamt if (p != NULL) {
548 1.48 yamt vm = p->p_vmspace;
549 1.48 yamt } else {
550 1.48 yamt vm = vmspace_kernel();
551 1.48 yamt }
552 1.48 yamt
553 1.17 thorpej /* _bus_dmamap_load_buffer() clears this if we're not... */
554 1.58 matt map->_dm_flags |= _BUS_DMAMAP_COHERENT;
555 1.17 thorpej
556 1.48 yamt error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
557 1.1 chris if (error == 0) {
558 1.1 chris map->dm_mapsize = buflen;
559 1.58 matt map->_dm_vmspace = vm;
560 1.14 thorpej map->_dm_origbuf = buf;
561 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_LINEAR;
562 1.81 matt if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
563 1.81 matt STAT_INCR(coherent_loads);
564 1.81 matt } else {
565 1.81 matt STAT_INCR(loads);
566 1.81 matt }
567 1.58 matt return 0;
568 1.1 chris }
569 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
570 1.58 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
571 1.58 matt if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
572 1.58 matt error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
573 1.58 matt _BUS_DMA_BUFTYPE_LINEAR, flags);
574 1.95 skrll }
575 1.95 skrll #endif
576 1.100 skrll return error;
577 1.1 chris }
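/*
 * Illustrative sketch (hypothetical names): loading a kernel linear
 * buffer for a memory-to-device transfer.  BUS_DMA_WRITE tells the
 * implementation the device will only read the buffer.
 */
#if 0
static int
example_start_write(struct example_softc *sc, void *buf, bus_size_t len)
{
	int error;

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
	    NULL /* kernel buffer */, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error)
		return error;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
	    BUS_DMASYNC_PREWRITE);
	/* ... kick off the device transfer ... */
	return 0;
}
#endif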
578 1.1 chris
579 1.1 chris /*
580 1.1 chris * Like _bus_dmamap_load(), but for mbufs.
581 1.1 chris */
582 1.1 chris int
583 1.7 thorpej _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
584 1.7 thorpej int flags)
585 1.1 chris {
586 1.105 skrll struct mbuf *m;
587 1.41 thorpej int error;
588 1.1 chris
589 1.1 chris #ifdef DEBUG_DMA
590 1.130 skrll printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%#x\n",
591 1.1 chris t, map, m0, flags);
592 1.1 chris #endif /* DEBUG_DMA */
593 1.1 chris
594 1.58 matt if (map->dm_nsegs > 0) {
595 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
596 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
597 1.58 matt if (cookie != NULL) {
598 1.58 matt if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
599 1.58 matt STAT_INCR(bounced_unloads);
600 1.58 matt cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
601 1.63 matt map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
602 1.58 matt }
603 1.58 matt } else
604 1.58 matt #endif
605 1.58 matt STAT_INCR(unloads);
606 1.58 matt }
607 1.58 matt
608 1.1 chris /*
609 1.1 chris * Make sure that on error condition we return "no valid mappings."
610 1.1 chris */
611 1.1 chris map->dm_mapsize = 0;
612 1.1 chris map->dm_nsegs = 0;
613 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
614 1.74 matt KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
615 1.74 matt "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
616 1.74 matt map->dm_maxsegsz, map->_dm_maxmaxsegsz);
617 1.1 chris
618 1.79 matt KASSERT(m0->m_flags & M_PKTHDR);
619 1.1 chris
620 1.1 chris if (m0->m_pkthdr.len > map->_dm_size)
621 1.100 skrll return EINVAL;
622 1.1 chris
623 1.61 matt /* _bus_dmamap_load_paddr() clears this if we're not... */
624 1.61 matt map->_dm_flags |= _BUS_DMAMAP_COHERENT;
625 1.17 thorpej
626 1.1 chris error = 0;
627 1.1 chris for (m = m0; m != NULL && error == 0; m = m->m_next) {
628 1.41 thorpej int offset;
629 1.41 thorpej int remainbytes;
630 1.41 thorpej const struct vm_page * const *pgs;
631 1.41 thorpej paddr_t paddr;
632 1.41 thorpej int size;
633 1.41 thorpej
634 1.28 thorpej if (m->m_len == 0)
635 1.28 thorpej continue;
636 1.57 matt /*
    637 1.57  matt 	 * Don't allow device-to-memory (BUS_DMA_READ)
    637 1.57  matt 	 * transfers into read-only mbufs.
638 1.57 matt */
639 1.57 matt if (M_ROMAP(m) && (flags & BUS_DMA_READ)) {
640 1.57 matt error = EFAULT;
641 1.57 matt break;
642 1.57 matt }
643 1.108 maxv switch (m->m_flags & (M_EXT|M_EXT_CLUSTER|M_EXT_PAGES)) {
644 1.108 maxv case M_EXT|M_EXT_CLUSTER:
645 1.28 thorpej /* XXX KDASSERT */
646 1.28 thorpej KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
647 1.41 thorpej paddr = m->m_ext.ext_paddr +
648 1.28 thorpej (m->m_data - m->m_ext.ext_buf);
649 1.41 thorpej size = m->m_len;
650 1.61 matt error = _bus_dmamap_load_paddr(t, map, paddr, size,
651 1.61 matt false);
652 1.41 thorpej break;
653 1.95 skrll
654 1.41 thorpej case M_EXT|M_EXT_PAGES:
655 1.41 thorpej KASSERT(m->m_ext.ext_buf <= m->m_data);
656 1.41 thorpej KASSERT(m->m_data <=
657 1.41 thorpej m->m_ext.ext_buf + m->m_ext.ext_size);
658 1.95 skrll
659 1.41 thorpej offset = (vaddr_t)m->m_data -
660 1.41 thorpej trunc_page((vaddr_t)m->m_ext.ext_buf);
661 1.41 thorpej remainbytes = m->m_len;
662 1.41 thorpej
663 1.41 thorpej /* skip uninteresting pages */
664 1.41 thorpej pgs = (const struct vm_page * const *)
665 1.41 thorpej m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);
666 1.95 skrll
667 1.41 thorpej offset &= PAGE_MASK; /* offset in the first page */
668 1.41 thorpej
669 1.41 thorpej /* load each page */
670 1.41 thorpej while (remainbytes > 0) {
671 1.41 thorpej const struct vm_page *pg;
672 1.41 thorpej
673 1.41 thorpej size = MIN(remainbytes, PAGE_SIZE - offset);
674 1.41 thorpej
675 1.41 thorpej pg = *pgs++;
676 1.41 thorpej KASSERT(pg);
677 1.41 thorpej paddr = VM_PAGE_TO_PHYS(pg) + offset;
678 1.41 thorpej
679 1.41 thorpej error = _bus_dmamap_load_paddr(t, map,
680 1.61 matt paddr, size, false);
681 1.41 thorpej if (error)
682 1.28 thorpej break;
683 1.41 thorpej offset = 0;
684 1.41 thorpej remainbytes -= size;
685 1.28 thorpej }
686 1.28 thorpej break;
687 1.28 thorpej
688 1.28 thorpej case 0:
689 1.41 thorpej paddr = m->m_paddr + M_BUFOFFSET(m) +
690 1.28 thorpej (m->m_data - M_BUFADDR(m));
691 1.41 thorpej size = m->m_len;
692 1.61 matt error = _bus_dmamap_load_paddr(t, map, paddr, size,
693 1.61 matt false);
694 1.41 thorpej break;
695 1.28 thorpej
696 1.28 thorpej default:
697 1.28 thorpej error = _bus_dmamap_load_buffer(t, map, m->m_data,
698 1.48 yamt m->m_len, vmspace_kernel(), flags);
699 1.28 thorpej }
700 1.1 chris }
701 1.1 chris if (error == 0) {
702 1.1 chris map->dm_mapsize = m0->m_pkthdr.len;
703 1.14 thorpej map->_dm_origbuf = m0;
704 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_MBUF;
705 1.48 yamt map->_dm_vmspace = vmspace_kernel(); /* always kernel */
706 1.81 matt if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
707 1.81 matt STAT_INCR(coherent_loads);
708 1.81 matt } else {
709 1.81 matt STAT_INCR(loads);
710 1.81 matt }
711 1.58 matt return 0;
712 1.1 chris }
713 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
714 1.58 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
715 1.58 matt if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
716 1.58 matt error = _bus_dma_load_bouncebuf(t, map, m0, m0->m_pkthdr.len,
717 1.58 matt _BUS_DMA_BUFTYPE_MBUF, flags);
718 1.135 mrg STAT_INCR(bounced_mbuf_loads);
719 1.95 skrll }
720 1.95 skrll #endif
721 1.100 skrll return error;
722 1.1 chris }
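/*
 * Illustrative sketch of the transmit-side pattern this function
 * serves in network drivers (hypothetical names).  BUS_DMA_WRITE is
 * what permits read-only (M_ROMAP) mbufs, which the loop above
 * rejects for reads with EFAULT.
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->sc_dmat, txs->txs_dmamap, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* too many segments: compact the chain and retry */
	}
#endif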
723 1.1 chris
724 1.1 chris /*
725 1.1 chris * Like _bus_dmamap_load(), but for uios.
726 1.1 chris */
727 1.1 chris int
728 1.7 thorpej _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
729 1.7 thorpej int flags)
730 1.1 chris {
731 1.1 chris bus_size_t minlen, resid;
732 1.1 chris struct iovec *iov;
733 1.50 christos void *addr;
734 1.105 skrll int i, error;
735 1.1 chris
736 1.1 chris /*
737 1.1 chris * Make sure that on error condition we return "no valid mappings."
738 1.1 chris */
739 1.1 chris map->dm_mapsize = 0;
740 1.1 chris map->dm_nsegs = 0;
741 1.74 matt KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
742 1.74 matt "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
743 1.74 matt map->dm_maxsegsz, map->_dm_maxmaxsegsz);
744 1.1 chris
745 1.1 chris resid = uio->uio_resid;
746 1.1 chris iov = uio->uio_iov;
747 1.1 chris
748 1.17 thorpej /* _bus_dmamap_load_buffer() clears this if we're not... */
749 1.58 matt map->_dm_flags |= _BUS_DMAMAP_COHERENT;
750 1.17 thorpej
751 1.1 chris error = 0;
752 1.1 chris for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
753 1.1 chris /*
754 1.1 chris * Now at the first iovec to load. Load each iovec
755 1.1 chris * until we have exhausted the residual count.
756 1.1 chris */
757 1.1 chris minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
758 1.50 christos addr = (void *)iov[i].iov_base;
759 1.1 chris
760 1.1 chris error = _bus_dmamap_load_buffer(t, map, addr, minlen,
761 1.48 yamt uio->uio_vmspace, flags);
762 1.1 chris
763 1.1 chris resid -= minlen;
764 1.1 chris }
765 1.1 chris if (error == 0) {
766 1.1 chris map->dm_mapsize = uio->uio_resid;
767 1.14 thorpej map->_dm_origbuf = uio;
768 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_UIO;
769 1.48 yamt map->_dm_vmspace = uio->uio_vmspace;
770 1.81 matt if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
771 1.81 matt STAT_INCR(coherent_loads);
772 1.81 matt } else {
773 1.81 matt STAT_INCR(loads);
774 1.81 matt }
775 1.1 chris }
776 1.100 skrll return error;
777 1.1 chris }
778 1.1 chris
779 1.1 chris /*
780 1.1 chris * Like _bus_dmamap_load(), but for raw memory allocated with
781 1.1 chris * bus_dmamem_alloc().
782 1.1 chris */
783 1.1 chris int
784 1.7 thorpej _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
785 1.94 jmcneill bus_dma_segment_t *segs, int nsegs, bus_size_t size0, int flags)
786 1.1 chris {
787 1.1 chris
788 1.94 jmcneill bus_size_t size;
789 1.94 jmcneill int i, error = 0;
790 1.94 jmcneill
791 1.94 jmcneill /*
792 1.94 jmcneill * Make sure that on error conditions we return "no valid mappings."
793 1.94 jmcneill */
794 1.94 jmcneill map->dm_mapsize = 0;
795 1.94 jmcneill map->dm_nsegs = 0;
796 1.94 jmcneill KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
797 1.94 jmcneill
798 1.94 jmcneill if (size0 > map->_dm_size)
799 1.94 jmcneill return EINVAL;
800 1.94 jmcneill
801 1.94 jmcneill for (i = 0, size = size0; i < nsegs && size > 0; i++) {
802 1.94 jmcneill bus_dma_segment_t *ds = &segs[i];
803 1.94 jmcneill bus_size_t sgsize;
804 1.94 jmcneill
805 1.94 jmcneill sgsize = MIN(ds->ds_len, size);
806 1.94 jmcneill if (sgsize == 0)
807 1.94 jmcneill continue;
808 1.116 jmcneill const bool coherent =
809 1.116 jmcneill (ds->_ds_flags & _BUS_DMAMAP_COHERENT) != 0;
810 1.94 jmcneill error = _bus_dmamap_load_paddr(t, map, ds->ds_addr,
811 1.116 jmcneill sgsize, coherent);
812 1.94 jmcneill if (error != 0)
813 1.94 jmcneill break;
814 1.94 jmcneill size -= sgsize;
815 1.94 jmcneill }
816 1.94 jmcneill
817 1.94 jmcneill if (error != 0) {
818 1.94 jmcneill map->dm_mapsize = 0;
819 1.94 jmcneill map->dm_nsegs = 0;
820 1.94 jmcneill return error;
821 1.94 jmcneill }
822 1.94 jmcneill
823 1.94 jmcneill /* XXX TBD bounce */
824 1.94 jmcneill
825 1.94 jmcneill map->dm_mapsize = size0;
826 1.116 jmcneill map->_dm_origbuf = NULL;
827 1.116 jmcneill map->_dm_buftype = _BUS_DMA_BUFTYPE_RAW;
828 1.116 jmcneill map->_dm_vmspace = NULL;
829 1.94 jmcneill return 0;
830 1.1 chris }
831 1.1 chris
832 1.1 chris /*
833 1.1 chris * Common function for unloading a DMA map. May be called by
834 1.1 chris * bus-specific DMA map unload functions.
835 1.1 chris */
836 1.1 chris void
837 1.7 thorpej _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
838 1.1 chris {
839 1.1 chris
840 1.1 chris #ifdef DEBUG_DMA
841 1.1 chris printf("dmamap_unload: t=%p map=%p\n", t, map);
842 1.1 chris #endif /* DEBUG_DMA */
843 1.1 chris
844 1.1 chris /*
845 1.1 chris * No resources to free; just mark the mappings as
846 1.1 chris * invalid.
847 1.1 chris */
848 1.1 chris map->dm_mapsize = 0;
849 1.1 chris map->dm_nsegs = 0;
850 1.14 thorpej map->_dm_origbuf = NULL;
851 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
852 1.48 yamt map->_dm_vmspace = NULL;
853 1.1 chris }
854 1.1 chris
855 1.57 matt static void
856 1.103 skrll _bus_dmamap_sync_segment(vaddr_t va, paddr_t pa, vsize_t len, int ops,
857 1.103 skrll bool readonly_p)
858 1.14 thorpej {
859 1.106 skrll
860 1.115 skrll #if defined(ARM_MMU_EXTENDED)
861 1.106 skrll /*
862 1.106 skrll * No optimisations are available for readonly mbufs on armv6+, so
863 1.106 skrll * assume it's not readonly from here on.
864 1.106 skrll *
865 1.106 skrll * See the comment in _bus_dmamap_sync_mbuf
866 1.106 skrll */
867 1.106 skrll readonly_p = false;
868 1.106 skrll #endif
869 1.106 skrll
870 1.86 matt KASSERTMSG((va & PAGE_MASK) == (pa & PAGE_MASK),
871 1.86 matt "va %#lx pa %#lx", va, pa);
872 1.62 matt #if 0
873 1.62 matt printf("sync_segment: va=%#lx pa=%#lx len=%#lx ops=%#x ro=%d\n",
874 1.62 matt va, pa, len, ops, readonly_p);
875 1.62 matt #endif
876 1.14 thorpej
877 1.14 thorpej switch (ops) {
878 1.14 thorpej case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
879 1.57 matt if (!readonly_p) {
880 1.76 matt STAT_INCR(sync_prereadwrite);
881 1.57 matt cpu_dcache_wbinv_range(va, len);
882 1.57 matt cpu_sdcache_wbinv_range(va, pa, len);
883 1.57 matt break;
884 1.57 matt }
885 1.57 matt /* FALLTHROUGH */
886 1.14 thorpej
887 1.57 matt case BUS_DMASYNC_PREREAD: {
888 1.59 matt const size_t line_size = arm_dcache_align;
889 1.59 matt const size_t line_mask = arm_dcache_align_mask;
890 1.59 matt vsize_t misalignment = va & line_mask;
891 1.57 matt if (misalignment) {
892 1.59 matt va -= misalignment;
893 1.59 matt pa -= misalignment;
894 1.59 matt len += misalignment;
895 1.77 matt STAT_INCR(sync_preread_begin);
896 1.59 matt cpu_dcache_wbinv_range(va, line_size);
897 1.59 matt cpu_sdcache_wbinv_range(va, pa, line_size);
898 1.59 matt if (len <= line_size)
899 1.57 matt break;
900 1.59 matt va += line_size;
901 1.59 matt pa += line_size;
902 1.59 matt len -= line_size;
903 1.57 matt }
904 1.59 matt misalignment = len & line_mask;
905 1.57 matt len -= misalignment;
906 1.65 matt if (len > 0) {
907 1.77 matt STAT_INCR(sync_preread);
908 1.65 matt cpu_dcache_inv_range(va, len);
909 1.65 matt cpu_sdcache_inv_range(va, pa, len);
910 1.65 matt }
911 1.57 matt if (misalignment) {
912 1.57 matt va += len;
913 1.57 matt pa += len;
914 1.77 matt STAT_INCR(sync_preread_tail);
915 1.59 matt cpu_dcache_wbinv_range(va, line_size);
916 1.59 matt cpu_sdcache_wbinv_range(va, pa, line_size);
917 1.57 matt }
918 1.14 thorpej break;
919 1.57 matt }
920 1.14 thorpej
921 1.14 thorpej case BUS_DMASYNC_PREWRITE:
922 1.76 matt STAT_INCR(sync_prewrite);
923 1.57 matt cpu_dcache_wb_range(va, len);
924 1.57 matt cpu_sdcache_wb_range(va, pa, len);
925 1.14 thorpej break;
926 1.67 matt
927 1.115 skrll #if defined(CPU_CORTEX) || defined(CPU_ARMV8)
928 1.115 skrll
929 1.67 matt /*
930 1.67 matt * Cortex CPUs can do speculative loads so we need to clean the cache
931 1.67 matt * after a DMA read to deal with any speculatively loaded cache lines.
932 1.67 matt * Since these can't be dirty, we can just invalidate them and don't
933 1.67 matt * have to worry about having to write back their contents.
934 1.67 matt */
935 1.67 matt case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
936 1.76 matt STAT_INCR(sync_postreadwrite);
937 1.76 matt cpu_dcache_inv_range(va, len);
938 1.76 matt cpu_sdcache_inv_range(va, pa, len);
939 1.76 matt break;
940 1.126 skrll
941 1.67 matt case BUS_DMASYNC_POSTREAD:
942 1.76 matt STAT_INCR(sync_postread);
943 1.67 matt cpu_dcache_inv_range(va, len);
944 1.67 matt cpu_sdcache_inv_range(va, pa, len);
945 1.67 matt break;
946 1.67 matt #endif
947 1.14 thorpej }
948 1.14 thorpej }
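/*
 * Worked example of the PREREAD alignment handling above (illustrative
 * values): with 32-byte cache lines, va = 0x1004 and len = 0x48, the
 * leading partial line (0x1000-0x101f) is written back and invalidated
 * so neighbouring data sharing the line is not lost, the aligned
 * middle (0x1020-0x103f) is invalidated only, and the trailing partial
 * line (0x1040-0x105f) is again written back and invalidated.
 */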
949 1.14 thorpej
950 1.47 perry static inline void
951 1.57 matt _bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
952 1.14 thorpej bus_size_t len, int ops)
953 1.14 thorpej {
954 1.57 matt bus_dma_segment_t *ds = map->dm_segs;
955 1.57 matt vaddr_t va = (vaddr_t) map->_dm_origbuf;
956 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
957 1.63 matt if (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING) {
958 1.63 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
959 1.58 matt va = (vaddr_t) cookie->id_bouncebuf;
960 1.58 matt }
961 1.58 matt #endif
962 1.57 matt
963 1.57 matt while (len > 0) {
964 1.57 matt while (offset >= ds->ds_len) {
965 1.57 matt offset -= ds->ds_len;
966 1.57 matt va += ds->ds_len;
967 1.57 matt ds++;
968 1.57 matt }
969 1.57 matt
970 1.59 matt paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + offset);
971 1.112 riastrad size_t seglen = uimin(len, ds->ds_len - offset);
972 1.57 matt
973 1.61 matt if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
974 1.61 matt _bus_dmamap_sync_segment(va + offset, pa, seglen, ops,
975 1.67 matt false);
976 1.57 matt
977 1.57 matt offset += seglen;
978 1.57 matt len -= seglen;
979 1.57 matt }
980 1.57 matt }
981 1.57 matt
982 1.57 matt static inline void
983 1.57 matt _bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t offset,
984 1.57 matt bus_size_t len, int ops)
985 1.57 matt {
986 1.57 matt bus_dma_segment_t *ds = map->dm_segs;
987 1.57 matt struct mbuf *m = map->_dm_origbuf;
988 1.57 matt bus_size_t voff = offset;
989 1.57 matt bus_size_t ds_off = offset;
990 1.57 matt
991 1.57 matt while (len > 0) {
992 1.57 matt /* Find the current dma segment */
993 1.57 matt while (ds_off >= ds->ds_len) {
994 1.57 matt ds_off -= ds->ds_len;
995 1.57 matt ds++;
996 1.57 matt }
997 1.57 matt /* Find the current mbuf. */
998 1.57 matt while (voff >= m->m_len) {
999 1.57 matt voff -= m->m_len;
1000 1.57 matt m = m->m_next;
1001 1.14 thorpej }
1002 1.14 thorpej
1003 1.14 thorpej /*
1004 1.14 thorpej * Now at the first mbuf to sync; nail each one until
1005 1.14 thorpej * we have exhausted the length.
1006 1.14 thorpej */
1007 1.112 riastrad vsize_t seglen = uimin(len, uimin(m->m_len - voff, ds->ds_len - ds_off));
1008 1.57 matt vaddr_t va = mtod(m, vaddr_t) + voff;
1009 1.59 matt paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);
1010 1.14 thorpej
1011 1.28 thorpej /*
1012 1.28 thorpej * We can save a lot of work here if we know the mapping
1013 1.93 matt * is read-only at the MMU and we aren't using the armv6+
1014 1.93 matt * MMU:
1015 1.28 thorpej *
1016 1.28 thorpej * If a mapping is read-only, no dirty cache blocks will
1017 1.28 thorpej * exist for it. If a writable mapping was made read-only,
1018 1.28 thorpej * we know any dirty cache lines for the range will have
1019 1.28 thorpej * been cleaned for us already. Therefore, if the upper
1020 1.28 thorpej * layer can tell us we have a read-only mapping, we can
1021 1.28 thorpej * skip all cache cleaning.
1022 1.28 thorpej *
1023 1.28 thorpej * NOTE: This only works if we know the pmap cleans pages
1024 1.28 thorpej * before making a read-write -> read-only transition. If
1025 1.28 thorpej * this ever becomes non-true (e.g. Physically Indexed
1026 1.28 thorpej * cache), this will have to be revisited.
1027 1.28 thorpej */
1028 1.14 thorpej
1029 1.92 matt if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0) {
1030 1.92 matt /*
1031 1.92 matt * If we are doing preread (DMAing into the mbuf),
1032 1.95 skrll * this mbuf better not be readonly,
1033 1.92 matt */
1034 1.92 matt KASSERT(!(ops & BUS_DMASYNC_PREREAD) || !M_ROMAP(m));
1035 1.61 matt _bus_dmamap_sync_segment(va, pa, seglen, ops,
1036 1.61 matt M_ROMAP(m));
1037 1.92 matt }
1038 1.57 matt voff += seglen;
1039 1.57 matt ds_off += seglen;
1040 1.57 matt len -= seglen;
1041 1.14 thorpej }
1042 1.14 thorpej }
1043 1.14 thorpej
1044 1.47 perry static inline void
1045 1.14 thorpej _bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
1046 1.14 thorpej bus_size_t len, int ops)
1047 1.14 thorpej {
1048 1.57 matt bus_dma_segment_t *ds = map->dm_segs;
1049 1.14 thorpej struct uio *uio = map->_dm_origbuf;
1050 1.57 matt struct iovec *iov = uio->uio_iov;
1051 1.57 matt bus_size_t voff = offset;
1052 1.57 matt bus_size_t ds_off = offset;
1053 1.57 matt
1054 1.57 matt while (len > 0) {
1055 1.57 matt /* Find the current dma segment */
1056 1.57 matt while (ds_off >= ds->ds_len) {
1057 1.57 matt ds_off -= ds->ds_len;
1058 1.57 matt ds++;
1059 1.57 matt }
1060 1.14 thorpej
1061 1.57 matt /* Find the current iovec. */
1062 1.57 matt while (voff >= iov->iov_len) {
1063 1.57 matt voff -= iov->iov_len;
1064 1.57 matt iov++;
1065 1.14 thorpej }
1066 1.14 thorpej
1067 1.14 thorpej /*
1068 1.14 thorpej * Now at the first iovec to sync; nail each one until
1069 1.14 thorpej * we have exhausted the length.
1070 1.14 thorpej */
1071 1.112 riastrad vsize_t seglen = uimin(len, uimin(iov->iov_len - voff, ds->ds_len - ds_off));
1072 1.57 matt vaddr_t va = (vaddr_t) iov->iov_base + voff;
1073 1.59 matt paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);
1074 1.57 matt
1075 1.61 matt if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
1076 1.61 matt _bus_dmamap_sync_segment(va, pa, seglen, ops, false);
1077 1.57 matt
1078 1.57 matt voff += seglen;
1079 1.57 matt ds_off += seglen;
1080 1.57 matt len -= seglen;
1081 1.14 thorpej }
1082 1.14 thorpej }
1083 1.14 thorpej
1084 1.1 chris /*
1085 1.1 chris * Common function for DMA map synchronization. May be called
1086 1.1 chris * by bus-specific DMA map synchronization functions.
1087 1.8 thorpej *
1088 1.8 thorpej * XXX Should have separate versions for write-through vs.
1089 1.8 thorpej * XXX write-back caches. We currently assume write-back
1090 1.8 thorpej * XXX here, which is not as efficient as it could be for
1091 1.8 thorpej * XXX the write-through case.
1092 1.1 chris */
1093 1.1 chris void
1094 1.7 thorpej _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
1095 1.7 thorpej bus_size_t len, int ops)
1096 1.1 chris {
1097 1.1 chris #ifdef DEBUG_DMA
1098 1.130 skrll printf("dmamap_sync: t=%p map=%p offset=%#lx len=%#lx ops=%#x\n",
1099 1.1 chris t, map, offset, len, ops);
1100 1.1 chris #endif /* DEBUG_DMA */
1101 1.1 chris
1102 1.8 thorpej /*
1103 1.8 thorpej * Mixing of PRE and POST operations is not allowed.
1104 1.8 thorpej */
1105 1.8 thorpej if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
1106 1.8 thorpej (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
1107 1.126 skrll panic("%s: mix PRE and POST", __func__);
1108 1.8 thorpej
1109 1.79 matt KASSERTMSG(offset < map->dm_mapsize,
1110 1.79 matt "offset %lu mapsize %lu",
1111 1.79 matt offset, map->dm_mapsize);
1112 1.79 matt KASSERTMSG(len > 0 && offset + len <= map->dm_mapsize,
1113 1.79 matt "len %lu offset %lu mapsize %lu",
1114 1.79 matt len, offset, map->dm_mapsize);
1115 1.8 thorpej
1116 1.8 thorpej /*
1117 1.8 thorpej * For a virtually-indexed write-back cache, we need
1118 1.8 thorpej * to do the following things:
1119 1.8 thorpej *
1120 1.8 thorpej * PREREAD -- Invalidate the D-cache. We do this
1121 1.8 thorpej * here in case a write-back is required by the back-end.
1122 1.8 thorpej *
1123 1.8 thorpej * PREWRITE -- Write-back the D-cache. Note that if
1124 1.8 thorpej * we are doing a PREREAD|PREWRITE, we can collapse
1125 1.8 thorpej * the whole thing into a single Wb-Inv.
1126 1.8 thorpej *
1127 1.67 matt * POSTREAD -- Re-invalidate the D-cache in case speculative
1128 1.67 matt * memory accesses caused cachelines to become valid with now
1129 1.67 matt * invalid data.
1130 1.8 thorpej *
1131 1.8 thorpej * POSTWRITE -- Nothing.
1132 1.8 thorpej */
1133 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1134 1.74 matt const bool bouncing = (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING);
1135 1.63 matt #else
1136 1.63 matt const bool bouncing = false;
1137 1.58 matt #endif
1138 1.8 thorpej
1139 1.58 matt const int pre_ops = ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1140 1.115 skrll #if defined(CPU_CORTEX) || defined(CPU_ARMV8)
1141 1.67 matt const int post_ops = ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1142 1.67 matt #else
1143 1.67 matt const int post_ops = 0;
1144 1.67 matt #endif
1145 1.115 skrll if (pre_ops == 0 && post_ops == 0)
1146 1.115 skrll return;
1147 1.115 skrll
1148 1.115 skrll if (post_ops == BUS_DMASYNC_POSTWRITE) {
1149 1.115 skrll KASSERT(pre_ops == 0);
1150 1.129 skrll if ((map->_dm_flags & _BUS_DMAMAP_COHERENT)) {
1151 1.129 skrll STAT_INCR(sync_coherent_postwrite);
1152 1.129 skrll } else {
1153 1.129 skrll STAT_INCR(sync_postwrite);
1154 1.129 skrll }
1155 1.115 skrll return;
1156 1.61 matt }
1157 1.115 skrll
1158 1.74 matt KASSERTMSG(bouncing || pre_ops != 0 || (post_ops & BUS_DMASYNC_POSTREAD),
1159 1.74 matt "pre_ops %#x post_ops %#x", pre_ops, post_ops);
1160 1.115 skrll
1161 1.58 matt if (bouncing && (ops & BUS_DMASYNC_PREWRITE)) {
1162 1.63 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
1163 1.58 matt STAT_INCR(write_bounces);
1164 1.58 matt char * const dataptr = (char *)cookie->id_bouncebuf + offset;
1165 1.58 matt /*
1166 1.58 matt * Copy the caller's buffer to the bounce buffer.
1167 1.58 matt */
1168 1.58 matt switch (map->_dm_buftype) {
1169 1.58 matt case _BUS_DMA_BUFTYPE_LINEAR:
1170 1.58 matt memcpy(dataptr, cookie->id_origlinearbuf + offset, len);
1171 1.58 matt break;
1172 1.126 skrll
1173 1.58 matt case _BUS_DMA_BUFTYPE_MBUF:
1174 1.58 matt m_copydata(cookie->id_origmbuf, offset, len, dataptr);
1175 1.58 matt break;
1176 1.126 skrll
1177 1.58 matt case _BUS_DMA_BUFTYPE_UIO:
1178 1.126 skrll _bus_dma_uiomove(dataptr, cookie->id_origuio, len,
1179 1.126 skrll UIO_WRITE);
1180 1.58 matt break;
1181 1.126 skrll
1182 1.58 matt #ifdef DIAGNOSTIC
1183 1.58 matt case _BUS_DMA_BUFTYPE_RAW:
   1184 1.126  skrll 			panic("%s(pre): _BUS_DMA_BUFTYPE_RAW", __func__);
1185 1.58 matt break;
1186 1.58 matt
1187 1.58 matt case _BUS_DMA_BUFTYPE_INVALID:
1188 1.126 skrll panic("%s(pre): _BUS_DMA_BUFTYPE_INVALID", __func__);
1189 1.58 matt break;
1190 1.58 matt
1191 1.58 matt default:
1192 1.126 skrll panic("%s(pre): map %p: unknown buffer type %d\n",
1193 1.126 skrll __func__, map, map->_dm_buftype);
1194 1.58 matt break;
1195 1.58 matt #endif /* DIAGNOSTIC */
1196 1.58 matt }
1197 1.58 matt }
1198 1.58 matt
1199 1.115 skrll /* Skip cache frobbing if mapping was COHERENT */
1200 1.115 skrll if ((map->_dm_flags & _BUS_DMAMAP_COHERENT)) {
1201 1.125 skrll switch (ops) {
1202 1.125 skrll case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
1203 1.129 skrll STAT_INCR(sync_coherent_prereadwrite);
1204 1.125 skrll break;
1205 1.125 skrll
1206 1.125 skrll case BUS_DMASYNC_PREREAD:
1207 1.129 skrll STAT_INCR(sync_coherent_preread);
1208 1.125 skrll break;
1209 1.125 skrll
1210 1.125 skrll case BUS_DMASYNC_PREWRITE:
1211 1.129 skrll STAT_INCR(sync_coherent_prewrite);
1212 1.125 skrll break;
1213 1.125 skrll
1214 1.125 skrll case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
1215 1.129 skrll STAT_INCR(sync_coherent_postreadwrite);
1216 1.125 skrll break;
1217 1.125 skrll
1218 1.125 skrll case BUS_DMASYNC_POSTREAD:
1219 1.129 skrll STAT_INCR(sync_coherent_postread);
1220 1.125 skrll break;
1221 1.125 skrll
   1222 1.125  skrll 		/* BUS_DMASYNC_POSTWRITE was already handled as a fastpath */
1223 1.125 skrll }
1224 1.115 skrll /*
1225 1.115 skrll * Drain the write buffer of DMA operators.
1226 1.115 skrll * 1) when cpu->device (prewrite)
1227 1.115 skrll * 2) when device->cpu (postread)
1228 1.115 skrll */
1229 1.115 skrll if ((pre_ops & BUS_DMASYNC_PREWRITE) || (post_ops & BUS_DMASYNC_POSTREAD))
1230 1.75 matt cpu_drain_writebuf();
1231 1.115 skrll
1232 1.115 skrll /*
1233 1.115 skrll * Only thing left to do for COHERENT mapping is copy from bounce
1234 1.115 skrll * in the POSTREAD case.
1235 1.115 skrll */
1236 1.115 skrll if (bouncing && (post_ops & BUS_DMASYNC_POSTREAD))
1237 1.115 skrll goto bounce_it;
1238 1.115 skrll
1239 1.17 thorpej return;
1240 1.17 thorpej }
1241 1.8 thorpej
1242 1.128 jmcneill #if !defined(ARM_MMU_EXTENDED)
1243 1.8 thorpej /*
1244 1.38 scw * If the mapping belongs to a non-kernel vmspace, and the
1245 1.38 scw * vmspace has not been active since the last time a full
1246 1.38 scw * cache flush was performed, we don't need to do anything.
1247 1.8 thorpej */
1248 1.48 yamt if (__predict_false(!VMSPACE_IS_KERNEL_P(map->_dm_vmspace) &&
1249 1.48 yamt vm_map_pmap(&map->_dm_vmspace->vm_map)->pm_cstate.cs_cache_d == 0))
1250 1.8 thorpej return;
1251 1.80 matt #endif
1252 1.8 thorpej
1253 1.58 matt int buftype = map->_dm_buftype;
1254 1.58 matt if (bouncing) {
1255 1.58 matt buftype = _BUS_DMA_BUFTYPE_LINEAR;
1256 1.58 matt }
1257 1.58 matt
1258 1.58 matt switch (buftype) {
1259 1.58 matt case _BUS_DMA_BUFTYPE_LINEAR:
1260 1.116 jmcneill case _BUS_DMA_BUFTYPE_RAW:
1261 1.14 thorpej _bus_dmamap_sync_linear(t, map, offset, len, ops);
1262 1.14 thorpej break;
1263 1.14 thorpej
1264 1.58 matt case _BUS_DMA_BUFTYPE_MBUF:
1265 1.14 thorpej _bus_dmamap_sync_mbuf(t, map, offset, len, ops);
1266 1.14 thorpej break;
1267 1.14 thorpej
1268 1.58 matt case _BUS_DMA_BUFTYPE_UIO:
1269 1.14 thorpej _bus_dmamap_sync_uio(t, map, offset, len, ops);
1270 1.14 thorpej break;
1271 1.14 thorpej
1272 1.58 matt case _BUS_DMA_BUFTYPE_INVALID:
1273 1.126 skrll panic("%s: _BUS_DMA_BUFTYPE_INVALID", __func__);
1274 1.14 thorpej break;
1275 1.14 thorpej
1276 1.14 thorpej default:
1277 1.126 skrll panic("%s: map %p: unknown buffer type %d\n", __func__, map,
1278 1.126 skrll map->_dm_buftype);
1279 1.8 thorpej }
1280 1.1 chris
1281 1.8 thorpej /* Drain the write buffer. */
1282 1.8 thorpej cpu_drain_writebuf();
1283 1.58 matt
1284 1.76 matt if (!bouncing || (ops & BUS_DMASYNC_POSTREAD) == 0)
1285 1.58 matt return;
1286 1.58 matt
1287 1.115 skrll bounce_it:
1288 1.115 skrll STAT_INCR(read_bounces);
1289 1.115 skrll
1290 1.63 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
1291 1.58 matt char * const dataptr = (char *)cookie->id_bouncebuf + offset;
1292 1.58 matt /*
1293 1.58 matt * Copy the bounce buffer to the caller's buffer.
1294 1.58 matt */
1295 1.58 matt switch (map->_dm_buftype) {
1296 1.58 matt case _BUS_DMA_BUFTYPE_LINEAR:
1297 1.58 matt memcpy(cookie->id_origlinearbuf + offset, dataptr, len);
1298 1.58 matt break;
1299 1.58 matt
1300 1.58 matt case _BUS_DMA_BUFTYPE_MBUF:
1301 1.58 matt m_copyback(cookie->id_origmbuf, offset, len, dataptr);
1302 1.58 matt break;
1303 1.58 matt
1304 1.58 matt case _BUS_DMA_BUFTYPE_UIO:
1305 1.58 matt _bus_dma_uiomove(dataptr, cookie->id_origuio, len, UIO_READ);
1306 1.58 matt break;
1307 1.126 skrll
1308 1.58 matt #ifdef DIAGNOSTIC
1309 1.58 matt case _BUS_DMA_BUFTYPE_RAW:
1310 1.126 skrll panic("%s(post): _BUS_DMA_BUFTYPE_RAW", __func__);
1311 1.58 matt break;
1312 1.58 matt
1313 1.58 matt case _BUS_DMA_BUFTYPE_INVALID:
1314 1.126 skrll panic("%s(post): _BUS_DMA_BUFTYPE_INVALID", __func__);
1315 1.58 matt break;
1316 1.58 matt
1317 1.58 matt default:
1318 1.126 skrll panic("%s(post): map %p: unknown buffer type %d\n", __func__,
1319 1.58 matt map, map->_dm_buftype);
1320 1.58 matt break;
1321 1.58 matt #endif
1322 1.58 matt }
1323 1.1 chris }
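/*
 * Illustrative sketch of the PRE/POST pairing implemented above, as
 * seen from a driver receive path (hypothetical names).  Mixing PRE
 * and POST ops in one call panics, so they are issued separately,
 * bracketing the device's access to the buffer.
 */
#if 0
	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);
	/* ... device DMAs into the buffer; wait for completion ... */
	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
	/* the CPU may now safely read the buffer */
#endif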
1324 1.1 chris
1325 1.1 chris /*
1326 1.1 chris * Common function for DMA-safe memory allocation. May be called
1327 1.1 chris * by bus-specific DMA memory allocation functions.
1328 1.1 chris */
1329 1.1 chris
1330 1.1 chris int
1331 1.7 thorpej _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
1332 1.7 thorpej bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
1333 1.7 thorpej int flags)
1334 1.1 chris {
1335 1.15 thorpej struct arm32_dma_range *dr;
1336 1.37 mycroft int error, i;
1337 1.15 thorpej
1338 1.1 chris #ifdef DEBUG_DMA
1339 1.130 skrll printf("dmamem_alloc t=%p size=%#lx align=%#lx boundary=%#lx "
1340 1.130 skrll "segs=%p nsegs=%#x rsegs=%p flags=%#x\n", t, size, alignment,
1341 1.15 thorpej boundary, segs, nsegs, rsegs, flags);
1342 1.15 thorpej #endif
1343 1.15 thorpej
1344 1.15 thorpej if ((dr = t->_ranges) != NULL) {
1345 1.37 mycroft error = ENOMEM;
1346 1.15 thorpej for (i = 0; i < t->_nranges; i++, dr++) {
1347 1.70 matt if (dr->dr_len == 0
1348 1.70 matt || (dr->dr_flags & _BUS_DMAMAP_NOALLOC))
1349 1.15 thorpej continue;
1350 1.15 thorpej error = _bus_dmamem_alloc_range(t, size, alignment,
1351 1.15 thorpej boundary, segs, nsegs, rsegs, flags,
1352 1.15 thorpej trunc_page(dr->dr_sysbase),
1353 1.15 thorpej trunc_page(dr->dr_sysbase + dr->dr_len));
1354 1.15 thorpej if (error == 0)
1355 1.15 thorpej break;
1356 1.15 thorpej }
1357 1.15 thorpej } else {
1358 1.15 thorpej error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
1359 1.139 skrll segs, nsegs, rsegs, flags, 0UL, ~0UL);
1360 1.15 thorpej }
1361 1.15 thorpej
1362 1.1 chris #ifdef DEBUG_DMA
1363 1.1 chris printf("dmamem_alloc: =%d\n", error);
1364 1.15 thorpej #endif
1365 1.15 thorpej
1366 1.100 skrll return error;
1367 1.1 chris }
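/*
 * Illustrative sketch of the usual consumer pipeline (hypothetical
 * names): allocate DMA-safe pages with this function, then map them
 * into KVA with bus_dmamem_map() below before loading them into a map.
 */
#if 0
static void *
example_alloc_dma_buf(struct example_softc *sc, bus_size_t size,
    bus_dma_segment_t *seg)
{
	void *kva;
	int rseg;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_WAITOK) != 0)
		return NULL;
	if (bus_dmamem_map(sc->sc_dmat, seg, rseg, size, &kva,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0) {
		bus_dmamem_free(sc->sc_dmat, seg, rseg);
		return NULL;
	}
	return kva;
}
#endif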
1368 1.1 chris
1369 1.1 chris /*
1370 1.1 chris * Common function for freeing DMA-safe memory. May be called by
1371 1.1 chris * bus-specific DMA memory free functions.
1372 1.1 chris */
1373 1.1 chris void
1374 1.7 thorpej _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
1375 1.1 chris {
1376 1.1 chris struct vm_page *m;
1377 1.1 chris bus_addr_t addr;
1378 1.1 chris struct pglist mlist;
1379 1.1 chris int curseg;
1380 1.1 chris
1381 1.1 chris #ifdef DEBUG_DMA
1382 1.130 skrll printf("dmamem_free: t=%p segs=%p nsegs=%#x\n", t, segs, nsegs);
1383 1.1 chris #endif /* DEBUG_DMA */
1384 1.1 chris
1385 1.1 chris /*
1386 1.1 chris * Build a list of pages to free back to the VM system.
1387 1.1 chris */
1388 1.1 chris TAILQ_INIT(&mlist);
1389 1.1 chris for (curseg = 0; curseg < nsegs; curseg++) {
1390 1.1 chris for (addr = segs[curseg].ds_addr;
1391 1.1 chris addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
1392 1.1 chris addr += PAGE_SIZE) {
1393 1.1 chris m = PHYS_TO_VM_PAGE(addr);
1394 1.52 ad TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
1395 1.1 chris }
1396 1.1 chris }
1397 1.1 chris uvm_pglistfree(&mlist);
1398 1.1 chris }
1399 1.1 chris
1400 1.1 chris /*
1401 1.1 chris * Common function for mapping DMA-safe memory. May be called by
1402 1.1 chris * bus-specific DMA memory map functions.
1403 1.1 chris */
1404 1.1 chris int
1405 1.7 thorpej _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1406 1.50 christos size_t size, void **kvap, int flags)
1407 1.1 chris {
1408 1.11 thorpej vaddr_t va;
1409 1.57 matt paddr_t pa;
1410 1.1 chris int curseg;
1411 1.65 matt const uvm_flag_t kmflags = UVM_KMF_VAONLY
1412 1.65 matt | ((flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0);
1413 1.65 matt vsize_t align = 0;
1414 1.1 chris
1415 1.1 chris #ifdef DEBUG_DMA
1416 1.130 skrll printf("dmamem_map: t=%p segs=%p nsegs=%#x size=%#lx flags=%#x\n", t,
1417 1.3 rearnsha segs, nsegs, (unsigned long)size, flags);
1418 1.1 chris #endif /* DEBUG_DMA */
1419 1.1 chris
1420 1.62 matt #ifdef PMAP_MAP_POOLPAGE
1421 1.62 matt /*
1422 1.62 matt * If all of memory is mapped, and we are mapping a single physically
1423 1.62 matt 	 * contiguous area then this area is already mapped.  Let's see if we
1424 1.62 matt 	 * can avoid having a separate mapping for it.
1425 1.62 matt */
1426 1.118 jmcneill if (nsegs == 1 && (flags & BUS_DMA_PREFETCHABLE) == 0) {
1427 1.62 matt /*
1428 1.62 matt * If this is a non-COHERENT mapping, then the existing kernel
1429 1.62 matt * mapping is already compatible with it.
1430 1.62 matt */
1431 1.68 matt bool direct_mapable = (flags & BUS_DMA_COHERENT) == 0;
1432 1.68 matt pa = segs[0].ds_addr;
1433 1.68 matt
1434 1.62 matt /*
1435 1.68 matt 		 * Otherwise this is a COHERENT mapping which, unless the address
1436 1.62 matt 		 * is in a COHERENT dma range, will not be compatible with it.
1437 1.62 matt */
1438 1.62 matt if (t->_ranges != NULL) {
1439 1.62 matt const struct arm32_dma_range * const dr =
1440 1.68 matt _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
1441 1.71 matt if (dr != NULL
1442 1.71 matt && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
1443 1.71 matt direct_mapable = true;
1444 1.68 matt }
1445 1.68 matt }
1446 1.68 matt
1447 1.87 matt #ifdef PMAP_NEED_ALLOC_POOLPAGE
1448 1.87 matt /*
1449 1.87 matt 		 * The page can only be direct mapped if it was allocated
1450 1.95 skrll 		 * out of the arm poolpage vm freelist.
1451 1.87 matt */
1452 1.97 cherry uvm_physseg_t upm = uvm_physseg_find(atop(pa), NULL);
1453 1.97 cherry KASSERT(uvm_physseg_valid_p(upm));
1454 1.87 matt if (direct_mapable) {
1455 1.87 matt direct_mapable =
1456 1.97 cherry (arm_poolpage_vmfreelist == uvm_physseg_get_free_list(upm));
1457 1.87 matt }
1458 1.87 matt #endif
1459 1.87 matt
1460 1.68 matt if (direct_mapable) {
1461 1.68 matt *kvap = (void *)PMAP_MAP_POOLPAGE(pa);
1462 1.64 matt #ifdef DEBUG_DMA
1463 1.68 matt printf("dmamem_map: =%p\n", *kvap);
1464 1.64 matt #endif /* DEBUG_DMA */
1465 1.68 matt return 0;
1466 1.62 matt }
1467 1.62 matt }
1468 1.62 matt #endif
1469 1.62 matt
1470 1.1 chris size = round_page(size);
1471 1.107 ryo
1472 1.107 ryo #ifdef PMAP_MAPSIZE1
1473 1.107 ryo if (size >= PMAP_MAPSIZE1)
1474 1.107 ryo align = PMAP_MAPSIZE1;
1475 1.107 ryo
1476 1.107 ryo #ifdef PMAP_MAPSIZE2
1477 1.107 ryo
1478 1.107 ryo #if PMAP_MAPSIZE1 > PMAP_MAPSIZE2
1479 1.107 ryo #error PMAP_MAPSIZE1 must be smaller than PMAP_MAPSIZE2
1480 1.107 ryo #endif
1481 1.107 ryo
1482 1.107 ryo if (size >= PMAP_MAPSIZE2)
1483 1.107 ryo align = PMAP_MAPSIZE2;
1484 1.107 ryo
1485 1.107 ryo #ifdef PMAP_MAPSIZE3
1486 1.107 ryo
1487 1.107 ryo #if PMAP_MAPSIZE2 > PMAP_MAPSIZE3
1488 1.107 ryo #error PMAP_MAPSIZE2 must be smaller than PMAP_MAPSIZE3
1489 1.107 ryo #endif
1490 1.107 ryo
1491 1.107 ryo if (size >= PMAP_MAPSIZE3)
1492 1.107 ryo align = PMAP_MAPSIZE3;
1493 1.107 ryo #endif
1494 1.107 ryo #endif
1495 1.107 ryo #endif
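
	/*
	 * For example, if PMAP_MAPSIZE1 were 64KB and PMAP_MAPSIZE2 1MB
	 * (the real values are machine-dependent; these are purely
	 * illustrative), a 128KB request would select align = 64KB and
	 * an 8MB request align = 1MB, giving pmap a chance to use
	 * large-page mappings for the kernel virtual window.
	 */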
1496 1.65 matt
1497 1.65 matt va = uvm_km_alloc(kernel_map, size, align, kmflags);
1498 1.65 matt if (__predict_false(va == 0 && align > 0)) {
1499 1.65 matt align = 0;
1500 1.65 matt va = uvm_km_alloc(kernel_map, size, 0, kmflags);
1501 1.65 matt }
1502 1.1 chris
1503 1.1 chris if (va == 0)
1504 1.100 skrll return ENOMEM;
1505 1.1 chris
1506 1.50 christos *kvap = (void *)va;
1507 1.1 chris
1508 1.1 chris for (curseg = 0; curseg < nsegs; curseg++) {
1509 1.57 matt for (pa = segs[curseg].ds_addr;
1510 1.57 matt pa < (segs[curseg].ds_addr + segs[curseg].ds_len);
1511 1.57 matt pa += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
1512 1.68 matt bool uncached = (flags & BUS_DMA_COHERENT);
1513 1.117 jmcneill bool prefetchable = (flags & BUS_DMA_PREFETCHABLE);
1514 1.1 chris #ifdef DEBUG_DMA
1515 1.131 skrll printf("wiring P%#lx to V%#lx\n", pa, va);
1516 1.1 chris #endif /* DEBUG_DMA */
1517 1.1 chris if (size == 0)
1518 1.1 chris panic("_bus_dmamem_map: size botch");
1519 1.68 matt
1520 1.68 matt const struct arm32_dma_range * const dr =
1521 1.68 matt _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
1522 1.68 matt /*
1523 1.68 matt * If this dma region is coherent then there is
1524 1.68 matt * no need for an uncached mapping.
1525 1.68 matt */
1526 1.71 matt if (dr != NULL
1527 1.71 matt && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
1528 1.71 matt uncached = false;
1529 1.68 matt }
1530 1.71 matt
1531 1.117 jmcneill u_int pmap_flags = PMAP_WIRED;
1532 1.117 jmcneill if (prefetchable)
1533 1.117 jmcneill pmap_flags |= PMAP_WRITE_COMBINE;
1534 1.117 jmcneill else if (uncached)
1535 1.117 jmcneill pmap_flags |= PMAP_NOCACHE;
1536 1.117 jmcneill
1537 1.81 matt pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE,
1538 1.117 jmcneill pmap_flags);
1539 1.1 chris }
1540 1.1 chris }
1541 1.2 chris pmap_update(pmap_kernel());
1542 1.1 chris #ifdef DEBUG_DMA
1543 1.1 chris printf("dmamem_map: =%p\n", *kvap);
1544 1.1 chris #endif /* DEBUG_DMA */
1545 1.100 skrll return 0;
1546 1.1 chris }
1547 1.1 chris
1548 1.1 chris /*
1549 1.1 chris * Common function for unmapping DMA-safe memory. May be called by
1550 1.1 chris * bus-specific DMA memory unmapping functions.
1551 1.1 chris */
1552 1.1 chris void
1553 1.50 christos _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
1554 1.1 chris {
1555 1.1 chris
1556 1.1 chris #ifdef DEBUG_DMA
1557 1.130 skrll printf("dmamem_unmap: t=%p kva=%p size=%#zx\n", t, kva, size);
1558 1.1 chris #endif /* DEBUG_DMA */
1559 1.79 matt KASSERTMSG(((uintptr_t)kva & PAGE_MASK) == 0,
1560 1.83 christos "kva %p (%#"PRIxPTR")", kva, ((uintptr_t)kva & PAGE_MASK));
1561 1.1 chris
1562 1.84 matt #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
1563 1.84 matt /*
1564 1.88 snj 	 * Check to see if this used direct-mapped memory.  Get its physical
1565 1.84 matt 	 * address and try to map it.  If the resulting address matches the
1566 1.99 skrll 	 * kva, it did, and we can just return since there is nothing to free.
1567 1.84 matt */
1568 1.84 matt paddr_t pa;
1569 1.84 matt vaddr_t va;
1570 1.84 matt (void)pmap_extract(pmap_kernel(), (vaddr_t)kva, &pa);
1571 1.84 matt if (mm_md_direct_mapped_phys(pa, &va) && va == (vaddr_t)kva)
1572 1.84 matt return;
1573 1.84 matt #endif
1574 1.84 matt
1575 1.1 chris size = round_page(size);
1576 1.65 matt pmap_kremove((vaddr_t)kva, size);
1577 1.44 yamt pmap_update(pmap_kernel());
1578 1.44 yamt uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
1579 1.1 chris }
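
/*
 * Illustrative teardown sketch (hypothetical names): unmap before
 * freeing, mirroring the alloc-then-map order above.
 *
 *	bus_dmamem_unmap(sc->sc_dmat, kva, size);
 *	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 */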
1580 1.1 chris
1581 1.1 chris /*
1582 1.137 andvar * Common function for mmap(2)'ing DMA-safe memory. May be called by
1583 1.1 chris * bus-specific DMA mmap(2)'ing functions.
1584 1.1 chris */
1585 1.1 chris paddr_t
1586 1.7 thorpej _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1587 1.7 thorpej off_t off, int prot, int flags)
1588 1.1 chris {
1589 1.73 macallan paddr_t map_flags;
1590 1.1 chris int i;
1591 1.1 chris
1592 1.1 chris for (i = 0; i < nsegs; i++) {
1593 1.79 matt KASSERTMSG((off & PAGE_MASK) == 0,
1594 1.111 christos "off %#jx (%#x)", (uintmax_t)off, (int)off & PAGE_MASK);
1595 1.79 matt KASSERTMSG((segs[i].ds_addr & PAGE_MASK) == 0,
1596 1.79 matt "ds_addr %#lx (%#x)", segs[i].ds_addr,
1597 1.79 matt (int)segs[i].ds_addr & PAGE_MASK);
1598 1.79 matt KASSERTMSG((segs[i].ds_len & PAGE_MASK) == 0,
1599 1.79 matt 		    "ds_len %#lx (%#x)", segs[i].ds_len,
1600 1.79 matt 		    (int)segs[i].ds_len & PAGE_MASK);
1601 1.1 chris if (off >= segs[i].ds_len) {
1602 1.1 chris off -= segs[i].ds_len;
1603 1.1 chris continue;
1604 1.1 chris }
1605 1.1 chris
1606 1.73 macallan map_flags = 0;
1607 1.73 macallan if (flags & BUS_DMA_PREFETCHABLE)
1608 1.107 ryo map_flags |= ARM_MMAP_WRITECOMBINE;
1609 1.73 macallan
1610 1.100 skrll return arm_btop((u_long)segs[i].ds_addr + off) | map_flags;
1611 1.95 skrll
1612 1.1 chris }
1613 1.1 chris
1614 1.1 chris /* Page not found. */
1615 1.100 skrll return -1;
1616 1.1 chris }
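
/*
 * Illustrative sketch (hypothetical "mydev" driver): a character
 * device's d_mmap entry point typically just forwards to this
 * function through its tag.
 *
 *	static paddr_t
 *	mydev_mmap(dev_t dev, off_t off, int prot)
 *	{
 *		struct mydev_softc *sc =
 *		    device_lookup_private(&mydev_cd, minor(dev));
 *
 *		return bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs,
 *		    sc->sc_nsegs, off, prot, 0);
 *	}
 */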
1617 1.1 chris
1618 1.1 chris /**********************************************************************
1619 1.1 chris * DMA utility functions
1620 1.1 chris **********************************************************************/
1621 1.1 chris
1622 1.1 chris /*
1623 1.1 chris  * Utility function to load a linear buffer.  The segment list and
1624 1.1 chris  * related state are kept in the map itself, so this function may be
1625 1.136 andvar  * called repeatedly to append successive buffers to the same map
1626 1.1 chris  * (for multiple-buffer loads).
1627 1.1 chris */
1628 1.1 chris int
1629 1.7 thorpej _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
1630 1.48 yamt bus_size_t buflen, struct vmspace *vm, int flags)
1631 1.1 chris {
1632 1.1 chris bus_size_t sgsize;
1633 1.41 thorpej bus_addr_t curaddr;
1634 1.11 thorpej vaddr_t vaddr = (vaddr_t)buf;
1635 1.41 thorpej int error;
1636 1.1 chris pmap_t pmap;
1637 1.1 chris
1638 1.1 chris #ifdef DEBUG_DMA
1639 1.138 andvar printf("_bus_dmamap_load_buffer(buf=%p, len=%#lx, flags=%#x)\n",
1640 1.40 scw buf, buflen, flags);
1641 1.1 chris #endif /* DEBUG_DMA */
1642 1.1 chris
1643 1.48 yamt pmap = vm_map_pmap(&vm->vm_map);
1644 1.1 chris
1645 1.41 thorpej while (buflen > 0) {
1646 1.1 chris /*
1647 1.1 chris * Get the physical address for this segment.
1648 1.1 chris 		 */
1650 1.61 matt bool coherent;
1651 1.132 skrll bool ok __diagused;
1652 1.132 skrll ok = pmap_extract_coherency(pmap, vaddr, &curaddr, &coherent);
1653 1.132 skrll
1654 1.132 skrll /*
1655 1.132 skrll * trying to bus_dmamap_load an unmapped buffer is a
1656 1.132 skrll * programming error.
1657 1.132 skrll */
1658 1.132 skrll KASSERT(ok);
1659 1.107 ryo
1660 1.86 matt KASSERTMSG((vaddr & PAGE_MASK) == (curaddr & PAGE_MASK),
1661 1.86 matt "va %#lx curaddr %#lx", vaddr, curaddr);
1662 1.1 chris
1663 1.1 chris /*
1664 1.1 chris * Compute the segment size, and adjust counts.
1665 1.1 chris */
1666 1.27 thorpej sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
1667 1.1 chris if (buflen < sgsize)
1668 1.1 chris sgsize = buflen;
1669 1.1 chris
1670 1.61 matt error = _bus_dmamap_load_paddr(t, map, curaddr, sgsize,
1671 1.61 matt coherent);
1672 1.41 thorpej if (error)
1673 1.100 skrll return error;
1674 1.1 chris
1675 1.1 chris vaddr += sgsize;
1676 1.1 chris buflen -= sgsize;
1677 1.1 chris }
1678 1.1 chris
1679 1.100 skrll return 0;
1680 1.1 chris }
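
/*
 * Worked example of the per-page split above (illustrative addresses,
 * 4KB pages): for buf = 0xc0a01e00 and buflen = 0x1000, the first pass
 * takes sgsize = PAGE_SIZE - 0xe00 = 0x200, stopping at the page
 * boundary, and the second pass takes the remaining 0xe00; each page
 * is translated separately since its physical address may be
 * discontiguous.
 */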
1681 1.1 chris
1682 1.1 chris /*
1683 1.1 chris * Allocate physical memory from the given physical address range.
1684 1.1 chris * Called by DMA-safe memory allocation methods.
1685 1.1 chris */
1686 1.1 chris int
1687 1.7 thorpej _bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
1688 1.7 thorpej bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
1689 1.11 thorpej int flags, paddr_t low, paddr_t high)
1690 1.1 chris {
1691 1.11 thorpej paddr_t curaddr, lastaddr;
1692 1.1 chris struct vm_page *m;
1693 1.1 chris struct pglist mlist;
1694 1.1 chris int curseg, error;
1695 1.1 chris
1696 1.101 skrll KASSERTMSG(boundary == 0 || (boundary & (boundary - 1)) == 0,
1697 1.76 matt "invalid boundary %#lx", boundary);
1698 1.76 matt
1699 1.1 chris #ifdef DEBUG_DMA
1700 1.130 skrll printf("alloc_range: t=%p size=%#lx align=%#lx boundary=%#lx segs=%p nsegs=%#x rsegs=%p flags=%#x lo=%#lx hi=%#lx\n",
1701 1.1 chris t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
1702 1.1 chris #endif /* DEBUG_DMA */
1703 1.1 chris
1704 1.1 chris /* Always round the size. */
1705 1.1 chris size = round_page(size);
1706 1.1 chris
1707 1.1 chris /*
1708 1.76 matt 	 * We accept boundaries < size, splitting into multiple segments
1709 1.76 matt 	 * if needed.  uvm_pglistalloc does not, so compute an appropriate
1710 1.76 matt 	 * boundary: the next power of 2 >= size.
1711 1.76 matt */
1712 1.76 matt bus_size_t uboundary = boundary;
1713 1.76 matt if (uboundary <= PAGE_SIZE) {
1714 1.76 matt uboundary = 0;
1715 1.76 matt } else {
1716 1.76 matt while (uboundary < size) {
1717 1.76 matt uboundary <<= 1;
1718 1.76 matt }
1719 1.76 matt }
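
	/*
	 * Worked example (illustrative, 4KB pages): boundary = 0x2000
	 * and size = 0x10000 doubles uboundary 0x2000 -> 0x4000 ->
	 * 0x8000 -> 0x10000, so uvm_pglistalloc() avoids crossing 64KB
	 * boundaries while the loop below still splits segments on the
	 * original 8KB boundary.
	 */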
1720 1.76 matt
1721 1.76 matt /*
1722 1.1 chris * Allocate pages from the VM system.
1723 1.1 chris */
1724 1.78 matt error = uvm_pglistalloc(size, low, high, alignment, uboundary,
1725 1.1 chris &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
1726 1.1 chris if (error)
1727 1.100 skrll return error;
1728 1.1 chris
1729 1.1 chris /*
1730 1.1 chris * Compute the location, size, and number of segments actually
1731 1.1 chris * returned by the VM code.
1732 1.1 chris */
1733 1.42 chris m = TAILQ_FIRST(&mlist);
1734 1.1 chris curseg = 0;
1735 1.133 jmcneill lastaddr = segs[curseg].ds_addr = segs[curseg]._ds_paddr =
1736 1.133 jmcneill VM_PAGE_TO_PHYS(m);
1737 1.1 chris segs[curseg].ds_len = PAGE_SIZE;
1738 1.1 chris #ifdef DEBUG_DMA
1739 1.130 skrll printf("alloc: page %#lx\n", lastaddr);
1740 1.1 chris #endif /* DEBUG_DMA */
1741 1.52 ad m = TAILQ_NEXT(m, pageq.queue);
1742 1.1 chris
1743 1.52 ad for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
1744 1.1 chris curaddr = VM_PAGE_TO_PHYS(m);
1745 1.76 matt KASSERTMSG(low <= curaddr && curaddr < high,
1746 1.76 matt 		    "uvm_pglistalloc returned nonsensical address %#lx "
1747 1.76 matt 		    "(low=%#lx, high=%#lx)", curaddr, low, high);
1748 1.1 chris #ifdef DEBUG_DMA
1749 1.130 skrll printf("alloc: page %#lx\n", curaddr);
1750 1.1 chris #endif /* DEBUG_DMA */
1751 1.76 matt if (curaddr == lastaddr + PAGE_SIZE
1752 1.76 matt && (lastaddr & boundary) == (curaddr & boundary))
1753 1.1 chris segs[curseg].ds_len += PAGE_SIZE;
1754 1.1 chris else {
1755 1.1 chris curseg++;
1756 1.76 matt if (curseg >= nsegs) {
1757 1.76 matt uvm_pglistfree(&mlist);
1758 1.76 matt return EFBIG;
1759 1.76 matt }
1760 1.1 chris segs[curseg].ds_addr = curaddr;
1761 1.133 jmcneill segs[curseg]._ds_paddr = curaddr;
1762 1.1 chris segs[curseg].ds_len = PAGE_SIZE;
1763 1.1 chris }
1764 1.1 chris lastaddr = curaddr;
1765 1.1 chris }
1766 1.1 chris
1767 1.1 chris *rsegs = curseg + 1;
1768 1.1 chris
1769 1.100 skrll return 0;
1770 1.15 thorpej }
1771 1.15 thorpej
1772 1.15 thorpej /*
1773 1.15 thorpej * Check if a memory region intersects with a DMA range, and return the
1774 1.15 thorpej * page-rounded intersection if it does.
1775 1.15 thorpej */
1776 1.15 thorpej int
1777 1.15 thorpej arm32_dma_range_intersect(struct arm32_dma_range *ranges, int nranges,
1778 1.15 thorpej paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
1779 1.15 thorpej {
1780 1.15 thorpej struct arm32_dma_range *dr;
1781 1.15 thorpej int i;
1782 1.15 thorpej
1783 1.15 thorpej if (ranges == NULL)
1784 1.100 skrll return 0;
1785 1.15 thorpej
1786 1.15 thorpej for (i = 0, dr = ranges; i < nranges; i++, dr++) {
1787 1.15 thorpej if (dr->dr_sysbase <= pa &&
1788 1.15 thorpej pa < (dr->dr_sysbase + dr->dr_len)) {
1789 1.15 thorpej /*
1790 1.15 thorpej * Beginning of region intersects with this range.
1791 1.15 thorpej */
1792 1.15 thorpej *pap = trunc_page(pa);
1793 1.112 riastrad *sizep = round_page(uimin(pa + size,
1794 1.15 thorpej dr->dr_sysbase + dr->dr_len) - pa);
1795 1.100 skrll return 1;
1796 1.15 thorpej }
1797 1.15 thorpej if (pa < dr->dr_sysbase && dr->dr_sysbase < (pa + size)) {
1798 1.15 thorpej /*
1799 1.15 thorpej * End of region intersects with this range.
1800 1.15 thorpej */
1801 1.15 thorpej *pap = trunc_page(dr->dr_sysbase);
1802 1.112 riastrad *sizep = round_page(uimin((pa + size) - dr->dr_sysbase,
1803 1.15 thorpej dr->dr_len));
1804 1.100 skrll return 1;
1805 1.15 thorpej }
1806 1.15 thorpej }
1807 1.15 thorpej
1808 1.15 thorpej /* No intersection found. */
1809 1.100 skrll return 0;
1810 1.1 chris }
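
/*
 * Worked example (illustrative numbers): with one range at dr_sysbase =
 * 0x20000000, dr_len = 0x10000000, a region pa = 0x1ffff000, size =
 * 0x3000 fails the first test but matches the second, yielding *pap =
 * 0x20000000 and *sizep = round_page(0x20002000 - 0x20000000) = 0x2000.
 */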
1811 1.58 matt
1812 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1813 1.58 matt static int
1814 1.58 matt _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
1815 1.58 matt bus_size_t size, int flags)
1816 1.58 matt {
1817 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
1818 1.58 matt int error = 0;
1819 1.58 matt
1820 1.79 matt KASSERT(cookie != NULL);
1821 1.58 matt
1822 1.58 matt cookie->id_bouncebuflen = round_page(size);
1823 1.58 matt error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
1824 1.58 matt PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
1825 1.58 matt map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
1826 1.76 matt if (error == 0) {
1827 1.76 matt error = _bus_dmamem_map(t, cookie->id_bouncesegs,
1828 1.76 matt cookie->id_nbouncesegs, cookie->id_bouncebuflen,
1829 1.76 matt (void **)&cookie->id_bouncebuf, flags);
1830 1.76 matt if (error) {
1831 1.76 matt _bus_dmamem_free(t, cookie->id_bouncesegs,
1832 1.76 matt cookie->id_nbouncesegs);
1833 1.76 matt cookie->id_bouncebuflen = 0;
1834 1.76 matt cookie->id_nbouncesegs = 0;
1835 1.76 matt } else {
1836 1.76 matt cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;
1837 1.76 matt }
1838 1.76 matt } else {
1839 1.58 matt cookie->id_bouncebuflen = 0;
1840 1.58 matt cookie->id_nbouncesegs = 0;
1841 1.58 matt }
1842 1.58 matt
1843 1.100 skrll return error;
1844 1.58 matt }
1845 1.58 matt
1846 1.58 matt static void
1847 1.58 matt _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
1848 1.58 matt {
1849 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
1850 1.58 matt
1851 1.79 matt KASSERT(cookie != NULL);
1852 1.58 matt
1853 1.58 matt _bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
1854 1.79 matt _bus_dmamem_free(t, cookie->id_bouncesegs, cookie->id_nbouncesegs);
1855 1.58 matt cookie->id_bouncebuflen = 0;
1856 1.58 matt cookie->id_nbouncesegs = 0;
1857 1.58 matt cookie->id_flags &= ~_BUS_DMA_HAS_BOUNCE;
1858 1.58 matt }
1859 1.115 skrll #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
1860 1.58 matt
1861 1.58 matt /*
1862 1.58 matt * This function does the same as uiomove, but takes an explicit
1863 1.58 matt * direction, and does not update the uio structure.
1864 1.58 matt */
1865 1.58 matt static int
1866 1.58 matt _bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
1867 1.58 matt {
1868 1.58 matt struct iovec *iov;
1869 1.58 matt int error;
1870 1.58 matt struct vmspace *vm;
1871 1.58 matt char *cp;
1872 1.58 matt size_t resid, cnt;
1873 1.58 matt int i;
1874 1.58 matt
1875 1.58 matt iov = uio->uio_iov;
1876 1.58 matt vm = uio->uio_vmspace;
1877 1.58 matt cp = buf;
1878 1.58 matt resid = n;
1879 1.58 matt
1880 1.58 matt for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
1881 1.58 matt iov = &uio->uio_iov[i];
1882 1.58 matt if (iov->iov_len == 0)
1883 1.58 matt continue;
1884 1.58 matt cnt = MIN(resid, iov->iov_len);
1885 1.58 matt
1886 1.121 ad if (!VMSPACE_IS_KERNEL_P(vm)) {
1887 1.121 ad preempt_point();
1888 1.58 matt }
1889 1.58 matt if (direction == UIO_READ) {
1890 1.58 matt error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
1891 1.58 matt } else {
1892 1.58 matt error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
1893 1.58 matt }
1894 1.58 matt if (error)
1895 1.100 skrll return error;
1896 1.58 matt cp += cnt;
1897 1.58 matt resid -= cnt;
1898 1.58 matt }
1899 1.100 skrll return 0;
1900 1.58 matt }
1901 1.58 matt
1902 1.58 matt int
1903 1.58 matt _bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
1904 1.58 matt bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
1905 1.58 matt {
1906 1.134 skrll #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1907 1.123 skrll if (min_addr >= max_addr)
1908 1.123 skrll return EOPNOTSUPP;
1909 1.58 matt
1910 1.58 matt struct arm32_dma_range *dr;
1911 1.124 skrll bool psubset = true;
1912 1.58 matt size_t nranges = 0;
1913 1.58 matt size_t i;
1914 1.58 matt for (i = 0, dr = tag->_ranges; i < tag->_nranges; i++, dr++) {
1915 1.123 skrll /*
1916 1.124 skrll * If the new {min,max}_addr are narrower than any of the
1917 1.124 skrll * ranges in the parent tag then we need a new tag;
1918 1.124 skrll 		 * otherwise every parent range is a subset of the new
1919 1.124 skrll 		 * limits and the parent tag can continue to be used.
1920 1.123 skrll */
1921 1.124 skrll if (min_addr > dr->dr_sysbase
1922 1.124 skrll || max_addr < dr->dr_sysbase + dr->dr_len - 1) {
1923 1.124 skrll psubset = false;
1924 1.58 matt }
1925 1.58 matt if (min_addr <= dr->dr_sysbase + dr->dr_len
1926 1.58 matt && max_addr >= dr->dr_sysbase) {
1927 1.58 matt nranges++;
1928 1.58 matt }
1929 1.58 matt }
1930 1.124 skrll if (nranges == 0) {
1931 1.124 skrll nranges = 1;
1932 1.124 skrll psubset = false;
1933 1.124 skrll }
1934 1.124 skrll if (psubset) {
1935 1.58 matt *newtag = tag;
1936 1.58 matt /* if the tag must be freed, add a reference */
1937 1.58 matt if (tag->_tag_needs_free)
1938 1.58 matt (tag->_tag_needs_free)++;
1939 1.58 matt return 0;
1940 1.58 matt }
1941 1.58 matt
1942 1.81 matt const size_t tagsize = sizeof(*tag) + nranges * sizeof(*dr);
1943 1.81 matt if ((*newtag = kmem_intr_zalloc(tagsize,
1944 1.81 matt (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
1945 1.58 matt return ENOMEM;
1946 1.58 matt
1947 1.58 matt dr = (void *)(*newtag + 1);
1948 1.58 matt **newtag = *tag;
1949 1.58 matt (*newtag)->_tag_needs_free = 1;
1950 1.58 matt (*newtag)->_ranges = dr;
1951 1.58 matt (*newtag)->_nranges = nranges;
1952 1.58 matt
1953 1.58 matt if (tag->_ranges == NULL) {
1954 1.58 matt dr->dr_sysbase = min_addr;
1955 1.58 matt dr->dr_busbase = min_addr;
1956 1.58 matt dr->dr_len = max_addr + 1 - min_addr;
1957 1.58 matt } else {
1958 1.123 skrll struct arm32_dma_range *pdr;
1959 1.123 skrll
1960 1.123 skrll for (i = 0, pdr = tag->_ranges; i < tag->_nranges; i++, pdr++) {
1961 1.123 skrll KASSERT(nranges != 0);
1962 1.123 skrll
1963 1.123 skrll if (min_addr > pdr->dr_sysbase + pdr->dr_len
1964 1.123 skrll || max_addr < pdr->dr_sysbase) {
1965 1.123 skrll /*
1966 1.123 skrll 				 * This range doesn't overlap with the new
1967 1.123 skrll 				 * limits, so skip it.
1968 1.123 skrll */
1969 1.58 matt continue;
1970 1.123 skrll }
1971 1.123 skrll /*
1972 1.123 skrll * Copy the range and adjust to fit within the new
1973 1.123 skrll * limits
1974 1.123 skrll */
1975 1.123 skrll dr[0] = pdr[0];
1976 1.58 matt if (dr->dr_sysbase < min_addr) {
1977 1.58 matt psize_t diff = min_addr - dr->dr_sysbase;
1978 1.58 matt dr->dr_busbase += diff;
1979 1.58 matt dr->dr_len -= diff;
1980 1.58 matt dr->dr_sysbase += diff;
1981 1.58 matt }
1982 1.123 skrll if (max_addr <= dr->dr_sysbase + dr->dr_len - 1) {
1983 1.58 matt dr->dr_len = max_addr + 1 - dr->dr_sysbase;
1984 1.58 matt }
1985 1.58 matt dr++;
1986 1.123 skrll nranges--;
1987 1.58 matt }
1988 1.58 matt }
1989 1.58 matt
1990 1.58 matt return 0;
1991 1.58 matt #else
1992 1.58 matt return EOPNOTSUPP;
1993 1.58 matt #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
1994 1.58 matt }
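
/*
 * Illustrative sketch (hypothetical device): a bus attachment whose
 * device can only address the low 1GB derives a restricted tag at
 * attach time and releases it with bus_dmatag_destroy() at detach.
 *
 *	error = bus_dmatag_subregion(sc->sc_dmat, 0, 0x3fffffff,
 *	    &sc->sc_dmat_1g, BUS_DMA_WAITOK);
 */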
1995 1.58 matt
1996 1.58 matt void
1997 1.58 matt _bus_dmatag_destroy(bus_dma_tag_t tag)
1998 1.58 matt {
1999 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
2000 1.58 matt switch (tag->_tag_needs_free) {
2001 1.58 matt case 0:
2002 1.81 matt break; /* not allocated with kmem */
2003 1.81 matt case 1: {
2004 1.81 matt const size_t tagsize = sizeof(*tag)
2005 1.81 matt + tag->_nranges * sizeof(*tag->_ranges);
2006 1.81 matt kmem_intr_free(tag, tagsize); /* last reference to tag */
2007 1.58 matt break;
2008 1.81 matt }
2009 1.58 matt default:
2010 1.58 matt (tag->_tag_needs_free)--; /* one less reference */
2011 1.58 matt }
2012 1.58 matt #endif
2013 1.58 matt }
2014