1 1.106 skrll /* $NetBSD: bus_dma.c,v 1.106 2018/03/04 08:04:59 skrll Exp $ */
2 1.1 chris
3 1.1 chris /*-
4 1.1 chris * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5 1.1 chris * All rights reserved.
6 1.1 chris *
7 1.1 chris * This code is derived from software contributed to The NetBSD Foundation
8 1.1 chris * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 1.1 chris * NASA Ames Research Center.
10 1.1 chris *
11 1.1 chris * Redistribution and use in source and binary forms, with or without
12 1.1 chris * modification, are permitted provided that the following conditions
13 1.1 chris * are met:
14 1.1 chris * 1. Redistributions of source code must retain the above copyright
15 1.1 chris * notice, this list of conditions and the following disclaimer.
16 1.1 chris * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 chris * notice, this list of conditions and the following disclaimer in the
18 1.1 chris * documentation and/or other materials provided with the distribution.
19 1.1 chris *
20 1.1 chris * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 1.1 chris * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 1.1 chris * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 1.1 chris * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 1.1 chris * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 1.1 chris * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 1.1 chris * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 1.1 chris * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 1.1 chris * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 1.1 chris * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 1.1 chris * POSSIBILITY OF SUCH DAMAGE.
31 1.1 chris */
32 1.33 lukem
33 1.35 rearnsha #define _ARM32_BUS_DMA_PRIVATE
34 1.35 rearnsha
35 1.81 matt #include "opt_arm_bus_space.h"
36 1.81 matt
37 1.33 lukem #include <sys/cdefs.h>
38 1.106 skrll __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.106 2018/03/04 08:04:59 skrll Exp $");
39 1.1 chris
40 1.1 chris #include <sys/param.h>
41 1.84 matt #include <sys/bus.h>
42 1.84 matt #include <sys/cpu.h>
43 1.81 matt #include <sys/kmem.h>
44 1.1 chris #include <sys/mbuf.h>
45 1.1 chris
46 1.53 uebayasi #include <uvm/uvm.h>
47 1.1 chris
48 1.84 matt #include <arm/cpufunc.h>
49 1.4 thorpej
50 1.84 matt #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
51 1.84 matt #include <dev/mm.h>
52 1.84 matt #endif
53 1.1 chris
54 1.76 matt #ifdef BUSDMA_COUNTERS
55 1.58 matt static struct evcnt bus_dma_creates =
56 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "creates");
57 1.58 matt static struct evcnt bus_dma_bounced_creates =
58 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced creates");
59 1.58 matt static struct evcnt bus_dma_loads =
60 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "loads");
61 1.58 matt static struct evcnt bus_dma_bounced_loads =
62 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced loads");
63 1.81 matt static struct evcnt bus_dma_coherent_loads =
64 1.81 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "coherent loads");
65 1.58 matt static struct evcnt bus_dma_read_bounces =
66 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "read bounces");
67 1.58 matt static struct evcnt bus_dma_write_bounces =
68 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "write bounces");
69 1.58 matt static struct evcnt bus_dma_bounced_unloads =
70 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced unloads");
71 1.58 matt static struct evcnt bus_dma_unloads =
72 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "unloads");
73 1.58 matt static struct evcnt bus_dma_bounced_destroys =
74 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced destroys");
75 1.58 matt static struct evcnt bus_dma_destroys =
76 1.58 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "destroys");
77 1.95 skrll static struct evcnt bus_dma_sync_prereadwrite =
78 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prereadwrite");
79 1.76 matt static struct evcnt bus_dma_sync_preread_begin =
80 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread begin");
81 1.76 matt static struct evcnt bus_dma_sync_preread =
82 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread");
83 1.76 matt static struct evcnt bus_dma_sync_preread_tail =
84 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread tail");
85 1.95 skrll static struct evcnt bus_dma_sync_prewrite =
86 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prewrite");
87 1.95 skrll static struct evcnt bus_dma_sync_postread =
88 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postread");
89 1.95 skrll static struct evcnt bus_dma_sync_postreadwrite =
90 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postreadwrite");
91 1.95 skrll static struct evcnt bus_dma_sync_postwrite =
92 1.76 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postwrite");
93 1.58 matt
94 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_creates);
95 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_bounced_creates);
96 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_loads);
97 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_bounced_loads);
98 1.81 matt EVCNT_ATTACH_STATIC(bus_dma_coherent_loads);
99 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_read_bounces);
100 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_write_bounces);
101 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_unloads);
102 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_bounced_unloads);
103 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_destroys);
104 1.58 matt EVCNT_ATTACH_STATIC(bus_dma_bounced_destroys);
105 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_prereadwrite);
106 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_preread_begin);
107 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_preread);
108 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_preread_tail);
109 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_prewrite);
110 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_postread);
111 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_postreadwrite);
112 1.76 matt EVCNT_ATTACH_STATIC(bus_dma_sync_postwrite);
113 1.58 matt
114 1.58 matt #define STAT_INCR(x) (bus_dma_ ## x.ev_count++)
115 1.76 matt #else
116 1.76 matt #define STAT_INCR(x) /*(bus_dma_ ## x.ev_count++)*/
117 1.76 matt #endif
118 1.58 matt
119 1.7 thorpej int _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
120 1.48 yamt bus_size_t, struct vmspace *, int);
121 1.1 chris
122 1.1 chris /*
 123 1.19 briggs  * Check to see if the specified physical address is in an allowed DMA range.
124 1.19 briggs */
125 1.105 skrll static inline struct arm32_dma_range *
126 1.59 matt _bus_dma_paddr_inrange(struct arm32_dma_range *ranges, int nranges,
127 1.19 briggs bus_addr_t curaddr)
128 1.19 briggs {
129 1.19 briggs struct arm32_dma_range *dr;
130 1.19 briggs int i;
131 1.19 briggs
132 1.19 briggs for (i = 0, dr = ranges; i < nranges; i++, dr++) {
133 1.19 briggs if (curaddr >= dr->dr_sysbase &&
134 1.82 skrll curaddr < (dr->dr_sysbase + dr->dr_len))
135 1.100 skrll return dr;
136 1.19 briggs }
137 1.19 briggs
138 1.100 skrll return NULL;
139 1.19 briggs }
140 1.19 briggs
141 1.19 briggs /*
 142 1.59 matt  * Translate the specified busaddr, which must be in an allowed DMA range, back to a physical address.
143 1.59 matt */
144 1.59 matt static inline paddr_t
145 1.59 matt _bus_dma_busaddr_to_paddr(bus_dma_tag_t t, bus_addr_t curaddr)
146 1.59 matt {
147 1.59 matt struct arm32_dma_range *dr;
148 1.59 matt u_int i;
149 1.59 matt
150 1.59 matt if (t->_nranges == 0)
151 1.59 matt return curaddr;
152 1.59 matt
153 1.59 matt for (i = 0, dr = t->_ranges; i < t->_nranges; i++, dr++) {
154 1.59 matt if (dr->dr_busbase <= curaddr
155 1.82 skrll && curaddr < dr->dr_busbase + dr->dr_len)
156 1.59 matt return curaddr - dr->dr_busbase + dr->dr_sysbase;
157 1.59 matt }
158 1.59 matt panic("%s: curaddr %#lx not in range", __func__, curaddr);
159 1.59 matt }
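/*
 * Worked example (values hypothetical): with a single range of
 * dr_sysbase = 0x80000000, dr_busbase = 0x00000000, dr_len = 256MB,
 * the device-visible address 0x00123000 translates back to the CPU
 * physical address 0x00123000 - 0x00000000 + 0x80000000 = 0x80123000.
 * _bus_dmamap_load_paddr() below performs the inverse translation
 * (paddr - dr_sysbase + dr_busbase) when building segments.
 */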
160 1.59 matt
161 1.59 matt /*
162 1.41 thorpej * Common function to load the specified physical address into the
163 1.41 thorpej * DMA map, coalescing segments and boundary checking as necessary.
164 1.41 thorpej */
165 1.41 thorpej static int
166 1.41 thorpej _bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
167 1.61 matt bus_addr_t paddr, bus_size_t size, bool coherent)
168 1.41 thorpej {
169 1.41 thorpej bus_dma_segment_t * const segs = map->dm_segs;
170 1.41 thorpej int nseg = map->dm_nsegs;
171 1.58 matt bus_addr_t lastaddr;
172 1.41 thorpej bus_addr_t bmask = ~(map->_dm_boundary - 1);
173 1.41 thorpej bus_addr_t curaddr;
174 1.41 thorpej bus_size_t sgsize;
175 1.61 matt uint32_t _ds_flags = coherent ? _BUS_DMAMAP_COHERENT : 0;
176 1.41 thorpej
177 1.41 thorpej if (nseg > 0)
178 1.101 skrll lastaddr = segs[nseg - 1].ds_addr + segs[nseg - 1].ds_len;
179 1.58 matt else
180 1.58 matt lastaddr = 0xdead;
181 1.95 skrll
182 1.41 thorpej again:
183 1.41 thorpej sgsize = size;
184 1.41 thorpej
185 1.41 thorpej /* Make sure we're in an allowed DMA range. */
186 1.41 thorpej if (t->_ranges != NULL) {
187 1.41 thorpej /* XXX cache last result? */
188 1.41 thorpej const struct arm32_dma_range * const dr =
189 1.59 matt _bus_dma_paddr_inrange(t->_ranges, t->_nranges, paddr);
190 1.41 thorpej if (dr == NULL)
191 1.100 skrll return EINVAL;
192 1.61 matt
193 1.61 matt /*
194 1.61 matt * If this region is coherent, mark the segment as coherent.
195 1.61 matt */
196 1.61 matt _ds_flags |= dr->dr_flags & _BUS_DMAMAP_COHERENT;
197 1.72 skrll
198 1.41 thorpej /*
199 1.41 thorpej * In a valid DMA range. Translate the physical
200 1.41 thorpej * memory address to an address in the DMA window.
201 1.41 thorpej */
202 1.41 thorpej curaddr = (paddr - dr->dr_sysbase) + dr->dr_busbase;
203 1.72 skrll #if 0
204 1.72 skrll printf("%p: %#lx: range %#lx/%#lx/%#lx/%#x: %#x <-- %#lx\n",
205 1.72 skrll t, paddr, dr->dr_sysbase, dr->dr_busbase,
206 1.72 skrll dr->dr_len, dr->dr_flags, _ds_flags, curaddr);
207 1.72 skrll #endif
208 1.41 thorpej } else
209 1.41 thorpej curaddr = paddr;
210 1.41 thorpej
211 1.41 thorpej /*
212 1.41 thorpej * Make sure we don't cross any boundaries.
213 1.41 thorpej */
214 1.41 thorpej if (map->_dm_boundary > 0) {
215 1.41 thorpej bus_addr_t baddr; /* next boundary address */
216 1.41 thorpej
217 1.41 thorpej baddr = (curaddr + map->_dm_boundary) & bmask;
218 1.41 thorpej if (sgsize > (baddr - curaddr))
219 1.41 thorpej sgsize = (baddr - curaddr);
220 1.41 thorpej }
221 1.41 thorpej
222 1.41 thorpej /*
223 1.41 thorpej * Insert chunk into a segment, coalescing with the
224 1.41 thorpej * previous segment if possible.
225 1.41 thorpej */
226 1.41 thorpej if (nseg > 0 && curaddr == lastaddr &&
227 1.101 skrll segs[nseg - 1].ds_len + sgsize <= map->dm_maxsegsz &&
228 1.101 skrll ((segs[nseg - 1]._ds_flags ^ _ds_flags) & _BUS_DMAMAP_COHERENT) == 0 &&
229 1.41 thorpej (map->_dm_boundary == 0 ||
230 1.101 skrll (segs[nseg - 1].ds_addr & bmask) == (curaddr & bmask))) {
231 1.41 thorpej /* coalesce */
232 1.101 skrll segs[nseg - 1].ds_len += sgsize;
233 1.41 thorpej } else if (nseg >= map->_dm_segcnt) {
234 1.100 skrll return EFBIG;
235 1.41 thorpej } else {
236 1.41 thorpej /* new segment */
237 1.41 thorpej segs[nseg].ds_addr = curaddr;
238 1.41 thorpej segs[nseg].ds_len = sgsize;
239 1.61 matt segs[nseg]._ds_flags = _ds_flags;
240 1.41 thorpej nseg++;
241 1.41 thorpej }
242 1.41 thorpej
243 1.41 thorpej lastaddr = curaddr + sgsize;
244 1.41 thorpej
245 1.41 thorpej paddr += sgsize;
246 1.41 thorpej size -= sgsize;
247 1.41 thorpej if (size > 0)
248 1.41 thorpej goto again;
249 1.61 matt
250 1.61 matt map->_dm_flags &= (_ds_flags & _BUS_DMAMAP_COHERENT);
251 1.41 thorpej map->dm_nsegs = nseg;
252 1.100 skrll return 0;
253 1.41 thorpej }
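/*
 * Worked boundary example (values hypothetical): with
 * map->_dm_boundary = 0x10000, bmask = ~0xffff.  Loading paddr
 * 0x0012ff00 with size 0x200 gives
 *	baddr = (0x0012ff00 + 0x10000) & ~0xffff = 0x00130000,
 * so the first segment is clipped to 0x100 bytes and the remaining
 * 0x100 bytes restart at 0x00130000 via the "again" loop.  A chunk
 * whose curaddr equals the previous lastaddr is instead coalesced
 * into the prior segment, provided the combined length fits
 * dm_maxsegsz, the coherency flags match, and both pieces fall in
 * the same boundary window.
 */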
254 1.41 thorpej
255 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
256 1.58 matt static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
257 1.58 matt bus_size_t size, int flags);
258 1.58 matt static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);
259 1.58 matt static int _bus_dma_uiomove(void *buf, struct uio *uio, size_t n,
260 1.58 matt int direction);
261 1.58 matt
262 1.58 matt static int
263 1.58 matt _bus_dma_load_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
264 1.58 matt size_t buflen, int buftype, int flags)
265 1.58 matt {
266 1.58 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
267 1.58 matt struct vmspace * const vm = vmspace_kernel();
268 1.58 matt int error;
269 1.58 matt
270 1.58 matt KASSERT(cookie != NULL);
271 1.58 matt KASSERT(cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE);
272 1.58 matt
273 1.58 matt /*
274 1.58 matt * Allocate bounce pages, if necessary.
275 1.58 matt */
276 1.58 matt if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
277 1.58 matt error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
278 1.58 matt if (error)
279 1.100 skrll return error;
280 1.58 matt }
281 1.58 matt
282 1.58 matt /*
283 1.58 matt * Cache a pointer to the caller's buffer and load the DMA map
284 1.58 matt * with the bounce buffer.
285 1.58 matt */
286 1.58 matt cookie->id_origbuf = buf;
287 1.58 matt cookie->id_origbuflen = buflen;
288 1.58 matt error = _bus_dmamap_load_buffer(t, map, cookie->id_bouncebuf,
289 1.58 matt buflen, vm, flags);
290 1.58 matt if (error)
291 1.100 skrll return error;
292 1.58 matt
293 1.58 matt STAT_INCR(bounced_loads);
294 1.58 matt map->dm_mapsize = buflen;
295 1.58 matt map->_dm_vmspace = vm;
296 1.58 matt map->_dm_buftype = buftype;
297 1.58 matt
298 1.58 matt /* ...so _bus_dmamap_sync() knows we're bouncing */
299 1.63 matt map->_dm_flags |= _BUS_DMAMAP_IS_BOUNCING;
300 1.58 matt cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
301 1.58 matt return 0;
302 1.58 matt }
303 1.58 matt #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
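/*
 * Sketch of a platform _may_bounce hook (hypothetical; real tags
 * initialize this member elsewhere).  _bus_dmamap_create() below
 * consults it and honours any flags it sets in *cookieflagsp:
 */
#if 0
static int
example_may_bounce(bus_dma_tag_t t, bus_dmamap_t map, int flags,
    int *cookieflagsp)
{

	/* Bounce whenever the device cannot reach all of RAM. */
	if (t->_nranges != 0)
		*cookieflagsp |= _BUS_DMA_MIGHT_NEED_BOUNCE;
	return 0;
}
#endif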
304 1.58 matt
305 1.41 thorpej /*
306 1.1 chris * Common function for DMA map creation. May be called by bus-specific
307 1.1 chris * DMA map creation functions.
308 1.1 chris */
309 1.1 chris int
310 1.7 thorpej _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
311 1.7 thorpej bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
312 1.1 chris {
313 1.1 chris struct arm32_bus_dmamap *map;
314 1.1 chris void *mapstore;
315 1.1 chris
316 1.1 chris #ifdef DEBUG_DMA
317 1.103 skrll printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx"
318 1.103 skrll " flags=%x\n", t, size, nsegments, maxsegsz, boundary, flags);
319 1.1 chris #endif /* DEBUG_DMA */
320 1.1 chris
321 1.1 chris /*
322 1.1 chris * Allocate and initialize the DMA map. The end of the map
323 1.1 chris * is a variable-sized array of segments, so we allocate enough
324 1.1 chris * room for them in one shot.
325 1.1 chris *
326 1.1 chris * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
327 1.1 chris * of ALLOCNOW notifies others that we've reserved these resources,
328 1.1 chris * and they are not to be freed.
329 1.1 chris *
330 1.1 chris * The bus_dmamap_t includes one bus_dma_segment_t, hence
331 1.1 chris * the (nsegments - 1).
332 1.1 chris */
333 1.81 matt const size_t mapsize = sizeof(struct arm32_bus_dmamap) +
334 1.1 chris (sizeof(bus_dma_segment_t) * (nsegments - 1));
335 1.81 matt const int zallocflags = (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
336 1.81 matt if ((mapstore = kmem_intr_zalloc(mapsize, zallocflags)) == NULL)
337 1.100 skrll return ENOMEM;
338 1.1 chris
339 1.1 chris map = (struct arm32_bus_dmamap *)mapstore;
340 1.1 chris map->_dm_size = size;
341 1.1 chris map->_dm_segcnt = nsegments;
342 1.43 matt map->_dm_maxmaxsegsz = maxsegsz;
343 1.1 chris map->_dm_boundary = boundary;
344 1.1 chris map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
345 1.14 thorpej map->_dm_origbuf = NULL;
346 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
347 1.48 yamt map->_dm_vmspace = vmspace_kernel();
348 1.58 matt map->_dm_cookie = NULL;
349 1.43 matt map->dm_maxsegsz = maxsegsz;
350 1.1 chris map->dm_mapsize = 0; /* no valid mappings */
351 1.1 chris map->dm_nsegs = 0;
352 1.1 chris
353 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
354 1.58 matt struct arm32_bus_dma_cookie *cookie;
355 1.58 matt int cookieflags;
356 1.58 matt void *cookiestore;
357 1.58 matt int error;
358 1.58 matt
359 1.58 matt cookieflags = 0;
360 1.58 matt
361 1.58 matt if (t->_may_bounce != NULL) {
362 1.58 matt error = (*t->_may_bounce)(t, map, flags, &cookieflags);
363 1.58 matt if (error != 0)
364 1.58 matt goto out;
365 1.58 matt }
366 1.58 matt
367 1.58 matt if (t->_ranges != NULL)
368 1.58 matt cookieflags |= _BUS_DMA_MIGHT_NEED_BOUNCE;
369 1.58 matt
370 1.58 matt if ((cookieflags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0) {
371 1.58 matt STAT_INCR(creates);
372 1.98 msaitoh *dmamp = map;
373 1.58 matt return 0;
374 1.58 matt }
375 1.58 matt
376 1.81 matt const size_t cookiesize = sizeof(struct arm32_bus_dma_cookie) +
377 1.58 matt (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
378 1.58 matt
379 1.58 matt /*
380 1.58 matt * Allocate our cookie.
381 1.58 matt */
382 1.81 matt if ((cookiestore = kmem_intr_zalloc(cookiesize, zallocflags)) == NULL) {
383 1.58 matt error = ENOMEM;
384 1.58 matt goto out;
385 1.58 matt }
386 1.58 matt cookie = (struct arm32_bus_dma_cookie *)cookiestore;
387 1.58 matt cookie->id_flags = cookieflags;
388 1.58 matt map->_dm_cookie = cookie;
389 1.58 matt STAT_INCR(bounced_creates);
390 1.58 matt
391 1.58 matt error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
392 1.58 matt out:
393 1.58 matt if (error)
394 1.58 matt _bus_dmamap_destroy(t, map);
395 1.98 msaitoh else
396 1.98 msaitoh *dmamp = map;
397 1.58 matt #else
398 1.98 msaitoh *dmamp = map;
399 1.58 matt STAT_INCR(creates);
400 1.58 matt #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
401 1.1 chris #ifdef DEBUG_DMA
402 1.1 chris printf("dmamap_create:map=%p\n", map);
403 1.1 chris #endif /* DEBUG_DMA */
404 1.100 skrll return 0;
405 1.1 chris }
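/*
 * Typical driver use of the creation path (illustrative sketch;
 * "example_softc" and its members are hypothetical):
 */
#if 0
static int
example_create(struct example_softc *sc)
{

	/*
	 * Up to 64KB per transfer, at most 16 segments of at most
	 * 4KB each, with no segment crossing a 64KB boundary.
	 */
	return bus_dmamap_create(sc->sc_dmat, 65536, 16, 4096, 65536,
	    BUS_DMA_WAITOK, &sc->sc_dmamap);
}
#endif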
406 1.1 chris
407 1.1 chris /*
408 1.1 chris * Common function for DMA map destruction. May be called by bus-specific
409 1.1 chris * DMA map destruction functions.
410 1.1 chris */
411 1.1 chris void
412 1.7 thorpej _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
413 1.1 chris {
414 1.1 chris
415 1.1 chris #ifdef DEBUG_DMA
416 1.1 chris printf("dmamap_destroy: t=%p map=%p\n", t, map);
417 1.1 chris #endif /* DEBUG_DMA */
418 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
419 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
420 1.13 briggs
421 1.13 briggs /*
422 1.58 matt * Free any bounce pages this map might hold.
423 1.13 briggs */
424 1.58 matt if (cookie != NULL) {
425 1.81 matt const size_t cookiesize = sizeof(struct arm32_bus_dma_cookie) +
426 1.81 matt (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
427 1.81 matt
428 1.58 matt if (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
429 1.58 matt STAT_INCR(bounced_unloads);
430 1.58 matt map->dm_nsegs = 0;
431 1.58 matt if (cookie->id_flags & _BUS_DMA_HAS_BOUNCE)
432 1.58 matt _bus_dma_free_bouncebuf(t, map);
433 1.58 matt STAT_INCR(bounced_destroys);
434 1.81 matt kmem_intr_free(cookie, cookiesize);
435 1.58 matt } else
436 1.58 matt #endif
437 1.58 matt STAT_INCR(destroys);
438 1.58 matt
439 1.58 matt if (map->dm_nsegs > 0)
440 1.58 matt STAT_INCR(unloads);
441 1.13 briggs
442 1.81 matt const size_t mapsize = sizeof(struct arm32_bus_dmamap) +
443 1.81 matt (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
444 1.81 matt kmem_intr_free(map, mapsize);
445 1.1 chris }
446 1.1 chris
447 1.1 chris /*
448 1.1 chris * Common function for loading a DMA map with a linear buffer. May
449 1.1 chris * be called by bus-specific DMA map load functions.
450 1.1 chris */
451 1.1 chris int
452 1.7 thorpej _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
453 1.7 thorpej bus_size_t buflen, struct proc *p, int flags)
454 1.1 chris {
455 1.58 matt struct vmspace *vm;
456 1.41 thorpej int error;
457 1.1 chris
458 1.1 chris #ifdef DEBUG_DMA
459 1.1 chris printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
460 1.1 chris t, map, buf, buflen, p, flags);
461 1.1 chris #endif /* DEBUG_DMA */
462 1.1 chris
463 1.58 matt if (map->dm_nsegs > 0) {
464 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
465 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
466 1.58 matt if (cookie != NULL) {
467 1.58 matt if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
468 1.58 matt STAT_INCR(bounced_unloads);
469 1.58 matt cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
470 1.63 matt map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
471 1.58 matt }
472 1.58 matt } else
473 1.58 matt #endif
474 1.58 matt STAT_INCR(unloads);
475 1.58 matt }
476 1.58 matt
477 1.1 chris /*
 478 1.1 chris 	 * Make sure that on an error condition we return "no valid mappings."
479 1.1 chris */
480 1.1 chris map->dm_mapsize = 0;
481 1.1 chris map->dm_nsegs = 0;
482 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
483 1.74 matt KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
484 1.74 matt "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
485 1.74 matt map->dm_maxsegsz, map->_dm_maxmaxsegsz);
486 1.1 chris
487 1.1 chris if (buflen > map->_dm_size)
488 1.100 skrll return EINVAL;
489 1.1 chris
490 1.48 yamt if (p != NULL) {
491 1.48 yamt vm = p->p_vmspace;
492 1.48 yamt } else {
493 1.48 yamt vm = vmspace_kernel();
494 1.48 yamt }
495 1.48 yamt
496 1.17 thorpej /* _bus_dmamap_load_buffer() clears this if we're not... */
497 1.58 matt map->_dm_flags |= _BUS_DMAMAP_COHERENT;
498 1.17 thorpej
499 1.48 yamt error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
500 1.1 chris if (error == 0) {
501 1.1 chris map->dm_mapsize = buflen;
502 1.58 matt map->_dm_vmspace = vm;
503 1.14 thorpej map->_dm_origbuf = buf;
504 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_LINEAR;
505 1.81 matt if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
506 1.81 matt STAT_INCR(coherent_loads);
507 1.81 matt } else {
508 1.81 matt STAT_INCR(loads);
509 1.81 matt }
510 1.58 matt return 0;
511 1.1 chris }
512 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
513 1.58 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
514 1.58 matt if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
515 1.58 matt error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
516 1.58 matt _BUS_DMA_BUFTYPE_LINEAR, flags);
517 1.95 skrll }
518 1.95 skrll #endif
519 1.100 skrll return error;
520 1.1 chris }
521 1.1 chris
522 1.1 chris /*
523 1.1 chris * Like _bus_dmamap_load(), but for mbufs.
524 1.1 chris */
525 1.1 chris int
526 1.7 thorpej _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
527 1.7 thorpej int flags)
528 1.1 chris {
529 1.105 skrll struct mbuf *m;
530 1.41 thorpej int error;
531 1.1 chris
532 1.1 chris #ifdef DEBUG_DMA
533 1.1 chris printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
534 1.1 chris t, map, m0, flags);
535 1.1 chris #endif /* DEBUG_DMA */
536 1.1 chris
537 1.58 matt if (map->dm_nsegs > 0) {
538 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
539 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
540 1.58 matt if (cookie != NULL) {
541 1.58 matt if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
542 1.58 matt STAT_INCR(bounced_unloads);
543 1.58 matt cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
544 1.63 matt map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
545 1.58 matt }
546 1.58 matt } else
547 1.58 matt #endif
548 1.58 matt STAT_INCR(unloads);
549 1.58 matt }
550 1.58 matt
551 1.1 chris /*
 552 1.1 chris 	 * Make sure that on an error condition we return "no valid mappings."
553 1.1 chris */
554 1.1 chris map->dm_mapsize = 0;
555 1.1 chris map->dm_nsegs = 0;
556 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
557 1.74 matt KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
558 1.74 matt "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
559 1.74 matt map->dm_maxsegsz, map->_dm_maxmaxsegsz);
560 1.1 chris
561 1.79 matt KASSERT(m0->m_flags & M_PKTHDR);
562 1.1 chris
563 1.1 chris if (m0->m_pkthdr.len > map->_dm_size)
564 1.100 skrll return EINVAL;
565 1.1 chris
566 1.61 matt /* _bus_dmamap_load_paddr() clears this if we're not... */
567 1.61 matt map->_dm_flags |= _BUS_DMAMAP_COHERENT;
568 1.17 thorpej
569 1.1 chris error = 0;
570 1.1 chris for (m = m0; m != NULL && error == 0; m = m->m_next) {
571 1.41 thorpej int offset;
572 1.41 thorpej int remainbytes;
573 1.41 thorpej const struct vm_page * const *pgs;
574 1.41 thorpej paddr_t paddr;
575 1.41 thorpej int size;
576 1.41 thorpej
577 1.28 thorpej if (m->m_len == 0)
578 1.28 thorpej continue;
579 1.57 matt /*
 580 1.57 matt 		 * Don't allow DMA reads (device-to-memory) into read-only mbufs.
581 1.57 matt */
582 1.57 matt if (M_ROMAP(m) && (flags & BUS_DMA_READ)) {
583 1.57 matt error = EFAULT;
584 1.57 matt break;
585 1.57 matt }
586 1.41 thorpej switch (m->m_flags & (M_EXT|M_CLUSTER|M_EXT_PAGES)) {
587 1.28 thorpej case M_EXT|M_CLUSTER:
588 1.28 thorpej /* XXX KDASSERT */
589 1.28 thorpej KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
590 1.41 thorpej paddr = m->m_ext.ext_paddr +
591 1.28 thorpej (m->m_data - m->m_ext.ext_buf);
592 1.41 thorpej size = m->m_len;
593 1.61 matt error = _bus_dmamap_load_paddr(t, map, paddr, size,
594 1.61 matt false);
595 1.41 thorpej break;
596 1.95 skrll
597 1.41 thorpej case M_EXT|M_EXT_PAGES:
598 1.41 thorpej KASSERT(m->m_ext.ext_buf <= m->m_data);
599 1.41 thorpej KASSERT(m->m_data <=
600 1.41 thorpej m->m_ext.ext_buf + m->m_ext.ext_size);
601 1.95 skrll
602 1.41 thorpej offset = (vaddr_t)m->m_data -
603 1.41 thorpej trunc_page((vaddr_t)m->m_ext.ext_buf);
604 1.41 thorpej remainbytes = m->m_len;
605 1.41 thorpej
606 1.41 thorpej /* skip uninteresting pages */
607 1.41 thorpej pgs = (const struct vm_page * const *)
608 1.41 thorpej m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);
609 1.95 skrll
610 1.41 thorpej offset &= PAGE_MASK; /* offset in the first page */
611 1.41 thorpej
612 1.41 thorpej /* load each page */
613 1.41 thorpej while (remainbytes > 0) {
614 1.41 thorpej const struct vm_page *pg;
615 1.41 thorpej
616 1.41 thorpej size = MIN(remainbytes, PAGE_SIZE - offset);
617 1.41 thorpej
618 1.41 thorpej pg = *pgs++;
619 1.41 thorpej KASSERT(pg);
620 1.41 thorpej paddr = VM_PAGE_TO_PHYS(pg) + offset;
621 1.41 thorpej
622 1.41 thorpej error = _bus_dmamap_load_paddr(t, map,
623 1.61 matt paddr, size, false);
624 1.41 thorpej if (error)
625 1.28 thorpej break;
626 1.41 thorpej offset = 0;
627 1.41 thorpej remainbytes -= size;
628 1.28 thorpej }
629 1.28 thorpej break;
630 1.28 thorpej
631 1.28 thorpej case 0:
632 1.41 thorpej paddr = m->m_paddr + M_BUFOFFSET(m) +
633 1.28 thorpej (m->m_data - M_BUFADDR(m));
634 1.41 thorpej size = m->m_len;
635 1.61 matt error = _bus_dmamap_load_paddr(t, map, paddr, size,
636 1.61 matt false);
637 1.41 thorpej break;
638 1.28 thorpej
639 1.28 thorpej default:
640 1.28 thorpej error = _bus_dmamap_load_buffer(t, map, m->m_data,
641 1.48 yamt m->m_len, vmspace_kernel(), flags);
642 1.28 thorpej }
643 1.1 chris }
644 1.1 chris if (error == 0) {
645 1.1 chris map->dm_mapsize = m0->m_pkthdr.len;
646 1.14 thorpej map->_dm_origbuf = m0;
647 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_MBUF;
648 1.48 yamt map->_dm_vmspace = vmspace_kernel(); /* always kernel */
649 1.81 matt if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
650 1.81 matt STAT_INCR(coherent_loads);
651 1.81 matt } else {
652 1.81 matt STAT_INCR(loads);
653 1.81 matt }
654 1.58 matt return 0;
655 1.1 chris }
656 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
657 1.58 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
658 1.58 matt if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
659 1.58 matt error = _bus_dma_load_bouncebuf(t, map, m0, m0->m_pkthdr.len,
660 1.58 matt _BUS_DMA_BUFTYPE_MBUF, flags);
661 1.95 skrll }
662 1.95 skrll #endif
663 1.100 skrll return error;
664 1.1 chris }
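/*
 * Illustrative transmit-path use (hypothetical driver): note the
 * BUS_DMA_WRITE hint; BUS_DMA_READ (device-to-memory) loads of
 * read-only mbufs are rejected with EFAULT above.
 */
#if 0
static int
example_load_tx_mbuf(struct example_softc *sc, bus_dmamap_t map,
    struct mbuf *m0)
{
	int error;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error)
		return error;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	/* hand map->dm_segs[0 .. dm_nsegs - 1] to the device here */
	return 0;
}
#endif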
665 1.1 chris
666 1.1 chris /*
667 1.1 chris * Like _bus_dmamap_load(), but for uios.
668 1.1 chris */
669 1.1 chris int
670 1.7 thorpej _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
671 1.7 thorpej int flags)
672 1.1 chris {
673 1.1 chris bus_size_t minlen, resid;
674 1.1 chris struct iovec *iov;
675 1.50 christos void *addr;
676 1.105 skrll int i, error;
677 1.1 chris
678 1.1 chris /*
 679 1.1 chris 	 * Make sure that on an error condition we return "no valid mappings."
680 1.1 chris */
681 1.1 chris map->dm_mapsize = 0;
682 1.1 chris map->dm_nsegs = 0;
683 1.74 matt KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
684 1.74 matt "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
685 1.74 matt map->dm_maxsegsz, map->_dm_maxmaxsegsz);
686 1.1 chris
687 1.1 chris resid = uio->uio_resid;
688 1.1 chris iov = uio->uio_iov;
689 1.1 chris
690 1.17 thorpej /* _bus_dmamap_load_buffer() clears this if we're not... */
691 1.58 matt map->_dm_flags |= _BUS_DMAMAP_COHERENT;
692 1.17 thorpej
693 1.1 chris error = 0;
694 1.1 chris for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
695 1.1 chris /*
696 1.1 chris * Now at the first iovec to load. Load each iovec
697 1.1 chris * until we have exhausted the residual count.
698 1.1 chris */
699 1.1 chris minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
700 1.50 christos addr = (void *)iov[i].iov_base;
701 1.1 chris
702 1.1 chris error = _bus_dmamap_load_buffer(t, map, addr, minlen,
703 1.48 yamt uio->uio_vmspace, flags);
704 1.1 chris
705 1.1 chris resid -= minlen;
706 1.1 chris }
707 1.1 chris if (error == 0) {
708 1.1 chris map->dm_mapsize = uio->uio_resid;
709 1.14 thorpej map->_dm_origbuf = uio;
710 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_UIO;
711 1.48 yamt map->_dm_vmspace = uio->uio_vmspace;
712 1.81 matt if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
713 1.81 matt STAT_INCR(coherent_loads);
714 1.81 matt } else {
715 1.81 matt STAT_INCR(loads);
716 1.81 matt }
717 1.1 chris }
718 1.100 skrll return error;
719 1.1 chris }
720 1.1 chris
721 1.1 chris /*
722 1.1 chris * Like _bus_dmamap_load(), but for raw memory allocated with
723 1.1 chris * bus_dmamem_alloc().
724 1.1 chris */
725 1.1 chris int
726 1.7 thorpej _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
727 1.94 jmcneill bus_dma_segment_t *segs, int nsegs, bus_size_t size0, int flags)
728 1.1 chris {
729 1.1 chris
730 1.94 jmcneill bus_size_t size;
731 1.94 jmcneill int i, error = 0;
732 1.94 jmcneill
733 1.94 jmcneill /*
 734 1.94 jmcneill 	 * Make sure that on an error condition we return "no valid mappings."
735 1.94 jmcneill */
736 1.94 jmcneill map->dm_mapsize = 0;
737 1.94 jmcneill map->dm_nsegs = 0;
738 1.94 jmcneill KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
739 1.94 jmcneill
740 1.94 jmcneill if (size0 > map->_dm_size)
741 1.94 jmcneill return EINVAL;
742 1.94 jmcneill
743 1.94 jmcneill for (i = 0, size = size0; i < nsegs && size > 0; i++) {
744 1.94 jmcneill bus_dma_segment_t *ds = &segs[i];
745 1.94 jmcneill bus_size_t sgsize;
746 1.94 jmcneill
747 1.94 jmcneill sgsize = MIN(ds->ds_len, size);
748 1.94 jmcneill if (sgsize == 0)
749 1.94 jmcneill continue;
750 1.94 jmcneill error = _bus_dmamap_load_paddr(t, map, ds->ds_addr,
751 1.94 jmcneill sgsize, false);
752 1.94 jmcneill if (error != 0)
753 1.94 jmcneill break;
754 1.94 jmcneill size -= sgsize;
755 1.94 jmcneill }
756 1.94 jmcneill
757 1.94 jmcneill if (error != 0) {
758 1.94 jmcneill map->dm_mapsize = 0;
759 1.94 jmcneill map->dm_nsegs = 0;
760 1.94 jmcneill return error;
761 1.94 jmcneill }
762 1.94 jmcneill
763 1.94 jmcneill /* XXX TBD bounce */
764 1.94 jmcneill
765 1.94 jmcneill map->dm_mapsize = size0;
766 1.94 jmcneill return 0;
767 1.1 chris }
768 1.1 chris
769 1.1 chris /*
770 1.1 chris * Common function for unloading a DMA map. May be called by
771 1.1 chris * bus-specific DMA map unload functions.
772 1.1 chris */
773 1.1 chris void
774 1.7 thorpej _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
775 1.1 chris {
776 1.1 chris
777 1.1 chris #ifdef DEBUG_DMA
778 1.1 chris printf("dmamap_unload: t=%p map=%p\n", t, map);
779 1.1 chris #endif /* DEBUG_DMA */
780 1.1 chris
781 1.1 chris /*
782 1.1 chris * No resources to free; just mark the mappings as
783 1.1 chris * invalid.
784 1.1 chris */
785 1.1 chris map->dm_mapsize = 0;
786 1.1 chris map->dm_nsegs = 0;
787 1.14 thorpej map->_dm_origbuf = NULL;
788 1.58 matt map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
789 1.48 yamt map->_dm_vmspace = NULL;
790 1.1 chris }
791 1.1 chris
792 1.57 matt static void
793 1.103 skrll _bus_dmamap_sync_segment(vaddr_t va, paddr_t pa, vsize_t len, int ops,
794 1.103 skrll bool readonly_p)
795 1.14 thorpej {
796 1.106 skrll
797 1.106 skrll #ifdef ARM_MMU_EXTENDED
798 1.106 skrll /*
799 1.106 skrll * No optimisations are available for readonly mbufs on armv6+, so
800 1.106 skrll * assume it's not readonly from here on.
801 1.106 skrll *
802 1.106 skrll * See the comment in _bus_dmamap_sync_mbuf
803 1.106 skrll */
804 1.106 skrll readonly_p = false;
805 1.106 skrll #endif
806 1.106 skrll
807 1.86 matt KASSERTMSG((va & PAGE_MASK) == (pa & PAGE_MASK),
808 1.86 matt "va %#lx pa %#lx", va, pa);
809 1.62 matt #if 0
810 1.62 matt printf("sync_segment: va=%#lx pa=%#lx len=%#lx ops=%#x ro=%d\n",
811 1.62 matt va, pa, len, ops, readonly_p);
812 1.62 matt #endif
813 1.14 thorpej
814 1.14 thorpej switch (ops) {
815 1.14 thorpej case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
816 1.57 matt if (!readonly_p) {
817 1.76 matt STAT_INCR(sync_prereadwrite);
818 1.57 matt cpu_dcache_wbinv_range(va, len);
819 1.57 matt cpu_sdcache_wbinv_range(va, pa, len);
820 1.57 matt break;
821 1.57 matt }
822 1.57 matt /* FALLTHROUGH */
823 1.14 thorpej
824 1.57 matt case BUS_DMASYNC_PREREAD: {
825 1.59 matt const size_t line_size = arm_dcache_align;
826 1.59 matt const size_t line_mask = arm_dcache_align_mask;
827 1.59 matt vsize_t misalignment = va & line_mask;
828 1.57 matt if (misalignment) {
829 1.59 matt va -= misalignment;
830 1.59 matt pa -= misalignment;
831 1.59 matt len += misalignment;
832 1.77 matt STAT_INCR(sync_preread_begin);
833 1.59 matt cpu_dcache_wbinv_range(va, line_size);
834 1.59 matt cpu_sdcache_wbinv_range(va, pa, line_size);
835 1.59 matt if (len <= line_size)
836 1.57 matt break;
837 1.59 matt va += line_size;
838 1.59 matt pa += line_size;
839 1.59 matt len -= line_size;
840 1.57 matt }
841 1.59 matt misalignment = len & line_mask;
842 1.57 matt len -= misalignment;
843 1.65 matt if (len > 0) {
844 1.77 matt STAT_INCR(sync_preread);
845 1.65 matt cpu_dcache_inv_range(va, len);
846 1.65 matt cpu_sdcache_inv_range(va, pa, len);
847 1.65 matt }
848 1.57 matt if (misalignment) {
849 1.57 matt va += len;
850 1.57 matt pa += len;
851 1.77 matt STAT_INCR(sync_preread_tail);
852 1.59 matt cpu_dcache_wbinv_range(va, line_size);
853 1.59 matt cpu_sdcache_wbinv_range(va, pa, line_size);
854 1.57 matt }
855 1.14 thorpej break;
856 1.57 matt }
857 1.14 thorpej
858 1.14 thorpej case BUS_DMASYNC_PREWRITE:
859 1.76 matt STAT_INCR(sync_prewrite);
860 1.57 matt cpu_dcache_wb_range(va, len);
861 1.57 matt cpu_sdcache_wb_range(va, pa, len);
862 1.14 thorpej break;
863 1.67 matt
864 1.67 matt #ifdef CPU_CORTEX
865 1.67 matt /*
 866 1.67 matt 	 * Cortex CPUs can do speculative loads, so we need to fix up the
 867 1.67 matt 	 * cache after a DMA read to discard any speculatively loaded lines.
868 1.67 matt * Since these can't be dirty, we can just invalidate them and don't
869 1.67 matt * have to worry about having to write back their contents.
870 1.67 matt */
871 1.67 matt case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
872 1.76 matt STAT_INCR(sync_postreadwrite);
873 1.76 matt cpu_dcache_inv_range(va, len);
874 1.76 matt cpu_sdcache_inv_range(va, pa, len);
875 1.76 matt break;
876 1.67 matt case BUS_DMASYNC_POSTREAD:
877 1.76 matt STAT_INCR(sync_postread);
878 1.67 matt cpu_dcache_inv_range(va, len);
879 1.67 matt cpu_sdcache_inv_range(va, pa, len);
880 1.67 matt break;
881 1.67 matt #endif
882 1.14 thorpej }
883 1.14 thorpej }
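/*
 * Worked PREREAD example (hypothetical values, 32-byte cache lines):
 * for va = 0x1010, len = 0x65, the leading misalignment is 0x10, so
 * the range is widened to va 0x1000, len 0x75.  The first line
 * (0x1000-0x101f) is written back and invalidated because it may
 * hold dirty data belonging to bytes outside the buffer; the aligned
 * middle (0x1020-0x105f) is simply invalidated; the trailing partial
 * line (0x1060-0x107f) is again written back and invalidated.
 */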
884 1.14 thorpej
885 1.47 perry static inline void
886 1.57 matt _bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
887 1.14 thorpej bus_size_t len, int ops)
888 1.14 thorpej {
889 1.57 matt bus_dma_segment_t *ds = map->dm_segs;
890 1.57 matt vaddr_t va = (vaddr_t) map->_dm_origbuf;
891 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
892 1.63 matt if (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING) {
893 1.63 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
894 1.58 matt va = (vaddr_t) cookie->id_bouncebuf;
895 1.58 matt }
896 1.58 matt #endif
897 1.57 matt
898 1.57 matt while (len > 0) {
899 1.57 matt while (offset >= ds->ds_len) {
900 1.57 matt offset -= ds->ds_len;
901 1.57 matt va += ds->ds_len;
902 1.57 matt ds++;
903 1.57 matt }
904 1.57 matt
905 1.59 matt paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + offset);
906 1.57 matt size_t seglen = min(len, ds->ds_len - offset);
907 1.57 matt
908 1.61 matt if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
909 1.61 matt _bus_dmamap_sync_segment(va + offset, pa, seglen, ops,
910 1.67 matt false);
911 1.57 matt
912 1.57 matt offset += seglen;
913 1.57 matt len -= seglen;
914 1.57 matt }
915 1.57 matt }
916 1.57 matt
917 1.57 matt static inline void
918 1.57 matt _bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t offset,
919 1.57 matt bus_size_t len, int ops)
920 1.57 matt {
921 1.57 matt bus_dma_segment_t *ds = map->dm_segs;
922 1.57 matt struct mbuf *m = map->_dm_origbuf;
923 1.57 matt bus_size_t voff = offset;
924 1.57 matt bus_size_t ds_off = offset;
925 1.57 matt
926 1.57 matt while (len > 0) {
927 1.57 matt /* Find the current dma segment */
928 1.57 matt while (ds_off >= ds->ds_len) {
929 1.57 matt ds_off -= ds->ds_len;
930 1.57 matt ds++;
931 1.57 matt }
932 1.57 matt /* Find the current mbuf. */
933 1.57 matt while (voff >= m->m_len) {
934 1.57 matt voff -= m->m_len;
935 1.57 matt m = m->m_next;
936 1.14 thorpej }
937 1.14 thorpej
938 1.14 thorpej /*
939 1.14 thorpej * Now at the first mbuf to sync; nail each one until
940 1.14 thorpej * we have exhausted the length.
941 1.14 thorpej */
942 1.57 matt vsize_t seglen = min(len, min(m->m_len - voff, ds->ds_len - ds_off));
943 1.57 matt vaddr_t va = mtod(m, vaddr_t) + voff;
944 1.59 matt paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);
945 1.14 thorpej
946 1.28 thorpej /*
947 1.28 thorpej * We can save a lot of work here if we know the mapping
948 1.93 matt * is read-only at the MMU and we aren't using the armv6+
949 1.93 matt * MMU:
950 1.28 thorpej *
951 1.28 thorpej * If a mapping is read-only, no dirty cache blocks will
952 1.28 thorpej * exist for it. If a writable mapping was made read-only,
953 1.28 thorpej * we know any dirty cache lines for the range will have
954 1.28 thorpej * been cleaned for us already. Therefore, if the upper
955 1.28 thorpej * layer can tell us we have a read-only mapping, we can
956 1.28 thorpej * skip all cache cleaning.
957 1.28 thorpej *
958 1.28 thorpej * NOTE: This only works if we know the pmap cleans pages
959 1.28 thorpej * before making a read-write -> read-only transition. If
960 1.28 thorpej * this ever becomes non-true (e.g. Physically Indexed
961 1.28 thorpej * cache), this will have to be revisited.
962 1.28 thorpej */
963 1.14 thorpej
964 1.92 matt if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0) {
965 1.92 matt /*
966 1.92 matt * If we are doing preread (DMAing into the mbuf),
967 1.95 skrll * this mbuf better not be readonly,
968 1.92 matt */
969 1.92 matt KASSERT(!(ops & BUS_DMASYNC_PREREAD) || !M_ROMAP(m));
970 1.61 matt _bus_dmamap_sync_segment(va, pa, seglen, ops,
971 1.61 matt M_ROMAP(m));
972 1.92 matt }
973 1.57 matt voff += seglen;
974 1.57 matt ds_off += seglen;
975 1.57 matt len -= seglen;
976 1.14 thorpej }
977 1.14 thorpej }
978 1.14 thorpej
979 1.47 perry static inline void
980 1.14 thorpej _bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
981 1.14 thorpej bus_size_t len, int ops)
982 1.14 thorpej {
983 1.57 matt bus_dma_segment_t *ds = map->dm_segs;
984 1.14 thorpej struct uio *uio = map->_dm_origbuf;
985 1.57 matt struct iovec *iov = uio->uio_iov;
986 1.57 matt bus_size_t voff = offset;
987 1.57 matt bus_size_t ds_off = offset;
988 1.57 matt
989 1.57 matt while (len > 0) {
990 1.57 matt /* Find the current dma segment */
991 1.57 matt while (ds_off >= ds->ds_len) {
992 1.57 matt ds_off -= ds->ds_len;
993 1.57 matt ds++;
994 1.57 matt }
995 1.14 thorpej
996 1.57 matt /* Find the current iovec. */
997 1.57 matt while (voff >= iov->iov_len) {
998 1.57 matt voff -= iov->iov_len;
999 1.57 matt iov++;
1000 1.14 thorpej }
1001 1.14 thorpej
1002 1.14 thorpej /*
1003 1.14 thorpej * Now at the first iovec to sync; nail each one until
1004 1.14 thorpej * we have exhausted the length.
1005 1.14 thorpej */
1006 1.57 matt vsize_t seglen = min(len, min(iov->iov_len - voff, ds->ds_len - ds_off));
1007 1.57 matt vaddr_t va = (vaddr_t) iov->iov_base + voff;
1008 1.59 matt paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);
1009 1.57 matt
1010 1.61 matt if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
1011 1.61 matt _bus_dmamap_sync_segment(va, pa, seglen, ops, false);
1012 1.57 matt
1013 1.57 matt voff += seglen;
1014 1.57 matt ds_off += seglen;
1015 1.57 matt len -= seglen;
1016 1.14 thorpej }
1017 1.14 thorpej }
1018 1.14 thorpej
1019 1.1 chris /*
1020 1.1 chris * Common function for DMA map synchronization. May be called
1021 1.1 chris * by bus-specific DMA map synchronization functions.
1022 1.8 thorpej *
1023 1.8 thorpej * This version works for the Virtually Indexed Virtually Tagged
1024 1.8 thorpej * cache found on 32-bit ARM processors.
1025 1.8 thorpej *
1026 1.8 thorpej * XXX Should have separate versions for write-through vs.
1027 1.8 thorpej * XXX write-back caches. We currently assume write-back
1028 1.8 thorpej * XXX here, which is not as efficient as it could be for
1029 1.8 thorpej * XXX the write-through case.
1030 1.1 chris */
1031 1.1 chris void
1032 1.7 thorpej _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
1033 1.7 thorpej bus_size_t len, int ops)
1034 1.1 chris {
1035 1.1 chris #ifdef DEBUG_DMA
1036 1.1 chris printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
1037 1.1 chris t, map, offset, len, ops);
1038 1.1 chris #endif /* DEBUG_DMA */
1039 1.1 chris
1040 1.8 thorpej /*
1041 1.8 thorpej * Mixing of PRE and POST operations is not allowed.
1042 1.8 thorpej */
1043 1.8 thorpej if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
1044 1.8 thorpej (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
1045 1.8 thorpej panic("_bus_dmamap_sync: mix PRE and POST");
1046 1.8 thorpej
1047 1.79 matt KASSERTMSG(offset < map->dm_mapsize,
1048 1.79 matt "offset %lu mapsize %lu",
1049 1.79 matt offset, map->dm_mapsize);
1050 1.79 matt KASSERTMSG(len > 0 && offset + len <= map->dm_mapsize,
1051 1.79 matt "len %lu offset %lu mapsize %lu",
1052 1.79 matt len, offset, map->dm_mapsize);
1053 1.8 thorpej
1054 1.8 thorpej /*
1055 1.8 thorpej * For a virtually-indexed write-back cache, we need
1056 1.8 thorpej * to do the following things:
1057 1.8 thorpej *
1058 1.8 thorpej * PREREAD -- Invalidate the D-cache. We do this
1059 1.8 thorpej * here in case a write-back is required by the back-end.
1060 1.8 thorpej *
1061 1.8 thorpej * PREWRITE -- Write-back the D-cache. Note that if
1062 1.8 thorpej * we are doing a PREREAD|PREWRITE, we can collapse
1063 1.8 thorpej * the whole thing into a single Wb-Inv.
1064 1.8 thorpej *
1065 1.67 matt * POSTREAD -- Re-invalidate the D-cache in case speculative
1066 1.67 matt * memory accesses caused cachelines to become valid with now
1067 1.67 matt * invalid data.
1068 1.8 thorpej *
1069 1.8 thorpej * POSTWRITE -- Nothing.
1070 1.8 thorpej */
1071 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1072 1.74 matt const bool bouncing = (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING);
1073 1.63 matt #else
1074 1.63 matt const bool bouncing = false;
1075 1.58 matt #endif
1076 1.8 thorpej
1077 1.58 matt const int pre_ops = ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1078 1.67 matt #ifdef CPU_CORTEX
1079 1.67 matt const int post_ops = ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1080 1.67 matt #else
1081 1.67 matt const int post_ops = 0;
1082 1.67 matt #endif
1083 1.96 skrll if (!bouncing) {
1084 1.96 skrll if (pre_ops == 0 && post_ops == BUS_DMASYNC_POSTWRITE) {
1085 1.96 skrll STAT_INCR(sync_postwrite);
1086 1.96 skrll return;
1087 1.96 skrll } else if (pre_ops == 0 && post_ops == 0) {
1088 1.96 skrll return;
1089 1.96 skrll }
1090 1.61 matt }
1091 1.74 matt KASSERTMSG(bouncing || pre_ops != 0 || (post_ops & BUS_DMASYNC_POSTREAD),
1092 1.74 matt "pre_ops %#x post_ops %#x", pre_ops, post_ops);
1093 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1094 1.58 matt if (bouncing && (ops & BUS_DMASYNC_PREWRITE)) {
1095 1.63 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
1096 1.58 matt STAT_INCR(write_bounces);
1097 1.58 matt char * const dataptr = (char *)cookie->id_bouncebuf + offset;
1098 1.58 matt /*
1099 1.58 matt * Copy the caller's buffer to the bounce buffer.
1100 1.58 matt */
1101 1.58 matt switch (map->_dm_buftype) {
1102 1.58 matt case _BUS_DMA_BUFTYPE_LINEAR:
1103 1.58 matt memcpy(dataptr, cookie->id_origlinearbuf + offset, len);
1104 1.58 matt break;
1105 1.58 matt case _BUS_DMA_BUFTYPE_MBUF:
1106 1.58 matt m_copydata(cookie->id_origmbuf, offset, len, dataptr);
1107 1.58 matt break;
1108 1.58 matt case _BUS_DMA_BUFTYPE_UIO:
1109 1.58 matt _bus_dma_uiomove(dataptr, cookie->id_origuio, len, UIO_WRITE);
1110 1.58 matt break;
1111 1.58 matt #ifdef DIAGNOSTIC
1112 1.58 matt case _BUS_DMA_BUFTYPE_RAW:
1113 1.58 matt panic("_bus_dmamap_sync(pre): _BUS_DMA_BUFTYPE_RAW");
1114 1.58 matt break;
1115 1.58 matt
1116 1.58 matt case _BUS_DMA_BUFTYPE_INVALID:
1117 1.58 matt panic("_bus_dmamap_sync(pre): _BUS_DMA_BUFTYPE_INVALID");
1118 1.58 matt break;
1119 1.58 matt
1120 1.58 matt default:
1121 1.58 matt panic("_bus_dmamap_sync(pre): map %p: unknown buffer type %d\n",
1122 1.58 matt map, map->_dm_buftype);
1123 1.58 matt break;
1124 1.58 matt #endif /* DIAGNOSTIC */
1125 1.58 matt }
1126 1.58 matt }
1127 1.58 matt #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
1128 1.58 matt
1129 1.17 thorpej /* Skip cache frobbing if mapping was COHERENT. */
1130 1.75 matt if (!bouncing && (map->_dm_flags & _BUS_DMAMAP_COHERENT)) {
1131 1.17 thorpej /* Drain the write buffer. */
1132 1.75 matt if (pre_ops & BUS_DMASYNC_PREWRITE)
1133 1.75 matt cpu_drain_writebuf();
1134 1.17 thorpej return;
1135 1.17 thorpej }
1136 1.8 thorpej
1137 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1138 1.58 matt if (bouncing && ((map->_dm_flags & _BUS_DMAMAP_COHERENT) || pre_ops == 0)) {
1139 1.58 matt goto bounce_it;
1140 1.58 matt }
1141 1.58 matt #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
1142 1.58 matt
1143 1.80 matt #ifndef ARM_MMU_EXTENDED
1144 1.8 thorpej /*
1145 1.38 scw * If the mapping belongs to a non-kernel vmspace, and the
1146 1.38 scw * vmspace has not been active since the last time a full
1147 1.38 scw * cache flush was performed, we don't need to do anything.
1148 1.8 thorpej */
1149 1.48 yamt if (__predict_false(!VMSPACE_IS_KERNEL_P(map->_dm_vmspace) &&
1150 1.48 yamt vm_map_pmap(&map->_dm_vmspace->vm_map)->pm_cstate.cs_cache_d == 0))
1151 1.8 thorpej return;
1152 1.80 matt #endif
1153 1.8 thorpej
1154 1.58 matt int buftype = map->_dm_buftype;
1155 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1156 1.58 matt if (bouncing) {
1157 1.58 matt buftype = _BUS_DMA_BUFTYPE_LINEAR;
1158 1.58 matt }
1159 1.58 matt #endif
1160 1.58 matt
1161 1.58 matt switch (buftype) {
1162 1.58 matt case _BUS_DMA_BUFTYPE_LINEAR:
1163 1.14 thorpej _bus_dmamap_sync_linear(t, map, offset, len, ops);
1164 1.14 thorpej break;
1165 1.14 thorpej
1166 1.58 matt case _BUS_DMA_BUFTYPE_MBUF:
1167 1.14 thorpej _bus_dmamap_sync_mbuf(t, map, offset, len, ops);
1168 1.14 thorpej break;
1169 1.14 thorpej
1170 1.58 matt case _BUS_DMA_BUFTYPE_UIO:
1171 1.14 thorpej _bus_dmamap_sync_uio(t, map, offset, len, ops);
1172 1.14 thorpej break;
1173 1.14 thorpej
1174 1.58 matt case _BUS_DMA_BUFTYPE_RAW:
1175 1.58 matt panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
1176 1.14 thorpej break;
1177 1.14 thorpej
1178 1.58 matt case _BUS_DMA_BUFTYPE_INVALID:
1179 1.58 matt panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
1180 1.14 thorpej break;
1181 1.14 thorpej
1182 1.14 thorpej default:
1183 1.58 matt panic("_bus_dmamap_sync: map %p: unknown buffer type %d\n",
1184 1.58 matt map, map->_dm_buftype);
1185 1.8 thorpej }
1186 1.1 chris
1187 1.8 thorpej /* Drain the write buffer. */
1188 1.8 thorpej cpu_drain_writebuf();
1189 1.58 matt
1190 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1191 1.58 matt bounce_it:
1192 1.76 matt if (!bouncing || (ops & BUS_DMASYNC_POSTREAD) == 0)
1193 1.58 matt return;
1194 1.58 matt
1195 1.63 matt struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
1196 1.58 matt char * const dataptr = (char *)cookie->id_bouncebuf + offset;
1197 1.58 matt STAT_INCR(read_bounces);
1198 1.58 matt /*
1199 1.58 matt * Copy the bounce buffer to the caller's buffer.
1200 1.58 matt */
1201 1.58 matt switch (map->_dm_buftype) {
1202 1.58 matt case _BUS_DMA_BUFTYPE_LINEAR:
1203 1.58 matt memcpy(cookie->id_origlinearbuf + offset, dataptr, len);
1204 1.58 matt break;
1205 1.58 matt
1206 1.58 matt case _BUS_DMA_BUFTYPE_MBUF:
1207 1.58 matt m_copyback(cookie->id_origmbuf, offset, len, dataptr);
1208 1.58 matt break;
1209 1.58 matt
1210 1.58 matt case _BUS_DMA_BUFTYPE_UIO:
1211 1.58 matt _bus_dma_uiomove(dataptr, cookie->id_origuio, len, UIO_READ);
1212 1.58 matt break;
1213 1.58 matt #ifdef DIAGNOSTIC
1214 1.58 matt case _BUS_DMA_BUFTYPE_RAW:
1215 1.58 matt panic("_bus_dmamap_sync(post): _BUS_DMA_BUFTYPE_RAW");
1216 1.58 matt break;
1217 1.58 matt
1218 1.58 matt case _BUS_DMA_BUFTYPE_INVALID:
1219 1.58 matt panic("_bus_dmamap_sync(post): _BUS_DMA_BUFTYPE_INVALID");
1220 1.58 matt break;
1221 1.58 matt
1222 1.58 matt default:
1223 1.58 matt panic("_bus_dmamap_sync(post): map %p: unknown buffer type %d\n",
1224 1.58 matt map, map->_dm_buftype);
1225 1.58 matt break;
1226 1.58 matt #endif
1227 1.58 matt }
1228 1.58 matt #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
1229 1.1 chris }
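/*
 * Canonical pairing of the operations described above (illustrative
 * sketch; "example_softc" is hypothetical): PRE ops go before the
 * DMA is started, the matching POST ops after it completes.
 */
#if 0
static void
example_read_from_device(struct example_softc *sc, bus_dmamap_t map,
    bus_size_t len)
{

	/* Device will write into the buffer: PREREAD before starting. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);
	/* ... start the device, wait for the completion interrupt ... */
	/* POSTREAD after completion, before the CPU reads the data. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
}
#endif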
1230 1.1 chris
1231 1.1 chris /*
1232 1.1 chris * Common function for DMA-safe memory allocation. May be called
1233 1.1 chris * by bus-specific DMA memory allocation functions.
1234 1.1 chris */
1235 1.1 chris
1236 1.11 thorpej extern paddr_t physical_start;
1237 1.11 thorpej extern paddr_t physical_end;
1238 1.1 chris
1239 1.1 chris int
1240 1.7 thorpej _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
1241 1.7 thorpej bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
1242 1.7 thorpej int flags)
1243 1.1 chris {
1244 1.15 thorpej struct arm32_dma_range *dr;
1245 1.37 mycroft int error, i;
1246 1.15 thorpej
1247 1.1 chris #ifdef DEBUG_DMA
1248 1.15 thorpej printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
1249 1.15 thorpej "segs=%p nsegs=%x rsegs=%p flags=%x\n", t, size, alignment,
1250 1.15 thorpej boundary, segs, nsegs, rsegs, flags);
1251 1.15 thorpej #endif
1252 1.15 thorpej
1253 1.15 thorpej if ((dr = t->_ranges) != NULL) {
1254 1.37 mycroft error = ENOMEM;
1255 1.15 thorpej for (i = 0; i < t->_nranges; i++, dr++) {
1256 1.70 matt if (dr->dr_len == 0
1257 1.70 matt || (dr->dr_flags & _BUS_DMAMAP_NOALLOC))
1258 1.15 thorpej continue;
1259 1.15 thorpej error = _bus_dmamem_alloc_range(t, size, alignment,
1260 1.15 thorpej boundary, segs, nsegs, rsegs, flags,
1261 1.15 thorpej trunc_page(dr->dr_sysbase),
1262 1.15 thorpej trunc_page(dr->dr_sysbase + dr->dr_len));
1263 1.15 thorpej if (error == 0)
1264 1.15 thorpej break;
1265 1.15 thorpej }
1266 1.15 thorpej } else {
1267 1.15 thorpej error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
1268 1.15 thorpej segs, nsegs, rsegs, flags, trunc_page(physical_start),
1269 1.15 thorpej trunc_page(physical_end));
1270 1.15 thorpej }
1271 1.15 thorpej
1272 1.1 chris #ifdef DEBUG_DMA
1273 1.1 chris printf("dmamem_alloc: =%d\n", error);
1274 1.15 thorpej #endif
1275 1.15 thorpej
1276 1.100 skrll return error;
1277 1.1 chris }
1278 1.1 chris
1279 1.1 chris /*
1280 1.1 chris * Common function for freeing DMA-safe memory. May be called by
1281 1.1 chris * bus-specific DMA memory free functions.
1282 1.1 chris */
1283 1.1 chris void
1284 1.7 thorpej _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
1285 1.1 chris {
1286 1.1 chris struct vm_page *m;
1287 1.1 chris bus_addr_t addr;
1288 1.1 chris struct pglist mlist;
1289 1.1 chris int curseg;
1290 1.1 chris
1291 1.1 chris #ifdef DEBUG_DMA
1292 1.1 chris printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
1293 1.1 chris #endif /* DEBUG_DMA */
1294 1.1 chris
1295 1.1 chris /*
1296 1.1 chris * Build a list of pages to free back to the VM system.
1297 1.1 chris */
1298 1.1 chris TAILQ_INIT(&mlist);
1299 1.1 chris for (curseg = 0; curseg < nsegs; curseg++) {
1300 1.1 chris for (addr = segs[curseg].ds_addr;
1301 1.1 chris addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
1302 1.1 chris addr += PAGE_SIZE) {
1303 1.1 chris m = PHYS_TO_VM_PAGE(addr);
1304 1.52 ad TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
1305 1.1 chris }
1306 1.1 chris }
1307 1.1 chris uvm_pglistfree(&mlist);
1308 1.1 chris }
1309 1.1 chris
1310 1.1 chris /*
1311 1.1 chris * Common function for mapping DMA-safe memory. May be called by
1312 1.1 chris * bus-specific DMA memory map functions.
1313 1.1 chris */
1314 1.1 chris int
1315 1.7 thorpej _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1316 1.50 christos size_t size, void **kvap, int flags)
1317 1.1 chris {
1318 1.11 thorpej vaddr_t va;
1319 1.57 matt paddr_t pa;
1320 1.1 chris int curseg;
1321 1.65 matt const uvm_flag_t kmflags = UVM_KMF_VAONLY
1322 1.65 matt | ((flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0);
1323 1.65 matt vsize_t align = 0;
1324 1.1 chris
1325 1.1 chris #ifdef DEBUG_DMA
1326 1.3 rearnsha printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
1327 1.3 rearnsha segs, nsegs, (unsigned long)size, flags);
1328 1.1 chris #endif /* DEBUG_DMA */
1329 1.1 chris
1330 1.62 matt #ifdef PMAP_MAP_POOLPAGE
1331 1.62 matt /*
1332 1.62 matt * If all of memory is mapped, and we are mapping a single physically
1333 1.62 matt 	 * contiguous area, then this area is already mapped.  Let's see if we
1334 1.62 matt 	 * can avoid having a separate mapping for it.
1335 1.62 matt */
1336 1.62 matt if (nsegs == 1) {
1337 1.62 matt /*
1338 1.62 matt * If this is a non-COHERENT mapping, then the existing kernel
1339 1.62 matt * mapping is already compatible with it.
1340 1.62 matt */
1341 1.68 matt bool direct_mapable = (flags & BUS_DMA_COHERENT) == 0;
1342 1.68 matt pa = segs[0].ds_addr;
1343 1.68 matt
1344 1.62 matt /*
1345 1.68 matt * This is a COHERENT mapping which, unless this address is in
1346 1.62 matt * a COHERENT dma range, will not be compatible.
1347 1.62 matt */
1348 1.62 matt if (t->_ranges != NULL) {
1349 1.62 matt const struct arm32_dma_range * const dr =
1350 1.68 matt _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
1351 1.71 matt if (dr != NULL
1352 1.71 matt && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
1353 1.71 matt direct_mapable = true;
1354 1.68 matt }
1355 1.68 matt }
1356 1.68 matt
1357 1.87 matt #ifdef PMAP_NEED_ALLOC_POOLPAGE
1358 1.87 matt /*
1359 1.87 matt 	 * The page can only be direct mapped if it was allocated out
1360 1.95 skrll * of the arm poolpage vm freelist.
1361 1.87 matt */
1362 1.97 cherry uvm_physseg_t upm = uvm_physseg_find(atop(pa), NULL);
1363 1.97 cherry KASSERT(uvm_physseg_valid_p(upm));
1364 1.87 matt if (direct_mapable) {
1365 1.87 matt direct_mapable =
1366 1.97 cherry (arm_poolpage_vmfreelist == uvm_physseg_get_free_list(upm));
1367 1.87 matt }
1368 1.87 matt #endif
1369 1.87 matt
1370 1.68 matt if (direct_mapable) {
1371 1.68 matt *kvap = (void *)PMAP_MAP_POOLPAGE(pa);
1372 1.64 matt #ifdef DEBUG_DMA
1373 1.68 matt printf("dmamem_map: =%p\n", *kvap);
1374 1.64 matt #endif /* DEBUG_DMA */
1375 1.68 matt return 0;
1376 1.62 matt }
1377 1.62 matt }
1378 1.62 matt #endif
1379 1.62 matt
1380 1.1 chris size = round_page(size);
1381 1.65 matt if (__predict_true(size > L2_L_SIZE)) {
1382 1.65 matt #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
1383 1.65 matt if (size >= L1_SS_SIZE)
1384 1.65 matt align = L1_SS_SIZE;
1385 1.65 matt else
1386 1.65 matt #endif
1387 1.65 matt if (size >= L1_S_SIZE)
1388 1.65 matt align = L1_S_SIZE;
1389 1.65 matt else
1390 1.81 matt align = L2_L_SIZE;
1391 1.65 matt }
1392 1.65 matt
1393 1.65 matt va = uvm_km_alloc(kernel_map, size, align, kmflags);
1394 1.65 matt if (__predict_false(va == 0 && align > 0)) {
1395 1.65 matt align = 0;
1396 1.65 matt va = uvm_km_alloc(kernel_map, size, 0, kmflags);
1397 1.65 matt }
1398 1.1 chris
1399 1.1 chris if (va == 0)
1400 1.100 skrll return ENOMEM;
1401 1.1 chris
1402 1.50 christos *kvap = (void *)va;
1403 1.1 chris
1404 1.1 chris for (curseg = 0; curseg < nsegs; curseg++) {
1405 1.57 matt for (pa = segs[curseg].ds_addr;
1406 1.57 matt pa < (segs[curseg].ds_addr + segs[curseg].ds_len);
1407 1.57 matt pa += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
1408 1.68 matt 			bool uncached = (flags & BUS_DMA_COHERENT) != 0;
1409 1.1 chris #ifdef DEBUG_DMA
1410 1.57 matt printf("wiring p%lx to v%lx", pa, va);
1411 1.1 chris #endif /* DEBUG_DMA */
1412 1.1 chris if (size == 0)
1413 1.1 chris panic("_bus_dmamem_map: size botch");
1414 1.68 matt
1415 1.68 matt const struct arm32_dma_range * const dr =
1416 1.68 matt _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
1417 1.68 matt /*
1418 1.68 matt * If this dma region is coherent then there is
1419 1.68 matt * no need for an uncached mapping.
1420 1.68 matt */
1421 1.71 matt if (dr != NULL
1422 1.71 matt && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
1423 1.71 matt uncached = false;
1424 1.68 matt }
1425 1.71 matt
1426 1.81 matt pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE,
1427 1.81 matt PMAP_WIRED | (uncached ? PMAP_NOCACHE : 0));
1428 1.1 chris }
1429 1.1 chris }
1430 1.2 chris pmap_update(pmap_kernel());
1431 1.1 chris #ifdef DEBUG_DMA
1432 1.1 chris printf("dmamem_map: =%p\n", *kvap);
1433 1.1 chris #endif /* DEBUG_DMA */
1434 1.100 skrll return 0;
1435 1.1 chris }
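
/*
 * Illustrative sketch (editor's example, not part of the original
 * source): the usual driver-side pairing of the alloc and map steps
 * through the public bus_dma(9) interface.  "sc" and the one-page
 * size are hypothetical.
 */
#if 0
	bus_dma_segment_t seg;
	int rseg, error;
	void *kva;

	/* Allocate one page of DMA-safe memory... */
	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error)
		return error;

	/* ...and give it a kernel virtual mapping. */
	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
	    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		return error;
	}

	/* Teardown happens in the reverse order: unmap before free. */
	bus_dmamem_unmap(sc->sc_dmat, kva, PAGE_SIZE);
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
#endif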
1436 1.1 chris
1437 1.1 chris /*
1438 1.1 chris * Common function for unmapping DMA-safe memory. May be called by
1439 1.1 chris * bus-specific DMA memory unmapping functions.
1440 1.1 chris */
1441 1.1 chris void
1442 1.50 christos _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
1443 1.1 chris {
1444 1.1 chris
1445 1.1 chris #ifdef DEBUG_DMA
1446 1.65 matt printf("dmamem_unmap: t=%p kva=%p size=%zx\n", t, kva, size);
1447 1.1 chris #endif /* DEBUG_DMA */
1448 1.79 matt KASSERTMSG(((uintptr_t)kva & PAGE_MASK) == 0,
1449 1.83 christos "kva %p (%#"PRIxPTR")", kva, ((uintptr_t)kva & PAGE_MASK));
1450 1.1 chris
1451 1.84 matt #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
1452 1.84 matt /*
1453 1.88 snj 	 * Check to see if this used direct-mapped memory.  Get its physical
1454 1.84 matt 	 * address and try to map it.  If the resulting va matches the kva,
1455 1.99 skrll 	 * then it did, and we can just return since there is nothing to free.
1456 1.84 matt */
1457 1.84 matt paddr_t pa;
1458 1.84 matt vaddr_t va;
1459 1.84 matt (void)pmap_extract(pmap_kernel(), (vaddr_t)kva, &pa);
1460 1.84 matt if (mm_md_direct_mapped_phys(pa, &va) && va == (vaddr_t)kva)
1461 1.84 matt return;
1462 1.84 matt #endif
1463 1.84 matt
1464 1.1 chris size = round_page(size);
1465 1.65 matt pmap_kremove((vaddr_t)kva, size);
1466 1.44 yamt pmap_update(pmap_kernel());
1467 1.44 yamt uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
1468 1.1 chris }
1469 1.1 chris
1470 1.1 chris /*
1471 1.1 chris  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
1472 1.1 chris * bus-specific DMA mmap(2)'ing functions.
1473 1.1 chris */
1474 1.1 chris paddr_t
1475 1.7 thorpej _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1476 1.7 thorpej off_t off, int prot, int flags)
1477 1.1 chris {
1478 1.73 macallan paddr_t map_flags;
1479 1.1 chris int i;
1480 1.1 chris
1481 1.1 chris for (i = 0; i < nsegs; i++) {
1482 1.79 matt KASSERTMSG((off & PAGE_MASK) == 0,
1483 1.79 matt "off %#qx (%#x)", off, (int)off & PAGE_MASK);
1484 1.79 matt KASSERTMSG((segs[i].ds_addr & PAGE_MASK) == 0,
1485 1.79 matt "ds_addr %#lx (%#x)", segs[i].ds_addr,
1486 1.79 matt (int)segs[i].ds_addr & PAGE_MASK);
1487 1.79 matt KASSERTMSG((segs[i].ds_len & PAGE_MASK) == 0,
1488 1.79 matt 		    "ds_len %#lx (%#x)", segs[i].ds_len,
1489 1.79 matt 		    (int)segs[i].ds_len & PAGE_MASK);
1490 1.1 chris if (off >= segs[i].ds_len) {
1491 1.1 chris off -= segs[i].ds_len;
1492 1.1 chris continue;
1493 1.1 chris }
1494 1.1 chris
1495 1.73 macallan map_flags = 0;
1496 1.73 macallan if (flags & BUS_DMA_PREFETCHABLE)
1497 1.73 macallan map_flags |= ARM32_MMAP_WRITECOMBINE;
1498 1.73 macallan
1499 1.100 skrll return arm_btop((u_long)segs[i].ds_addr + off) | map_flags;
1500 1.95 skrll
1501 1.1 chris }
1502 1.1 chris
1503 1.1 chris /* Page not found. */
1504 1.100 skrll return -1;
1505 1.1 chris }
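
/*
 * Illustrative sketch (editor's example, not part of the original
 * source): a character device's d_mmap entry point typically just
 * forwards to bus_dmamem_mmap(9).  "foo_sc", "foo_cd" and the softc
 * fields are hypothetical names.
 */
#if 0
static paddr_t
foo_mmap(dev_t dev, off_t off, int prot)
{
	struct foo_sc *sc = device_lookup_private(&foo_cd, minor(dev));

	if (sc == NULL || off < 0)
		return -1;
	return bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs, sc->sc_nsegs,
	    off, prot, BUS_DMA_PREFETCHABLE);
}
#endif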
1506 1.1 chris
1507 1.1 chris /**********************************************************************
1508 1.1 chris * DMA utility functions
1509 1.1 chris **********************************************************************/
1510 1.1 chris
1511 1.1 chris /*
1512 1.1 chris  * Utility function to load a linear buffer.  Walks the buffer page by
1513 1.1 chris  * page, resolving each virtual address in the given vmspace to a
1514 1.1 chris  * physical address, and appends each resulting physical range to the
1515 1.1 chris  * map via _bus_dmamap_load_paddr().
1516 1.1 chris */
1517 1.1 chris int
1518 1.7 thorpej _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
1519 1.48 yamt bus_size_t buflen, struct vmspace *vm, int flags)
1520 1.1 chris {
1521 1.1 chris bus_size_t sgsize;
1522 1.41 thorpej bus_addr_t curaddr;
1523 1.11 thorpej vaddr_t vaddr = (vaddr_t)buf;
1524 1.41 thorpej int error;
1525 1.1 chris pmap_t pmap;
1526 1.1 chris
1527 1.1 chris #ifdef DEBUG_DMA
1528 1.40 scw 	printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d)\n",
1529 1.40 scw buf, buflen, flags);
1530 1.1 chris #endif /* DEBUG_DMA */
1531 1.1 chris
1532 1.48 yamt pmap = vm_map_pmap(&vm->vm_map);
1533 1.1 chris
1534 1.41 thorpej while (buflen > 0) {
1535 1.1 chris /*
1536 1.1 chris * Get the physical address for this segment.
1537 1.17 thorpej *
1538 1.55 matt * XXX Doesn't support checking for coherent mappings
1539 1.17 thorpej * XXX in user address space.
1540 1.1 chris */
1541 1.61 matt bool coherent;
1542 1.17 thorpej if (__predict_true(pmap == pmap_kernel())) {
1543 1.61 matt pd_entry_t *pde;
1544 1.61 matt pt_entry_t *ptep;
1545 1.29 scw (void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
1546 1.17 thorpej if (__predict_false(pmap_pde_section(pde))) {
1547 1.55 matt paddr_t s_frame = L1_S_FRAME;
1548 1.55 matt paddr_t s_offset = L1_S_OFFSET;
1549 1.56 matt #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
1550 1.55 matt if (__predict_false(pmap_pde_supersection(pde))) {
1551 1.55 matt s_frame = L1_SS_FRAME;
1552 1.60 matt s_offset = L1_SS_OFFSET;
1553 1.60 matt }
1554 1.55 matt #endif
1555 1.55 matt curaddr = (*pde & s_frame) | (vaddr & s_offset);
1556 1.66 skrll coherent = (*pde & L1_S_CACHE_MASK) == 0;
1557 1.17 thorpej } else {
1558 1.61 matt pt_entry_t pte = *ptep;
1559 1.65 matt KDASSERTMSG((pte & L2_TYPE_MASK) != L2_TYPE_INV,
1560 1.65 matt "va=%#"PRIxVADDR" pde=%#x ptep=%p pte=%#x",
1561 1.65 matt vaddr, *pde, ptep, pte);
1562 1.17 thorpej if (__predict_false((pte & L2_TYPE_MASK)
1563 1.17 thorpej == L2_TYPE_L)) {
1564 1.17 thorpej curaddr = (pte & L2_L_FRAME) |
1565 1.17 thorpej (vaddr & L2_L_OFFSET);
1566 1.66 skrll coherent = (pte & L2_L_CACHE_MASK) == 0;
1567 1.17 thorpej } else {
1568 1.86 matt curaddr = (pte & ~PAGE_MASK) |
1569 1.86 matt (vaddr & PAGE_MASK);
1570 1.66 skrll coherent = (pte & L2_S_CACHE_MASK) == 0;
1571 1.17 thorpej }
1572 1.17 thorpej }
1573 1.34 briggs } else {
1574 1.17 thorpej (void) pmap_extract(pmap, vaddr, &curaddr);
1575 1.61 matt coherent = false;
1576 1.34 briggs }
1577 1.86 matt KASSERTMSG((vaddr & PAGE_MASK) == (curaddr & PAGE_MASK),
1578 1.86 matt "va %#lx curaddr %#lx", vaddr, curaddr);
1579 1.1 chris
1580 1.1 chris /*
1581 1.1 chris * Compute the segment size, and adjust counts.
1582 1.1 chris */
1583 1.27 thorpej sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
1584 1.1 chris if (buflen < sgsize)
1585 1.1 chris sgsize = buflen;
1586 1.1 chris
1587 1.61 matt error = _bus_dmamap_load_paddr(t, map, curaddr, sgsize,
1588 1.61 matt coherent);
1589 1.41 thorpej if (error)
1590 1.100 skrll return error;
1591 1.1 chris
1592 1.1 chris vaddr += sgsize;
1593 1.1 chris buflen -= sgsize;
1594 1.1 chris }
1595 1.1 chris
1596 1.100 skrll return 0;
1597 1.1 chris }
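
/*
 * Illustrative sketch (editor's example, not part of the original
 * source): for a plain kernel buffer, the public bus_dmamap_load(9)
 * entry point ends up in the function above.  "sc", "map", "buf" and
 * "len" are hypothetical.
 */
#if 0
	error = bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_READ);
	if (error == 0) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, len,
		    BUS_DMASYNC_PREREAD);
		/* ... start the device transfer ... */
	}
#endif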
1598 1.1 chris
1599 1.1 chris /*
1600 1.1 chris * Allocate physical memory from the given physical address range.
1601 1.1 chris * Called by DMA-safe memory allocation methods.
1602 1.1 chris */
1603 1.1 chris int
1604 1.7 thorpej _bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
1605 1.7 thorpej bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
1606 1.11 thorpej int flags, paddr_t low, paddr_t high)
1607 1.1 chris {
1608 1.11 thorpej paddr_t curaddr, lastaddr;
1609 1.1 chris struct vm_page *m;
1610 1.1 chris struct pglist mlist;
1611 1.1 chris int curseg, error;
1612 1.1 chris
1613 1.101 skrll KASSERTMSG(boundary == 0 || (boundary & (boundary - 1)) == 0,
1614 1.76 matt "invalid boundary %#lx", boundary);
1615 1.76 matt
1616 1.1 chris #ifdef DEBUG_DMA
1617 1.1 chris printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
1618 1.1 chris t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
1619 1.1 chris #endif /* DEBUG_DMA */
1620 1.1 chris
1621 1.1 chris /* Always round the size. */
1622 1.1 chris size = round_page(size);
1623 1.1 chris
1624 1.1 chris /*
1625 1.76 matt 	 * We accept boundaries < size, splitting into multiple segments
1626 1.76 matt 	 * if needed.  uvm_pglistalloc does not, so compute an appropriate
1627 1.76 matt 	 * boundary: the next power of 2 >= size.
1628 1.76 matt */
1629 1.76 matt bus_size_t uboundary = boundary;
1630 1.76 matt if (uboundary <= PAGE_SIZE) {
1631 1.76 matt uboundary = 0;
1632 1.76 matt } else {
1633 1.76 matt while (uboundary < size) {
1634 1.76 matt uboundary <<= 1;
1635 1.76 matt }
1636 1.76 matt }
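
	/*
	 * For example (illustrative): size = 12KB with boundary = 8KB
	 * gives uboundary = 16KB, so uvm_pglistalloc() never hands back
	 * pages straddling a 16KB line, while the segment-building loop
	 * below still splits segments at every 8KB boundary.
	 */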
1637 1.76 matt
1638 1.76 matt /*
1639 1.1 chris * Allocate pages from the VM system.
1640 1.1 chris */
1641 1.78 matt error = uvm_pglistalloc(size, low, high, alignment, uboundary,
1642 1.1 chris &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
1643 1.1 chris if (error)
1644 1.100 skrll return error;
1645 1.1 chris
1646 1.1 chris /*
1647 1.1 chris * Compute the location, size, and number of segments actually
1648 1.1 chris * returned by the VM code.
1649 1.1 chris */
1650 1.42 chris m = TAILQ_FIRST(&mlist);
1651 1.1 chris curseg = 0;
1652 1.1 chris lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
1653 1.1 chris segs[curseg].ds_len = PAGE_SIZE;
1654 1.1 chris #ifdef DEBUG_DMA
1655 1.1 chris printf("alloc: page %lx\n", lastaddr);
1656 1.1 chris #endif /* DEBUG_DMA */
1657 1.52 ad m = TAILQ_NEXT(m, pageq.queue);
1658 1.1 chris
1659 1.52 ad for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
1660 1.1 chris curaddr = VM_PAGE_TO_PHYS(m);
1661 1.76 matt KASSERTMSG(low <= curaddr && curaddr < high,
1662 1.76 matt 		    "uvm_pglistalloc returned nonsensical address %#lx "
1663 1.76 matt 		    "(low=%#lx, high=%#lx)\n", curaddr, low, high);
1664 1.1 chris #ifdef DEBUG_DMA
1665 1.1 chris printf("alloc: page %lx\n", curaddr);
1666 1.1 chris #endif /* DEBUG_DMA */
1667 1.76 matt if (curaddr == lastaddr + PAGE_SIZE
1668 1.76 matt && (lastaddr & boundary) == (curaddr & boundary))
1669 1.1 chris segs[curseg].ds_len += PAGE_SIZE;
1670 1.1 chris else {
1671 1.1 chris curseg++;
1672 1.76 matt if (curseg >= nsegs) {
1673 1.76 matt uvm_pglistfree(&mlist);
1674 1.76 matt return EFBIG;
1675 1.76 matt }
1676 1.1 chris segs[curseg].ds_addr = curaddr;
1677 1.1 chris segs[curseg].ds_len = PAGE_SIZE;
1678 1.1 chris }
1679 1.1 chris lastaddr = curaddr;
1680 1.1 chris }
1681 1.1 chris
1682 1.1 chris *rsegs = curseg + 1;
1683 1.1 chris
1684 1.100 skrll return 0;
1685 1.15 thorpej }
1686 1.15 thorpej
1687 1.15 thorpej /*
1688 1.15 thorpej * Check if a memory region intersects with a DMA range, and return the
1689 1.15 thorpej * page-rounded intersection if it does.
1690 1.15 thorpej */
1691 1.15 thorpej int
1692 1.15 thorpej arm32_dma_range_intersect(struct arm32_dma_range *ranges, int nranges,
1693 1.15 thorpej paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
1694 1.15 thorpej {
1695 1.15 thorpej struct arm32_dma_range *dr;
1696 1.15 thorpej int i;
1697 1.15 thorpej
1698 1.15 thorpej if (ranges == NULL)
1699 1.100 skrll return 0;
1700 1.15 thorpej
1701 1.15 thorpej for (i = 0, dr = ranges; i < nranges; i++, dr++) {
1702 1.15 thorpej if (dr->dr_sysbase <= pa &&
1703 1.15 thorpej pa < (dr->dr_sysbase + dr->dr_len)) {
1704 1.15 thorpej /*
1705 1.15 thorpej * Beginning of region intersects with this range.
1706 1.15 thorpej */
1707 1.15 thorpej *pap = trunc_page(pa);
1708 1.15 thorpej *sizep = round_page(min(pa + size,
1709 1.15 thorpej dr->dr_sysbase + dr->dr_len) - pa);
1710 1.100 skrll return 1;
1711 1.15 thorpej }
1712 1.15 thorpej if (pa < dr->dr_sysbase && dr->dr_sysbase < (pa + size)) {
1713 1.15 thorpej /*
1714 1.15 thorpej * End of region intersects with this range.
1715 1.15 thorpej */
1716 1.15 thorpej *pap = trunc_page(dr->dr_sysbase);
1717 1.15 thorpej *sizep = round_page(min((pa + size) - dr->dr_sysbase,
1718 1.15 thorpej dr->dr_len));
1719 1.100 skrll return 1;
1720 1.15 thorpej }
1721 1.15 thorpej }
1722 1.15 thorpej
1723 1.15 thorpej /* No intersection found. */
1724 1.100 skrll return 0;
1725 1.1 chris }
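
/*
 * Illustrative example (editor's note): with a range
 * { dr_sysbase = 0x80000000, dr_len = 0x04000000 } and pa = 0x83fff800,
 * size = 0x1000, the beginning of the region intersects, so
 * *pap = trunc_page(pa) = 0x83fff000 and
 * *sizep = round_page(min(0x84000800, 0x84000000) - 0x83fff800)
 *        = round_page(0x800) = 0x1000.
 */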
1726 1.58 matt
1727 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1728 1.58 matt static int
1729 1.58 matt _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
1730 1.58 matt bus_size_t size, int flags)
1731 1.58 matt {
1732 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
1733 1.58 matt int error = 0;
1734 1.58 matt
1735 1.79 matt KASSERT(cookie != NULL);
1736 1.58 matt
1737 1.58 matt cookie->id_bouncebuflen = round_page(size);
1738 1.58 matt error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
1739 1.58 matt PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
1740 1.58 matt map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
1741 1.76 matt if (error == 0) {
1742 1.76 matt error = _bus_dmamem_map(t, cookie->id_bouncesegs,
1743 1.76 matt cookie->id_nbouncesegs, cookie->id_bouncebuflen,
1744 1.76 matt (void **)&cookie->id_bouncebuf, flags);
1745 1.76 matt if (error) {
1746 1.76 matt _bus_dmamem_free(t, cookie->id_bouncesegs,
1747 1.76 matt cookie->id_nbouncesegs);
1748 1.76 matt cookie->id_bouncebuflen = 0;
1749 1.76 matt cookie->id_nbouncesegs = 0;
1750 1.76 matt } else {
1751 1.76 matt cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;
1752 1.76 matt }
1753 1.76 matt } else {
1754 1.58 matt cookie->id_bouncebuflen = 0;
1755 1.58 matt cookie->id_nbouncesegs = 0;
1756 1.58 matt }
1757 1.58 matt
1758 1.100 skrll return error;
1759 1.58 matt }
1760 1.58 matt
1761 1.58 matt static void
1762 1.58 matt _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
1763 1.58 matt {
1764 1.58 matt struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
1765 1.58 matt
1766 1.79 matt KASSERT(cookie != NULL);
1767 1.58 matt
1768 1.58 matt _bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
1769 1.79 matt _bus_dmamem_free(t, cookie->id_bouncesegs, cookie->id_nbouncesegs);
1770 1.58 matt cookie->id_bouncebuflen = 0;
1771 1.58 matt cookie->id_nbouncesegs = 0;
1772 1.58 matt cookie->id_flags &= ~_BUS_DMA_HAS_BOUNCE;
1773 1.58 matt }
1774 1.58 matt
1775 1.58 matt /*
1776 1.58 matt * This function does the same as uiomove, but takes an explicit
1777 1.58 matt * direction, and does not update the uio structure.
1778 1.58 matt */
1779 1.58 matt static int
1780 1.58 matt _bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
1781 1.58 matt {
1782 1.58 matt struct iovec *iov;
1783 1.58 matt int error;
1784 1.58 matt struct vmspace *vm;
1785 1.58 matt char *cp;
1786 1.58 matt size_t resid, cnt;
1787 1.58 matt int i;
1788 1.58 matt
1789 1.58 matt iov = uio->uio_iov;
1790 1.58 matt vm = uio->uio_vmspace;
1791 1.58 matt cp = buf;
1792 1.58 matt resid = n;
1793 1.58 matt
1794 1.58 matt for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
1795 1.58 matt iov = &uio->uio_iov[i];
1796 1.58 matt if (iov->iov_len == 0)
1797 1.58 matt continue;
1798 1.58 matt cnt = MIN(resid, iov->iov_len);
1799 1.58 matt
1800 1.58 matt if (!VMSPACE_IS_KERNEL_P(vm) &&
1801 1.58 matt (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
1802 1.58 matt != 0) {
1803 1.58 matt preempt();
1804 1.58 matt }
1805 1.58 matt if (direction == UIO_READ) {
1806 1.58 matt error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
1807 1.58 matt } else {
1808 1.58 matt error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
1809 1.58 matt }
1810 1.58 matt if (error)
1811 1.100 skrll return error;
1812 1.58 matt cp += cnt;
1813 1.58 matt resid -= cnt;
1814 1.58 matt }
1815 1.100 skrll return 0;
1816 1.58 matt }
1817 1.58 matt #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
1818 1.58 matt
1819 1.58 matt int
1820 1.58 matt _bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
1821 1.58 matt bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
1822 1.58 matt {
1823 1.58 matt
1824 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1825 1.58 matt struct arm32_dma_range *dr;
1826 1.58 matt bool subset = false;
1827 1.58 matt size_t nranges = 0;
1828 1.58 matt size_t i;
1829 1.58 matt for (i = 0, dr = tag->_ranges; i < tag->_nranges; i++, dr++) {
1830 1.95 skrll if (dr->dr_sysbase <= min_addr
1831 1.58 matt && max_addr <= dr->dr_sysbase + dr->dr_len - 1) {
1832 1.58 matt subset = true;
1833 1.58 matt }
1834 1.58 matt if (min_addr <= dr->dr_sysbase + dr->dr_len
1835 1.58 matt && max_addr >= dr->dr_sysbase) {
1836 1.58 matt nranges++;
1837 1.58 matt }
1838 1.58 matt }
1839 1.58 matt if (subset) {
1840 1.58 matt *newtag = tag;
1841 1.58 matt /* if the tag must be freed, add a reference */
1842 1.58 matt if (tag->_tag_needs_free)
1843 1.58 matt (tag->_tag_needs_free)++;
1844 1.58 matt return 0;
1845 1.58 matt }
1846 1.58 matt if (nranges == 0) {
1847 1.58 matt nranges = 1;
1848 1.58 matt }
1849 1.58 matt
1850 1.81 matt const size_t tagsize = sizeof(*tag) + nranges * sizeof(*dr);
1851 1.81 matt if ((*newtag = kmem_intr_zalloc(tagsize,
1852 1.81 matt (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
1853 1.58 matt return ENOMEM;
1854 1.58 matt
1855 1.58 matt dr = (void *)(*newtag + 1);
1856 1.58 matt **newtag = *tag;
1857 1.58 matt (*newtag)->_tag_needs_free = 1;
1858 1.58 matt (*newtag)->_ranges = dr;
1859 1.58 matt (*newtag)->_nranges = nranges;
1860 1.58 matt
1861 1.58 matt if (tag->_ranges == NULL) {
1862 1.58 matt dr->dr_sysbase = min_addr;
1863 1.58 matt dr->dr_busbase = min_addr;
1864 1.58 matt dr->dr_len = max_addr + 1 - min_addr;
1865 1.58 matt } else {
1866 1.58 matt for (i = 0; i < nranges; i++) {
1867 1.58 matt if (min_addr > dr->dr_sysbase + dr->dr_len
1868 1.58 matt || max_addr < dr->dr_sysbase)
1869 1.58 matt continue;
1870 1.58 matt dr[0] = tag->_ranges[i];
1871 1.58 matt if (dr->dr_sysbase < min_addr) {
1872 1.58 matt psize_t diff = min_addr - dr->dr_sysbase;
1873 1.58 matt dr->dr_busbase += diff;
1874 1.58 matt dr->dr_len -= diff;
1875 1.58 matt dr->dr_sysbase += diff;
1876 1.58 matt }
1877 1.58 matt if (max_addr != 0xffffffff
1878 1.58 matt && max_addr + 1 < dr->dr_sysbase + dr->dr_len) {
1879 1.58 matt dr->dr_len = max_addr + 1 - dr->dr_sysbase;
1880 1.58 matt }
1881 1.58 matt dr++;
1882 1.58 matt }
1883 1.58 matt }
1884 1.58 matt
1885 1.58 matt return 0;
1886 1.58 matt #else
1887 1.58 matt return EOPNOTSUPP;
1888 1.58 matt #endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
1889 1.58 matt }
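
/*
 * Illustrative sketch (editor's example, not part of the original
 * source): restricting a parent tag for a device that can only address
 * the low 1GB, via the public bus_dmatag_subregion(9).  "sc" is
 * hypothetical.
 */
#if 0
	bus_dma_tag_t dmat;

	error = bus_dmatag_subregion(sc->sc_parent_dmat, 0, 0x3fffffff,
	    &dmat, BUS_DMA_WAITOK);
	if (error)
		return error;
	/* ... use dmat; bus_dmatag_destroy(dmat) releases it again. */
#endif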
1890 1.58 matt
1891 1.58 matt void
1892 1.58 matt _bus_dmatag_destroy(bus_dma_tag_t tag)
1893 1.58 matt {
1894 1.58 matt #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
1895 1.58 matt switch (tag->_tag_needs_free) {
1896 1.58 matt case 0:
1897 1.81 matt break; /* not allocated with kmem */
1898 1.81 matt case 1: {
1899 1.81 matt const size_t tagsize = sizeof(*tag)
1900 1.81 matt + tag->_nranges * sizeof(*tag->_ranges);
1901 1.81 matt kmem_intr_free(tag, tagsize); /* last reference to tag */
1902 1.58 matt break;
1903 1.81 matt }
1904 1.58 matt default:
1905 1.58 matt (tag->_tag_needs_free)--; /* one less reference */
1906 1.58 matt }
1907 1.58 matt #endif
1908 1.58 matt }
1909