isadma_bounce.c revision 1.2.4.2 1 1.2.4.2 minoura /* $NetBSD: isadma_bounce.c,v 1.2.4.2 2000/06/22 16:58:37 minoura Exp $ */
2 1.2.4.2 minoura
3 1.2.4.2 minoura /*-
4 1.2.4.2 minoura * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
5 1.2.4.2 minoura * All rights reserved.
6 1.2.4.2 minoura *
7 1.2.4.2 minoura * This code is derived from software contributed to The NetBSD Foundation
8 1.2.4.2 minoura * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 1.2.4.2 minoura * NASA Ames Research Center.
10 1.2.4.2 minoura *
11 1.2.4.2 minoura * Redistribution and use in source and binary forms, with or without
12 1.2.4.2 minoura * modification, are permitted provided that the following conditions
13 1.2.4.2 minoura * are met:
14 1.2.4.2 minoura * 1. Redistributions of source code must retain the above copyright
15 1.2.4.2 minoura * notice, this list of conditions and the following disclaimer.
16 1.2.4.2 minoura * 2. Redistributions in binary form must reproduce the above copyright
17 1.2.4.2 minoura * notice, this list of conditions and the following disclaimer in the
18 1.2.4.2 minoura * documentation and/or other materials provided with the distribution.
19 1.2.4.2 minoura * 3. All advertising materials mentioning features or use of this software
20 1.2.4.2 minoura * must display the following acknowledgement:
21 1.2.4.2 minoura * This product includes software developed by the NetBSD
22 1.2.4.2 minoura * Foundation, Inc. and its contributors.
23 1.2.4.2 minoura * 4. Neither the name of The NetBSD Foundation nor the names of its
24 1.2.4.2 minoura * contributors may be used to endorse or promote products derived
25 1.2.4.2 minoura * from this software without specific prior written permission.
26 1.2.4.2 minoura *
27 1.2.4.2 minoura * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 1.2.4.2 minoura * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 1.2.4.2 minoura * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 1.2.4.2 minoura * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 1.2.4.2 minoura * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 1.2.4.2 minoura * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 1.2.4.2 minoura * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 1.2.4.2 minoura * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 1.2.4.2 minoura * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 1.2.4.2 minoura * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 1.2.4.2 minoura * POSSIBILITY OF SUCH DAMAGE.
38 1.2.4.2 minoura */
39 1.2.4.2 minoura
40 1.2.4.2 minoura #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
41 1.2.4.2 minoura
42 1.2.4.2 minoura __KERNEL_RCSID(0, "$NetBSD: isadma_bounce.c,v 1.2.4.2 2000/06/22 16:58:37 minoura Exp $");
43 1.2.4.2 minoura
44 1.2.4.2 minoura #include <sys/param.h>
45 1.2.4.2 minoura #include <sys/systm.h>
46 1.2.4.2 minoura #include <sys/syslog.h>
47 1.2.4.2 minoura #include <sys/device.h>
48 1.2.4.2 minoura #include <sys/malloc.h>
49 1.2.4.2 minoura #include <sys/proc.h>
50 1.2.4.2 minoura #include <sys/mbuf.h>
51 1.2.4.2 minoura
52 1.2.4.2 minoura #define _ALPHA_BUS_DMA_PRIVATE
53 1.2.4.2 minoura #include <machine/bus.h>
54 1.2.4.2 minoura
55 1.2.4.2 minoura #include <dev/isa/isareg.h>
56 1.2.4.2 minoura #include <dev/isa/isavar.h>
57 1.2.4.2 minoura
58 1.2.4.2 minoura #include <vm/vm.h>
59 1.2.4.2 minoura
60 1.2.4.2 minoura extern paddr_t avail_end;
61 1.2.4.2 minoura
62 1.2.4.2 minoura /*
63 1.2.4.2 minoura * ISA can only DMA to 0-16M.
64 1.2.4.2 minoura */
65 1.2.4.2 minoura #define ISA_DMA_BOUNCE_THRESHOLD (16 * 1024 * 1024)
66 1.2.4.2 minoura
67 1.2.4.2 minoura /*
68 1.2.4.2 minoura * Cookie used by bouncing ISA DMA. A pointer to one of these is stashed
69 1.2.4.2 minoura * in the DMA map.
70 1.2.4.2 minoura */
struct isadma_bounce_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[1]; /* array of bounce buffer
					       physical memory segments;
					       declared [1] but actually
					       sized for _dm_segcnt entries
					       by dmamap_create() (pre-C99
					       trailing-array idiom) — must
					       remain the LAST member */
};
90 1.2.4.2 minoura
91 1.2.4.2 minoura /* id_flags */
92 1.2.4.2 minoura #define ID_MIGHT_NEED_BOUNCE 0x01 /* map could need bounce buffers */
93 1.2.4.2 minoura #define ID_HAS_BOUNCE 0x02 /* map currently has bounce buffers */
94 1.2.4.2 minoura #define ID_IS_BOUNCING 0x04 /* map is bouncing current xfer */
95 1.2.4.2 minoura
96 1.2.4.2 minoura /* id_buftype */
97 1.2.4.2 minoura #define ID_BUFTYPE_INVALID 0
98 1.2.4.2 minoura #define ID_BUFTYPE_LINEAR 1
99 1.2.4.2 minoura #define ID_BUFTYPE_MBUF 2
100 1.2.4.2 minoura #define ID_BUFTYPE_UIO 3
101 1.2.4.2 minoura #define ID_BUFTYPE_RAW 4
102 1.2.4.2 minoura
103 1.2.4.2 minoura int isadma_bounce_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
104 1.2.4.2 minoura bus_size_t, int));
105 1.2.4.2 minoura void isadma_bounce_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));
106 1.2.4.2 minoura
107 1.2.4.2 minoura /*
108 1.2.4.2 minoura * Create an ISA DMA map.
109 1.2.4.2 minoura */
110 1.2.4.2 minoura int
111 1.2.4.2 minoura isadma_bounce_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
112 1.2.4.2 minoura bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
113 1.2.4.2 minoura {
114 1.2.4.2 minoura struct isadma_bounce_cookie *cookie;
115 1.2.4.2 minoura bus_dmamap_t map;
116 1.2.4.2 minoura int error, cookieflags;
117 1.2.4.2 minoura void *cookiestore;
118 1.2.4.2 minoura size_t cookiesize;
119 1.2.4.2 minoura
120 1.2.4.2 minoura /* Call common function to create the basic map. */
121 1.2.4.2 minoura error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
122 1.2.4.2 minoura flags, dmamp);
123 1.2.4.2 minoura if (error)
124 1.2.4.2 minoura return (error);
125 1.2.4.2 minoura
126 1.2.4.2 minoura map = *dmamp;
127 1.2.4.2 minoura map->_dm_cookie = NULL;
128 1.2.4.2 minoura
129 1.2.4.2 minoura cookiesize = sizeof(*cookie);
130 1.2.4.2 minoura
131 1.2.4.2 minoura /*
132 1.2.4.2 minoura * ISA only has 24-bits of address space. This means
133 1.2.4.2 minoura * we can't DMA to pages over 16M. In order to DMA to
134 1.2.4.2 minoura * arbitrary buffers, we use "bounce buffers" - pages
135 1.2.4.2 minoura * in memory below the 16M boundary. On DMA reads,
136 1.2.4.2 minoura * DMA happens to the bounce buffers, and is copied into
137 1.2.4.2 minoura * the caller's buffer. On writes, data is copied into
138 1.2.4.2 minoura * but bounce buffer, and the DMA happens from those
139 1.2.4.2 minoura * pages. To software using the DMA mapping interface,
140 1.2.4.2 minoura * this looks simply like a data cache.
141 1.2.4.2 minoura *
142 1.2.4.2 minoura * If we have more than 16M of RAM in the system, we may
143 1.2.4.2 minoura * need bounce buffers. We check and remember that here.
144 1.2.4.2 minoura *
145 1.2.4.2 minoura * ...or, there is an opposite case. The most segments
146 1.2.4.2 minoura * a transfer will require is (maxxfer / PAGE_SIZE) + 1. If
147 1.2.4.2 minoura * the caller can't handle that many segments (e.g. the
148 1.2.4.2 minoura * ISA DMA controller), we may have to bounce it as well.
149 1.2.4.2 minoura */
150 1.2.4.2 minoura cookieflags = 0;
151 1.2.4.2 minoura if (avail_end > (t->_wbase + t->_wsize) ||
152 1.2.4.2 minoura ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
153 1.2.4.2 minoura cookieflags |= ID_MIGHT_NEED_BOUNCE;
154 1.2.4.2 minoura cookiesize += (sizeof(bus_dma_segment_t) *
155 1.2.4.2 minoura (map->_dm_segcnt - 1));
156 1.2.4.2 minoura }
157 1.2.4.2 minoura
158 1.2.4.2 minoura /*
159 1.2.4.2 minoura * Allocate our cookie.
160 1.2.4.2 minoura */
161 1.2.4.2 minoura if ((cookiestore = malloc(cookiesize, M_DMAMAP,
162 1.2.4.2 minoura (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
163 1.2.4.2 minoura error = ENOMEM;
164 1.2.4.2 minoura goto out;
165 1.2.4.2 minoura }
166 1.2.4.2 minoura memset(cookiestore, 0, cookiesize);
167 1.2.4.2 minoura cookie = (struct isadma_bounce_cookie *)cookiestore;
168 1.2.4.2 minoura cookie->id_flags = cookieflags;
169 1.2.4.2 minoura map->_dm_cookie = cookie;
170 1.2.4.2 minoura
171 1.2.4.2 minoura if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
172 1.2.4.2 minoura /*
173 1.2.4.2 minoura * Allocate the bounce pages now if the caller
174 1.2.4.2 minoura * wishes us to do so.
175 1.2.4.2 minoura */
176 1.2.4.2 minoura if ((flags & BUS_DMA_ALLOCNOW) == 0)
177 1.2.4.2 minoura goto out;
178 1.2.4.2 minoura
179 1.2.4.2 minoura error = isadma_bounce_alloc_bouncebuf(t, map, size, flags);
180 1.2.4.2 minoura }
181 1.2.4.2 minoura
182 1.2.4.2 minoura out:
183 1.2.4.2 minoura if (error) {
184 1.2.4.2 minoura if (map->_dm_cookie != NULL)
185 1.2.4.2 minoura free(map->_dm_cookie, M_DMAMAP);
186 1.2.4.2 minoura _bus_dmamap_destroy(t, map);
187 1.2.4.2 minoura }
188 1.2.4.2 minoura return (error);
189 1.2.4.2 minoura }
190 1.2.4.2 minoura
191 1.2.4.2 minoura /*
192 1.2.4.2 minoura * Destroy an ISA DMA map.
193 1.2.4.2 minoura */
194 1.2.4.2 minoura void
195 1.2.4.2 minoura isadma_bounce_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
196 1.2.4.2 minoura {
197 1.2.4.2 minoura struct isadma_bounce_cookie *cookie = map->_dm_cookie;
198 1.2.4.2 minoura
199 1.2.4.2 minoura /*
200 1.2.4.2 minoura * Free any bounce pages this map might hold.
201 1.2.4.2 minoura */
202 1.2.4.2 minoura if (cookie->id_flags & ID_HAS_BOUNCE)
203 1.2.4.2 minoura isadma_bounce_free_bouncebuf(t, map);
204 1.2.4.2 minoura
205 1.2.4.2 minoura free(cookie, M_DMAMAP);
206 1.2.4.2 minoura _bus_dmamap_destroy(t, map);
207 1.2.4.2 minoura }
208 1.2.4.2 minoura
209 1.2.4.2 minoura /*
210 1.2.4.2 minoura * Load an ISA DMA map with a linear buffer.
211 1.2.4.2 minoura */
212 1.2.4.2 minoura int
213 1.2.4.2 minoura isadma_bounce_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
214 1.2.4.2 minoura size_t buflen, struct proc *p, int flags)
215 1.2.4.2 minoura {
216 1.2.4.2 minoura struct isadma_bounce_cookie *cookie = map->_dm_cookie;
217 1.2.4.2 minoura int error;
218 1.2.4.2 minoura
219 1.2.4.2 minoura /*
220 1.2.4.2 minoura * Make sure that on error condition we return "no valid mappings."
221 1.2.4.2 minoura */
222 1.2.4.2 minoura map->dm_mapsize = 0;
223 1.2.4.2 minoura map->dm_nsegs = 0;
224 1.2.4.2 minoura
225 1.2.4.2 minoura /*
226 1.2.4.2 minoura * Try to load the map the normal way. If this errors out,
227 1.2.4.2 minoura * and we can bounce, we will.
228 1.2.4.2 minoura */
229 1.2.4.2 minoura error = _bus_dmamap_load_direct(t, map, buf, buflen, p, flags);
230 1.2.4.2 minoura if (error == 0 ||
231 1.2.4.2 minoura (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
232 1.2.4.2 minoura return (error);
233 1.2.4.2 minoura
234 1.2.4.2 minoura /*
235 1.2.4.2 minoura * First attempt failed; bounce it.
236 1.2.4.2 minoura */
237 1.2.4.2 minoura
238 1.2.4.2 minoura /*
239 1.2.4.2 minoura * Allocate bounce pages, if necessary.
240 1.2.4.2 minoura */
241 1.2.4.2 minoura if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
242 1.2.4.2 minoura error = isadma_bounce_alloc_bouncebuf(t, map, buflen, flags);
243 1.2.4.2 minoura if (error)
244 1.2.4.2 minoura return (error);
245 1.2.4.2 minoura }
246 1.2.4.2 minoura
247 1.2.4.2 minoura /*
248 1.2.4.2 minoura * Cache a pointer to the caller's buffer and load the DMA map
249 1.2.4.2 minoura * with the bounce buffer.
250 1.2.4.2 minoura */
251 1.2.4.2 minoura cookie->id_origbuf = buf;
252 1.2.4.2 minoura cookie->id_origbuflen = buflen;
253 1.2.4.2 minoura cookie->id_buftype = ID_BUFTYPE_LINEAR;
254 1.2.4.2 minoura error = _bus_dmamap_load_direct(t, map, cookie->id_bouncebuf, buflen,
255 1.2.4.2 minoura p, flags);
256 1.2.4.2 minoura if (error) {
257 1.2.4.2 minoura /*
258 1.2.4.2 minoura * Free the bounce pages, unless our resources
259 1.2.4.2 minoura * are reserved for our exclusive use.
260 1.2.4.2 minoura */
261 1.2.4.2 minoura if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
262 1.2.4.2 minoura isadma_bounce_free_bouncebuf(t, map);
263 1.2.4.2 minoura return (error);
264 1.2.4.2 minoura }
265 1.2.4.2 minoura
266 1.2.4.2 minoura /* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
267 1.2.4.2 minoura cookie->id_flags |= ID_IS_BOUNCING;
268 1.2.4.2 minoura return (0);
269 1.2.4.2 minoura }
270 1.2.4.2 minoura
271 1.2.4.2 minoura /*
272 1.2.4.2 minoura * Like isadma_bounce_dmamap_load(), but for mbufs.
273 1.2.4.2 minoura */
274 1.2.4.2 minoura int
275 1.2.4.2 minoura isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
276 1.2.4.2 minoura struct mbuf *m0, int flags)
277 1.2.4.2 minoura {
278 1.2.4.2 minoura struct isadma_bounce_cookie *cookie = map->_dm_cookie;
279 1.2.4.2 minoura int error;
280 1.2.4.2 minoura
281 1.2.4.2 minoura /*
282 1.2.4.2 minoura * Make sure on error condition we return "no valid mappings."
283 1.2.4.2 minoura */
284 1.2.4.2 minoura map->dm_mapsize = 0;
285 1.2.4.2 minoura map->dm_nsegs = 0;
286 1.2.4.2 minoura
287 1.2.4.2 minoura #ifdef DIAGNOSTIC
288 1.2.4.2 minoura if ((m0->m_flags & M_PKTHDR) == 0)
289 1.2.4.2 minoura panic("isadma_bounce_dmamap_load_mbuf: no packet header");
290 1.2.4.2 minoura #endif
291 1.2.4.2 minoura
292 1.2.4.2 minoura if (m0->m_pkthdr.len > map->_dm_size)
293 1.2.4.2 minoura return (EINVAL);
294 1.2.4.2 minoura
295 1.2.4.2 minoura /*
296 1.2.4.2 minoura * Try to load the map the normal way. If this errors out,
297 1.2.4.2 minoura * and we can bounce, we will.
298 1.2.4.2 minoura */
299 1.2.4.2 minoura error = _bus_dmamap_load_mbuf_direct(t, map, m0, flags);
300 1.2.4.2 minoura if (error == 0 ||
301 1.2.4.2 minoura (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
302 1.2.4.2 minoura return (error);
303 1.2.4.2 minoura
304 1.2.4.2 minoura /*
305 1.2.4.2 minoura * First attempt failed; bounce it.
306 1.2.4.2 minoura */
307 1.2.4.2 minoura
308 1.2.4.2 minoura /*
309 1.2.4.2 minoura * Allocate bounce pages, if necessary.
310 1.2.4.2 minoura */
311 1.2.4.2 minoura if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
312 1.2.4.2 minoura error = isadma_bounce_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
313 1.2.4.2 minoura flags);
314 1.2.4.2 minoura if (error)
315 1.2.4.2 minoura return (error);
316 1.2.4.2 minoura }
317 1.2.4.2 minoura
318 1.2.4.2 minoura /*
319 1.2.4.2 minoura * Cache a pointer to the caller's buffer and load the DMA map
320 1.2.4.2 minoura * with the bounce buffer.
321 1.2.4.2 minoura */
322 1.2.4.2 minoura cookie->id_origbuf = m0;
323 1.2.4.2 minoura cookie->id_origbuflen = m0->m_pkthdr.len; /* not really used */
324 1.2.4.2 minoura cookie->id_buftype = ID_BUFTYPE_MBUF;
325 1.2.4.2 minoura error = _bus_dmamap_load_direct(t, map, cookie->id_bouncebuf,
326 1.2.4.2 minoura m0->m_pkthdr.len, NULL, flags);
327 1.2.4.2 minoura if (error) {
328 1.2.4.2 minoura /*
329 1.2.4.2 minoura * Free the bounce pages, unless our resources
330 1.2.4.2 minoura * are reserved for our exclusive use.
331 1.2.4.2 minoura */
332 1.2.4.2 minoura if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
333 1.2.4.2 minoura isadma_bounce_free_bouncebuf(t, map);
334 1.2.4.2 minoura return (error);
335 1.2.4.2 minoura }
336 1.2.4.2 minoura
337 1.2.4.2 minoura /* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
338 1.2.4.2 minoura cookie->id_flags |= ID_IS_BOUNCING;
339 1.2.4.2 minoura return (0);
340 1.2.4.2 minoura }
341 1.2.4.2 minoura
342 1.2.4.2 minoura /*
343 1.2.4.2 minoura * Like isadma_bounce_dmamap_load(), but for uios.
344 1.2.4.2 minoura */
/*
 * Like isadma_bounce_dmamap_load(), but for uios.
 *
 * Not implemented: no bouncing consumer currently loads from a uio,
 * so calling this is a hard error.
 */
int
isadma_bounce_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("isadma_bounce_dmamap_load_uio: not implemented");
}
352 1.2.4.2 minoura
353 1.2.4.2 minoura /*
354 1.2.4.2 minoura * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
355 1.2.4.2 minoura * bus_dmamem_alloc().
356 1.2.4.2 minoura */
/*
 * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 *
 * Not implemented: no bouncing consumer currently loads raw segments,
 * so calling this is a hard error.
 */
int
isadma_bounce_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("isadma_bounce_dmamap_load_raw: not implemented");
}
364 1.2.4.2 minoura
365 1.2.4.2 minoura /*
366 1.2.4.2 minoura * Unload an ISA DMA map.
367 1.2.4.2 minoura */
368 1.2.4.2 minoura void
369 1.2.4.2 minoura isadma_bounce_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
370 1.2.4.2 minoura {
371 1.2.4.2 minoura struct isadma_bounce_cookie *cookie = map->_dm_cookie;
372 1.2.4.2 minoura
373 1.2.4.2 minoura /*
374 1.2.4.2 minoura * If we have bounce pages, free them, unless they're
375 1.2.4.2 minoura * reserved for our exclusive use.
376 1.2.4.2 minoura */
377 1.2.4.2 minoura if ((cookie->id_flags & ID_HAS_BOUNCE) &&
378 1.2.4.2 minoura (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
379 1.2.4.2 minoura isadma_bounce_free_bouncebuf(t, map);
380 1.2.4.2 minoura
381 1.2.4.2 minoura cookie->id_flags &= ~ID_IS_BOUNCING;
382 1.2.4.2 minoura cookie->id_buftype = ID_BUFTYPE_INVALID;
383 1.2.4.2 minoura
384 1.2.4.2 minoura /*
385 1.2.4.2 minoura * Do the generic bits of the unload.
386 1.2.4.2 minoura */
387 1.2.4.2 minoura _bus_dmamap_unload(t, map);
388 1.2.4.2 minoura }
389 1.2.4.2 minoura
390 1.2.4.2 minoura /*
391 1.2.4.2 minoura * Synchronize an ISA DMA map.
392 1.2.4.2 minoura */
/*
 * Synchronize an ISA DMA map.
 *
 * For maps that are not currently bouncing this reduces to a memory
 * barrier.  For bouncing maps, PREWRITE copies the caller's data into
 * the bounce buffer before the device reads it, and POSTREAD copies
 * the device's data from the bounce buffer back to the caller's
 * buffer afterwards.
 */
void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("isadma_bounce_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	/* Only the ops that copy data need a valid (offset, len) range. */
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just drain the write buffer
	 * and return.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		alpha_mb();
		return;
	}

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 * m_copydata() handles walking the chain for us.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 * There is no "m_copyback from flat buffer"
			 * helper, so walk the chain by hand:  moff
			 * counts down to the mbuf containing `offset',
			 * then each mbuf is filled in turn until `len'
			 * bytes have been copied.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				/* Subsequent mbufs are filled from 0. */
				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("isadma_bounce_dmamap_sync");
	}

	/* Drain the write buffer. */
	alpha_mb();
}
524 1.2.4.2 minoura
525 1.2.4.2 minoura /*
526 1.2.4.2 minoura * Allocate memory safe for ISA DMA.
527 1.2.4.2 minoura */
528 1.2.4.2 minoura int
529 1.2.4.2 minoura isadma_bounce_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
530 1.2.4.2 minoura bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
531 1.2.4.2 minoura int nsegs, int *rsegs, int flags)
532 1.2.4.2 minoura {
533 1.2.4.2 minoura paddr_t high;
534 1.2.4.2 minoura
535 1.2.4.2 minoura if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
536 1.2.4.2 minoura high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
537 1.2.4.2 minoura else
538 1.2.4.2 minoura high = trunc_page(avail_end);
539 1.2.4.2 minoura
540 1.2.4.2 minoura return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
541 1.2.4.2 minoura segs, nsegs, rsegs, flags, 0, high));
542 1.2.4.2 minoura }
543 1.2.4.2 minoura
544 1.2.4.2 minoura /**********************************************************************
545 1.2.4.2 minoura * ISA DMA utility functions
546 1.2.4.2 minoura **********************************************************************/
547 1.2.4.2 minoura
548 1.2.4.2 minoura int
549 1.2.4.2 minoura isadma_bounce_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
550 1.2.4.2 minoura bus_size_t size, int flags)
551 1.2.4.2 minoura {
552 1.2.4.2 minoura struct isadma_bounce_cookie *cookie = map->_dm_cookie;
553 1.2.4.2 minoura int error = 0;
554 1.2.4.2 minoura
555 1.2.4.2 minoura cookie->id_bouncebuflen = round_page(size);
556 1.2.4.2 minoura error = isadma_bounce_dmamem_alloc(t, cookie->id_bouncebuflen,
557 1.2.4.2 minoura PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
558 1.2.4.2 minoura map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
559 1.2.4.2 minoura if (error)
560 1.2.4.2 minoura goto out;
561 1.2.4.2 minoura error = _bus_dmamem_map(t, cookie->id_bouncesegs,
562 1.2.4.2 minoura cookie->id_nbouncesegs, cookie->id_bouncebuflen,
563 1.2.4.2 minoura (caddr_t *)&cookie->id_bouncebuf, flags);
564 1.2.4.2 minoura
565 1.2.4.2 minoura out:
566 1.2.4.2 minoura if (error) {
567 1.2.4.2 minoura _bus_dmamem_free(t, cookie->id_bouncesegs,
568 1.2.4.2 minoura cookie->id_nbouncesegs);
569 1.2.4.2 minoura cookie->id_bouncebuflen = 0;
570 1.2.4.2 minoura cookie->id_nbouncesegs = 0;
571 1.2.4.2 minoura } else
572 1.2.4.2 minoura cookie->id_flags |= ID_HAS_BOUNCE;
573 1.2.4.2 minoura
574 1.2.4.2 minoura return (error);
575 1.2.4.2 minoura }
576 1.2.4.2 minoura
577 1.2.4.2 minoura void
578 1.2.4.2 minoura isadma_bounce_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
579 1.2.4.2 minoura {
580 1.2.4.2 minoura struct isadma_bounce_cookie *cookie = map->_dm_cookie;
581 1.2.4.2 minoura
582 1.2.4.2 minoura _bus_dmamem_unmap(t, cookie->id_bouncebuf,
583 1.2.4.2 minoura cookie->id_bouncebuflen);
584 1.2.4.2 minoura _bus_dmamem_free(t, cookie->id_bouncesegs,
585 1.2.4.2 minoura cookie->id_nbouncesegs);
586 1.2.4.2 minoura cookie->id_bouncebuflen = 0;
587 1.2.4.2 minoura cookie->id_nbouncesegs = 0;
588 1.2.4.2 minoura cookie->id_flags &= ~ID_HAS_BOUNCE;
589 1.2.4.2 minoura }
590