bus_dma.c revision 1.65 1 1.65 martin /* $NetBSD: bus_dma.c,v 1.65 2008/04/28 20:23:11 martin Exp $ */
2 1.2 thorpej
3 1.2 thorpej /*-
4 1.8 thorpej * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 1.2 thorpej * All rights reserved.
6 1.2 thorpej *
7 1.2 thorpej * This code is derived from software contributed to The NetBSD Foundation
8 1.2 thorpej * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 1.2 thorpej * NASA Ames Research Center.
10 1.2 thorpej *
11 1.2 thorpej * Redistribution and use in source and binary forms, with or without
12 1.2 thorpej * modification, are permitted provided that the following conditions
13 1.2 thorpej * are met:
14 1.2 thorpej * 1. Redistributions of source code must retain the above copyright
15 1.2 thorpej * notice, this list of conditions and the following disclaimer.
16 1.2 thorpej * 2. Redistributions in binary form must reproduce the above copyright
17 1.2 thorpej * notice, this list of conditions and the following disclaimer in the
18 1.2 thorpej * documentation and/or other materials provided with the distribution.
19 1.2 thorpej *
20 1.2 thorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 1.2 thorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 1.2 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 1.2 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 1.2 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 1.2 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 1.2 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 1.2 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 1.2 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 1.2 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 1.2 thorpej * POSSIBILITY OF SUCH DAMAGE.
31 1.2 thorpej */
32 1.2 thorpej
33 1.2 thorpej #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
34 1.2 thorpej
35 1.65 martin __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.65 2008/04/28 20:23:11 martin Exp $");
36 1.2 thorpej
37 1.2 thorpej #include <sys/param.h>
38 1.2 thorpej #include <sys/systm.h>
39 1.2 thorpej #include <sys/kernel.h>
40 1.2 thorpej #include <sys/device.h>
41 1.2 thorpej #include <sys/malloc.h>
42 1.2 thorpej #include <sys/proc.h>
43 1.9 thorpej #include <sys/mbuf.h>
44 1.28 mrg
45 1.16 thorpej #include <uvm/uvm_extern.h>
46 1.2 thorpej
47 1.2 thorpej #define _ALPHA_BUS_DMA_PRIVATE
48 1.2 thorpej #include <machine/bus.h>
49 1.3 thorpej #include <machine/intr.h>
50 1.2 thorpej
51 1.43 thorpej int _bus_dmamap_load_buffer_direct(bus_dma_tag_t,
52 1.62 yamt bus_dmamap_t, void *, bus_size_t, struct vmspace *, int,
53 1.41 thorpej paddr_t *, int *, int);
54 1.9 thorpej
55 1.34 thorpej extern paddr_t avail_start, avail_end; /* from pmap.c */
56 1.34 thorpej
57 1.2 thorpej /*
58 1.2 thorpej * Common function for DMA map creation. May be called by bus-specific
59 1.2 thorpej * DMA map creation functions.
60 1.2 thorpej */
/*
 * _bus_dmamap_create: allocate and initialize a DMA map capable of
 * describing up to `nsegments' segments.  The map and its segment
 * array are allocated in a single malloc().  Returns 0 on success
 * or ENOMEM if the allocation fails.
 */
 61 1.2 thorpej int
 62 1.41 thorpej _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
 63 1.41 thorpej bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
 64 1.2 thorpej {
 65 1.2 thorpej struct alpha_bus_dmamap *map;
 66 1.2 thorpej void *mapstore;
 67 1.2 thorpej size_t mapsize;
 68 1.2 thorpej
 69 1.2 thorpej /*
 70 1.35 mjacob * Allocate and initialize the DMA map. The end of the map
 71 1.2 thorpej * is a variable-sized array of segments, so we allocate enough
 72 1.2 thorpej * room for them in one shot.
 73 1.2 thorpej *
 74 1.2 thorpej * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
 75 1.56 simonb * of ALLOCNOW notifies others that we've reserved these resources,
 76 1.2 thorpej * and they are not to be freed.
 77 1.2 thorpej *
 78 1.2 thorpej * The bus_dmamap_t includes one bus_dma_segment_t, hence
 79 1.2 thorpej * the (nsegments - 1).
 80 1.2 thorpej */
 81 1.2 thorpej mapsize = sizeof(struct alpha_bus_dmamap) +
 82 1.2 thorpej (sizeof(bus_dma_segment_t) * (nsegments - 1));
 83 1.14 thorpej if ((mapstore = malloc(mapsize, M_DMAMAP,
 84 1.2 thorpej (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
 85 1.2 thorpej return (ENOMEM);
 86 1.2 thorpej
 87 1.47 thorpej memset(mapstore, 0, mapsize);
 88 1.2 thorpej map = (struct alpha_bus_dmamap *)mapstore;
 89 1.2 thorpej map->_dm_size = size;
 90 1.2 thorpej map->_dm_segcnt = nsegments;
 91 1.57 matt map->_dm_maxmaxsegsz = maxsegsz;
/*
 * The tag may impose its own boundary constraint; the stricter
 * (smaller, non-zero) of the tag's and the caller's wins.
 */
 92 1.23 thorpej if (t->_boundary != 0 && t->_boundary < boundary)
 93 1.23 thorpej map->_dm_boundary = t->_boundary;
 94 1.23 thorpej else
 95 1.23 thorpej map->_dm_boundary = boundary;
 96 1.2 thorpej map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
/*
 * dm_maxsegsz is the working limit (clients may lower it later);
 * _dm_maxmaxsegsz above preserves the original for unload time.
 */
 97 1.57 matt map->dm_maxsegsz = maxsegsz;
 98 1.10 thorpej map->dm_mapsize = 0; /* no valid mappings */
 99 1.10 thorpej map->dm_nsegs = 0;
100 1.49 thorpej map->_dm_window = NULL;
101 1.2 thorpej
102 1.2 thorpej *dmamp = map;
103 1.2 thorpej return (0);
104 1.2 thorpej }
105 1.2 thorpej
106 1.2 thorpej /*
107 1.2 thorpej * Common function for DMA map destruction. May be called by bus-specific
108 1.2 thorpej * DMA map destruction functions.
109 1.2 thorpej */
/*
 * _bus_dmamap_destroy: release the storage allocated by
 * _bus_dmamap_create().  The tag `t' is unused here.
 */
110 1.2 thorpej void
111 1.41 thorpej _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
112 1.2 thorpej {
113 1.2 thorpej
114 1.14 thorpej free(map, M_DMAMAP);
115 1.2 thorpej }
116 1.2 thorpej
117 1.2 thorpej /*
118 1.9 thorpej * Utility function to load a linear buffer. lastaddrp holds state
119 1.9 thorpej * between invocations (for multiple-buffer loads). segp contains
120 1.9 thorpej * the starting segment on entrance, and the ending segment on exit.
121 1.9 thorpej * first indicates if this is the first invocation of this function.
122 1.2 thorpej */
/*
 * _bus_dmamap_load_buffer_direct: translate the linear buffer `buf'
 * (length `buflen', in vmspace `vm') into physical DMA segments in
 * `map', relative to the direct-mapped DMA window of tag `t'.
 * *lastaddrp and *segp carry state between calls so multiple buffers
 * can be loaded into one map; `first' is non-zero on the first call.
 * Returns 0, EINVAL (address outside the DMA window), or EFBIG
 * (ran out of segments).
 */
123 1.2 thorpej int
124 1.43 thorpej _bus_dmamap_load_buffer_direct(bus_dma_tag_t t, bus_dmamap_t map,
125 1.62 yamt void *buf, size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
126 1.41 thorpej int *segp, int first)
127 1.2 thorpej {
128 1.2 thorpej bus_size_t sgsize;
129 1.22 thorpej bus_addr_t curaddr, lastaddr, baddr, bmask;
130 1.25 thorpej vaddr_t vaddr = (vaddr_t)buf;
131 1.9 thorpej int seg;
132 1.2 thorpej
133 1.9 thorpej lastaddr = *lastaddrp;
/* bmask is only meaningful when _dm_boundary != 0 (see uses below). */
134 1.21 matt bmask = ~(map->_dm_boundary - 1);
135 1.2 thorpej
136 1.20 matt for (seg = *segp; buflen > 0 ; ) {
137 1.2 thorpej /*
138 1.2 thorpej * Get the physical address for this segment.
139 1.2 thorpej */
/*
 * NOTE(review): pmap_extract()'s result is discarded; curaddr is
 * presumably always resolvable because DMA buffers are expected to
 * be resident/wired — confirm, since a failure would leave curaddr
 * stale or uninitialized.
 */
140 1.62 yamt if (!VMSPACE_IS_KERNEL_P(vm))
141 1.62 yamt (void) pmap_extract(vm->vm_map.pmap, vaddr, &curaddr);
142 1.2 thorpej else
143 1.2 thorpej curaddr = vtophys(vaddr);
144 1.2 thorpej
145 1.19 thorpej /*
146 1.19 thorpej * If we're beyond the current DMA window, indicate
147 1.19 thorpej * that and try to fall back into SGMAPs.
148 1.19 thorpej */
149 1.26 thorpej if (t->_wsize != 0 && curaddr >= t->_wsize)
150 1.19 thorpej return (EINVAL);
151 1.19 thorpej
/* Relocate the physical address into the tag's DMA window. */
152 1.26 thorpej curaddr |= t->_wbase;
153 1.2 thorpej
154 1.2 thorpej /*
155 1.2 thorpej * Compute the segment size, and adjust counts.
156 1.2 thorpej */
157 1.52 thorpej sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
158 1.2 thorpej if (buflen < sgsize)
159 1.2 thorpej sgsize = buflen;
160 1.57 matt if (map->dm_maxsegsz < sgsize)
161 1.57 matt sgsize = map->dm_maxsegsz;
162 1.22 thorpej
163 1.20 matt /*
164 1.22 thorpej * Make sure we don't cross any boundaries.
165 1.20 matt */
166 1.21 matt if (map->_dm_boundary > 0) {
167 1.20 matt baddr = (curaddr + map->_dm_boundary) & bmask;
168 1.22 thorpej if (sgsize > (baddr - curaddr))
169 1.22 thorpej sgsize = (baddr - curaddr);
170 1.20 matt }
171 1.2 thorpej
172 1.2 thorpej /*
173 1.2 thorpej * Insert chunk into a segment, coalescing with
174 1.2 thorpej * the previous segment if possible.
175 1.2 thorpej */
176 1.2 thorpej if (first) {
177 1.2 thorpej map->dm_segs[seg].ds_addr = curaddr;
178 1.2 thorpej map->dm_segs[seg].ds_len = sgsize;
179 1.2 thorpej first = 0;
180 1.2 thorpej } else {
/*
 * Coalesce only when: coalescing is allowed for this map, the chunk
 * is physically contiguous with the previous one, the merged length
 * still fits dm_maxsegsz, and the merge doesn't straddle a boundary.
 */
181 1.36 thorpej if ((map->_dm_flags & DMAMAP_NO_COALESCE) == 0 &&
182 1.36 thorpej curaddr == lastaddr &&
183 1.2 thorpej (map->dm_segs[seg].ds_len + sgsize) <=
184 1.57 matt map->dm_maxsegsz &&
185 1.20 matt (map->_dm_boundary == 0 ||
186 1.20 matt (map->dm_segs[seg].ds_addr & bmask) ==
187 1.21 matt (curaddr & bmask)))
188 1.2 thorpej map->dm_segs[seg].ds_len += sgsize;
189 1.2 thorpej else {
/* Out of segments: leave buflen != 0 so we return EFBIG below. */
190 1.20 matt if (++seg >= map->_dm_segcnt)
191 1.20 matt break;
192 1.2 thorpej map->dm_segs[seg].ds_addr = curaddr;
193 1.2 thorpej map->dm_segs[seg].ds_len = sgsize;
194 1.2 thorpej }
195 1.2 thorpej }
196 1.2 thorpej
197 1.2 thorpej lastaddr = curaddr + sgsize;
198 1.2 thorpej vaddr += sgsize;
199 1.2 thorpej buflen -= sgsize;
200 1.2 thorpej }
201 1.2 thorpej
202 1.9 thorpej *segp = seg;
203 1.9 thorpej *lastaddrp = lastaddr;
204 1.9 thorpej
205 1.2 thorpej /*
206 1.2 thorpej * Did we fit?
207 1.2 thorpej */
208 1.2 thorpej if (buflen != 0) {
209 1.2 thorpej /*
210 1.19 thorpej * If there is a chained window, we will automatically
211 1.19 thorpej * fall back to it.
212 1.19 thorpej */
213 1.2 thorpej return (EFBIG); /* XXX better return value here? */
214 1.2 thorpej }
215 1.19 thorpej
216 1.9 thorpej return (0);
217 1.9 thorpej }
218 1.9 thorpej
219 1.9 thorpej /*
220 1.9 thorpej * Common function for loading a direct-mapped DMA map with a linear
221 1.9 thorpej * buffer. Called by bus-specific DMA map load functions with the
222 1.9 thorpej * OR value appropriate for indicating "direct-mapped" for that
223 1.9 thorpej * chipset.
224 1.9 thorpej */
/*
 * _bus_dmamap_load_direct: load `map' with the linear buffer
 * `buf'/`buflen' for process `p' (NULL means kernel).  On failure
 * the map is left with no valid mappings; if this direct window
 * cannot cover the buffer and the tag chains to another window
 * (t->_next_window), the load is retried through that window.
 */
225 1.9 thorpej int
226 1.41 thorpej _bus_dmamap_load_direct(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
227 1.41 thorpej bus_size_t buflen, struct proc *p, int flags)
228 1.9 thorpej {
229 1.25 thorpej paddr_t lastaddr;
230 1.9 thorpej int seg, error;
231 1.62 yamt struct vmspace *vm;
232 1.9 thorpej
233 1.9 thorpej /*
234 1.9 thorpej * Make sure that on error condition we return "no valid mappings".
235 1.9 thorpej */
236 1.10 thorpej map->dm_mapsize = 0;
237 1.9 thorpej map->dm_nsegs = 0;
238 1.57 matt KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
239 1.59 mhitch KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
240 1.2 thorpej
241 1.9 thorpej if (buflen > map->_dm_size)
242 1.9 thorpej return (EINVAL);
243 1.9 thorpej
/* Select the address space the virtual addresses belong to. */
244 1.62 yamt if (p != NULL) {
245 1.62 yamt vm = p->p_vmspace;
246 1.62 yamt } else {
247 1.62 yamt vm = vmspace_kernel();
248 1.62 yamt }
249 1.9 thorpej seg = 0;
250 1.43 thorpej error = _bus_dmamap_load_buffer_direct(t, map, buf, buflen,
251 1.62 yamt vm, flags, &lastaddr, &seg, 1);
252 1.10 thorpej if (error == 0) {
253 1.10 thorpej map->dm_mapsize = buflen;
254 1.9 thorpej map->dm_nsegs = seg + 1;
/* Remember which window satisfied the load, for bookkeeping. */
255 1.49 thorpej map->_dm_window = t;
256 1.19 thorpej } else if (t->_next_window != NULL) {
257 1.19 thorpej /*
258 1.19 thorpej * Give the next window a chance.
259 1.19 thorpej */
260 1.19 thorpej error = bus_dmamap_load(t->_next_window, map, buf, buflen,
261 1.19 thorpej p, flags);
262 1.10 thorpej }
263 1.9 thorpej return (error);
264 1.2 thorpej }
265 1.2 thorpej
266 1.2 thorpej /*
267 1.42 thorpej * Like _bus_dmamap_load_direct(), but for mbufs.
268 1.2 thorpej */
/*
 * _bus_dmamap_load_mbuf_direct: load `map' from mbuf chain `m0'
 * (which must have a packet header).  Mbufs whose physical address
 * is already known (clusters with a valid ext_paddr, or ordinary
 * internal-storage mbufs) are mapped directly; anything else falls
 * back to _bus_dmamap_load_buffer_direct().  On failure, retries
 * through the tag's chained window if one exists.
 */
269 1.2 thorpej int
270 1.41 thorpej _bus_dmamap_load_mbuf_direct(bus_dma_tag_t t, bus_dmamap_t map,
271 1.41 thorpej struct mbuf *m0, int flags)
272 1.2 thorpej {
273 1.25 thorpej paddr_t lastaddr;
274 1.9 thorpej int seg, error, first;
275 1.9 thorpej struct mbuf *m;
276 1.9 thorpej
277 1.9 thorpej /*
278 1.9 thorpej * Make sure that on error condition we return "no valid mappings."
279 1.9 thorpej */
280 1.10 thorpej map->dm_mapsize = 0;
281 1.9 thorpej map->dm_nsegs = 0;
282 1.57 matt KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
283 1.59 mhitch KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
284 1.9 thorpej
285 1.9 thorpej #ifdef DIAGNOSTIC
286 1.9 thorpej if ((m0->m_flags & M_PKTHDR) == 0)
287 1.43 thorpej panic("_bus_dmamap_load_mbuf_direct: no packet header");
288 1.9 thorpej #endif
289 1.2 thorpej
290 1.9 thorpej if (m0->m_pkthdr.len > map->_dm_size)
291 1.9 thorpej return (EINVAL);
292 1.9 thorpej
293 1.9 thorpej first = 1;
294 1.9 thorpej seg = 0;
295 1.9 thorpej error = 0;
296 1.9 thorpej for (m = m0; m != NULL && error == 0; m = m->m_next) {
297 1.53 thorpej if (m->m_len == 0)
298 1.53 thorpej continue;
299 1.53 thorpej /* XXX Could be better about coalescing. */
300 1.53 thorpej /* XXX Doesn't check boundaries. */
301 1.53 thorpej switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
302 1.53 thorpej case M_EXT|M_EXT_CLUSTER:
303 1.53 thorpej /* XXX KDASSERT */
304 1.53 thorpej KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
/* Cluster mbuf: physical base is cached in ext_paddr. */
305 1.53 thorpej lastaddr = m->m_ext.ext_paddr +
306 1.53 thorpej (m->m_data - m->m_ext.ext_buf);
/* Shared tail for both known-physical-address cases. */
307 1.53 thorpej have_addr:
308 1.53 thorpej if (first == 0 &&
309 1.53 thorpej ++seg >= map->_dm_segcnt) {
310 1.53 thorpej error = EFBIG;
311 1.53 thorpej break;
312 1.53 thorpej }
313 1.53 thorpej
314 1.53 thorpej /*
315 1.53 thorpej * If we're beyond the current DMA window, indicate
316 1.53 thorpej * that and try to fall back into SGMAPs.
317 1.53 thorpej */
318 1.53 thorpej if (t->_wsize != 0 && lastaddr >= t->_wsize) {
319 1.53 thorpej error = EINVAL;
320 1.53 thorpej break;
321 1.53 thorpej }
322 1.53 thorpej lastaddr |= t->_wbase;
323 1.53 thorpej
324 1.53 thorpej map->dm_segs[seg].ds_addr = lastaddr;
325 1.53 thorpej map->dm_segs[seg].ds_len = m->m_len;
326 1.53 thorpej lastaddr += m->m_len;
327 1.53 thorpej break;
328 1.53 thorpej
/* Ordinary mbuf: data lives inside the mbuf itself. */
329 1.53 thorpej case 0:
330 1.53 thorpej lastaddr = m->m_paddr + M_BUFOFFSET(m) +
331 1.53 thorpej (m->m_data - M_BUFADDR(m));
332 1.53 thorpej goto have_addr;
333 1.53 thorpej
/* External storage without a cached paddr: walk it page by page. */
334 1.53 thorpej default:
335 1.53 thorpej error = _bus_dmamap_load_buffer_direct(t, map,
336 1.62 yamt m->m_data, m->m_len, vmspace_kernel(), flags,
337 1.62 yamt &lastaddr, &seg, first);
338 1.53 thorpej }
339 1.9 thorpej first = 0;
340 1.9 thorpej }
341 1.10 thorpej if (error == 0) {
342 1.10 thorpej map->dm_mapsize = m0->m_pkthdr.len;
343 1.9 thorpej map->dm_nsegs = seg + 1;
344 1.49 thorpej map->_dm_window = t;
345 1.19 thorpej } else if (t->_next_window != NULL) {
346 1.19 thorpej /*
347 1.19 thorpej * Give the next window a chance.
348 1.19 thorpej */
349 1.19 thorpej error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
350 1.10 thorpej }
351 1.9 thorpej return (error);
352 1.2 thorpej }
353 1.2 thorpej
354 1.2 thorpej /*
355 1.42 thorpej * Like _bus_dmamap_load_direct(), but for uios.
356 1.2 thorpej */
/*
 * _bus_dmamap_load_uio_direct: load `map' from the iovecs of `uio',
 * up to uio_resid bytes, using the uio's own vmspace for address
 * translation.  Falls back to the tag's chained window on failure.
 */
357 1.2 thorpej int
358 1.41 thorpej _bus_dmamap_load_uio_direct(bus_dma_tag_t t, bus_dmamap_t map,
359 1.41 thorpej struct uio *uio, int flags)
360 1.2 thorpej {
361 1.25 thorpej paddr_t lastaddr;
362 1.24 thorpej int seg, i, error, first;
363 1.24 thorpej bus_size_t minlen, resid;
364 1.62 yamt struct vmspace *vm;
365 1.24 thorpej struct iovec *iov;
366 1.63 christos void *addr;
367 1.24 thorpej
368 1.24 thorpej /*
369 1.24 thorpej * Make sure that on error condition we return "no valid mappings."
370 1.24 thorpej */
371 1.24 thorpej map->dm_mapsize = 0;
372 1.24 thorpej map->dm_nsegs = 0;
373 1.57 matt KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
374 1.59 mhitch KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
375 1.24 thorpej
376 1.24 thorpej resid = uio->uio_resid;
377 1.24 thorpej iov = uio->uio_iov;
378 1.24 thorpej
379 1.62 yamt vm = uio->uio_vmspace;
380 1.24 thorpej
381 1.24 thorpej first = 1;
382 1.24 thorpej seg = 0;
383 1.24 thorpej error = 0;
384 1.24 thorpej for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
385 1.24 thorpej /*
386 1.24 thorpej * Now at the first iovec to load. Load each iovec
387 1.24 thorpej * until we have exhausted the residual count.
388 1.24 thorpej */
/* Clamp the last iovec to whatever residual count remains. */
389 1.27 thorpej minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
390 1.63 christos addr = (void *)iov[i].iov_base;
391 1.24 thorpej
392 1.43 thorpej error = _bus_dmamap_load_buffer_direct(t, map,
393 1.62 yamt addr, minlen, vm, flags, &lastaddr, &seg, first);
394 1.24 thorpej first = 0;
395 1.24 thorpej
396 1.24 thorpej resid -= minlen;
397 1.24 thorpej }
398 1.24 thorpej if (error == 0) {
399 1.24 thorpej map->dm_mapsize = uio->uio_resid;
400 1.24 thorpej map->dm_nsegs = seg + 1;
401 1.49 thorpej map->_dm_window = t;
402 1.24 thorpej } else if (t->_next_window != NULL) {
403 1.24 thorpej /*
404 1.24 thorpej * Give the next window a chance.
405 1.24 thorpej */
406 1.24 thorpej error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
407 1.24 thorpej }
408 1.24 thorpej return (error);
409 1.2 thorpej }
410 1.2 thorpej
411 1.2 thorpej /*
412 1.42 thorpej * Like _bus_dmamap_load_direct(), but for raw memory.
413 1.2 thorpej */
/*
 * _bus_dmamap_load_raw_direct: unconditionally panics — raw loads
 * are not implemented for direct-mapped windows on this port.
 */
414 1.2 thorpej int
415 1.41 thorpej _bus_dmamap_load_raw_direct(bus_dma_tag_t t, bus_dmamap_t map,
416 1.41 thorpej bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
417 1.2 thorpej {
418 1.2 thorpej
419 1.18 thorpej panic("_bus_dmamap_load_raw_direct: not implemented");
420 1.2 thorpej }
421 1.2 thorpej
422 1.2 thorpej /*
423 1.2 thorpej * Common function for unloading a DMA map. May be called by
424 1.2 thorpej * chipset-specific DMA map unload functions.
425 1.2 thorpej */
/*
 * _bus_dmamap_unload: invalidate the map's current mappings.  No
 * allocation is released; the segment bookkeeping is reset, the
 * working dm_maxsegsz is restored to the value given at create
 * time, and the READ/WRITE direction flags are cleared.
 */
426 1.2 thorpej void
427 1.41 thorpej _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
428 1.2 thorpej {
429 1.2 thorpej
430 1.2 thorpej /*
431 1.2 thorpej * No resources to free; just mark the mappings as
432 1.2 thorpej * invalid.
433 1.2 thorpej */
434 1.57 matt map->dm_maxsegsz = map->_dm_maxmaxsegsz;
435 1.10 thorpej map->dm_mapsize = 0;
436 1.2 thorpej map->dm_nsegs = 0;
437 1.49 thorpej map->_dm_window = NULL;
438 1.59 mhitch map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
439 1.2 thorpej }
440 1.2 thorpej
441 1.2 thorpej /*
442 1.2 thorpej * Common function for DMA map synchronization. May be called
443 1.2 thorpej * by chipset-specific DMA map synchronization functions.
444 1.2 thorpej */
/*
 * _bus_dmamap_sync: the only sync action needed here is a memory
 * barrier (alpha_mb) to drain the CPU's store buffer; offset, len
 * and ops are ignored.
 */
445 1.2 thorpej void
446 1.41 thorpej _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
447 1.41 thorpej bus_size_t len, int ops)
448 1.2 thorpej {
449 1.2 thorpej
450 1.13 thorpej /*
451 1.13 thorpej * Flush the store buffer.
452 1.13 thorpej */
453 1.13 thorpej alpha_mb();
454 1.2 thorpej }
455 1.2 thorpej
456 1.2 thorpej /*
457 1.2 thorpej * Common function for DMA-safe memory allocation. May be called
458 1.2 thorpej * by bus-specific DMA memory allocation functions.
459 1.2 thorpej */
/*
 * _bus_dmamem_alloc: allocate DMA-safe memory from anywhere in
 * physical memory, i.e. the range [0, trunc_page(avail_end)),
 * by delegating to _bus_dmamem_alloc_range().
 */
460 1.2 thorpej int
461 1.41 thorpej _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
462 1.41 thorpej bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
463 1.41 thorpej int flags)
464 1.2 thorpej {
465 1.34 thorpej
466 1.34 thorpej return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
467 1.34 thorpej segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
468 1.34 thorpej }
469 1.34 thorpej
470 1.34 thorpej /*
471 1.34 thorpej * Allocate physical memory from the given physical address range.
472 1.34 thorpej * Called by DMA-safe memory allocation methods.
473 1.34 thorpej */
/*
 * _bus_dmamem_alloc_range: allocate physical pages in [low, high)
 * via uvm_pglistalloc() and condense the returned page list into
 * DMA segments, merging physically contiguous pages.  *rsegs is set
 * to the number of segments produced.  Returns 0 or the
 * uvm_pglistalloc() error.
 */
474 1.34 thorpej int
475 1.41 thorpej _bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
476 1.41 thorpej bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
477 1.41 thorpej int flags, paddr_t low, paddr_t high)
478 1.34 thorpej {
479 1.34 thorpej paddr_t curaddr, lastaddr;
480 1.46 chs struct vm_page *m;
481 1.2 thorpej struct pglist mlist;
482 1.2 thorpej int curseg, error;
483 1.2 thorpej
484 1.2 thorpej /* Always round the size. */
485 1.2 thorpej size = round_page(size);
486 1.2 thorpej
487 1.2 thorpej /*
488 1.2 thorpej * Allocate pages from the VM system.
489 1.2 thorpej */
/* BUS_DMA_NOWAIT clear => allow uvm_pglistalloc() to wait. */
490 1.34 thorpej error = uvm_pglistalloc(size, low, high, alignment, boundary,
491 1.16 thorpej &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
492 1.2 thorpej if (error)
493 1.2 thorpej return (error);
494 1.2 thorpej
495 1.2 thorpej /*
496 1.2 thorpej * Compute the location, size, and number of segments actually
497 1.2 thorpej * returned by the VM code.
498 1.2 thorpej */
499 1.2 thorpej m = mlist.tqh_first;
500 1.2 thorpej curseg = 0;
501 1.2 thorpej lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
502 1.2 thorpej segs[curseg].ds_len = PAGE_SIZE;
503 1.2 thorpej m = m->pageq.tqe_next;
504 1.2 thorpej
505 1.2 thorpej for (; m != NULL; m = m->pageq.tqe_next) {
506 1.2 thorpej curaddr = VM_PAGE_TO_PHYS(m);
507 1.2 thorpej #ifdef DIAGNOSTIC
508 1.7 thorpej if (curaddr < avail_start || curaddr >= high) {
509 1.44 soren printf("uvm_pglistalloc returned non-sensical"
510 1.2 thorpej " address 0x%lx\n", curaddr);
511 1.2 thorpej panic("_bus_dmamem_alloc");
512 1.2 thorpej }
513 1.2 thorpej #endif
/* Extend the current segment if this page is contiguous. */
514 1.2 thorpej if (curaddr == (lastaddr + PAGE_SIZE))
515 1.2 thorpej segs[curseg].ds_len += PAGE_SIZE;
516 1.2 thorpej else {
517 1.2 thorpej curseg++;
518 1.2 thorpej segs[curseg].ds_addr = curaddr;
519 1.2 thorpej segs[curseg].ds_len = PAGE_SIZE;
520 1.2 thorpej }
521 1.2 thorpej lastaddr = curaddr;
522 1.2 thorpej }
523 1.2 thorpej
524 1.2 thorpej *rsegs = curseg + 1;
525 1.2 thorpej
526 1.2 thorpej return (0);
527 1.2 thorpej }
528 1.2 thorpej
529 1.2 thorpej /*
530 1.2 thorpej * Common function for freeing DMA-safe memory. May be called by
531 1.2 thorpej * bus-specific DMA memory free functions.
532 1.2 thorpej */
/*
 * _bus_dmamem_free: return the pages described by `segs' to UVM.
 * Walks every page of every segment, rebuilds a page list, and
 * hands it to uvm_pglistfree().
 */
533 1.2 thorpej void
534 1.41 thorpej _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
535 1.2 thorpej {
536 1.46 chs struct vm_page *m;
537 1.2 thorpej bus_addr_t addr;
538 1.2 thorpej struct pglist mlist;
539 1.2 thorpej int curseg;
540 1.2 thorpej
541 1.2 thorpej /*
542 1.2 thorpej * Build a list of pages to free back to the VM system.
543 1.2 thorpej */
544 1.2 thorpej TAILQ_INIT(&mlist);
545 1.2 thorpej for (curseg = 0; curseg < nsegs; curseg++) {
546 1.2 thorpej for (addr = segs[curseg].ds_addr;
547 1.2 thorpej addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
548 1.2 thorpej addr += PAGE_SIZE) {
549 1.2 thorpej m = PHYS_TO_VM_PAGE(addr);
550 1.2 thorpej TAILQ_INSERT_TAIL(&mlist, m, pageq);
551 1.2 thorpej }
552 1.2 thorpej }
553 1.2 thorpej
554 1.16 thorpej uvm_pglistfree(&mlist);
555 1.2 thorpej }
556 1.2 thorpej
557 1.2 thorpej /*
558 1.2 thorpej * Common function for mapping DMA-safe memory. May be called by
559 1.2 thorpej * bus-specific DMA memory map functions.
560 1.2 thorpej */
/*
 * _bus_dmamem_map: map DMA-safe memory into kernel virtual address
 * space.  A single segment is returned as a K0SEG direct-mapped
 * address (no TLB entries consumed); multi-segment memory gets KVA
 * from uvm_km_alloc() with each page entered wired via pmap_enter().
 * Returns 0 or ENOMEM.
 */
561 1.2 thorpej int
562 1.41 thorpej _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
563 1.63 christos size_t size, void **kvap, int flags)
564 1.2 thorpej {
565 1.25 thorpej vaddr_t va;
566 1.2 thorpej bus_addr_t addr;
567 1.16 thorpej int curseg;
568 1.60 yamt const uvm_flag_t kmflags =
569 1.60 yamt (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
570 1.2 thorpej
571 1.8 thorpej /*
572 1.8 thorpej * If we're only mapping 1 segment, use K0SEG, to avoid
573 1.8 thorpej * TLB thrashing.
574 1.8 thorpej */
575 1.8 thorpej if (nsegs == 1) {
576 1.63 christos *kvap = (void *)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
577 1.8 thorpej return (0);
578 1.8 thorpej }
579 1.8 thorpej
580 1.2 thorpej size = round_page(size);
581 1.3 thorpej
/* VAONLY: reserve KVA now, enter the mappings ourselves below. */
582 1.60 yamt va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
583 1.3 thorpej
584 1.2 thorpej if (va == 0)
585 1.2 thorpej return (ENOMEM);
586 1.2 thorpej
587 1.63 christos *kvap = (void *)va;
588 1.2 thorpej
589 1.2 thorpej for (curseg = 0; curseg < nsegs; curseg++) {
590 1.2 thorpej for (addr = segs[curseg].ds_addr;
591 1.2 thorpej addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
592 1.52 thorpej addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
/* Running out of `size' before pages means the caller lied. */
593 1.2 thorpej if (size == 0)
594 1.2 thorpej panic("_bus_dmamem_map: size botch");
595 1.2 thorpej pmap_enter(pmap_kernel(), va, addr,
596 1.33 thorpej VM_PROT_READ | VM_PROT_WRITE,
597 1.33 thorpej PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
598 1.2 thorpej }
599 1.2 thorpej }
600 1.48 chris pmap_update(pmap_kernel());
601 1.2 thorpej
602 1.2 thorpej return (0);
603 1.2 thorpej }
604 1.2 thorpej
605 1.2 thorpej /*
606 1.2 thorpej * Common function for unmapping DMA-safe memory. May be called by
607 1.2 thorpej * bus-specific DMA memory unmapping functions.
608 1.2 thorpej */
/*
 * _bus_dmamem_unmap: undo _bus_dmamem_map().  Addresses inside
 * K0SEG were direct-mapped (single-segment case) and need no work;
 * otherwise remove the pmap entries and free the KVA.
 */
609 1.2 thorpej void
610 1.63 christos _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
611 1.2 thorpej {
612 1.2 thorpej
613 1.2 thorpej #ifdef DIAGNOSTIC
614 1.2 thorpej if ((u_long)kva & PGOFSET)
615 1.2 thorpej panic("_bus_dmamem_unmap");
616 1.2 thorpej #endif
617 1.8 thorpej
618 1.8 thorpej /*
619 1.8 thorpej * Nothing to do if we mapped it with K0SEG.
620 1.8 thorpej */
621 1.63 christos if (kva >= (void *)ALPHA_K0SEG_BASE &&
622 1.63 christos kva <= (void *)ALPHA_K0SEG_END)
623 1.8 thorpej return;
624 1.2 thorpej
625 1.2 thorpej size = round_page(size);
626 1.58 yamt pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
627 1.58 yamt pmap_update(pmap_kernel());
628 1.58 yamt uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
629 1.2 thorpej }
630 1.2 thorpej
631 1.2 thorpej /*
632 1.2 thorpej  * Common function for mmap(2)'ing DMA-safe memory. May be called by
633 1.2 thorpej * bus-specific DMA mmap(2)'ing functions.
634 1.2 thorpej */
/*
 * _bus_dmamem_mmap: translate an mmap(2) offset into the physical
 * page (in alpha_btop units) that backs it.  `off' is consumed
 * segment by segment; returns -1 if the offset is past the end of
 * the described memory.
 */
635 1.37 simonb paddr_t
636 1.41 thorpej _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
637 1.41 thorpej off_t off, int prot, int flags)
638 1.2 thorpej {
639 1.6 thorpej int i;
640 1.2 thorpej
641 1.6 thorpej for (i = 0; i < nsegs; i++) {
642 1.6 thorpej #ifdef DIAGNOSTIC
643 1.6 thorpej if (off & PGOFSET)
644 1.6 thorpej panic("_bus_dmamem_mmap: offset unaligned");
645 1.6 thorpej if (segs[i].ds_addr & PGOFSET)
646 1.6 thorpej panic("_bus_dmamem_mmap: segment unaligned");
647 1.6 thorpej if (segs[i].ds_len & PGOFSET)
648 1.6 thorpej panic("_bus_dmamem_mmap: segment size not multiple"
649 1.6 thorpej " of page size");
650 1.6 thorpej #endif
/* Skip whole segments that lie before the requested offset. */
651 1.6 thorpej if (off >= segs[i].ds_len) {
652 1.6 thorpej off -= segs[i].ds_len;
653 1.6 thorpej continue;
654 1.6 thorpej }
655 1.6 thorpej
656 1.64 yamt return (alpha_btop((char *)segs[i].ds_addr + off));
657 1.6 thorpej }
658 1.6 thorpej
659 1.6 thorpej /* Page not found. */
660 1.6 thorpej return (-1);
661 1.2 thorpej }
662