rumpdev_bus_dma.c revision 1.3.2.2 1 1.3.2.2 rmind /* $NetBSD: rumpdev_bus_dma.c,v 1.3.2.2 2014/05/18 17:46:15 rmind Exp $ */
2 1.3.2.2 rmind
3 1.3.2.2 rmind /*-
4 1.3.2.2 rmind * Copyright (c) 2013 Antti Kantee
5 1.3.2.2 rmind * All rights reserved.
6 1.3.2.2 rmind *
7 1.3.2.2 rmind * Redistribution and use in source and binary forms, with or without
8 1.3.2.2 rmind * modification, are permitted provided that the following conditions
9 1.3.2.2 rmind * are met:
10 1.3.2.2 rmind * 1. Redistributions of source code must retain the above copyright
11 1.3.2.2 rmind * notice, this list of conditions and the following disclaimer.
12 1.3.2.2 rmind * 2. Redistributions in binary form must reproduce the above copyright
13 1.3.2.2 rmind * notice, this list of conditions and the following disclaimer in the
14 1.3.2.2 rmind * documentation and/or other materials provided with the distribution.
15 1.3.2.2 rmind *
16 1.3.2.2 rmind * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
17 1.3.2.2 rmind * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 1.3.2.2 rmind * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 1.3.2.2 rmind * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 1.3.2.2 rmind * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 1.3.2.2 rmind * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 1.3.2.2 rmind * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 1.3.2.2 rmind * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 1.3.2.2 rmind * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 1.3.2.2 rmind * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 1.3.2.2 rmind * POSSIBILITY OF SUCH DAMAGE.
27 1.3.2.2 rmind */
28 1.3.2.2 rmind
29 1.3.2.2 rmind /*-
30 1.3.2.2 rmind * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
31 1.3.2.2 rmind * All rights reserved.
32 1.3.2.2 rmind *
33 1.3.2.2 rmind * This code is derived from software contributed to The NetBSD Foundation
34 1.3.2.2 rmind * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
35 1.3.2.2 rmind * NASA Ames Research Center.
36 1.3.2.2 rmind *
37 1.3.2.2 rmind * Redistribution and use in source and binary forms, with or without
38 1.3.2.2 rmind * modification, are permitted provided that the following conditions
39 1.3.2.2 rmind * are met:
40 1.3.2.2 rmind * 1. Redistributions of source code must retain the above copyright
41 1.3.2.2 rmind * notice, this list of conditions and the following disclaimer.
42 1.3.2.2 rmind * 2. Redistributions in binary form must reproduce the above copyright
43 1.3.2.2 rmind * notice, this list of conditions and the following disclaimer in the
44 1.3.2.2 rmind * documentation and/or other materials provided with the distribution.
45 1.3.2.2 rmind *
46 1.3.2.2 rmind * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
47 1.3.2.2 rmind * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
48 1.3.2.2 rmind * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
49 1.3.2.2 rmind * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
50 1.3.2.2 rmind * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 1.3.2.2 rmind * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 1.3.2.2 rmind * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 1.3.2.2 rmind * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 1.3.2.2 rmind * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 1.3.2.2 rmind * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
56 1.3.2.2 rmind * POSSIBILITY OF SUCH DAMAGE.
57 1.3.2.2 rmind */
58 1.3.2.2 rmind
59 1.3.2.2 rmind /*
60 1.3.2.2 rmind * bus_dma(9) implementation which runs on top of rump kernel hypercalls.
61 1.3.2.2 rmind * It's essentially the same as the PowerPC implementation its based on,
62 1.3.2.2 rmind * except with some indirection and PowerPC MD features removed.
63 1.3.2.2 rmind * This should/could be expected to run on x86, other archs may need
64 1.3.2.2 rmind * some cache flushing hooks.
65 1.3.2.2 rmind *
66 1.3.2.2 rmind * From sys/arch/powerpc/powerpc/bus_dma.c:
67 1.3.2.2 rmind * NetBSD: bus_dma.c,v 1.46 2012/02/01 09:54:03 matt Exp
68 1.3.2.2 rmind */
69 1.3.2.2 rmind
70 1.3.2.2 rmind #include <sys/param.h>
71 1.3.2.2 rmind #include <sys/systm.h>
72 1.3.2.2 rmind #include <sys/kernel.h>
73 1.3.2.2 rmind #include <sys/device.h>
74 1.3.2.2 rmind #include <sys/kmem.h>
75 1.3.2.2 rmind #include <sys/proc.h>
76 1.3.2.2 rmind #include <sys/mbuf.h>
77 1.3.2.2 rmind #include <sys/bus.h>
78 1.3.2.2 rmind #include <sys/intr.h>
79 1.3.2.2 rmind
80 1.3.2.2 rmind #include <uvm/uvm.h>
81 1.3.2.2 rmind
82 1.3.2.2 rmind #include "pci_user.h"
83 1.3.2.2 rmind
84 1.3.2.2 rmind #define EIEIO membar_sync()
85 1.3.2.2 rmind
86 1.3.2.2 rmind int _bus_dmamap_load_buffer (bus_dma_tag_t, bus_dmamap_t, void *,
87 1.3.2.2 rmind bus_size_t, struct vmspace *, int, paddr_t *, int *, int);
88 1.3.2.2 rmind
89 1.3.2.2 rmind /*
90 1.3.2.2 rmind * Common function for DMA map creation. May be called by bus-specific
91 1.3.2.2 rmind * DMA map creation functions.
92 1.3.2.2 rmind */
93 1.3.2.2 rmind int
94 1.3.2.2 rmind bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
95 1.3.2.2 rmind bus_size_t maxsegsz, bus_size_t boundary, int flags,
96 1.3.2.2 rmind bus_dmamap_t *dmamp)
97 1.3.2.2 rmind {
98 1.3.2.2 rmind bus_dmamap_t map;
99 1.3.2.2 rmind void *mapstore;
100 1.3.2.2 rmind size_t mapsize;
101 1.3.2.2 rmind
102 1.3.2.2 rmind /*
103 1.3.2.2 rmind * Allocate and initialize the DMA map. The end of the map
104 1.3.2.2 rmind * is a variable-sized array of segments, so we allocate enough
105 1.3.2.2 rmind * room for them in one shot.
106 1.3.2.2 rmind *
107 1.3.2.2 rmind * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
108 1.3.2.2 rmind * of ALLOCNOW notifies others that we've reserved these resources,
109 1.3.2.2 rmind * and they are not to be freed.
110 1.3.2.2 rmind *
111 1.3.2.2 rmind * The bus_dmamap_t includes one bus_dma_segment_t, hence
112 1.3.2.2 rmind * the (nsegments - 1).
113 1.3.2.2 rmind */
114 1.3.2.2 rmind mapsize = sizeof(*map) + sizeof(bus_dma_segment_t [nsegments - 1]);
115 1.3.2.2 rmind if ((mapstore = kmem_intr_alloc(mapsize,
116 1.3.2.2 rmind (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
117 1.3.2.2 rmind return (ENOMEM);
118 1.3.2.2 rmind
119 1.3.2.2 rmind memset(mapstore, 0, mapsize);
120 1.3.2.2 rmind map = (void *)mapstore;
121 1.3.2.2 rmind map->_dm_size = size;
122 1.3.2.2 rmind map->_dm_segcnt = nsegments;
123 1.3.2.2 rmind map->_dm_maxmaxsegsz = maxsegsz;
124 1.3.2.2 rmind map->_dm_boundary = boundary;
125 1.3.2.2 rmind map->_dm_bounce_thresh = 0;
126 1.3.2.2 rmind map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
127 1.3.2.2 rmind map->dm_maxsegsz = maxsegsz;
128 1.3.2.2 rmind map->dm_mapsize = 0; /* no valid mappings */
129 1.3.2.2 rmind map->dm_nsegs = 0;
130 1.3.2.2 rmind
131 1.3.2.2 rmind *dmamp = map;
132 1.3.2.2 rmind return (0);
133 1.3.2.2 rmind }
134 1.3.2.2 rmind
135 1.3.2.2 rmind /*
136 1.3.2.2 rmind * Common function for DMA map destruction. May be called by bus-specific
137 1.3.2.2 rmind * DMA map destruction functions.
138 1.3.2.2 rmind */
139 1.3.2.2 rmind void
140 1.3.2.2 rmind bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
141 1.3.2.2 rmind {
142 1.3.2.2 rmind
143 1.3.2.2 rmind size_t mapsize = sizeof(*map)
144 1.3.2.2 rmind + sizeof(bus_dma_segment_t [map->_dm_segcnt - 1]);
145 1.3.2.2 rmind kmem_intr_free(map, mapsize);
146 1.3.2.2 rmind }
147 1.3.2.2 rmind
148 1.3.2.2 rmind /*
149 1.3.2.2 rmind * Utility function to load a linear buffer. lastaddrp holds state
150 1.3.2.2 rmind * between invocations (for multiple-buffer loads). segp contains
151 1.3.2.2 rmind * the starting segment on entrance, and the ending segment on exit.
152 1.3.2.2 rmind * first indicates if this is the first invocation of this function.
153 1.3.2.2 rmind */
154 1.3.2.2 rmind int
155 1.3.2.2 rmind _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
156 1.3.2.2 rmind void *buf, bus_size_t buflen, struct vmspace *vm, int flags,
157 1.3.2.2 rmind paddr_t *lastaddrp, int *segp, int first)
158 1.3.2.2 rmind {
159 1.3.2.2 rmind bus_size_t sgsize;
160 1.3.2.2 rmind bus_addr_t curaddr, lastaddr, baddr, bmask;
161 1.3.2.2 rmind vaddr_t vaddr = (vaddr_t)buf;
162 1.3.2.2 rmind int seg;
163 1.3.2.2 rmind
164 1.3.2.2 rmind // printf("%s(%p,%p,%p,%u,%p,%#x,%p,%p,%u)\n", __func__,
165 1.3.2.2 rmind // t, map, buf, buflen, vm, flags, lastaddrp, segp, first);
166 1.3.2.2 rmind
167 1.3.2.2 rmind lastaddr = *lastaddrp;
168 1.3.2.2 rmind bmask = ~(map->_dm_boundary - 1);
169 1.3.2.2 rmind
170 1.3.2.2 rmind for (seg = *segp; buflen > 0 ; ) {
171 1.3.2.2 rmind /*
172 1.3.2.2 rmind * Get the physical address for this segment.
173 1.3.2.2 rmind */
174 1.3.2.2 rmind if (!VMSPACE_IS_KERNEL_P(vm))
175 1.3.2.2 rmind (void) pmap_extract(vm_map_pmap(&vm->vm_map),
176 1.3.2.2 rmind vaddr, (void *)&curaddr);
177 1.3.2.2 rmind else
178 1.3.2.2 rmind curaddr = vtophys(vaddr);
179 1.3.2.2 rmind
180 1.3.2.2 rmind /*
181 1.3.2.2 rmind * If we're beyond the bounce threshold, notify
182 1.3.2.2 rmind * the caller.
183 1.3.2.2 rmind */
184 1.3.2.2 rmind if (map->_dm_bounce_thresh != 0 &&
185 1.3.2.2 rmind curaddr >= map->_dm_bounce_thresh)
186 1.3.2.2 rmind return (EINVAL);
187 1.3.2.2 rmind
188 1.3.2.2 rmind /*
189 1.3.2.2 rmind * Compute the segment size, and adjust counts.
190 1.3.2.2 rmind */
191 1.3.2.2 rmind sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
192 1.3.2.2 rmind if (buflen < sgsize)
193 1.3.2.2 rmind sgsize = buflen;
194 1.3.2.2 rmind sgsize = min(sgsize, map->dm_maxsegsz);
195 1.3.2.2 rmind
196 1.3.2.2 rmind /*
197 1.3.2.2 rmind * Make sure we don't cross any boundaries.
198 1.3.2.2 rmind */
199 1.3.2.2 rmind if (map->_dm_boundary > 0) {
200 1.3.2.2 rmind baddr = (curaddr + map->_dm_boundary) & bmask;
201 1.3.2.2 rmind if (sgsize > (baddr - curaddr))
202 1.3.2.2 rmind sgsize = (baddr - curaddr);
203 1.3.2.2 rmind }
204 1.3.2.2 rmind
205 1.3.2.2 rmind /*
206 1.3.2.2 rmind * Insert chunk into a segment, coalescing with
207 1.3.2.2 rmind * the previous segment if possible.
208 1.3.2.2 rmind */
209 1.3.2.2 rmind if (first) {
210 1.3.2.2 rmind map->dm_segs[seg].ds_addr
211 1.3.2.2 rmind = rumpcomp_pci_virt_to_mach((void *)curaddr);
212 1.3.2.2 rmind map->dm_segs[seg].ds_len = sgsize;
213 1.3.2.2 rmind first = 0;
214 1.3.2.2 rmind } else {
215 1.3.2.2 rmind if (curaddr == lastaddr &&
216 1.3.2.2 rmind (map->dm_segs[seg].ds_len + sgsize) <=
217 1.3.2.2 rmind map->dm_maxsegsz &&
218 1.3.2.2 rmind (map->_dm_boundary == 0 ||
219 1.3.2.2 rmind (map->dm_segs[seg].ds_addr & bmask) ==
220 1.3.2.2 rmind (rumpcomp_pci_virt_to_mach((void*)curaddr)&bmask)))
221 1.3.2.2 rmind map->dm_segs[seg].ds_len += sgsize;
222 1.3.2.2 rmind else {
223 1.3.2.2 rmind if (++seg >= map->_dm_segcnt)
224 1.3.2.2 rmind break;
225 1.3.2.2 rmind map->dm_segs[seg].ds_addr =
226 1.3.2.2 rmind rumpcomp_pci_virt_to_mach((void *)curaddr);
227 1.3.2.2 rmind map->dm_segs[seg].ds_len = sgsize;
228 1.3.2.2 rmind }
229 1.3.2.2 rmind }
230 1.3.2.2 rmind
231 1.3.2.2 rmind lastaddr = curaddr + sgsize;
232 1.3.2.2 rmind vaddr += sgsize;
233 1.3.2.2 rmind buflen -= sgsize;
234 1.3.2.2 rmind }
235 1.3.2.2 rmind
236 1.3.2.2 rmind *segp = seg;
237 1.3.2.2 rmind *lastaddrp = lastaddr;
238 1.3.2.2 rmind
239 1.3.2.2 rmind /*
240 1.3.2.2 rmind * Did we fit?
241 1.3.2.2 rmind */
242 1.3.2.2 rmind if (buflen != 0)
243 1.3.2.2 rmind return (EFBIG); /* XXX better return value here? */
244 1.3.2.2 rmind
245 1.3.2.2 rmind return (0);
246 1.3.2.2 rmind }
247 1.3.2.2 rmind
248 1.3.2.2 rmind /*
249 1.3.2.2 rmind * Common function for loading a DMA map with a linear buffer. May
250 1.3.2.2 rmind * be called by bus-specific DMA map load functions.
251 1.3.2.2 rmind */
252 1.3.2.2 rmind int
253 1.3.2.2 rmind bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
254 1.3.2.2 rmind void *buf, bus_size_t buflen, struct proc *p, int flags)
255 1.3.2.2 rmind {
256 1.3.2.2 rmind paddr_t lastaddr = 0;
257 1.3.2.2 rmind int seg, error;
258 1.3.2.2 rmind struct vmspace *vm;
259 1.3.2.2 rmind
260 1.3.2.2 rmind /*
261 1.3.2.2 rmind * Make sure that on error condition we return "no valid mappings".
262 1.3.2.2 rmind */
263 1.3.2.2 rmind map->dm_mapsize = 0;
264 1.3.2.2 rmind map->dm_nsegs = 0;
265 1.3.2.2 rmind KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
266 1.3.2.2 rmind
267 1.3.2.2 rmind if (buflen > map->_dm_size)
268 1.3.2.2 rmind return (EINVAL);
269 1.3.2.2 rmind
270 1.3.2.2 rmind if (p != NULL) {
271 1.3.2.2 rmind vm = p->p_vmspace;
272 1.3.2.2 rmind } else {
273 1.3.2.2 rmind vm = vmspace_kernel();
274 1.3.2.2 rmind }
275 1.3.2.2 rmind
276 1.3.2.2 rmind seg = 0;
277 1.3.2.2 rmind error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
278 1.3.2.2 rmind &lastaddr, &seg, 1);
279 1.3.2.2 rmind if (error == 0) {
280 1.3.2.2 rmind map->dm_mapsize = buflen;
281 1.3.2.2 rmind map->dm_nsegs = seg + 1;
282 1.3.2.2 rmind }
283 1.3.2.2 rmind return (error);
284 1.3.2.2 rmind }
285 1.3.2.2 rmind
/*
 * Like _bus_dmamap_load(), but for mbufs: walks the chain and loads
 * each non-empty mbuf into the map.  On any failure the map is left
 * with dm_mapsize == 0 / dm_nsegs == 0 ("no valid mappings").
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
	struct mbuf *m0, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	/* only the head of a packet chain carries a valid pkthdr.len */
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) {
		if (m->m_len == 0)
			continue;
#ifdef POOL_VTOPHYS
		/*
		 * Fast path: if the mbuf's physical address is already
		 * known (cluster ext_paddr, or pool-backed storage),
		 * fill the segment directly and skip the generic
		 * per-page walk in _bus_dmamap_load_buffer().
		 */
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_CLUSTER)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			/* each mbuf after the first starts a new segment */
			if (first == 0 && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr =
			    rumpcomp_pci_virt_to_mach((void *)lastaddr);
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;

		case 0:
			/* ordinary pool-backed mbuf storage */
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			/* fall through to the generic loader below */
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data,
		    m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
356 1.3.2.2 rmind
357 1.3.2.2 rmind /*
358 1.3.2.2 rmind * Like _bus_dmamap_load(), but for uios.
359 1.3.2.2 rmind */
360 1.3.2.2 rmind int
361 1.3.2.2 rmind bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
362 1.3.2.2 rmind struct uio *uio, int flags)
363 1.3.2.2 rmind {
364 1.3.2.2 rmind paddr_t lastaddr = 0;
365 1.3.2.2 rmind int seg, i, error, first;
366 1.3.2.2 rmind bus_size_t minlen, resid;
367 1.3.2.2 rmind struct iovec *iov;
368 1.3.2.2 rmind void *addr;
369 1.3.2.2 rmind
370 1.3.2.2 rmind /*
371 1.3.2.2 rmind * Make sure that on error condition we return "no valid mappings."
372 1.3.2.2 rmind */
373 1.3.2.2 rmind map->dm_mapsize = 0;
374 1.3.2.2 rmind map->dm_nsegs = 0;
375 1.3.2.2 rmind KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
376 1.3.2.2 rmind
377 1.3.2.2 rmind resid = uio->uio_resid;
378 1.3.2.2 rmind iov = uio->uio_iov;
379 1.3.2.2 rmind
380 1.3.2.2 rmind first = 1;
381 1.3.2.2 rmind seg = 0;
382 1.3.2.2 rmind error = 0;
383 1.3.2.2 rmind for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
384 1.3.2.2 rmind /*
385 1.3.2.2 rmind * Now at the first iovec to load. Load each iovec
386 1.3.2.2 rmind * until we have exhausted the residual count.
387 1.3.2.2 rmind */
388 1.3.2.2 rmind minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
389 1.3.2.2 rmind addr = (void *)iov[i].iov_base;
390 1.3.2.2 rmind
391 1.3.2.2 rmind error = _bus_dmamap_load_buffer(t, map, addr, minlen,
392 1.3.2.2 rmind uio->uio_vmspace, flags, &lastaddr, &seg, first);
393 1.3.2.2 rmind first = 0;
394 1.3.2.2 rmind
395 1.3.2.2 rmind resid -= minlen;
396 1.3.2.2 rmind }
397 1.3.2.2 rmind if (error == 0) {
398 1.3.2.2 rmind map->dm_mapsize = uio->uio_resid;
399 1.3.2.2 rmind map->dm_nsegs = seg + 1;
400 1.3.2.2 rmind }
401 1.3.2.2 rmind return (error);
402 1.3.2.2 rmind }
403 1.3.2.2 rmind
404 1.3.2.2 rmind /*
405 1.3.2.2 rmind * Like _bus_dmamap_load(), but for raw memory allocated with
406 1.3.2.2 rmind * bus_dmamem_alloc().
407 1.3.2.2 rmind */
408 1.3.2.2 rmind int
409 1.3.2.2 rmind bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
410 1.3.2.2 rmind bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
411 1.3.2.2 rmind {
412 1.3.2.2 rmind
413 1.3.2.2 rmind panic("_bus_dmamap_load_raw: not implemented");
414 1.3.2.2 rmind }
415 1.3.2.2 rmind
416 1.3.2.2 rmind /*
417 1.3.2.2 rmind * Common function for unloading a DMA map. May be called by
418 1.3.2.2 rmind * chipset-specific DMA map unload functions.
419 1.3.2.2 rmind */
420 1.3.2.2 rmind void
421 1.3.2.2 rmind bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
422 1.3.2.2 rmind {
423 1.3.2.2 rmind
424 1.3.2.2 rmind /*
425 1.3.2.2 rmind * No resources to free; just mark the mappings as
426 1.3.2.2 rmind * invalid.
427 1.3.2.2 rmind */
428 1.3.2.2 rmind map->dm_maxsegsz = map->_dm_maxmaxsegsz;
429 1.3.2.2 rmind map->dm_mapsize = 0;
430 1.3.2.2 rmind map->dm_nsegs = 0;
431 1.3.2.2 rmind }
432 1.3.2.2 rmind
433 1.3.2.2 rmind void
434 1.3.2.2 rmind bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
435 1.3.2.2 rmind bus_addr_t offset, bus_size_t len, int ops)
436 1.3.2.2 rmind {
437 1.3.2.2 rmind
438 1.3.2.2 rmind /* XXX: this might need some MD tweaks */
439 1.3.2.2 rmind membar_sync();
440 1.3.2.2 rmind }
441 1.3.2.2 rmind
442 1.3.2.2 rmind /*
443 1.3.2.2 rmind * Common function for freeing DMA-safe memory. May be called by
444 1.3.2.2 rmind * bus-specific DMA memory free functions.
445 1.3.2.2 rmind */
446 1.3.2.2 rmind void
447 1.3.2.2 rmind bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
448 1.3.2.2 rmind {
449 1.3.2.2 rmind
450 1.3.2.2 rmind panic("bus_dmamem_free not implemented");
451 1.3.2.2 rmind }
452 1.3.2.2 rmind
453 1.3.2.2 rmind /*
454 1.3.2.2 rmind * Don't have hypercall for mapping scatter-gather memory.
455 1.3.2.2 rmind * So just simply fail if there's more than one segment to map
456 1.3.2.2 rmind */
457 1.3.2.2 rmind int
458 1.3.2.2 rmind bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
459 1.3.2.2 rmind size_t size, void **kvap, int flags)
460 1.3.2.2 rmind {
461 1.3.2.2 rmind struct rumpcomp_pci_dmaseg *dss;
462 1.3.2.2 rmind size_t allocsize = nsegs * sizeof(*dss);
463 1.3.2.2 rmind int rv, i;
464 1.3.2.2 rmind
465 1.3.2.2 rmind /*
466 1.3.2.2 rmind * Though rumpcomp_pci_dmaseg "accidentally" matches the
467 1.3.2.2 rmind * bus_dma segment descriptor (at least for now), act
468 1.3.2.2 rmind * proper and actually translate it.
469 1.3.2.2 rmind */
470 1.3.2.2 rmind dss = kmem_alloc(allocsize, KM_SLEEP);
471 1.3.2.2 rmind for (i = 0; i < nsegs; i++) {
472 1.3.2.2 rmind dss[i].ds_pa = segs[i].ds_addr;
473 1.3.2.2 rmind dss[i].ds_len = segs[i].ds_len;
474 1.3.2.2 rmind dss[i].ds_vacookie = segs[i]._ds_vacookie;
475 1.3.2.2 rmind }
476 1.3.2.2 rmind rv = rumpcomp_pci_dmamem_map(dss, nsegs, size, kvap);
477 1.3.2.2 rmind kmem_free(dss, allocsize);
478 1.3.2.2 rmind
479 1.3.2.2 rmind return rv;
480 1.3.2.2 rmind }
481 1.3.2.2 rmind
482 1.3.2.2 rmind /*
483 1.3.2.2 rmind * Common function for unmapping DMA-safe memory. May be called by
484 1.3.2.2 rmind * bus-specific DMA memory unmapping functions.
485 1.3.2.2 rmind */
486 1.3.2.2 rmind void
487 1.3.2.2 rmind bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
488 1.3.2.2 rmind {
489 1.3.2.2 rmind
490 1.3.2.2 rmind /* nothing to do as long as bus_dmamem_map() is what it is */
491 1.3.2.2 rmind }
492 1.3.2.2 rmind
493 1.3.2.2 rmind paddr_t
494 1.3.2.2 rmind bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
495 1.3.2.2 rmind off_t off, int prot, int flags)
496 1.3.2.2 rmind {
497 1.3.2.2 rmind
498 1.3.2.2 rmind panic("bus_dmamem_mmap not supported");
499 1.3.2.2 rmind }
500 1.3.2.2 rmind
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 *
 * The allocation is delegated to the rumpcomp_pci_dmalloc() hypercall,
 * which returns one physically described region plus a VA cookie; the
 * region is then described page by page, coalescing physically
 * contiguous pages into segments.
 */
int
bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags)
{
	paddr_t curaddr, lastaddr, pa;
	vaddr_t vacookie;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
#if 0
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#else
	/* XXX: ignores boundary, nsegs, etc. */
	//printf("dma allocation %lx %lx %d\n", alignment, boundary, nsegs);
	error = rumpcomp_pci_dmalloc(size, alignment, &pa, &vacookie);
#endif
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.  The first page always starts the
	 * first segment.
	 */
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = pa;
	segs[curseg].ds_len = PAGE_SIZE;
	segs[curseg]._ds_vacookie = vacookie;
	size -= PAGE_SIZE;
	pa += PAGE_SIZE;
	vacookie += PAGE_SIZE;

	/*
	 * Walk the remaining pages: extend the current segment while
	 * pages stay physically contiguous and boundary-compatible,
	 * otherwise start a new segment.
	 *
	 * NOTE(review): masking with `boundary` directly (instead of
	 * ~(boundary - 1)) looks suspect for a power-of-two boundary —
	 * confirm against the intended boundary semantics.
	 */
	for (; size;
	    pa += PAGE_SIZE, vacookie += PAGE_SIZE, size -= PAGE_SIZE) {
		curaddr = pa;
		if (curaddr == (lastaddr + PAGE_SIZE) &&
		    (lastaddr & boundary) == (curaddr & boundary)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			curseg++;
			/*
			 * NOTE(review): this error path returns without
			 * releasing the hypercall allocation — the
			 * memory from rumpcomp_pci_dmalloc() leaks here.
			 */
			if (curseg >= nsegs)
				return EFBIG;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
			segs[curseg]._ds_vacookie = vacookie;
		}
		lastaddr = curaddr;
	}
	*rsegs = curseg + 1;

	return (0);
}
563