/*	$NetBSD: rumpdev_bus_dma.c,v 1.3.4.2 2014/05/22 11:41:12 yamt Exp $	*/

/*-
 * Copyright (c) 2013 Antti Kantee
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * bus_dma(9) implementation which runs on top of rump kernel hypercalls.
 * It's essentially the same as the PowerPC implementation it's based on,
 * except with some indirection and the PowerPC MD features removed.
 * It can be expected to run as-is on x86; other archs may need
 * some cache flushing hooks.
 *
 * From sys/arch/powerpc/powerpc/bus_dma.c:
 *	NetBSD: bus_dma.c,v 1.46 2012/02/01 09:54:03 matt Exp
 */
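
/*
 * A minimal sketch of the driver-side call sequence these routines
 * serve; "sc", "buf" and "len" are hypothetical driver names:
 *
 *	bus_dmamap_create(sc->sc_dmat, len, 1, len, 0,
 *	    BUS_DMA_WAITOK, &sc->sc_dmamap);
 *	bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
 *	    NULL, BUS_DMA_WAITOK);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMA_PREWRITE);
 *	... hand sc->sc_dmamap->dm_segs[0].ds_addr to the device ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMA_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
 */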

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <uvm/uvm.h>

#include "pci_user.h"

#define	EIEIO	membar_sync()

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int, paddr_t *, int *, int);

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
	bus_size_t maxsegsz, bus_size_t boundary, int flags,
	bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(*map) + sizeof(bus_dma_segment_t [nsegments - 1]);
	if ((mapstore = kmem_intr_alloc(mapsize,
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (void *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_bounce_thresh = 0;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
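
/*
 * Layout sketch for the allocation above: a bus_dmamap_t already ends
 * in one bus_dma_segment_t, so the remaining (nsegments - 1) segments
 * live in the same allocation right behind it.  E.g. for nsegments == 4:
 *
 *	mapsize = sizeof(*map)				(covers dm_segs[0])
 *		+ 3 * sizeof(bus_dma_segment_t)		(dm_segs[1..3])
 */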

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	size_t mapsize = sizeof(*map)
	    + sizeof(bus_dma_segment_t [map->_dm_segcnt - 1]);
	kmem_intr_free(map, mapsize);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct vmspace *vm, int flags,
	paddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

//	printf("%s(%p,%p,%p,%u,%p,%#x,%p,%p,%u)\n", __func__,
//	    t, map, buf, buflen, vm, flags, lastaddrp, segp, first);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, (void *)&curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the bounce threshold, notify
		 * the caller.
		 */
		if (map->_dm_bounce_thresh != 0 &&
		    curaddr >= map->_dm_bounce_thresh)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		sgsize = min(sgsize, map->dm_maxsegsz);

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr
			    = rumpcomp_pci_virt_to_mach((void *)curaddr);
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (rumpcomp_pci_virt_to_mach((void *)curaddr) &
			      bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
				    rumpcomp_pci_virt_to_mach((void *)curaddr);
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}
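
/*
 * Callers chain multiple invocations by threading lastaddr and seg
 * through, the way bus_dmamap_load_mbuf() below does.  Schematically,
 * for a hypothetical two-buffer load:
 *
 *	paddr_t lastaddr = 0;
 *	int seg = 0, error;
 *
 *	error = _bus_dmamap_load_buffer(t, map, buf1, len1, vm, flags,
 *	    &lastaddr, &seg, 1);
 *	if (error == 0)
 *		error = _bus_dmamap_load_buffer(t, map, buf2, len2, vm,
 *		    flags, &lastaddr, &seg, 0);
 */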

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
	struct mbuf *m0, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) {
		if (m->m_len == 0)
			continue;
#ifdef POOL_VTOPHYS
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_CLUSTER)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr =
			    rumpcomp_pci_virt_to_mach((void *)lastaddr);
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data,
		    m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
	struct uio *uio, int flags)
{
	paddr_t lastaddr = 0;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
	bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

void
bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
	bus_addr_t offset, bus_size_t len, int ops)
{

	/* XXX: this might need some MD tweaks */
	membar_sync();
}
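
/*
 * On a non-coherent architecture this is where per-op cache handling
 * (BUS_DMA_PREREAD/PREWRITE/POSTREAD/POSTWRITE) would hook in; for now
 * every op degenerates to the memory barrier above.
 */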

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	panic("bus_dmamem_free not implemented");
}

/*
 * We don't have a hypercall for mapping scatter-gather memory, so the
 * mapping will simply fail if there is more than one segment to map.
 */
int
bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	struct rumpcomp_pci_dmaseg *dss;
	size_t allocsize = nsegs * sizeof(*dss);
	int rv, i;

	/*
	 * Though rumpcomp_pci_dmaseg "accidentally" matches the
	 * bus_dma segment descriptor (at least for now), act
	 * properly and actually translate it.
	 */
	dss = kmem_alloc(allocsize, KM_SLEEP);
	for (i = 0; i < nsegs; i++) {
		dss[i].ds_pa = segs[i].ds_addr;
		dss[i].ds_len = segs[i].ds_len;
		dss[i].ds_vacookie = segs[i]._ds_vacookie;
	}
	rv = rumpcomp_pci_dmamem_map(dss, nsegs, size, kvap);
	kmem_free(dss, allocsize);

	return rv;
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/* nothing to do as long as bus_dmamem_map() is what it is */
}

paddr_t
bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	off_t off, int prot, int flags)
{

	panic("bus_dmamem_mmap not supported");
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags)
{
	paddr_t curaddr, lastaddr, pa;
	vaddr_t vacookie;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
#if 0
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#else
	/* XXX: ignores boundary, nsegs, etc. */
	//printf("dma allocation %lx %lx %d\n", alignment, boundary, nsegs);
	error = rumpcomp_pci_dmalloc(size, alignment, &pa, &vacookie);
#endif
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = pa;
	segs[curseg].ds_len = PAGE_SIZE;
	segs[curseg]._ds_vacookie = vacookie;
	size -= PAGE_SIZE;
	pa += PAGE_SIZE;
	vacookie += PAGE_SIZE;

	for (; size;
	    pa += PAGE_SIZE, vacookie += PAGE_SIZE, size -= PAGE_SIZE) {
		curaddr = pa;
		if (curaddr == (lastaddr + PAGE_SIZE) &&
		    (lastaddr & boundary) == (curaddr & boundary)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			curseg++;
			if (curseg >= nsegs)
				return EFBIG;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
			segs[curseg]._ds_vacookie = vacookie;
		}
		lastaddr = curaddr;
	}
	*rsegs = curseg + 1;

	return (0);
}
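
/*
 * Hypothetical pairing of bus_dmamem_alloc() with bus_dmamem_map()
 * above, as a driver allocating one DMA-safe page might use them:
 *
 *	bus_dma_segment_t seg;
 *	void *kva;
 *	int rseg;
 *
 *	if (bus_dmamem_alloc(t, PAGE_SIZE, PAGE_SIZE, 0, &seg, 1,
 *	    &rseg, BUS_DMA_WAITOK) == 0 &&
 *	    bus_dmamem_map(t, &seg, rseg, PAGE_SIZE, &kva,
 *	    BUS_DMA_WAITOK) == 0) {
 *		... use kva for CPU access; load a map and hand
 *		... its ds_addr to the device for the DMA side
 *	}
 */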