/*	$NetBSD: bus_dma.c,v 1.34 2010/11/12 13:18:59 uebayasi Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * bus_dma routines for vax. File copied from arm32/bus_dma.c.
 * NetBSD: bus_dma.c,v 1.11 1998/09/21 22:53:35 thorpej Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.34 2010/11/12 13:18:59 uebayasi Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm.h>

#define _VAX_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <machine/ka43.h>
#include <machine/sid.h>

extern	paddr_t avail_start, avail_end;
extern	vaddr_t virtual_avail;

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int, vaddr_t *, int *, bool);
int	_bus_dma_inrange(bus_dma_segment_t *, int, bus_addr_t);
int	_bus_dmamem_alloc_range(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int, vaddr_t, vaddr_t);
/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
	bus_size_t maxsegsz, bus_size_t boundary, int flags,
	bus_dmamap_t *dmamp)
{
	struct vax_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct vax_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct vax_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}
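
/*
 * Illustrative sketch (not part of this file): the usual pattern a
 * bus-specific driver follows when creating and destroying a map
 * through the tag.  The softc layout and names below are hypothetical.
 */
#if 0	/* example only */
struct xx_softc {
	bus_dma_tag_t sc_dmat;		/* from the bus attach arguments */
	bus_dmamap_t sc_dmamap;
};

static int
xx_dma_setup(struct xx_softc *sc)
{
	int error;

	/* One segment of up to MAXPHYS bytes, no boundary constraint. */
	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap);
	if (error)
		return (error);
	/* ... load, use, and unload the map ... */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	return (0);
}
#endif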

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if (map->dm_nsegs > 0)
		printf("bus_dmamap_destroy() called for map with valid mappings\n");
#endif	/* DIAGNOSTIC */
	free(map, M_DMAMAP);	/* match the allocation type used above */
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
	bus_size_t buflen, struct proc *p, int flags)
{
	vaddr_t lastaddr = 0;
	int seg, error;
	struct vmspace *vm;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, true);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
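
/*
 * Sketch of a caller (hypothetical driver code): load a kernel buffer
 * and program the resulting physical segments into a device.  Passing
 * p == NULL selects the kernel pmap, as handled above.
 */
#if 0	/* example only */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
	    NULL /* kernel buffer */, BUS_DMA_NOWAIT);
	if (error == 0) {
		for (i = 0; i < sc->sc_dmamap->dm_nsegs; i++)
			xx_set_sg(sc,	/* hypothetical device helper */
			    sc->sc_dmamap->dm_segs[i].ds_addr,
			    sc->sc_dmamap->dm_segs[i].ds_len);
		/* ... start the transfer; bus_dmamap_unload() when done. */
	}
#endif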

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
	int flags)
{
	vaddr_t lastaddr = 0;
	int seg, error;
	bool first;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = true;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = false) {
		if (m->m_len == 0)
			continue;
#if 0	/* disabled fast path: take physical addresses from the mbuf itself */
		switch (m->m_flags & (M_EXT|M_CLUSTER)) {
#if 0
		case M_EXT|M_CLUSTER:
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr
			    + (m->m_data - m->m_ext.ext_buf);
#endif
#if 1
 have_addr:
#endif
			if (!first && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr = lastaddr;
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;
#if 1
		case 0:
			KASSERT(m->m_paddr != M_PADDR_INVALID);
			lastaddr = m->m_paddr + M_BUFOFFSET(m)
			    + (m->m_data - M_BUFADDR(m));
			goto have_addr;
#endif
		default:
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
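
/*
 * Sketch: a network driver loading a transmit mbuf chain (names are
 * hypothetical).  EFBIG means the chain needed more than _dm_segcnt
 * segments; the conventional recovery is to copy the chain into a
 * contiguous cluster and retry.
 */
#if 0	/* example only */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_dmamap, m0,
	    BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Linearize the chain into one cluster and try again. */
	}
#endif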

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
	int flags)
{
	vaddr_t lastaddr = 0;
	int seg, i, error;
	bool first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = true;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = false;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
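
/*
 * Sketch: a character-device read/write routine mapping user I/O
 * directly for DMA.  uio->uio_vmspace supplies the right pmap, so the
 * same call works for both user and kernel uios.
 */
#if 0	/* example only */
	error = bus_dmamap_load_uio(sc->sc_dmat, sc->sc_dmamap, uio,
	    BUS_DMA_NOWAIT);
#endif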

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
	int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
	bus_size_t len, int ops)
{
#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */
	/*
	 * The VAX only has a snooping cache, so this routine is a no-op.
	 */
	return;
}
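
/*
 * Even though the sync is a no-op here, portable drivers must still
 * bracket DMA with sync calls so the same code works on ports that
 * flush caches or copy bounce buffers.  A typical write-side pairing
 * (sketch, hypothetical names):
 */
#if 0	/* example only */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
	    BUS_DMASYNC_PREWRITE);
	/* ... the device reads the buffer via DMA ... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
	    BUS_DMASYNC_POSTWRITE);
#endif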

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs,
	int nsegs, int *rsegs, int flags)
{
	int error;

	error = (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, round_page(avail_start),
	    trunc_page(avail_end)));
	return (error);
}
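
/*
 * Sketch: the usual allocate-then-map sequence for a DMA-safe control
 * structure such as a descriptor ring.  Since _bus_dmamap_load_raw()
 * panics on vax, the mapped kva would then be loaded with
 * bus_dmamap_load() instead.  Variable names are hypothetical.
 */
#if 0	/* example only */
	bus_dma_segment_t seg;
	int rseg;
	void *kva;

	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error == 0)
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
		    &kva, BUS_DMA_NOWAIT);
#endif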

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * Special case (but common):
	 * If there is only one physical segment, return the already-mapped
	 * virtual address, since all physical memory is permanently mapped
	 * at the beginning of kernel virtual memory.
	 */
	if (nsegs == 1) {
		*kvap = (void *)(segs[0].ds_addr | KERNBASE);
		/*
		 * KA43 (3100/m76) must have its DMA-safe memory accessed
		 * through DIAGMEM.  Remap it here.
		 */
		if (vax_boardtype == VAX_BTYP_43) {
			pmap_map((vaddr_t)*kvap, segs[0].ds_addr|KA43_DIAGMEM,
			    (segs[0].ds_addr|KA43_DIAGMEM) + size,
			    VM_PROT_READ|VM_PROT_WRITE);
		}
		return 0;
	}
	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			if (vax_boardtype == VAX_BTYP_43)
				addr |= KA43_DIAGMEM;
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());
	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%zx\n", t, kva, size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	/* Avoid freeing if not mapped (i.e. the single-segment case above) */
	if (kva < (void *)virtual_avail)
		return;

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif	/* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (btop((u_long)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}
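
/*
 * Sketch: a driver's d_mmap entry point would typically forward here
 * through bus_dmamem_mmap(); xx_cd, sc_segs and sc_nsegs are
 * hypothetical names for the cfdriver and the softc members holding
 * the segments from bus_dmamem_alloc().
 */
#if 0	/* example only */
static paddr_t
xxmmap(dev_t dev, off_t off, int prot)
{
	struct xx_softc *sc = device_lookup_private(&xx_cd, minor(dev));

	if (sc == NULL)
		return (-1);
	return (bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs, sc->sc_nsegs,
	    off, prot, 0));
}
#endif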

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
	bus_size_t buflen, struct vmspace *vm, int flags, vaddr_t *lastaddrp,
	int *segp, bool first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

#ifdef DEBUG_DMA
	printf("_bus_dmamem_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
	    buf, buflen, flags, first);
#endif	/* DEBUG_DMA */

	pmap = vm_map_pmap(&vm->vm_map);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		(void) pmap_extract(pmap, vaddr, &curaddr);

#if 0
		/*
		 * Make sure we're in an allowed DMA range.
		 */
		if (t->_ranges != NULL &&
		    _bus_dma_inrange(t->_ranges, t->_nranges, curaddr) == 0)
			return (EINVAL);
#endif

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = false;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			    map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			    (map->dm_segs[seg].ds_addr & bmask) ==
			    (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
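
/*
 * Worked example of the coalescing above, with hypothetical addresses,
 * PAGE_SIZE = 0x1000 and a large dm_maxsegsz: a buffer covering two
 * physically contiguous pages at 0x105000 and 0x106000 becomes one
 * segment {0x105000, 0x2000}, because the second chunk starts exactly
 * at lastaddr.  With _dm_boundary = 0x2000 the same buffer becomes two
 * segments, {0x105000, 0x1000} and {0x106000, 0x1000}, since the two
 * chunks lie on opposite sides of the 0x106000 boundary (their
 * addresses differ under bmask).
 */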

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
int
_bus_dma_inrange(bus_dma_segment_t *ranges, int nranges, bus_addr_t curaddr)
{
	bus_dma_segment_t *ds;
	int i;

	for (i = 0, ds = ranges; i < nranges; i++, ds++) {
		if (curaddr >= ds->ds_addr &&
		    round_page(curaddr) <= (ds->ds_addr + ds->ds_len))
			return (1);
	}

	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags, vaddr_t low, vaddr_t high)
{
	vaddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned nonsensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * "generic" DMA struct, nothing special.
 */
struct vax_bus_dma_tag vax_bus_dma_tag = {
	._dmamap_create = _bus_dmamap_create,
	._dmamap_destroy = _bus_dmamap_destroy,
	._dmamap_load = _bus_dmamap_load,
	._dmamap_load_mbuf = _bus_dmamap_load_mbuf,
	._dmamap_load_uio = _bus_dmamap_load_uio,
	._dmamap_load_raw = _bus_dmamap_load_raw,
	._dmamap_unload = _bus_dmamap_unload,
	._dmamap_sync = _bus_dmamap_sync,
	._dmamem_alloc = _bus_dmamem_alloc,
	._dmamem_free = _bus_dmamem_free,
	._dmamem_map = _bus_dmamem_map,
	._dmamem_unmap = _bus_dmamem_unmap,
	._dmamem_mmap = _bus_dmamem_mmap,
};
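
/*
 * Sketch: a bus front-end needing its own hooks would copy this tag
 * and override individual members before handing it to its children.
 * The function and field names in this override are hypothetical.
 */
#if 0	/* example only */
static struct vax_bus_dma_tag xxbus_dma_tag;

void
xxbus_dma_init(void)
{
	xxbus_dma_tag = vax_bus_dma_tag;		/* struct copy */
	xxbus_dma_tag._dmamap_sync = xxbus_dmamap_sync;	/* custom sync */
}
#endif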