/*	$NetBSD: iommu.c,v 1.41 2000/05/23 11:39:58 pk Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1995 Paul Kranenburg
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by Paul Kranenburg.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/autoconf.h>
#include <machine/ctlreg.h>
#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/iommureg.h>
#include <sparc/sparc/iommuvar.h>

struct iommu_softc {
	struct device	sc_dev;		/* base device */
	struct iommureg	*sc_reg;
	u_int		sc_pagesize;
	u_int		sc_range;
	bus_addr_t	sc_dvmabase;
	iopte_t		*sc_ptes;
	int		sc_hasiocache;
};
struct iommu_softc *iommu_sc;	/*XXX*/
int has_iocache;
u_long dvma_cachealign;

/*
 * Note: operations on the extent map are being protected with
 * splhigh(), since we cannot predict at which interrupt priority
 * our clients will run.
 */
struct extent *iommu_dvmamap;
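
/*
 * Illustrative sketch (kept under #if 0, not compiled): the splhigh()-
 * bracketed usage of `iommu_dvmamap' described in the comment above.
 * The function name, size and flags are made-up example values only.
 */
#if 0
static void
iommu_dvma_example(void)
{
	u_long dva;
	int s, error;

	/* Allocate one page of DVMA space at raised priority */
	s = splhigh();
	error = extent_alloc_subregion1(iommu_dvmamap,
	    IOMMU_DVMA_BASE, IOMMU_DVMA_END,
	    PAGE_SIZE, PAGE_SIZE, 0, 0, EX_NOWAIT, &dva);
	splx(s);
	if (error != 0)
		return;

	/* ... program IOMMU PTEs for `dva', do the DMA ... */

	/* Release the range, again at raised priority */
	s = splhigh();
	(void)extent_free(iommu_dvmamap, dva, PAGE_SIZE, EX_NOWAIT);
	splx(s);
}
#endif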


/* autoconfiguration driver */
int	iommu_print __P((void *, const char *));
void	iommu_attach __P((struct device *, struct device *, void *));
int	iommu_match __P((struct device *, struct cfdata *, void *));

struct cfattach iommu_ca = {
	sizeof(struct iommu_softc), iommu_match, iommu_attach
};

/* IOMMU DMA map functions */
int	iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
		bus_size_t, struct proc *, int));
int	iommu_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
		struct mbuf *, int));
int	iommu_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
		struct uio *, int));
int	iommu_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
		bus_dma_segment_t *, int, bus_size_t, int));
void	iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
		bus_size_t, int));

int	iommu_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
		int nsegs, size_t size, caddr_t *kvap, int flags));
int	iommu_dmamem_mmap __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
		int nsegs, int off, int prot, int flags));
int	iommu_dvma_alloc(bus_dmamap_t, vaddr_t, bus_size_t, int,
		bus_addr_t *, bus_size_t *);


struct sparc_bus_dma_tag iommu_dma_tag = {
	NULL,
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	iommu_dmamap_load,
	iommu_dmamap_load_mbuf,
	iommu_dmamap_load_uio,
	iommu_dmamap_load_raw,
	iommu_dmamap_unload,
	iommu_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	iommu_dmamem_map,
	_bus_dmamem_unmap,
	iommu_dmamem_mmap
};
/*
 * Print the location of some iommu-attached device (called just
 * before attaching that device).  If `iommu' is not NULL, the
 * device was found but not configured; print the iommu as well.
 * Return UNCONF (config_find ignores this if the device was configured).
 */
int
iommu_print(args, iommu)
	void *args;
	const char *iommu;
{
	struct iommu_attach_args *ia = args;

	if (iommu)
		printf("%s at %s", ia->iom_name, iommu);
	return (UNCONF);
}

int
iommu_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct mainbus_attach_args *ma = aux;

	if (CPU_ISSUN4OR4C)
		return (0);
	return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
}

/*
 * Attach the iommu.
 */
void
iommu_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
#if defined(SUN4M)
	struct iommu_softc *sc = (struct iommu_softc *)self;
	struct mainbus_attach_args *ma = aux;
	int node;
	bus_space_handle_t bh;
	u_int pbase, pa;
	int i, mmupcrsave, s;
	iopte_t *tpte_p;
	extern u_int *kernel_iopte_table;
	extern u_int kernel_iopte_table_pa;

/*XXX-GCC!*/mmupcrsave=0;
	iommu_sc = sc;
	/*
	 * XXX there is only one iommu, for now -- do not know how to
	 * address children on others
	 */
	if (sc->sc_dev.dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}
	node = ma->ma_node;

#if 0
	if (ra->ra_vaddr)
		sc->sc_reg = (struct iommureg *)ca->ca_ra.ra_vaddr;
#else
	/*
	 * Map registers into our space. The PROM may have done this
	 * already, but I feel better if we have our own copy. Plus, the
	 * prom doesn't map the entire register set
	 *
	 * XXX struct iommureg is bigger than ra->ra_len; what are the
	 * other fields for?
	 */
	if (bus_space_map2(
			ma->ma_bustag,
			ma->ma_iospace,
			ma->ma_paddr,
			sizeof(struct iommureg),
			0,
			0,
			&bh) != 0) {
		printf("iommu_attach: cannot map registers\n");
		return;
	}
	sc->sc_reg = (struct iommureg *)bh;
#endif

	sc->sc_hasiocache = node_has_property(node, "cache-coherence?");
	if (CACHEINFO.c_enabled == 0) /* XXX - is this correct? */
		sc->sc_hasiocache = 0;
	has_iocache = sc->sc_hasiocache; /* Set global flag */

	sc->sc_pagesize = getpropint(node, "page-size", NBPG);
	sc->sc_range = (1 << 24) <<
	    ((sc->sc_reg->io_cr & IOMMU_CTL_RANGE) >> IOMMU_CTL_RANGESHFT);
#if 0
	sc->sc_dvmabase = (0 - sc->sc_range);
#endif
	pbase = (sc->sc_reg->io_bar & IOMMU_BAR_IBA) <<
			(14 - IOMMU_BAR_IBASHFT);

	/*
	 * Now we build our own copy of the IOMMU page tables. We need to
	 * do this since we're going to change the range to give us 64M of
	 * mappings, and thus we can move DVMA space down to 0xfd000000 to
	 * give us lots of space and to avoid bumping into the PROM, etc.
	 *
	 * XXX Note that this is rather messy.
	 */
	sc->sc_ptes = (iopte_t *) kernel_iopte_table;

	/*
	 * Now discache the page tables so that the IOMMU sees our
	 * changes.
	 */
	kvm_uncache((caddr_t)sc->sc_ptes,
	    (((0 - IOMMU_DVMA_BASE)/sc->sc_pagesize) * sizeof(iopte_t)) / NBPG);

	/*
	 * Ok. We've got to read in the original table using MMU bypass,
	 * and copy all of its entries to the appropriate place in our
	 * new table, even if the sizes are different.
	 * This is pretty easy since we know DVMA ends at 0xffffffff.
	 *
	 * XXX: PGOFSET, NBPG assume same page size as SRMMU
	 */
	if (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc) {
		/* set MMU AC bit */
		sta(SRMMU_PCR, ASI_SRMMU,
		    ((mmupcrsave = lda(SRMMU_PCR, ASI_SRMMU)) | VIKING_PCR_AC));
	}

	for (tpte_p = &sc->sc_ptes[((0 - IOMMU_DVMA_BASE)/NBPG) - 1],
	     pa = (u_int)pbase - sizeof(iopte_t) +
		   ((u_int)sc->sc_range/NBPG)*sizeof(iopte_t);
	     tpte_p >= &sc->sc_ptes[0] && pa >= (u_int)pbase;
	     tpte_p--, pa -= sizeof(iopte_t)) {

		IOMMU_FLUSHPAGE(sc,
			(tpte_p - &sc->sc_ptes[0])*NBPG + IOMMU_DVMA_BASE);
		*tpte_p = lda(pa, ASI_BYPASS);
	}
	if (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc) {
		/* restore mmu after bug-avoidance */
		sta(SRMMU_PCR, ASI_SRMMU, mmupcrsave);
	}

	/*
	 * Now we can install our new pagetable into the IOMMU
	 */
	sc->sc_range = 0 - IOMMU_DVMA_BASE;
	sc->sc_dvmabase = IOMMU_DVMA_BASE;

	/* calculate log2(sc->sc_range/16MB) */
	i = ffs(sc->sc_range/(1 << 24)) - 1;
	if ((1 << i) != (sc->sc_range/(1 << 24)))
		panic("bad iommu range: %d\n", i);

	s = splhigh();
	IOMMU_FLUSHALL(sc);

	sc->sc_reg->io_cr = (sc->sc_reg->io_cr & ~IOMMU_CTL_RANGE) |
			  (i << IOMMU_CTL_RANGESHFT) | IOMMU_CTL_ME;
	sc->sc_reg->io_bar = (kernel_iopte_table_pa >> 4) & IOMMU_BAR_IBA;

	IOMMU_FLUSHALL(sc);
	splx(s);

	printf(": version 0x%x/0x%x, page-size %d, range %dMB\n",
		(sc->sc_reg->io_cr & IOMMU_CTL_VER) >> 24,
		(sc->sc_reg->io_cr & IOMMU_CTL_IMPL) >> 28,
		sc->sc_pagesize,
		sc->sc_range >> 20);

	iommu_dvmamap = extent_create("iommudvma",
					IOMMU_DVMA_BASE, IOMMU_DVMA_END,
					M_DEVBUF, 0, 0, EX_NOWAIT);
	if (iommu_dvmamap == NULL)
		panic("iommu: unable to allocate DVMA map");

	/*
	 * Loop through ROM children (expect Sbus among them).
	 */
	for (node = firstchild(node); node; node = nextsibling(node)) {
		struct iommu_attach_args ia;

		bzero(&ia, sizeof ia);
		ia.iom_name = getpropstring(node, "name");

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &iommu_dma_tag;

		ia.iom_node = node;

		ia.iom_reg = NULL;
		getprop(node, "reg", sizeof(struct sbus_reg),
			&ia.iom_nreg, (void **)&ia.iom_reg);

		(void) config_found(&sc->sc_dev, (void *)&ia, iommu_print);
		if (ia.iom_reg != NULL)
			free(ia.iom_reg, M_DEVBUF);
	}
#endif
}
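
/*
 * Illustrative sketch (kept under #if 0, not compiled): the relation
 * between the RANGE field of the IOMMU control register and the DVMA
 * window size, as used in iommu_attach() above.  E.g. a 64MB window
 * corresponds to a RANGE field of log2(64MB / 16MB) = 2.  The helper
 * names below are hypothetical.
 */
#if 0
static u_int
iommu_range_to_bytes(u_int range_field)
{

	/* window size = 16MB << RANGE */
	return ((1 << 24) << range_field);
}

static int
iommu_bytes_to_range(u_int bytes)
{

	/* inverse of the above, as computed with ffs() in iommu_attach() */
	return (ffs(bytes / (1 << 24)) - 1);
}
#endif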

void
iommu_enter(dva, pa)
	bus_addr_t dva;
	paddr_t pa;
{
	struct iommu_softc *sc = iommu_sc;
	int pte;

	/* This routine relies on the fact that sc->sc_pagesize == PAGE_SIZE */

#ifdef DIAGNOSTIC
	if (dva < sc->sc_dvmabase)
		panic("iommu_enter: dva 0x%lx not in DVMA space", (long)dva);
#endif

	pte = atop(pa) << IOPTE_PPNSHFT;
	pte &= IOPTE_PPN;
	pte |= IOPTE_V | IOPTE_W | (has_iocache ? IOPTE_C : 0);
	sc->sc_ptes[atop(dva - sc->sc_dvmabase)] = pte;
	IOMMU_FLUSHPAGE(sc, dva);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 */
void
iommu_remove(va, len)
	bus_addr_t va;
	bus_size_t len;
{
	struct iommu_softc *sc = iommu_sc;
	u_int pagesz = sc->sc_pagesize;
	bus_addr_t base = sc->sc_dvmabase;

#ifdef DEBUG
	if (va < base)
		panic("iommu_remove: va 0x%lx not in DVMA space", (long)va);
#endif

	while ((long)len > 0) {
#ifdef notyet
#ifdef DEBUG
		if ((sc->sc_ptes[atop(va - base)] & IOPTE_V) == 0)
			panic("iommu_remove: clearing invalid pte at va 0x%lx",
			      (long)va);
#endif
#endif
		sc->sc_ptes[atop(va - base)] = 0;
		IOMMU_FLUSHPAGE(sc, va);
		len -= pagesz;
		va += pagesz;
	}
}
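
/*
 * Illustrative sketch (kept under #if 0, not compiled): how
 * iommu_enter()/iommu_remove() pair up to map and unmap one page of a
 * kernel buffer at a DVMA address.  `dva' is assumed to have been
 * allocated from iommu_dvmamap; the function name is hypothetical.
 */
#if 0
static void
iommu_map_one_page(bus_addr_t dva, vaddr_t va)
{
	paddr_t pa;

	/* Translate the kernel VA to a physical address */
	(void) pmap_extract(pmap_kernel(), va, &pa);
	iommu_enter(dva, pa);

	/* ... perform DMA to/from `dva' ... */

	iommu_remove(dva, PAGE_SIZE);
}
#endif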

#if 0	/* These registers aren't there??? */
void
iommu_error()
{
	struct iommu_softc *sc = X;
	struct iommureg *iop = sc->sc_reg;

	printf("iommu: afsr 0x%x, afar 0x%x\n", iop->io_afsr, iop->io_afar);
	printf("iommu: mfsr 0x%x, mfar 0x%x\n", iop->io_mfsr, iop->io_mfar);
}
int
iommu_alloc(va, len)
	u_int va, len;
{
	struct iommu_softc *sc = X;
	int off, tva, iovaddr, pte;
	paddr_t pa;

	off = (int)va & PGOFSET;
	len = round_page(len + off);
	va -= off;

	if ((int)sc->sc_dvmacur + len > 0)
		sc->sc_dvmacur = sc->sc_dvmabase;

	iovaddr = tva = sc->sc_dvmacur;
	sc->sc_dvmacur += len;
	while (len) {
		(void) pmap_extract(pmap_kernel(), va, &pa);

#define IOMMU_PPNSHIFT	8
#define IOMMU_V		0x00000002
#define IOMMU_W		0x00000004

		pte = atop(pa) << IOMMU_PPNSHIFT;
		pte |= IOMMU_V | IOMMU_W;
		sta(sc->sc_ptes + atop(tva - sc->sc_dvmabase), ASI_BYPASS, pte);
		sc->sc_reg->io_flushpage = tva;
		len -= NBPG;
		va += NBPG;
		tva += NBPG;
	}
	return iovaddr + off;
}
#endif


/*
 * Internal routine to allocate space in the IOMMU map.
 */
int
iommu_dvma_alloc(map, va, len, flags, dvap, sgsizep)
	bus_dmamap_t map;
	vaddr_t va;
	bus_size_t len;
	int flags;
	bus_addr_t *dvap;
	bus_size_t *sgsizep;
{
	bus_size_t sgsize;
	u_long align, voff;
	u_long ex_start, ex_end;
	int s, error;
	int pagesz = PAGE_SIZE;

	/*
	 * Remember page offset, then truncate the buffer address to
	 * a page boundary.
	 */
	voff = va & (pagesz - 1);
	va &= -pagesz;

	if (len > map->_dm_size)
		return (EINVAL);

	sgsize = (len + voff + pagesz - 1) & -pagesz;
	align = dvma_cachealign ? dvma_cachealign : pagesz;

	s = splhigh();

	/* Check `24 address bits' in the map's attributes */
	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
		ex_start = D24_DVMA_BASE;
		ex_end = D24_DVMA_END;
	} else {
		ex_start = iommu_dvmamap->ex_start;
		ex_end = iommu_dvmamap->ex_end;
	}
	error = extent_alloc_subregion1(iommu_dvmamap,
					ex_start, ex_end,
					sgsize, align, va & (align-1),
					map->_dm_boundary,
					(flags & BUS_DMA_NOWAIT) == 0
						? EX_WAITOK : EX_NOWAIT,
					(u_long *)dvap);
	splx(s);

	*sgsizep = sgsize;
	return (error);
}
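
/*
 * Worked example for the rounding above (illustrative values only):
 * with va = 0x2345 and len = 0x2000 on 4KB pages, voff = 0x345, so the
 * request spans 0x345 + 0x2000 = 0x2345 bytes and rounds up to
 * sgsize = 0x3000 (three pages).  The skew argument (va & (align-1))
 * keeps the allocated DVMA range congruent to `va' modulo the cache
 * alignment.
 */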

/*
 * IOMMU DMA map functions.
 */
int
iommu_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_size_t sgsize;
	bus_addr_t dva;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	pmap_t pmap;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	/* Allocate IOMMU resources */
	if ((error = iommu_dvma_alloc(map, va, buflen, flags,
					&dva, &sgsize)) != 0)
		return (error);

	cpuinfo.cache_flush(buf, buflen); /* XXX - move to bus_dma_sync? */

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; sgsize != 0; ) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

		iommu_enter(dva, pa);

		dva += pagesz;
		va += pagesz;
		sgsize -= pagesz;
	}

	return (0);
}
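
/*
 * Illustrative sketch (kept under #if 0, not compiled): the driver-side
 * bus_dma(9) calls that end up in the iommu_dmamap_* entry points above
 * when a device sits behind `iommu_dma_tag'.  The function name, sizes
 * and flags are example values only.
 */
#if 0
static int
iommu_dma_usage_example(bus_dma_tag_t dmat, void *buf, bus_size_t len)
{
	bus_dmamap_t map;
	int error;

	/* Create a single-segment map */
	error = bus_dmamap_create(dmat, len, 1, len, 0,
	    BUS_DMA_NOWAIT, &map);
	if (error != 0)
		return (error);

	/* Ends up in iommu_dmamap_load() via the tag */
	error = bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error == 0) {
		/* ... start the DMA, wait for completion ... */
		bus_dmamap_unload(dmat, map);
	}
	bus_dmamap_destroy(dmat, map);
	return (error);
}
#endif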

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
iommu_dmamap_load_mbuf(t, map, m, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m;
	int flags;
{

	panic("iommu_dmamap_load_mbuf: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
iommu_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("iommu_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
iommu_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{
	vm_page_t m;
	paddr_t pa;
	bus_addr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	map->dm_nsegs = 0;

	/* Allocate IOMMU resources */
	if ((error = iommu_dvma_alloc(map, segs[0]._ds_va, size,
				      flags, &dva, &sgsize)) != 0)
		return (error);

	/*
	 * Note DVMA address in case bus_dmamem_map() is called later.
	 * It can then ensure cache coherency by choosing a KVA that
	 * is aligned to `ds_addr'.
	 */
	segs[0].ds_addr = dva;
	segs[0].ds_len = size;

	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into IOMMU */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
		iommu_enter(dva, pa);
		dva += pagesz;
		sgsize -= pagesz;
	}

	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}

/*
 * Unload an IOMMU DMA map.
 */
void
iommu_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	bus_addr_t dva;
	bus_size_t len;
	int i, s, error;

	for (i = 0; i < nsegs; i++) {
		dva = segs[i].ds_addr & -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		iommu_remove(dva, len);
		s = splhigh();
		error = extent_free(iommu_dvmamap, dva, len, EX_NOWAIT);
		splx(s);
		if (error != 0)
			printf("warning: %ld bytes of DVMA space lost\n",
			    (long)len);
	}

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * DMA map synchronization.
 */
void
iommu_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should flush CPU write buffers.
	 */
}

/*
 * Map DMA-safe memory.
 */
int
iommu_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vm_page_t m;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *mlist;
	int cbit;
	u_long align;
	int pagesz = PAGE_SIZE;

	if (nsegs != 1)
		panic("iommu_dmamem_map: nsegs = %d", nsegs);

	cbit = has_iocache ? 0 : PMAP_NC;
	align = dvma_cachealign ? dvma_cachealign : pagesz;

	size = round_page(size);

	/*
	 * In case the segment has already been loaded by
	 * iommu_dmamap_load_raw(), find a region of kernel virtual
	 * addresses that can accommodate our alignment requirements.
	 */
	va = _bus_dma_valloc_skewed(size, 0, align,
				    segs[0].ds_addr & (align - 1));
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (caddr_t)va;

	/*
	 * Map the pages allocated in _bus_dmamem_alloc() to the
	 * kernel virtual address space.
	 */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {

		if (size == 0)
			panic("iommu_dmamem_map: size botch");

		addr = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), va, addr | cbit,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
#if 0
		if (flags & BUS_DMA_COHERENT)
			/* XXX */;
#endif
		va += pagesz;
		size -= pagesz;
	}

	return (0);
}
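
/*
 * Illustrative sketch (kept under #if 0, not compiled): the bus_dmamem(9)
 * path that leads into iommu_dmamem_map() and iommu_dmamap_load_raw()
 * above.  Error unwinding is omitted; the function name, sizes and flags
 * are example values only, and the reverse ordering (load_raw before
 * bus_dmamem_map) is also supported, as the comments above note.
 */
#if 0
static int
iommu_dmamem_usage_example(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_size_t size)
{
	bus_dma_segment_t seg;
	caddr_t kva;
	int rseg, error;

	/* Allocate DMA-safe memory (one segment) */
	error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);

	/* Ends up in iommu_dmamem_map() via the tag */
	error = bus_dmamem_map(dmat, &seg, rseg, size, &kva, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);

	/* Ends up in iommu_dmamap_load_raw() via the tag */
	return (bus_dmamap_load_raw(dmat, map, &seg, rseg, size,
	    BUS_DMA_NOWAIT));
}
#endif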

/*
 * mmap(2)'ing DMA-safe memory.
 */
int
iommu_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs, off, prot, flags;
{

	panic("iommu_dmamem_mmap: not implemented");
}