/*	$NetBSD: iommu.c,v 1.65 2002/07/17 04:55:57 thorpej Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1995	Paul Kranenburg
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by Paul Kranenburg.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include "opt_sparc_arch.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/autoconf.h>
#include <machine/ctlreg.h>
#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/iommureg.h>
#include <sparc/sparc/iommuvar.h>

struct iommu_softc {
	struct device	sc_dev;		/* base device */
	struct iommureg	*sc_reg;
	u_int		sc_pagesize;
	u_int		sc_range;
	bus_addr_t	sc_dvmabase;
	iopte_t		*sc_ptes;
	int		sc_hasiocache;
};
struct iommu_softc *iommu_sc;/*XXX*/
int has_iocache;

/*
 * Note: operations on the extent map are being protected with
 * splhigh(), since we cannot predict at which interrupt priority
 * our clients will run.
 */
struct extent *iommu_dvmamap;


/* autoconfiguration driver */
int	iommu_print __P((void *, const char *));
void	iommu_attach __P((struct device *, struct device *, void *));
int	iommu_match __P((struct device *, struct cfdata *, void *));

#if defined(SUN4M)
static void iommu_copy_prom_entries __P((struct iommu_softc *));
#endif

struct cfattach iommu_ca = {
	sizeof(struct iommu_softc), iommu_match, iommu_attach
};

/* IOMMU DMA map functions */
int	iommu_dmamap_create __P((bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *));
int	iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	iommu_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	iommu_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	iommu_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int));

int	iommu_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
	    int nsegs, size_t size, caddr_t *kvap, int flags));
paddr_t	iommu_dmamem_mmap __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
	    int nsegs, off_t off, int prot, int flags));
int	iommu_dvma_alloc(bus_dmamap_t, vaddr_t, bus_size_t, int,
	    bus_addr_t *, bus_size_t *);


struct sparc_bus_dma_tag iommu_dma_tag = {
	NULL,
	iommu_dmamap_create,
	_bus_dmamap_destroy,
	iommu_dmamap_load,
	iommu_dmamap_load_mbuf,
	iommu_dmamap_load_uio,
	iommu_dmamap_load_raw,
	iommu_dmamap_unload,
	iommu_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	iommu_dmamem_map,
	_bus_dmamem_unmap,
	iommu_dmamem_mmap
};
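
/*
 * Note on the tag above: the map create/load/unload/sync entry points
 * and iommu_dmamem_map/iommu_dmamem_mmap are overridden here so that
 * DVMA space comes from the IOMMU extent map and I/O PTEs get
 * installed; _bus_dmamap_destroy, _bus_dmamem_alloc, _bus_dmamem_free
 * and _bus_dmamem_unmap fall through to the generic implementations.
 */
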
/*
 * Print the location of some iommu-attached device (called just
 * before attaching that device).  If `iommu' is not NULL, the
 * device was found but not configured; print the iommu as well.
 * Return UNCONF (config_find ignores this if the device was configured).
 */
int
iommu_print(args, iommu)
	void *args;
	const char *iommu;
{
	struct iommu_attach_args *ia = args;

	if (iommu)
		printf("%s at %s", ia->iom_name, iommu);
	return (UNCONF);
}

int
iommu_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct mainbus_attach_args *ma = aux;

	if (CPU_ISSUN4 || CPU_ISSUN4C)
		return (0);
	return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
}

/*
 * Attach the iommu.
 */
void
iommu_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
#if defined(SUN4M)
	struct iommu_softc *sc = (struct iommu_softc *)self;
	struct mainbus_attach_args *ma = aux;
	bus_space_handle_t bh;
	int node;
	int js1_implicit_iommu;
	int i, s;
	u_int iopte_table_pa;
	struct pglist mlist;
	u_int size;
	struct vm_page *m;
	vaddr_t va;

	/*
	 * XXX there is only one iommu, for now -- do not know how to
	 * address children on others
	 */
	if (sc->sc_dev.dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}
	iommu_sc = sc;

	/*
	 * The JS1/OF device tree does not have an iommu node and the sbus
	 * node is directly under root.  mainbus_attach detects this and
	 * calls us with the sbus node instead, so that we can attach an
	 * implicit iommu and attach that sbus node under it.
	 */
	node = ma->ma_node;
	if (strcmp(PROM_getpropstring(node, "name"), "sbus") == 0)
		js1_implicit_iommu = 1;
	else
		js1_implicit_iommu = 0;

	/*
	 * Map registers into our space.  The PROM may have done this
	 * already, but I feel better if we have our own copy.  Plus, the
	 * PROM doesn't map the entire register set.
	 *
	 * XXX struct iommureg is bigger than ra->ra_len; what are the
	 * other fields for?
	 */
	if (bus_space_map(
			ma->ma_bustag,
			ma->ma_paddr,
			sizeof(struct iommureg),
			0,
			&bh) != 0) {
		printf("iommu_attach: cannot map registers\n");
		return;
	}
	sc->sc_reg = (struct iommureg *)bh;

	sc->sc_hasiocache = js1_implicit_iommu ? 0
		: node_has_property(node, "cache-coherence?");
	if (CACHEINFO.c_enabled == 0) /* XXX - is this correct? */
		sc->sc_hasiocache = 0;
	has_iocache = sc->sc_hasiocache; /* Set global flag */

	sc->sc_pagesize = js1_implicit_iommu ? NBPG
		: PROM_getpropint(node, "page-size", NBPG);

	/*
	 * Allocate memory for I/O pagetables.
	 * This takes 64K of contiguous physical memory to map 64M of
	 * DVMA space (starting at IOMMU_DVMA_BASE).
	 * The table must be aligned on a (-IOMMU_DVMA_BASE/pagesize)
	 * boundary (i.e. 64K for 64M of DVMA space).
	 */
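
	/*
	 * A worked instance of the arithmetic above, assuming the
	 * default 4KB page size: (0 - IOMMU_DVMA_BASE) spans 64MB,
	 * so 64MB/4KB = 16384 IOPTEs at 4 bytes each, i.e. a 64KB
	 * table aligned on a 64KB boundary.
	 */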

	size = ((0 - IOMMU_DVMA_BASE) / sc->sc_pagesize) * sizeof(iopte_t);
	if (uvm_pglistalloc(size, vm_first_phys, vm_first_phys+vm_num_phys,
			    size, 0, &mlist, 1, 0) != 0)
		panic("iommu_attach: no memory");

	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		panic("iommu_attach: no memory");

	sc->sc_ptes = (iopte_t *)va;

	m = TAILQ_FIRST(&mlist);
	iopte_table_pa = VM_PAGE_TO_PHYS(m);

	/* Map the pages */
	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		paddr_t pa = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, pa | PMAP_NC, VM_PROT_READ | VM_PROT_WRITE);
		va += NBPG;
	}
	pmap_update(pmap_kernel());

	/*
	 * Copy entries from current IOMMU table.
	 * XXX - Why do we need to do this?
	 */
	iommu_copy_prom_entries(sc);

	/*
	 * Now we can install our new pagetable into the IOMMU
	 */
	sc->sc_range = 0 - IOMMU_DVMA_BASE;
	sc->sc_dvmabase = IOMMU_DVMA_BASE;

	/* calculate log2(sc->sc_range/16MB); e.g. a 64MB range gives i == 2 */
	i = ffs(sc->sc_range/(1 << 24)) - 1;
	if ((1 << i) != (sc->sc_range/(1 << 24)))
		panic("iommu: bad range: %d\n", i);

	s = splhigh();
	IOMMU_FLUSHALL(sc);

	/* Load range and physical address of PTEs */
	sc->sc_reg->io_cr = (sc->sc_reg->io_cr & ~IOMMU_CTL_RANGE) |
			  (i << IOMMU_CTL_RANGESHFT) | IOMMU_CTL_ME;
	sc->sc_reg->io_bar = (iopte_table_pa >> 4) & IOMMU_BAR_IBA;

	IOMMU_FLUSHALL(sc);
	splx(s);

	printf(": version 0x%x/0x%x, page-size %d, range %dMB\n",
		(sc->sc_reg->io_cr & IOMMU_CTL_VER) >> 24,
		(sc->sc_reg->io_cr & IOMMU_CTL_IMPL) >> 28,
		sc->sc_pagesize,
		sc->sc_range >> 20);

	iommu_dvmamap = extent_create("iommudvma",
					IOMMU_DVMA_BASE, IOMMU_DVMA_END,
					M_DEVBUF, 0, 0, EX_NOWAIT);
	if (iommu_dvmamap == NULL)
		panic("iommu: unable to allocate DVMA map");

	/*
	 * If we are attaching the implicit iommu on JS1/OF we do not have
	 * an iommu node to traverse; instead mainbus_attach passed us the
	 * sbus node in ma.ma_node.  Attach it as the only iommu child.
	 */
	if (js1_implicit_iommu) {
		struct iommu_attach_args ia;
		struct iommu_reg sbus_iommu_reg = { 0, 0x10001000, 0x28 };

		bzero(&ia, sizeof ia);

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &iommu_dma_tag;

		ia.iom_name = "sbus";
		ia.iom_node = node;
		ia.iom_reg = &sbus_iommu_reg;
		ia.iom_nreg = 1;

		(void) config_found(&sc->sc_dev, (void *)&ia, iommu_print);
		return;
	}

	/*
	 * Loop through ROM children (expect Sbus among them).
	 */
	for (node = firstchild(node); node; node = nextsibling(node)) {
		struct iommu_attach_args ia;

		bzero(&ia, sizeof ia);
		ia.iom_name = PROM_getpropstring(node, "name");

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &iommu_dma_tag;

		ia.iom_node = node;

		ia.iom_reg = NULL;
		PROM_getprop(node, "reg", sizeof(struct sbus_reg),
			     &ia.iom_nreg, (void **)&ia.iom_reg);

		(void) config_found(&sc->sc_dev, (void *)&ia, iommu_print);
		if (ia.iom_reg != NULL)
			free(ia.iom_reg, M_DEVBUF);
	}
#endif
}

#if defined(SUN4M)
static void
iommu_copy_prom_entries(sc)
	struct iommu_softc *sc;
{
	u_int pbase, pa;
	u_int range;
	iopte_t *tpte_p;
	u_int pagesz = sc->sc_pagesize;
	int use_ac = (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc);
	u_int mmupcr_save;

	/*
	 * We read in the original table using MMU bypass and copy all
	 * of its entries to the appropriate place in our new table,
	 * even if the sizes are different.
	 * This is pretty easy since we know DVMA ends at 0xffffffff.
	 */

	range = (1 << 24) <<
	    ((sc->sc_reg->io_cr & IOMMU_CTL_RANGE) >> IOMMU_CTL_RANGESHFT);

	pbase = (sc->sc_reg->io_bar & IOMMU_BAR_IBA) <<
	    (14 - IOMMU_BAR_IBASHFT);

	if (use_ac) {
		/*
		 * Set MMU AC bit so we'll still read from the cache
		 * in by-pass mode.
		 */
		mmupcr_save = lda(SRMMU_PCR, ASI_SRMMU);
		sta(SRMMU_PCR, ASI_SRMMU, mmupcr_save | VIKING_PCR_AC);
	} else
		mmupcr_save = 0; /* XXX - avoid GCC `uninitialized' warning */

	/* Flush entire IOMMU TLB before messing with the in-memory tables */
	IOMMU_FLUSHALL(sc);

	/*
	 * tpte_p = top of our PTE table
	 * pa = top of current PTE table
	 * Then work downwards and copy entries until we hit the bottom
	 * of either table.
	 */
	for (tpte_p = &sc->sc_ptes[((0 - IOMMU_DVMA_BASE)/pagesz) - 1],
	     pa = (u_int)pbase + (range/pagesz - 1)*sizeof(iopte_t);
	     tpte_p >= &sc->sc_ptes[0] && pa >= (u_int)pbase;
	     tpte_p--, pa -= sizeof(iopte_t)) {

		*tpte_p = lda(pa, ASI_BYPASS);
	}

	if (use_ac) {
		/* restore mmu after bug-avoidance */
		sta(SRMMU_PCR, ASI_SRMMU, mmupcr_save);
	}
}
#endif

void
iommu_enter(dva, pa)
	bus_addr_t dva;
	paddr_t pa;
{
	struct iommu_softc *sc = iommu_sc;
	int pte;

	/* This routine relies on the fact that sc->sc_pagesize == PAGE_SIZE */

#ifdef DIAGNOSTIC
	if (dva < sc->sc_dvmabase)
		panic("iommu_enter: dva 0x%lx not in DVMA space", (long)dva);
#endif

	pte = atop(pa) << IOPTE_PPNSHFT;
	pte &= IOPTE_PPN;
	pte |= IOPTE_V | IOPTE_W | (has_iocache ? IOPTE_C : 0);
	sc->sc_ptes[atop(dva - sc->sc_dvmabase)] = pte;
	IOMMU_FLUSHPAGE(sc, dva);
}
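
/*
 * To illustrate the PTE construction in iommu_enter() above: with 4KB
 * pages and the page frame number field starting at bit 8 (as in the
 * legacy IOMMU_PPNSHIFT definition in the dead code further below),
 * a physical address of 0x00400000 gives atop(pa) == 0x400, so the
 * IOPTE becomes (0x400 << IOPTE_PPNSHFT) | IOPTE_V | IOPTE_W, plus
 * IOPTE_C when the I/O cache is usable.
 */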

/*
 * iommu_remove: removes mappings created by iommu_enter
 */
void
iommu_remove(dva, len)
	bus_addr_t dva;
	bus_size_t len;
{
	struct iommu_softc *sc = iommu_sc;
	u_int pagesz = sc->sc_pagesize;
	bus_addr_t base = sc->sc_dvmabase;

#ifdef DEBUG
	if (dva < base)
		panic("iommu_remove: va 0x%lx not in DVMA space", (long)dva);
#endif

	while ((long)len > 0) {
#ifdef notyet
#ifdef DEBUG
		if ((sc->sc_ptes[atop(dva - base)] & IOPTE_V) == 0)
			panic("iommu_remove: clearing invalid pte at dva 0x%lx",
			      (long)dva);
#endif
#endif
		sc->sc_ptes[atop(dva - base)] = 0;
		IOMMU_FLUSHPAGE(sc, dva);
		len -= pagesz;
		dva += pagesz;
	}
}

#if 0	/* These registers aren't there??? */
void
iommu_error()
{
	struct iommu_softc *sc = X;
	struct iommureg *iop = sc->sc_reg;

	printf("iommu: afsr 0x%x, afar 0x%x\n", iop->io_afsr, iop->io_afar);
	printf("iommu: mfsr 0x%x, mfar 0x%x\n", iop->io_mfsr, iop->io_mfar);
}
int
iommu_alloc(va, len)
	u_int va, len;
{
	struct iommu_softc *sc = X;
	int off, tva, iovaddr, pte;
	paddr_t pa;

	off = (int)va & PGOFSET;
	len = round_page(len + off);
	va -= off;

	if ((int)sc->sc_dvmacur + len > 0)
		sc->sc_dvmacur = sc->sc_dvmabase;

	iovaddr = tva = sc->sc_dvmacur;
	sc->sc_dvmacur += len;
	while (len) {
		(void) pmap_extract(pmap_kernel(), va, &pa);

#define IOMMU_PPNSHIFT	8
#define IOMMU_V		0x00000002
#define IOMMU_W		0x00000004

		pte = atop(pa) << IOMMU_PPNSHIFT;
		pte |= IOMMU_V | IOMMU_W;
		sta(sc->sc_ptes + atop(tva - sc->sc_dvmabase), ASI_BYPASS, pte);
		sc->sc_reg->io_flushpage = tva;
		len -= NBPG;
		va += NBPG;
		tva += NBPG;
	}
	return iovaddr + off;
}
#endif


/*
 * IOMMU DMA map functions.
 */
int
iommu_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	bus_dmamap_t map;
	int error;

	if ((error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
					boundary, flags, &map)) != 0)
		return (error);

	if ((flags & BUS_DMA_24BIT) != 0) {
		/* Limit this map to the range usable by `24-bit' devices */
		map->_dm_ex_start = D24_DVMA_BASE;
		map->_dm_ex_end = D24_DVMA_END;
	} else {
		/* Enable allocations from the entire map */
		map->_dm_ex_start = iommu_dvmamap->ex_start;
		map->_dm_ex_end = iommu_dvmamap->ex_end;
	}

	*dmamp = map;
	return (0);
}
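
/*
 * The BUS_DMA_24BIT case above confines a map to the D24_DVMA_BASE..
 * D24_DVMA_END subrange so that devices which can only drive 24
 * address bits (a 16MB reach) still receive DVMA addresses they can
 * decode; the AMD Lance ethernet is the usual example of such a
 * device on these machines.
 */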

/*
 * Internal routine to allocate space in the IOMMU map.
 */
int
iommu_dvma_alloc(map, va, len, flags, dvap, sgsizep)
	bus_dmamap_t map;
	vaddr_t va;
	bus_size_t len;
	int flags;
	bus_addr_t *dvap;
	bus_size_t *sgsizep;
{
	bus_size_t sgsize;
	u_long align, voff, dvaddr;
	int s, error;
	int pagesz = PAGE_SIZE;

	/*
	 * Remember page offset, then truncate the buffer address to
	 * a page boundary.
	 */
	voff = va & (pagesz - 1);
	va &= -pagesz;

	if (len > map->_dm_size)
		return (EINVAL);

	sgsize = (len + voff + pagesz - 1) & -pagesz;
	align = dvma_cachealign ? dvma_cachealign : map->_dm_align;

	s = splhigh();
	error = extent_alloc_subregion1(iommu_dvmamap,
					map->_dm_ex_start, map->_dm_ex_end,
					sgsize, align, va & (align-1),
					map->_dm_boundary,
					(flags & BUS_DMA_NOWAIT) == 0
						? EX_WAITOK : EX_NOWAIT,
					&dvaddr);
	splx(s);
	*dvap = (bus_addr_t)dvaddr;
	*sgsizep = sgsize;
	return (error);
}
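
/*
 * To make the rounding above concrete: with 4KB pages, a buffer at
 * va 0xf0123456 of length 0x1800 has voff == 0x456, so sgsize rounds
 * (0x1800 + 0x456) up to 0x2000, i.e. two DVMA pages.  The subregion
 * is also allocated so that (dva & (align-1)) matches the buffer's
 * offset within its cache-alignment window, which keeps cache aliases
 * consistent between the CPU and DVMA views of the memory.
 */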

/*
 * Prepare buffer for DMA transfer.
 */
int
iommu_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_size_t sgsize;
	bus_addr_t dva;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	pmap_t pmap;
	int error;

	/*
	 * Make sure that on error conditions we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	/* Allocate IOMMU resources */
	if ((error = iommu_dvma_alloc(map, va, buflen, flags,
					&dva, &sgsize)) != 0)
		return (error);

	cpuinfo.cache_flush(buf, buflen); /* XXX - move to bus_dma_sync? */

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; sgsize != 0; ) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		(void) pmap_extract(pmap, va, &pa);

		iommu_enter(dva, pa);

		dva += pagesz;
		va += pagesz;
		sgsize -= pagesz;
	}

	return (0);
}
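
#if 0
/*
 * Illustrative sketch only (kept under #if 0, like the dead code
 * above): how a hypothetical driver would exercise the load/unload
 * cycle implemented here through the standard bus_dma(9) interface,
 * using the iommu_dma_tag handed down in iommu_attach_args.
 * `iommu_dma_example' and its parameters are made up for this sketch.
 */
static int
iommu_dma_example(bus_dma_tag_t t, void *buf, bus_size_t len)
{
	bus_dmamap_t map;
	int error;

	/* One segment suffices: iommu_dmamap_load() always builds one. */
	error = bus_dmamap_create(t, len, 1, len, 0, BUS_DMA_NOWAIT, &map);
	if (error != 0)
		return (error);

	error = bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamap_destroy(t, map);
		return (error);
	}

	/* map->dm_segs[0].ds_addr now holds the device-visible address */
	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
	/* ... program the device and wait for the transfer ... */
	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(t, map);
	bus_dmamap_destroy(t, map);
	return (0);
}
#endif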

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
iommu_dmamap_load_mbuf(t, map, m, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m;
	int flags;
{

	panic("iommu_dmamap_load_mbuf: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
iommu_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("iommu_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
iommu_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{
	struct vm_page *m;
	paddr_t pa;
	bus_addr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	map->dm_nsegs = 0;

	/* Allocate IOMMU resources */
	if ((error = iommu_dvma_alloc(map, segs[0]._ds_va, size,
				      flags, &dva, &sgsize)) != 0)
		return (error);

	/*
	 * Note DVMA address in case bus_dmamem_map() is called later.
	 * It can then ensure cache coherency by choosing a KVA that
	 * is aligned to `ds_addr'.
	 */
	segs[0].ds_addr = dva;
	segs[0].ds_len = size;

	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into IOMMU */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
		iommu_enter(dva, pa);
		dva += pagesz;
		sgsize -= pagesz;
	}

	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}

/*
 * Unload an IOMMU DMA map.
 */
void
iommu_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	bus_addr_t dva;
	bus_size_t len;
	int i, s, error;

	for (i = 0; i < nsegs; i++) {
		dva = segs[i].ds_addr & -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		iommu_remove(dva, len);
		s = splhigh();
		error = extent_free(iommu_dvmamap, dva, len, EX_NOWAIT);
		splx(s);
		if (error != 0)
			printf("warning: %ld bytes of DVMA space lost\n",
			       (long)len);
	}

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * DMA map synchronization.
 */
void
iommu_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should flush CPU write buffers.
	 */
}

/*
 * Map DMA-safe memory.
 */
int
iommu_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	struct vm_page *m;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *mlist;
	int cbit;
	u_long align;
	int pagesz = PAGE_SIZE;

	if (nsegs != 1)
		panic("iommu_dmamem_map: nsegs = %d", nsegs);

	cbit = has_iocache ? 0 : PMAP_NC;
	align = dvma_cachealign ? dvma_cachealign : pagesz;

	size = round_page(size);

	/*
	 * In case the segment has already been loaded by
	 * iommu_dmamap_load_raw(), find a region of kernel virtual
	 * addresses that can accommodate our alignment requirements.
	 */
	va = _bus_dma_valloc_skewed(size, 0, align,
				    segs[0].ds_addr & (align - 1));
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (caddr_t)va;

	/*
	 * Map the pages allocated in _bus_dmamem_alloc() to the
	 * kernel virtual address space.
	 */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {

		if (size == 0)
			panic("iommu_dmamem_map: size botch");

		addr = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, addr | cbit, VM_PROT_READ | VM_PROT_WRITE);
#if 0
		if (flags & BUS_DMA_COHERENT)
			/* XXX */;
#endif
		va += pagesz;
		size -= pagesz;
	}
	pmap_update(pmap_kernel());

	return (0);
}

/*
 * mmap(2)'ing DMA-safe memory.
 */
paddr_t
iommu_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{

	panic("iommu_dmamem_mmap: not implemented");
}