/*	$NetBSD: iommu.c,v 1.69 2002/09/27 15:36:47 provos Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1995 Paul Kranenburg
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by Paul Kranenburg.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include "opt_sparc_arch.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/autoconf.h>
#include <machine/ctlreg.h>
#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/iommureg.h>
#include <sparc/sparc/iommuvar.h>

struct iommu_softc {
        struct device sc_dev;           /* base device */
        struct iommureg *sc_reg;
        u_int sc_pagesize;
        u_int sc_range;
        bus_addr_t sc_dvmabase;
        iopte_t *sc_ptes;
        int sc_hasiocache;
        /*
         * Note: operations on the extent map are being protected with
         * splhigh(), since we cannot predict at which interrupt priority
         * our clients will run.
         */
        struct sparc_bus_dma_tag sc_dmatag;
        struct extent *sc_dvmamap;
};
static int has_iocache;

/* autoconfiguration driver */
int iommu_print __P((void *, const char *));
void iommu_attach __P((struct device *, struct device *, void *));
int iommu_match __P((struct device *, struct cfdata *, void *));

#if defined(SUN4M)
static void iommu_copy_prom_entries __P((struct iommu_softc *));
#endif

struct cfattach iommu_ca = {
        sizeof(struct iommu_softc), iommu_match, iommu_attach
};

/* IOMMU DMA map functions */
int iommu_dmamap_create __P((bus_dma_tag_t, bus_size_t, int, bus_size_t,
        bus_size_t, int, bus_dmamap_t *));
int iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
        bus_size_t, struct proc *, int));
int iommu_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
        struct mbuf *, int));
int iommu_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
        struct uio *, int));
int iommu_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
        bus_dma_segment_t *, int, bus_size_t, int));
void iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
        bus_size_t, int));

int iommu_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
        int nsegs, size_t size, caddr_t *kvap, int flags));
paddr_t iommu_dmamem_mmap __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
        int nsegs, off_t off, int prot, int flags));
int iommu_dvma_alloc(struct iommu_softc *, bus_dmamap_t, vaddr_t,
        bus_size_t, int, bus_addr_t *, bus_size_t *);

/*
 * Print the location of some iommu-attached device (called just
 * before attaching that device).  If `iommu' is not NULL, the
 * device was found but not configured; print the iommu as well.
 * Return UNCONF (config_find ignores this if the device was configured).
 */
int
iommu_print(args, iommu)
        void *args;
        const char *iommu;
{
        struct iommu_attach_args *ia = args;

        if (iommu)
                printf("%s at %s", ia->iom_name, iommu);
        return (UNCONF);
}

int
iommu_match(parent, cf, aux)
        struct device *parent;
        struct cfdata *cf;
        void *aux;
{
        struct mainbus_attach_args *ma = aux;

        if (CPU_ISSUN4 || CPU_ISSUN4C)
                return (0);
        return (strcmp(cf->cf_name, ma->ma_name) == 0);
}

/*
 * Attach the iommu.
 */
void
iommu_attach(parent, self, aux)
        struct device *parent;
        struct device *self;
        void *aux;
{
#if defined(SUN4M)
        struct iommu_softc *sc = (struct iommu_softc *)self;
        struct mainbus_attach_args *ma = aux;
        struct sparc_bus_dma_tag *dmat = &sc->sc_dmatag;
        bus_space_handle_t bh;
        int node;
        int js1_implicit_iommu;
        int i, s;
        u_int iopte_table_pa;
        struct pglist mlist;
        u_int size;
        struct vm_page *m;
        vaddr_t va;

        dmat->_cookie = sc;
        dmat->_dmamap_create = iommu_dmamap_create;
        dmat->_dmamap_destroy = _bus_dmamap_destroy;
        dmat->_dmamap_load = iommu_dmamap_load;
        dmat->_dmamap_load_mbuf = iommu_dmamap_load_mbuf;
        dmat->_dmamap_load_uio = iommu_dmamap_load_uio;
        dmat->_dmamap_load_raw = iommu_dmamap_load_raw;
        dmat->_dmamap_unload = iommu_dmamap_unload;
        dmat->_dmamap_sync = iommu_dmamap_sync;

        dmat->_dmamem_alloc = _bus_dmamem_alloc;
        dmat->_dmamem_free = _bus_dmamem_free;
        dmat->_dmamem_map = iommu_dmamem_map;
        dmat->_dmamem_unmap = _bus_dmamem_unmap;
        dmat->_dmamem_mmap = iommu_dmamem_mmap;

        /*
         * The JS1/OF device tree does not have an iommu node; the sbus
         * node sits directly under the root.  mainbus_attach detects this
         * and calls us with the sbus node instead, so that we can attach
         * an implicit iommu and attach that sbus node under it.
         */
        node = ma->ma_node;
        if (strcmp(PROM_getpropstring(node, "name"), "sbus") == 0)
                js1_implicit_iommu = 1;
        else
                js1_implicit_iommu = 0;

        /*
         * Map registers into our space. The PROM may have done this
         * already, but I feel better if we have our own copy. Plus, the
         * prom doesn't map the entire register set.
         *
         * XXX struct iommureg is bigger than ra->ra_len; what are the
         * other fields for?
         */
        if (bus_space_map(ma->ma_bustag, ma->ma_paddr,
            sizeof(struct iommureg), 0, &bh) != 0) {
                printf("iommu_attach: cannot map registers\n");
                return;
        }
        sc->sc_reg = (struct iommureg *)bh;

        sc->sc_hasiocache = js1_implicit_iommu ? 0
                : node_has_property(node, "cache-coherence?");
        if (CACHEINFO.c_enabled == 0) /* XXX - is this correct? */
                sc->sc_hasiocache = 0;
        has_iocache = sc->sc_hasiocache; /* Set global flag */

        sc->sc_pagesize = js1_implicit_iommu ? NBPG
                : PROM_getpropint(node, "page-size", NBPG);

        /*
         * Allocate memory for I/O pagetables.
         * This takes 64K of contiguous physical memory to map 64M of
         * DVMA space (starting at IOMMU_DVMA_BASE).
         * The table must be aligned on a (-IOMMU_DVMA_BASE/pagesize)
         * boundary (i.e. 64K for 64M of DVMA space).
         */

        size = ((0 - IOMMU_DVMA_BASE) / sc->sc_pagesize) * sizeof(iopte_t);
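        /*
         * Worked example (illustrative; assumes the standard 64MB DVMA
         * window ending at 0xffffffff, a 4KB IOMMU page and 4-byte PTEs):
         *   0 - IOMMU_DVMA_BASE = 0x04000000 (64MB of DVMA space)
         *   0x04000000 / 0x1000 = 0x4000   (16384 page table entries)
         *   0x4000 * sizeof(iopte_t) = 0x10000 (64KB of PTEs)
         * which matches the "64K for 64M" figure in the comment above.
         */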
        if (uvm_pglistalloc(size, vm_first_phys, vm_first_phys+vm_num_phys,
            size, 0, &mlist, 1, 0) != 0)
                panic("iommu_attach: no memory");

        va = uvm_km_valloc(kernel_map, size);
        if (va == 0)
                panic("iommu_attach: no memory");

        sc->sc_ptes = (iopte_t *)va;

        m = TAILQ_FIRST(&mlist);
        iopte_table_pa = VM_PAGE_TO_PHYS(m);

        /* Map the pages */
        for (; m != NULL; m = TAILQ_NEXT(m,pageq)) {
                paddr_t pa = VM_PAGE_TO_PHYS(m);
                pmap_kenter_pa(va, pa | PMAP_NC, VM_PROT_READ | VM_PROT_WRITE);
                va += NBPG;
        }
        pmap_update(pmap_kernel());

        /*
         * Copy entries from current IOMMU table.
         * XXX - Why do we need to do this?
         */
        iommu_copy_prom_entries(sc);

        /*
         * Now we can install our new pagetable into the IOMMU
         */
        sc->sc_range = 0 - IOMMU_DVMA_BASE;
        sc->sc_dvmabase = IOMMU_DVMA_BASE;

        /* calculate log2(sc->sc_range/16MB) */
        i = ffs(sc->sc_range/(1 << 24)) - 1;
        if ((1 << i) != (sc->sc_range/(1 << 24)))
                panic("iommu: bad range: %d", i);
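        /*
         * Illustrative check of the computation above (values assumed):
         * with a 64MB range, sc_range/(1<<24) == 4, ffs(4) - 1 == 2, and
         * (1 << 2) == 4, so i == 2 passes; a range that is not a
         * power-of-two multiple of 16MB would trip the panic.
         */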

        s = splhigh();
        IOMMU_FLUSHALL(sc);

        /* Load range and physical address of PTEs */
        sc->sc_reg->io_cr = (sc->sc_reg->io_cr & ~IOMMU_CTL_RANGE) |
            (i << IOMMU_CTL_RANGESHFT) | IOMMU_CTL_ME;
        sc->sc_reg->io_bar = (iopte_table_pa >> 4) & IOMMU_BAR_IBA;
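        /*
         * Descriptive note (added): io_cr keeps its other bits; the RANGE
         * field is replaced with the log2 value computed above and
         * IOMMU_CTL_ME (presumably the module-enable bit) is OR'ed in,
         * while io_bar is loaded with a scaled form of the page table's
         * physical base.  iommu_copy_prom_entries() below performs the
         * reverse decoding when it reads the PROM's registers.
         */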

        IOMMU_FLUSHALL(sc);
        splx(s);

        printf(": version 0x%x/0x%x, page-size %d, range %dMB\n",
            (sc->sc_reg->io_cr & IOMMU_CTL_VER) >> 24,
            (sc->sc_reg->io_cr & IOMMU_CTL_IMPL) >> 28,
            sc->sc_pagesize,
            sc->sc_range >> 20);

        sc->sc_dvmamap = extent_create("iommudvma",
            IOMMU_DVMA_BASE, IOMMU_DVMA_END,
            M_DEVBUF, 0, 0, EX_NOWAIT);
        if (sc->sc_dvmamap == NULL)
                panic("iommu: unable to allocate DVMA map");

        /*
         * If we are attaching an implicit iommu on JS1/OF we do not have
         * an iommu node to traverse; instead, mainbus_attach passed us
         * the sbus node in ma.ma_node.  Attach it as the only iommu child.
         */
        if (js1_implicit_iommu) {
                struct iommu_attach_args ia;
                struct openprom_addr sbus_iommu_reg = { 0, 0x10001000, 0x28 };

                bzero(&ia, sizeof ia);

                /* Propagate BUS & DMA tags */
                ia.iom_bustag = ma->ma_bustag;
                ia.iom_dmatag = &sc->sc_dmatag;

                ia.iom_name = "sbus";
                ia.iom_node = node;
                ia.iom_reg = &sbus_iommu_reg;
                ia.iom_nreg = 1;

                (void) config_found(&sc->sc_dev, (void *)&ia, iommu_print);
                return;
        }

        /*
         * Loop through ROM children (expect Sbus among them).
         */
        for (node = firstchild(node); node; node = nextsibling(node)) {
                struct iommu_attach_args ia;

                bzero(&ia, sizeof ia);
                ia.iom_name = PROM_getpropstring(node, "name");

                /* Propagate BUS & DMA tags */
                ia.iom_bustag = ma->ma_bustag;
                ia.iom_dmatag = &sc->sc_dmatag;

                ia.iom_node = node;

                ia.iom_reg = NULL;
                PROM_getprop(node, "reg", sizeof(struct openprom_addr),
                    &ia.iom_nreg, (void **)&ia.iom_reg);

                (void) config_found(&sc->sc_dev, (void *)&ia, iommu_print);
                if (ia.iom_reg != NULL)
                        free(ia.iom_reg, M_DEVBUF);
        }
#endif
}

#if defined(SUN4M)
static void
iommu_copy_prom_entries(sc)
        struct iommu_softc *sc;
{
        u_int pbase, pa;
        u_int range;
        iopte_t *tpte_p;
        u_int pagesz = sc->sc_pagesize;
        int use_ac = (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc);
        u_int mmupcr_save;

        /*
         * We read in the original table using MMU bypass and copy all
         * of its entries to the appropriate place in our new table,
         * even if the sizes are different.
         * This is pretty easy since we know DVMA ends at 0xffffffff.
         */

        range = (1 << 24) <<
            ((sc->sc_reg->io_cr & IOMMU_CTL_RANGE) >> IOMMU_CTL_RANGESHFT);

        pbase = (sc->sc_reg->io_bar & IOMMU_BAR_IBA) <<
            (14 - IOMMU_BAR_IBASHFT);
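        /*
         * Example of the decoding above (field value assumed for
         * illustration): a RANGE field of 2 yields range = 16MB << 2 =
         * 64MB; pbase likewise undoes the scaling applied when io_bar
         * was loaded in iommu_attach(), recovering the physical base of
         * the PROM's page table.
         */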

        if (use_ac) {
                /*
                 * Set MMU AC bit so we'll still read from the cache
                 * in by-pass mode.
                 */
                mmupcr_save = lda(SRMMU_PCR, ASI_SRMMU);
                sta(SRMMU_PCR, ASI_SRMMU, mmupcr_save | VIKING_PCR_AC);
        } else
                mmupcr_save = 0; /* XXX - avoid GCC `uninitialized' warning */

        /* Flush entire IOMMU TLB before messing with the in-memory tables */
        IOMMU_FLUSHALL(sc);

        /*
         * tpte_p = top of our PTE table
         * pa = top of current PTE table
         * Then work downwards and copy entries until we hit the bottom
         * of either table.
         */
        for (tpte_p = &sc->sc_ptes[((0 - IOMMU_DVMA_BASE)/pagesz) - 1],
             pa = (u_int)pbase + (range/pagesz - 1)*sizeof(iopte_t);
             tpte_p >= &sc->sc_ptes[0] && pa >= (u_int)pbase;
             tpte_p--, pa -= sizeof(iopte_t)) {

                *tpte_p = lda(pa, ASI_BYPASS);
        }

        if (use_ac) {
                /* restore mmu after bug-avoidance */
                sta(SRMMU_PCR, ASI_SRMMU, mmupcr_save);
        }
}
#endif

static void
iommu_enter(struct iommu_softc *sc, bus_addr_t dva, paddr_t pa)
{
        int pte;

        /* This routine relies on the fact that sc->sc_pagesize == PAGE_SIZE */

#ifdef DIAGNOSTIC
        if (dva < sc->sc_dvmabase)
                panic("iommu_enter: dva 0x%lx not in DVMA space", (long)dva);
#endif

        pte = atop(pa) << IOPTE_PPNSHFT;
        pte &= IOPTE_PPN;
        pte |= IOPTE_V | IOPTE_W | (has_iocache ? IOPTE_C : 0);
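        /*
         * Illustrative example of the PTE just built (values assumed, not
         * taken from this file; the shift and V/W bits are the same ones
         * the legacy #if 0 block below spells out): with 4KB pages and a
         * PPN shift of 8, pa == 0x12345000 gives atop(pa) == 0x12345 and
         * pte == 0x01234506 (PPN | IOPTE_W | IOPTE_V) when the I/O cache
         * is disabled; IOPTE_C is OR'ed in when it is enabled.
         */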
        sc->sc_ptes[atop(dva - sc->sc_dvmabase)] = pte;
        IOMMU_FLUSHPAGE(sc, dva);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 */
static void
iommu_remove(struct iommu_softc *sc, bus_addr_t dva, bus_size_t len)
{
        u_int pagesz = sc->sc_pagesize;
        bus_addr_t base = sc->sc_dvmabase;

#ifdef DEBUG
        if (dva < base)
                panic("iommu_remove: va 0x%lx not in DVMA space", (long)dva);
#endif

        while ((long)len > 0) {
#ifdef notyet
#ifdef DEBUG
                if ((sc->sc_ptes[atop(dva - base)] & IOPTE_V) == 0)
                        panic("iommu_remove: clearing invalid pte at dva 0x%lx",
                            (long)dva);
#endif
#endif
                sc->sc_ptes[atop(dva - base)] = 0;
                IOMMU_FLUSHPAGE(sc, dva);
                len -= pagesz;
                dva += pagesz;
        }
}

#if 0   /* These registers aren't there??? */
void
iommu_error()
{
        struct iommu_softc *sc = X;
        struct iommureg *iop = sc->sc_reg;

        printf("iommu: afsr 0x%x, afar 0x%x\n", iop->io_afsr, iop->io_afar);
        printf("iommu: mfsr 0x%x, mfar 0x%x\n", iop->io_mfsr, iop->io_mfar);
}
int
iommu_alloc(va, len)
        u_int va, len;
{
        struct iommu_softc *sc = X;
        int off, tva, iovaddr, pte;
        paddr_t pa;

        off = (int)va & PGOFSET;
        len = round_page(len + off);
        va -= off;

        if ((int)sc->sc_dvmacur + len > 0)
                sc->sc_dvmacur = sc->sc_dvmabase;

        iovaddr = tva = sc->sc_dvmacur;
        sc->sc_dvmacur += len;
        while (len) {
                (void) pmap_extract(pmap_kernel(), va, &pa);

#define IOMMU_PPNSHIFT  8
#define IOMMU_V         0x00000002
#define IOMMU_W         0x00000004

                pte = atop(pa) << IOMMU_PPNSHIFT;
                pte |= IOMMU_V | IOMMU_W;
                sta(sc->sc_ptes + atop(tva - sc->sc_dvmabase), ASI_BYPASS, pte);
                sc->sc_reg->io_flushpage = tva;
                len -= NBPG;
                va += NBPG;
                tva += NBPG;
        }
        return iovaddr + off;
}
#endif


/*
 * IOMMU DMA map functions.
 */
int
iommu_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
        bus_dma_tag_t t;
        bus_size_t size;
        int nsegments;
        bus_size_t maxsegsz;
        bus_size_t boundary;
        int flags;
        bus_dmamap_t *dmamp;
{
        struct iommu_softc *sc = t->_cookie;
        bus_dmamap_t map;
        int error;

        if ((error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
            boundary, flags, &map)) != 0)
                return (error);

        if ((flags & BUS_DMA_24BIT) != 0) {
                /* Limit this map to the range usable by `24-bit' devices */
                map->_dm_ex_start = D24_DVMA_BASE;
                map->_dm_ex_end = D24_DVMA_END;
        } else {
                /* Enable allocations from the entire map */
                map->_dm_ex_start = sc->sc_dvmamap->ex_start;
                map->_dm_ex_end = sc->sc_dvmamap->ex_end;
        }

        *dmamp = map;
        return (0);
}

/*
 * Internal routine to allocate space in the IOMMU map.
 */
int
iommu_dvma_alloc(sc, map, va, len, flags, dvap, sgsizep)
        struct iommu_softc *sc;
        bus_dmamap_t map;
        vaddr_t va;
        bus_size_t len;
        int flags;
        bus_addr_t *dvap;
        bus_size_t *sgsizep;
{
        bus_size_t sgsize;
        u_long align, voff, dvaddr;
        int s, error;
        int pagesz = PAGE_SIZE;

        /*
         * Remember page offset, then truncate the buffer address to
         * a page boundary.
         */
        voff = va & (pagesz - 1);
        va &= -pagesz;

        if (len > map->_dm_size)
                return (EINVAL);

        sgsize = (len + voff + pagesz - 1) & -pagesz;
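        /*
         * Worked example (illustrative values): for va == 0x00231234 and
         * len == 0x1800 with 4KB pages, voff == 0x234 and va is truncated
         * to 0x00231000; sgsize == (0x1800 + 0x234 + 0xfff) & ~0xfff ==
         * 0x2000, i.e. the two pages the buffer actually touches.
         */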
        align = dvma_cachealign ? dvma_cachealign : map->_dm_align;

        s = splhigh();
        error = extent_alloc_subregion1(sc->sc_dvmamap,
            map->_dm_ex_start, map->_dm_ex_end,
            sgsize, align, va & (align-1),
            map->_dm_boundary,
            (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
            &dvaddr);
        splx(s);
        *dvap = (bus_addr_t)dvaddr;
        *sgsizep = sgsize;
        return (error);
}

/*
 * Prepare buffer for DMA transfer.
 */
int
iommu_dmamap_load(t, map, buf, buflen, p, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        void *buf;
        bus_size_t buflen;
        struct proc *p;
        int flags;
{
        struct iommu_softc *sc = t->_cookie;
        bus_size_t sgsize;
        bus_addr_t dva;
        vaddr_t va = (vaddr_t)buf;
        int pagesz = PAGE_SIZE;
        pmap_t pmap;
        int error;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_nsegs = 0;

        /* Allocate IOMMU resources */
        if ((error = iommu_dvma_alloc(sc, map, va, buflen, flags,
            &dva, &sgsize)) != 0)
                return (error);

        cpuinfo.cache_flush(buf, buflen); /* XXX - move to bus_dma_sync? */

        /*
         * We always use just one segment.
         */
        map->dm_mapsize = buflen;
        map->dm_nsegs = 1;
        map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
        map->dm_segs[0].ds_len = buflen;
        map->dm_segs[0]._ds_sgsize = sgsize;
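        /*
         * Descriptive note (added): ds_addr carries the DVMA base returned
         * by iommu_dvma_alloc() plus the original sub-page offset, e.g. a
         * dva of 0xfd004000 and a buffer offset of 0x234 (assumed values)
         * yield ds_addr == 0xfd004234; _ds_sgsize keeps the page-rounded
         * length so iommu_dmamap_unload() can free the whole allocation.
         */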

        if (p != NULL)
                pmap = p->p_vmspace->vm_map.pmap;
        else
                pmap = pmap_kernel();

        for (; sgsize != 0; ) {
                paddr_t pa;
                /*
                 * Get the physical address for this page.
                 */
                (void) pmap_extract(pmap, va, &pa);

                iommu_enter(sc, dva, pa);

                dva += pagesz;
                va += pagesz;
                sgsize -= pagesz;
        }

        return (0);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
iommu_dmamap_load_mbuf(t, map, m, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        struct mbuf *m;
        int flags;
{

        panic("_bus_dmamap_load_mbuf: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
iommu_dmamap_load_uio(t, map, uio, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        struct uio *uio;
        int flags;
{

        panic("_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
iommu_dmamap_load_raw(t, map, segs, nsegs, size, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        bus_dma_segment_t *segs;
        int nsegs;
        bus_size_t size;
        int flags;
{
        struct iommu_softc *sc = t->_cookie;
        struct vm_page *m;
        paddr_t pa;
        bus_addr_t dva;
        bus_size_t sgsize;
        struct pglist *mlist;
        int pagesz = PAGE_SIZE;
        int error;

        map->dm_nsegs = 0;

        /* Allocate IOMMU resources */
        if ((error = iommu_dvma_alloc(sc, map, segs[0]._ds_va, size,
            flags, &dva, &sgsize)) != 0)
                return (error);

        /*
         * Note DVMA address in case bus_dmamem_map() is called later.
         * It can then ensure cache coherency by choosing a KVA that
         * is aligned to `ds_addr'.
         */
        segs[0].ds_addr = dva;
        segs[0].ds_len = size;

        map->dm_segs[0].ds_addr = dva;
        map->dm_segs[0].ds_len = size;
        map->dm_segs[0]._ds_sgsize = sgsize;

        /* Map physical pages into IOMMU */
        mlist = segs[0]._ds_mlist;
        for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
                if (sgsize == 0)
                        panic("iommu_dmamap_load_raw: size botch");
                pa = VM_PAGE_TO_PHYS(m);
                iommu_enter(sc, dva, pa);
                dva += pagesz;
                sgsize -= pagesz;
        }

        map->dm_nsegs = 1;
        map->dm_mapsize = size;

        return (0);
}

/*
 * Unload an IOMMU DMA map.
 */
void
iommu_dmamap_unload(t, map)
        bus_dma_tag_t t;
        bus_dmamap_t map;
{
        struct iommu_softc *sc = t->_cookie;
        bus_dma_segment_t *segs = map->dm_segs;
        int nsegs = map->dm_nsegs;
        bus_addr_t dva;
        bus_size_t len;
        int i, s, error;

        for (i = 0; i < nsegs; i++) {
                dva = segs[i].ds_addr & -PAGE_SIZE;
                len = segs[i]._ds_sgsize;
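                /*
                 * Descriptive note (added): masking ds_addr with -PAGE_SIZE
                 * drops the sub-page offset added in iommu_dmamap_load(),
                 * recovering the page-aligned DVMA start, while _ds_sgsize
                 * is the page-rounded length originally reserved, so the
                 * remove/free below exactly undoes the earlier allocation.
                 */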

                iommu_remove(sc, dva, len);
                s = splhigh();
                error = extent_free(sc->sc_dvmamap, dva, len, EX_NOWAIT);
                splx(s);
                if (error != 0)
                        printf("warning: %ld bytes of DVMA space lost\n",
                            (long)len);
        }

        /* Mark the mappings as invalid. */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
}

/*
 * DMA map synchronization.
 */
void
iommu_dmamap_sync(t, map, offset, len, ops)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        bus_addr_t offset;
        bus_size_t len;
        int ops;
{

        /*
         * XXX Should flush CPU write buffers.
         */
}

/*
 * Map DMA-safe memory.
 */
int
iommu_dmamem_map(t, segs, nsegs, size, kvap, flags)
        bus_dma_tag_t t;
        bus_dma_segment_t *segs;
        int nsegs;
        size_t size;
        caddr_t *kvap;
        int flags;
{
        struct vm_page *m;
        vaddr_t va;
        bus_addr_t addr;
        struct pglist *mlist;
        int cbit;
        u_long align;
        int pagesz = PAGE_SIZE;

        if (nsegs != 1)
                panic("iommu_dmamem_map: nsegs = %d", nsegs);

        cbit = has_iocache ? 0 : PMAP_NC;
        align = dvma_cachealign ? dvma_cachealign : pagesz;

        size = round_page(size);

        /*
         * In case the segment has already been loaded by
         * iommu_dmamap_load_raw(), find a region of kernel virtual
         * addresses that can accommodate our alignment requirements.
         */
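        /*
         * Descriptive note (added): the skew passed to
         * _bus_dma_valloc_skewed() is `segs[0].ds_addr & (align - 1)', so
         * the chosen KVA shares its low bits (modulo the cache alignment)
         * with the DVMA address recorded in iommu_dmamap_load_raw(); on a
         * virtually-indexed cache this keeps the two mappings from
         * aliasing into different cache lines.
         */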
        va = _bus_dma_valloc_skewed(size, 0, align,
            segs[0].ds_addr & (align - 1));
        if (va == 0)
                return (ENOMEM);

        segs[0]._ds_va = va;
        *kvap = (caddr_t)va;

        /*
         * Map the pages allocated in _bus_dmamem_alloc() to the
         * kernel virtual address space.
         */
        mlist = segs[0]._ds_mlist;
        for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {

                if (size == 0)
                        panic("iommu_dmamem_map: size botch");

                addr = VM_PAGE_TO_PHYS(m);
                pmap_kenter_pa(va, addr | cbit, VM_PROT_READ | VM_PROT_WRITE);
#if 0
                if (flags & BUS_DMA_COHERENT)
                        /* XXX */;
#endif
                va += pagesz;
                size -= pagesz;
        }
        pmap_update(pmap_kernel());

        return (0);
}

/*
 * mmap(2)'ing DMA-safe memory.
 */
paddr_t
iommu_dmamem_mmap(t, segs, nsegs, off, prot, flags)
        bus_dma_tag_t t;
        bus_dma_segment_t *segs;
        int nsegs;
        off_t off;
        int prot, flags;
{

        panic("_bus_dmamem_mmap: not implemented");
}