/*	$NetBSD: iommu.c,v 1.99 2021/08/07 16:19:05 thorpej Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1995	Paul Kranenburg
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by Paul Kranenburg.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iommu.c,v 1.99 2021/08/07 16:19:05 thorpej Exp $");

#include "opt_sparc_arch.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <sys/bus.h>
#include <machine/autoconf.h>
#include <machine/ctlreg.h>
#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/iommureg.h>
#include <sparc/sparc/iommuvar.h>

struct iommu_softc {
	struct iommureg	*sc_reg;
	u_int		sc_pagesize;
	u_int		sc_range;
	bus_addr_t	sc_dvmabase;
	iopte_t		*sc_ptes;
	int		sc_cachecoherent;
/*
 * Note: operations on the extent map are being protected with
 * splhigh(), since we cannot predict at which interrupt priority
 * our clients will run.
 */
	struct sparc_bus_dma_tag sc_dmatag;
	struct extent *sc_dvmamap;
};

/* autoconfiguration driver */
int	iommu_print(void *, const char *);
void	iommu_attach(device_t, device_t, void *);
int	iommu_match(device_t, cfdata_t, void *);

#if defined(SUN4M)
static void iommu_copy_prom_entries(struct iommu_softc *);
#endif

CFATTACH_DECL_NEW(iommu, sizeof(struct iommu_softc),
    iommu_match, iommu_attach, NULL, NULL);

/* IOMMU DMA map functions */
int	iommu_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *);
int	iommu_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	iommu_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	iommu_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	iommu_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	iommu_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	iommu_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int);

int	iommu_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
	    int, size_t, void **, int);
void	iommu_dmamem_unmap(bus_dma_tag_t, void *, size_t);
paddr_t	iommu_dmamem_mmap(bus_dma_tag_t, bus_dma_segment_t *,
	    int, off_t, int, int);
int	iommu_dvma_alloc(struct iommu_softc *, bus_dmamap_t, vaddr_t,
	    bus_size_t, int, bus_addr_t *, bus_size_t *);

/*
 * Print the location of some iommu-attached device (called just
 * before attaching that device).  If `iommu' is not NULL, the
 * device was found but not configured; print the iommu as well.
 * Return UNCONF (config_find ignores this if the device was configured).
 */
int
iommu_print(void *args, const char *iommu)
{
	struct iommu_attach_args *ia = args;

	if (iommu)
		aprint_normal("%s at %s", ia->iom_name, iommu);
	return (UNCONF);
}

int
iommu_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mainbus_attach_args *ma = aux;

	if (CPU_ISSUN4 || CPU_ISSUN4C)
		return (0);
	return (strcmp(cf->cf_name, ma->ma_name) == 0);
}

/*
 * Attach the iommu.
 */
void
iommu_attach(device_t parent, device_t self, void *aux)
{
#if defined(SUN4M)
	struct iommu_softc *sc = device_private(self);
	struct mainbus_attach_args *ma = aux;
	struct sparc_bus_dma_tag *dmat = &sc->sc_dmatag;
	bus_space_handle_t bh;
	int node;
	int js1_implicit_iommu;
	int i, s;
	u_int iopte_table_pa;
	struct pglist mlist;
	u_int size;
	struct vm_page *m;
	vaddr_t va;

	dmat->_cookie = sc;
	dmat->_dmamap_create = iommu_dmamap_create;
	dmat->_dmamap_destroy = _bus_dmamap_destroy;
	dmat->_dmamap_load = iommu_dmamap_load;
	dmat->_dmamap_load_mbuf = iommu_dmamap_load_mbuf;
	dmat->_dmamap_load_uio = iommu_dmamap_load_uio;
	dmat->_dmamap_load_raw = iommu_dmamap_load_raw;
	dmat->_dmamap_unload = iommu_dmamap_unload;
	dmat->_dmamap_sync = iommu_dmamap_sync;

	dmat->_dmamem_alloc = _bus_dmamem_alloc;
	dmat->_dmamem_free = _bus_dmamem_free;
	dmat->_dmamem_map = iommu_dmamem_map;
	dmat->_dmamem_unmap = _bus_dmamem_unmap;
	dmat->_dmamem_mmap = iommu_dmamem_mmap;

	/*
	 * The JS1/OF device tree does not have an iommu node; the sbus
	 * node is directly under the root.  mainbus_attach detects this
	 * and calls us with the sbus node instead, so that we can attach
	 * an implicit iommu and attach that sbus node under it.
	 */
	node = ma->ma_node;
	if (strcmp(prom_getpropstring(node, "name"), "sbus") == 0)
		js1_implicit_iommu = 1;
	else
		js1_implicit_iommu = 0;

	/*
	 * Map registers into our space. The PROM may have done this
	 * already, but I feel better if we have our own copy. Plus, the
	 * prom doesn't map the entire register set.
	 *
	 * XXX struct iommureg is bigger than ra->ra_len; what are the
	 * other fields for?
	 */
	if (bus_space_map(ma->ma_bustag, ma->ma_paddr,
	    sizeof(struct iommureg), 0, &bh) != 0) {
		printf("iommu_attach: cannot map registers\n");
		return;
	}
	sc->sc_reg = (struct iommureg *)bh;

	sc->sc_cachecoherent = js1_implicit_iommu ? 0
	    : node_has_property(node, "cache-coherence?");
	if (CACHEINFO.c_enabled == 0)	/* XXX - is this correct? */
		sc->sc_cachecoherent = 0;

	sc->sc_pagesize = js1_implicit_iommu ? PAGE_SIZE
	    : prom_getpropint(node, "page-size", PAGE_SIZE);

	/*
	 * Allocate memory for I/O pagetables.
	 * This takes 64K of contiguous physical memory to map 64M of
	 * DVMA space (starting at IOMMU_DVMA_BASE).
	 * The table must be aligned on a (-IOMMU_DVMA_BASE/pagesize)
	 * boundary (i.e. 64K for 64M of DVMA space).
	 */
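	/*
	 * Worked example (a sketch, assuming the usual 64MB DVMA window
	 * and 4KB pages, and that iopte_t is 4 bytes wide):
	 * (0 - IOMMU_DVMA_BASE) = 64MB of DVMA space, 64MB / 4KB = 16384
	 * page table entries, and 16384 * 4 = 64KB of page table, which
	 * is what the `size' computation below yields.
	 */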

	size = ((0 - IOMMU_DVMA_BASE) / sc->sc_pagesize) * sizeof(iopte_t);
	if (uvm_pglistalloc(size, vm_first_phys, vm_first_phys+vm_num_phys,
	    size, 0, &mlist, 1, 0) != 0)
		panic("iommu_attach: no memory");

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
	if (va == 0)
		panic("iommu_attach: no memory");

	sc->sc_ptes = (iopte_t *)va;

	m = TAILQ_FIRST(&mlist);
	iopte_table_pa = VM_PAGE_TO_PHYS(m);

	/* Map the pages */
	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		paddr_t pa = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, pa | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	/*
	 * Copy entries from current IOMMU table.
	 * XXX - Why do we need to do this?
	 */
	iommu_copy_prom_entries(sc);

	/*
	 * Now we can install our new pagetable into the IOMMU
	 */
	sc->sc_range = 0 - IOMMU_DVMA_BASE;
	sc->sc_dvmabase = IOMMU_DVMA_BASE;

	/* calculate log2(sc->sc_range/16MB) */
	i = ffs(sc->sc_range/(1 << 24)) - 1;
	if ((1 << i) != (sc->sc_range/(1 << 24)))
		panic("iommu: bad range: %d", i);
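	/*
	 * Example of the check above: with a 64MB range, sc_range/16MB = 4,
	 * so i = ffs(4) - 1 = 2 and (1 << 2) == 4 holds; a range that is
	 * not a power-of-two multiple of 16MB fails it and panics.
	 */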

	s = splhigh();
	IOMMU_FLUSHALL(sc);

	/* Load range and physical address of PTEs */
	sc->sc_reg->io_cr = (sc->sc_reg->io_cr & ~IOMMU_CTL_RANGE) |
	    (i << IOMMU_CTL_RANGESHFT) | IOMMU_CTL_ME;
	sc->sc_reg->io_bar = (iopte_table_pa >> 4) & IOMMU_BAR_IBA;

	IOMMU_FLUSHALL(sc);
	splx(s);

	printf(": version 0x%x/0x%x, page-size %d, range %dMB\n",
	    (sc->sc_reg->io_cr & IOMMU_CTL_VER) >> 24,
	    (sc->sc_reg->io_cr & IOMMU_CTL_IMPL) >> 28,
	    sc->sc_pagesize,
	    sc->sc_range >> 20);

	sc->sc_dvmamap = extent_create("iommudvma",
	    IOMMU_DVMA_BASE, IOMMU_DVMA_END,
	    0, 0, EX_WAITOK);

	/*
	 * If we are attaching an implicit iommu on JS1/OF, we do not have
	 * an iommu node to traverse; instead, mainbus_attach passed us the
	 * sbus node in ma.ma_node.  Attach it as the only iommu child.
	 */
	if (js1_implicit_iommu) {
		struct iommu_attach_args ia;
		struct openprom_addr sbus_iommu_reg = { 0, 0x10001000, 0x28 };

		memset(&ia, 0, sizeof ia);

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &sc->sc_dmatag;

		ia.iom_name = "sbus";
		ia.iom_node = node;
		ia.iom_reg = &sbus_iommu_reg;
		ia.iom_nreg = 1;

		config_found(self, (void *)&ia, iommu_print,
		    CFARGS(.devhandle = prom_node_to_devhandle(node)));
		return;
	}

	/*
	 * Loop through ROM children (expect Sbus among them).
	 */
	for (node = firstchild(node); node; node = nextsibling(node)) {
		struct iommu_attach_args ia;

		memset(&ia, 0, sizeof ia);
		ia.iom_name = prom_getpropstring(node, "name");

		/* Propagate BUS & DMA tags */
		ia.iom_bustag = ma->ma_bustag;
		ia.iom_dmatag = &sc->sc_dmatag;

		ia.iom_node = node;

		ia.iom_reg = NULL;
		prom_getprop(node, "reg", sizeof(struct openprom_addr),
		    &ia.iom_nreg, &ia.iom_reg);

		config_found(self, (void *)&ia, iommu_print,
		    CFARGS(.devhandle = prom_node_to_devhandle(node)));
		if (ia.iom_reg != NULL)
			free(ia.iom_reg, M_DEVBUF);
	}
#endif
}

#if defined(SUN4M)
static void
iommu_copy_prom_entries(struct iommu_softc *sc)
{
	u_int pbase, pa;
	u_int range;
	iopte_t *tpte_p;
	u_int pagesz = sc->sc_pagesize;
	int use_ac = (cpuinfo.cpu_impl == 4 && cpuinfo.mxcc);
	u_int mmupcr_save;

	/*
	 * We read in the original table using MMU bypass and copy all
	 * of its entries to the appropriate place in our new table,
	 * even if the sizes are different.
	 * This is pretty easy since we know DVMA ends at 0xffffffff.
	 */

	range = (1 << 24) <<
	    ((sc->sc_reg->io_cr & IOMMU_CTL_RANGE) >> IOMMU_CTL_RANGESHFT);

	pbase = (sc->sc_reg->io_bar & IOMMU_BAR_IBA) <<
	    (14 - IOMMU_BAR_IBASHFT);
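	/*
	 * Example: a RANGE field of 2 decodes to 16MB << 2 = 64MB,
	 * the inverse of the log2 encoding computed in iommu_attach().
	 */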

	if (use_ac) {
		/*
		 * Set MMU AC bit so we'll still read from the cache
		 * in by-pass mode.
		 */
		mmupcr_save = lda(SRMMU_PCR, ASI_SRMMU);
		sta(SRMMU_PCR, ASI_SRMMU, mmupcr_save | VIKING_PCR_AC);
	} else
		mmupcr_save = 0;	/* XXX - avoid GCC `uninitialized' warning */

	/* Flush entire IOMMU TLB before messing with the in-memory tables */
	IOMMU_FLUSHALL(sc);

	/*
	 * tpte_p = top of our PTE table
	 * pa = top of current PTE table
	 * Then work downwards and copy entries until we hit the bottom
	 * of either table.
	 */
	for (tpte_p = &sc->sc_ptes[((0 - IOMMU_DVMA_BASE)/pagesz) - 1],
	     pa = (u_int)pbase + (range/pagesz - 1)*sizeof(iopte_t);
	     tpte_p >= &sc->sc_ptes[0] && pa >= (u_int)pbase;
	     tpte_p--, pa -= sizeof(iopte_t)) {

		*tpte_p = lda(pa, ASI_BYPASS);
	}

	if (use_ac) {
		/* restore mmu after bug-avoidance */
		sta(SRMMU_PCR, ASI_SRMMU, mmupcr_save);
	}
}
#endif

static void
iommu_enter(struct iommu_softc *sc, bus_addr_t dva, paddr_t pa)
{
	int pte;

	/* This routine relies on the fact that sc->sc_pagesize == PAGE_SIZE */

#ifdef DIAGNOSTIC
	if (dva < sc->sc_dvmabase)
		panic("iommu_enter: dva 0x%lx not in DVMA space", (long)dva);
#endif

	pte = atop(pa) << IOPTE_PPNSHFT;
	pte &= IOPTE_PPN;
	pte |= IOPTE_V | IOPTE_W | (sc->sc_cachecoherent ? IOPTE_C : 0);
	sc->sc_ptes[atop(dva - sc->sc_dvmabase)] = pte;
	IOMMU_FLUSHPAGE(sc, dva);
}
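
/*
 * A sketch of what iommu_enter() composes, assuming 4KB pages and the
 * IOPTE_* layout from iommureg.h: for pa = 0x12345000, atop(pa) = 0x12345
 * is shifted into the PPN field, masked to the field width with IOPTE_PPN,
 * and OR'ed with the valid (IOPTE_V), writable (IOPTE_W) and, on
 * cache-coherent systems, cacheable (IOPTE_C) bits; the result is stored
 * at the table index derived from the DVMA address.
 */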

/*
 * iommu_remove: removes mappings created by iommu_enter
 */
static void
iommu_remove(struct iommu_softc *sc, bus_addr_t dva, bus_size_t len)
{
	u_int pagesz = sc->sc_pagesize;
	bus_addr_t base = sc->sc_dvmabase;

#ifdef DEBUG
	if (dva < base)
		panic("iommu_remove: va 0x%lx not in DVMA space", (long)dva);
#endif

	while ((long)len > 0) {
#ifdef notyet
#ifdef DEBUG
		if ((sc->sc_ptes[atop(dva - base)] & IOPTE_V) == 0)
			panic("iommu_remove: clearing invalid pte at dva 0x%lx",
			    (long)dva);
#endif
#endif
		sc->sc_ptes[atop(dva - base)] = 0;
		IOMMU_FLUSHPAGE(sc, dva);
		len -= pagesz;
		dva += pagesz;
	}
}

#if 0	/* These registers aren't there??? */
void
iommu_error(void)
{
	struct iommu_softc *sc = X;
	struct iommureg *iop = sc->sc_reg;

	printf("iommu: afsr 0x%x, afar 0x%x\n", iop->io_afsr, iop->io_afar);
	printf("iommu: mfsr 0x%x, mfar 0x%x\n", iop->io_mfsr, iop->io_mfar);
}

int
iommu_alloc(u_int va, u_int len)
{
	struct iommu_softc *sc = X;
	int off, tva, iovaddr, pte;
	paddr_t pa;

	off = (int)va & PGOFSET;
	len = round_page(len + off);
	va -= off;

	if ((int)sc->sc_dvmacur + len > 0)
		sc->sc_dvmacur = sc->sc_dvmabase;

	iovaddr = tva = sc->sc_dvmacur;
	sc->sc_dvmacur += len;
	while (len) {
		(void) pmap_extract(pmap_kernel(), va, &pa);

#define IOMMU_PPNSHIFT	8
#define IOMMU_V		0x00000002
#define IOMMU_W		0x00000004

		pte = atop(pa) << IOMMU_PPNSHIFT;
		pte |= IOMMU_V | IOMMU_W;
		sta(sc->sc_ptes + atop(tva - sc->sc_dvmabase), ASI_BYPASS, pte);
		sc->sc_reg->io_flushpage = tva;
		len -= PAGE_SIZE;
		va += PAGE_SIZE;
		tva += PAGE_SIZE;
	}
	return iovaddr + off;
}
#endif


/*
 * IOMMU DMA map functions.
 */
int
iommu_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
	    bus_size_t maxsegsz, bus_size_t boundary, int flags,
	    bus_dmamap_t *dmamp)
{
	struct iommu_softc *sc = t->_cookie;
	bus_dmamap_t map;
	int error;

	if ((error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
	    boundary, flags, &map)) != 0)
		return (error);

	if ((flags & BUS_DMA_24BIT) != 0) {
		/* Limit this map to the range usable by `24-bit' devices */
		map->_dm_ex_start = D24_DVMA_BASE;
		map->_dm_ex_end = D24_DVMA_END;
	} else {
		/* Enable allocations from the entire map */
		map->_dm_ex_start = sc->sc_dvmamap->ex_start;
		map->_dm_ex_end = sc->sc_dvmamap->ex_end;
	}

	*dmamp = map;
	return (0);
}
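
/*
 * Hypothetical driver-side usage (not from this file): a device with only
 * 24 DVMA address lines would pass BUS_DMA_24BIT at map creation time so
 * that all subsequent allocations fall within the D24 range:
 *
 *	error = bus_dmamap_create(sc->sc_dmatag, MAXPHYS, 1, MAXPHYS,
 *	    0, BUS_DMA_NOWAIT | BUS_DMA_24BIT, &sc->sc_dmamap);
 */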

/*
 * Internal routine to allocate space in the IOMMU map.
 */
int
iommu_dvma_alloc(struct iommu_softc *sc, bus_dmamap_t map,
	    vaddr_t va, bus_size_t len, int flags,
	    bus_addr_t *dvap, bus_size_t *sgsizep)
{
	bus_size_t sgsize;
	u_long align, voff, dvaddr;
	int s, error;
	int pagesz = PAGE_SIZE;

	/*
	 * Remember page offset, then truncate the buffer address to
	 * a page boundary.
	 */
	voff = va & (pagesz - 1);
	va &= -pagesz;

	if (len > map->_dm_size)
		return (EINVAL);

	sgsize = (len + voff + pagesz - 1) & -pagesz;
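	/*
	 * Example: for va = 0x20234 and len = 0x1800 with 4KB pages,
	 * voff = 0x234, va was truncated to 0x20000 above, and sgsize
	 * rounds 0x234 + 0x1800 = 0x1a34 up to 0x2000 (two pages).
	 */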
	align = dvma_cachealign ? dvma_cachealign : map->_dm_align;

	s = splhigh();
	error = extent_alloc_subregion1(sc->sc_dvmamap,
	    map->_dm_ex_start, map->_dm_ex_end,
	    sgsize, align, va & (align - 1),
	    map->_dm_boundary,
	    (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
	    &dvaddr);
	splx(s);
	*dvap = (bus_addr_t)dvaddr;
	*sgsizep = sgsize;
	return (error);
}

/*
 * Prepare buffer for DMA transfer.
 */
int
iommu_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
	    void *buf, bus_size_t buflen,
	    struct proc *p, int flags)
{
	struct iommu_softc *sc = t->_cookie;
	bus_size_t sgsize;
	bus_addr_t dva;
	vaddr_t va = (vaddr_t)buf;
	int pagesz = PAGE_SIZE;
	pmap_t pmap;
	int error;

	/*
	 * Make sure that on an error condition we return "no valid
	 * mappings".
	 */
	map->dm_nsegs = 0;

	/* Allocate IOMMU resources */
	if ((error = iommu_dvma_alloc(sc, map, va, buflen, flags,
	    &dva, &sgsize)) != 0)
		return (error);

	if ((sc->sc_cachecoherent == 0) ||
	    (curcpu()->cacheinfo.ec_totalsize == 0))
		cache_flush(buf, buflen);	/* XXX - move to bus_dma_sync? */

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_sgsize = sgsize;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; sgsize != 0; ) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		if (!pmap_extract(pmap, va, &pa)) {
			iommu_dmamap_unload(t, map);
			return (EFAULT);
		}

		iommu_enter(sc, dva, pa);

		dva += pagesz;
		va += pagesz;
		sgsize -= pagesz;
	}

	return (0);
}
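
/*
 * Typical use of the load/unload cycle from a driver (a hypothetical
 * sketch, error handling omitted): the map is created once with
 * bus_dmamap_create(), then loaded around each transfer:
 *
 *	bus_dmamap_load(sc->sc_dmatag, sc->sc_dmamap, buf, len,
 *	    NULL, BUS_DMA_NOWAIT);
 *	... program the device and run the DMA ...
 *	bus_dmamap_unload(sc->sc_dmatag, sc->sc_dmamap);
 */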

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
iommu_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
	    struct mbuf *m, int flags)
{

	panic("_bus_dmamap_load_mbuf: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
iommu_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
	    struct uio *uio, int flags)
{

	panic("_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
iommu_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
	    bus_dma_segment_t *segs, int nsegs, bus_size_t size,
	    int flags)
{
	struct iommu_softc *sc = t->_cookie;
	struct vm_page *m;
	paddr_t pa;
	bus_addr_t dva;
	bus_size_t sgsize;
	struct pglist *mlist;
	int pagesz = PAGE_SIZE;
	int error;

	map->dm_nsegs = 0;

	/* Allocate IOMMU resources */
	if ((error = iommu_dvma_alloc(sc, map, segs[0]._ds_va, size,
	    flags, &dva, &sgsize)) != 0)
		return (error);

	/*
	 * Note DVMA address in case bus_dmamem_map() is called later.
	 * It can then ensure cache coherency by choosing a KVA that
	 * is aligned to `ds_addr'.
	 */
	segs[0].ds_addr = dva;
	segs[0].ds_len = size;

	map->dm_segs[0].ds_addr = dva;
	map->dm_segs[0].ds_len = size;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/* Map physical pages into IOMMU */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(m);
		iommu_enter(sc, dva, pa);
		dva += pagesz;
		sgsize -= pagesz;
	}

	map->dm_nsegs = 1;
	map->dm_mapsize = size;

	return (0);
}

/*
 * Unload an IOMMU DMA map.
 */
void
iommu_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct iommu_softc *sc = t->_cookie;
	bus_dma_segment_t *segs = map->dm_segs;
	int nsegs = map->dm_nsegs;
	bus_addr_t dva;
	bus_size_t len;
	int i, s, error;

	for (i = 0; i < nsegs; i++) {
		dva = segs[i].ds_addr & -PAGE_SIZE;
		len = segs[i]._ds_sgsize;

		iommu_remove(sc, dva, len);
		s = splhigh();
		error = extent_free(sc->sc_dvmamap, dva, len, EX_NOWAIT);
		splx(s);
		if (error != 0)
			printf("warning: %ld bytes of DVMA space lost\n",
			    (long)len);
	}

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * DMA map synchronization.
 */
void
iommu_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
	    bus_addr_t offset, bus_size_t len, int ops)
{

	/*
	 * XXX Should flush CPU write buffers.
	 */
}

/*
 * Map DMA-safe memory.
 */
int
iommu_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	    size_t size, void **kvap, int flags)
{
	struct iommu_softc *sc = t->_cookie;
	struct vm_page *m;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *mlist;
	int cbit;
	u_long align;
	int pagesz = PAGE_SIZE;

	if (nsegs != 1)
		panic("iommu_dmamem_map: nsegs = %d", nsegs);

	cbit = sc->sc_cachecoherent ? 0 : PMAP_NC;
	align = dvma_cachealign ? dvma_cachealign : pagesz;

	size = round_page(size);

	/*
	 * In case the segment has already been loaded by
	 * iommu_dmamap_load_raw(), find a region of kernel virtual
	 * addresses that can accommodate our alignment requirements.
	 */
	va = _bus_dma_valloc_skewed(size, 0, align,
	    segs[0].ds_addr & (align - 1));
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (void *)va;

	/*
	 * Map the pages allocated in _bus_dmamem_alloc() to the
	 * kernel virtual address space.
	 */
	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {

		if (size == 0)
			panic("iommu_dmamem_map: size botch");

		addr = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, addr | cbit,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
#if 0
		if (flags & BUS_DMA_COHERENT)
			/* XXX */;
#endif
		va += pagesz;
		size -= pagesz;
	}
	pmap_update(pmap_kernel());

	return (0);
}
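
/*
 * The usual allocation sequence that leads here (a sketch of the standard
 * bus_dma(9) calls, error handling omitted):
 *
 *	bus_dmamem_alloc(t, size, 0, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	bus_dmamem_map(t, &seg, rseg, size, &kva, BUS_DMA_NOWAIT);
 *
 * iommu_dmamem_map() picks a KVA whose cache-alignment skew matches the
 * DVMA address recorded in `ds_addr' by iommu_dmamap_load_raw().
 */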

void
iommu_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PAGE_MASK)
		panic("iommu_dmamem_unmap");
#endif

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_unmap(kernel_map, (vaddr_t)kva, (vaddr_t)kva + size);
}


/*
 * mmap(2)'ing DMA-safe memory.
 */
paddr_t
iommu_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	    off_t off, int prot, int flags)
{

	panic("_bus_dmamem_mmap: not implemented");
}