/*	$NetBSD: iommu.c,v 1.110 2015/09/06 23:47:09 nakayama Exp $	*/

/*
 * Copyright (c) 1999, 2000 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001, 2002 Eduardo Horvath
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * UltraSPARC IOMMU support; used by both the sbus and pci code.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iommu.c,v 1.110 2015/09/06 23:47:09 nakayama Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

#include <sys/bus.h>
#include <sparc64/dev/iommureg.h>
#include <sparc64/dev/iommuvar.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/hypervisor.h>

#ifdef DEBUG
#define IDB_BUSDMA	0x1
#define IDB_IOMMU	0x2
#define IDB_INFO	0x4
#define IDB_SYNC	0x8
int iommudebug = 0x0;
#define DPRINTF(l, s)	do { if (iommudebug & l) printf s; } while (0)
#define IOTTE_DEBUG(n)	(n)
#else
#define DPRINTF(l, s)
#define IOTTE_DEBUG(n)	0
#endif
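/*
 * Note: iommudebug is a plain global, so the IDB_* trace bits can be
 * enabled at run time by patching the variable (for instance from ddb)
 * rather than rebuilding with a different initial value.
 */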

#define iommu_strbuf_flush(i, v)	do {				\
	if ((i)->sb_flush)						\
		bus_space_write_8((i)->sb_is->is_bustag, (i)->sb_sb,	\
			STRBUFREG(strbuf_pgflush), (v));		\
	} while (0)
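
/*
 * The macro above only touches the hardware when sb_flush is non-NULL;
 * iommu_reset() below clears sb_flush for controllers whose streaming
 * buffers are absent or unusable, which turns these flushes into no-ops.
 */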

static	int iommu_strbuf_flush_done(struct strbuf_ctl *);
static	void _iommu_dvmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	bus_size_t, int);
static	void iommu_enter_sun4u(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags);
static	void iommu_enter_sun4v(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags);
static	void iommu_remove_sun4u(struct iommu_state *is, vaddr_t va, size_t len);
static	void iommu_remove_sun4v(struct iommu_state *is, vaddr_t va, size_t len);

/*
 * initialise the UltraSPARC IOMMU (SBUS or PCI):
 *	- allocate and setup the iotsb.
 *	- enable the IOMMU
 *	- initialise the streaming buffers (if they exist)
 *	- create a private DVMA map.
 */
void
iommu_init(char *name, struct iommu_state *is, int tsbsize, uint32_t iovabase)
{
	psize_t size;
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;
	struct pglist pglist;

	DPRINTF(IDB_INFO, ("iommu_init: tsbsize %x iovabase %x\n", tsbsize, iovabase));

	/*
	 * Setup the iommu.
	 *
	 * The sun4u iommu is part of the SBUS or PCI controller so we
	 * will deal with it here.
	 *
	 * For sysio and psycho/psycho+ the IOMMU address space always ends at
	 * 0xffffe000, but the starting address depends on the size of the
	 * map.  The map size is 1024 * 2 ^ is->is_tsbsize entries, where each
	 * entry is 8 bytes.  The start of the map can be calculated by
	 * (0xffffe000 << (8 + is->is_tsbsize)).
	 *
	 * But sabre and hummingbird use a different scheme that seems to
	 * be hard-wired, so we read the start and size from the PROM and
	 * just use those values.
	 */
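	/*
	 * To make the sizing above concrete: each IOTSB entry maps one
	 * 8KB page, so a TSB of 1024 * 2 ^ is->is_tsbsize entries covers
	 * (8MB << is->is_tsbsize) of DVMA space.  E.g. is_tsbsize == 3
	 * gives 8192 entries (a 64KB table) covering 64MB of DVMA space.
	 */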
	if (strncmp(name, "pyro", 4) == 0) {
		is->is_cr = IOMMUREG_READ(is, iommu_cr);
		is->is_cr &= ~IOMMUCR_FIRE_BE;
		is->is_cr |= (IOMMUCR_FIRE_SE | IOMMUCR_FIRE_CM_EN |
		    IOMMUCR_FIRE_TE);
	} else
		is->is_cr = IOMMUCR_EN;
	is->is_tsbsize = tsbsize;
	if (iovabase == -1) {
		is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize);
		is->is_dvmaend = IOTSB_VEND - 1;
	} else {
		is->is_dvmabase = iovabase;
		is->is_dvmaend = iovabase + IOTSB_VSIZE(tsbsize) - 1;
	}

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */

	size = PAGE_SIZE << is->is_tsbsize;
	if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
		(paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0)
		panic("iommu_init: no memory");

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
	if (va == 0)
		panic("iommu_init: no memory");
	is->is_tsb = (int64_t *)va;

	is->is_ptsb = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));

	/* Map the pages */
	TAILQ_FOREACH(pg, &pglist, pageq.queue) {
		pa = VM_PAGE_TO_PHYS(pg);
		pmap_kenter_pa(va, pa | PMAP_NVC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	memset(is->is_tsb, 0, size);

#ifdef DEBUG
	if (iommudebug & IDB_INFO)
	{
		/* Probe the iommu */
		if (!CPU_ISSUN4V) {
			printf("iommu cr=%llx tsb=%llx\n",
			    (unsigned long long)bus_space_read_8(is->is_bustag,
				is->is_iommu,
				offsetof(struct iommureg, iommu_cr)),
			    (unsigned long long)bus_space_read_8(is->is_bustag,
				is->is_iommu,
				offsetof(struct iommureg, iommu_tsb)));
			printf("TSB base %p phys %llx\n", (void *)is->is_tsb,
			    (unsigned long long)is->is_ptsb);
			delay(1000000); /* 1 s */
		}
	}
#endif

	/*
	 * Now that all the hardware is working we need to allocate a dvma map.
	 */
	aprint_debug("DVMA map: %x to %x\n",
	    (unsigned int)is->is_dvmabase,
	    (unsigned int)is->is_dvmaend);
	aprint_debug("IOTSB: %llx to %llx\n",
	    (unsigned long long)is->is_ptsb,
	    (unsigned long long)(is->is_ptsb + size - 1));
	is->is_dvmamap = extent_create(name,
	    is->is_dvmabase, is->is_dvmaend,
	    0, 0, EX_NOWAIT);
	if (!is->is_dvmamap)
		panic("iommu_init: extent_create() failed");

	mutex_init(&is->is_lock, MUTEX_DEFAULT, IPL_HIGH);

	/*
	 * Set the TSB size.  The relevant bits were moved to the TSB
	 * base register in the PCIe host bridges.
	 */
	if (is->is_flags & IOMMU_TSBSIZE_IN_PTSB)
		is->is_ptsb |= is->is_tsbsize;
	else
		is->is_cr |= (is->is_tsbsize << 16);

	/*
	 * now actually start up the IOMMU
	 */
	iommu_reset(is);
}
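
/*
 * A minimal sketch of how a bus attachment might call iommu_init()
 * (hypothetical softc fields; the real callers are the sbus and PCI
 * host bridge drivers):
 *
 *	sc->sc_is.is_bustag = ma->ma_bustag;	-- bus space tag
 *	sc->sc_is.is_iommu = ih;		-- IOMMU register handle
 *	iommu_init("sbus", &sc->sc_is, 0, -1);	-- tsbsize 0, default base
 */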

/*
 * Streaming buffers don't exist on the UltraSPARC IIi; we should have
 * detected that already and disabled them.  If not, we will notice that
 * they aren't there when the STRBUF_EN bit does not remain set.
 */
void
iommu_reset(struct iommu_state *is)
{
	int i;
	struct strbuf_ctl *sb;

	if (CPU_ISSUN4V)
		return;

	IOMMUREG_WRITE(is, iommu_tsb, is->is_ptsb);

	/* Enable IOMMU in diagnostic mode */
	IOMMUREG_WRITE(is, iommu_cr, is->is_cr|IOMMUCR_DE);

	for (i = 0; i < 2; i++) {
		if ((sb = is->is_sb[i])) {

			/* Enable diagnostics mode? */
			bus_space_write_8(is->is_bustag, is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl), STRBUF_EN);

			membar_Lookaside();

			/* No streaming buffers? Disable them */
			if (bus_space_read_8(is->is_bustag,
			    is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl)) == 0) {
				is->is_sb[i]->sb_flush = NULL;
			} else {

				/*
				 * locate the pa of the flush buffer.
				 */
				if (pmap_extract(pmap_kernel(),
				    (vaddr_t)is->is_sb[i]->sb_flush,
				    &is->is_sb[i]->sb_flushpa) == FALSE)
					is->is_sb[i]->sb_flush = NULL;
			}
		}
	}

	if (is->is_flags & IOMMU_FLUSH_CACHE)
		IOMMUREG_WRITE(is, iommu_cache_invalidate, -1ULL);
}

/*
 * Here are the iommu control routines.
 */

void
iommu_enter(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags)
{
	DPRINTF(IDB_IOMMU, ("iommu_enter: va %lx pa %lx flags %x\n",
	    va, (long)pa, flags));
	if (!CPU_ISSUN4V)
		iommu_enter_sun4u(sb, va, pa, flags);
	else
		iommu_enter_sun4v(sb, va, pa, flags);
}


void
iommu_enter_sun4u(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags)
{
	struct iommu_state *is = sb->sb_is;
	int strbuf = (flags & BUS_DMA_STREAMING);
	int64_t tte;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_enter: va %#lx not in DVMA space", va);
#endif

	/* Is the streamcache flush really needed? */
	if (sb->sb_flush)
		iommu_strbuf_flush(sb, va);
	else
		/* If we can't flush the strbuf don't enable it. */
		strbuf = 0;

	tte = MAKEIOTTE(pa, !(flags & BUS_DMA_NOWRITE),
	    !(flags & BUS_DMA_NOCACHE), (strbuf));
#ifdef DEBUG
	tte |= (flags & 0xff000LL)<<(4*8);
#endif

	is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] = tte;
	bus_space_write_8(is->is_bustag, is->is_iommu,
	    IOMMUREG(iommu_flush), va);
	DPRINTF(IDB_IOMMU, ("iommu_enter: slot %d va %lx pa %lx "
	    "TSB[%lx]@%p=%lx\n", (int)IOTSBSLOT(va,is->is_tsbsize),
	    va, (long)pa, (u_long)IOTSBSLOT(va,is->is_tsbsize),
	    (void *)(u_long)&is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
	    (u_long)tte));
}

void
iommu_enter_sun4v(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags)
{
	struct iommu_state *is = sb->sb_is;
	u_int64_t tsbid = IOTSBSLOT(va, is->is_tsbsize);
	paddr_t page_list[1], addr;
	u_int64_t attr, nmapped;
	int err;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || (va + PAGE_MASK) > is->is_dvmaend)
		panic("viommu_enter: va %#lx not in DVMA space", va);
#endif

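	/*
	 * Start with both directions enabled and strip what the caller's
	 * bus_dma direction hints rule out: BUS_DMA_READ marks a
	 * device-to-memory map, so the device never needs read access to
	 * memory through it, and BUS_DMA_WRITE is the converse.
	 */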
	attr = PCI_MAP_ATTR_READ | PCI_MAP_ATTR_WRITE;
	if (flags & BUS_DMA_READ)
		attr &= ~PCI_MAP_ATTR_READ;
	if (flags & BUS_DMA_WRITE)
		attr &= ~PCI_MAP_ATTR_WRITE;

	page_list[0] = trunc_page(pa);
	if (!pmap_extract(pmap_kernel(), (vaddr_t)page_list, &addr))
		panic("viommu_enter: pmap_extract failed");
	err = hv_pci_iommu_map(is->is_devhandle, tsbid, 1, attr,
	    addr, &nmapped);
	if (err != H_EOK || nmapped != 1)
		panic("hv_pci_iommu_map: err=%d, nmapped=%lu", err, (long unsigned int)nmapped);
}

/*
 * Find the value of a DVMA address (debug routine).
 */
paddr_t
iommu_extract(struct iommu_state *is, vaddr_t dva)
{
	int64_t tte = 0;

	if (dva >= is->is_dvmabase && dva <= is->is_dvmaend)
		tte = is->is_tsb[IOTSBSLOT(dva, is->is_tsbsize)];

	if ((tte & IOTTE_V) == 0)
		return ((paddr_t)-1L);
	return (tte & IOTTE_PAMASK);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 *
 * Only demap from IOMMU if flag is set.
 *
 * XXX: this function needs better internal error checking.
 */


void
iommu_remove(struct iommu_state *is, vaddr_t va, size_t len)
{
	DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx len %zu\n", va, len));
	if (!CPU_ISSUN4V)
		iommu_remove_sun4u(is, va, len);
	else
		iommu_remove_sun4v(is, va, len);
}

void
iommu_remove_sun4u(struct iommu_state *is, vaddr_t va, size_t len)
{
	int slot;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
	if ((long)(va + len) < (long)va)
		panic("iommu_remove: va 0x%lx + len 0x%lx wraps",
		    (long) va, (long) len);
	if (len & ~0xfffffff)
		panic("iommu_remove: ridiculous len 0x%lx", (u_long)len);
#endif

	va = trunc_page(va);
	DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx TSB[%lx]@%p\n",
	    va, (u_long)IOTSBSLOT(va, is->is_tsbsize),
	    &is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)]));
	while (len > 0) {
		DPRINTF(IDB_IOMMU, ("iommu_remove: clearing TSB slot %d "
		    "for va %p size %lx\n",
		    (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va,
		    (u_long)len));
		if (len <= PAGE_SIZE)
			len = 0;
		else
			len -= PAGE_SIZE;

#if 0
		/*
		 * XXX Zero-ing the entry would not require RMW
		 *
		 * Disabling valid bit while a page is used by a device
		 * causes an uncorrectable DMA error.
		 * Workaround to avoid an uncorrectable DMA error is
		 * eliminating the next line, but the page is mapped
		 * until the next iommu_enter call.
		 */
		is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] &= ~IOTTE_V;
		membar_StoreStore();
#endif
		IOMMUREG_WRITE(is, iommu_flush, va);

		/* Flush cache if necessary. */
		slot = IOTSBSLOT(trunc_page(va), is->is_tsbsize);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
		    (len == 0 || (slot % 8) == 7))
			IOMMUREG_WRITE(is, iommu_cache_flush,
			    is->is_ptsb + slot * 8);

		va += PAGE_SIZE;
	}
}

void
iommu_remove_sun4v(struct iommu_state *is, vaddr_t va, size_t len)
{
	u_int64_t tsbid = IOTSBSLOT(va, is->is_tsbsize);
	u_int64_t ndemapped;
	int err;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || (va + PAGE_MASK) > is->is_dvmaend)
		panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
	if (va != trunc_page(va)) {
		printf("iommu_remove: unaligned va: %lx\n", va);
		va = trunc_page(va);
	}
#endif

	err = hv_pci_iommu_demap(is->is_devhandle, tsbid, 1, &ndemapped);
	if (err != H_EOK || ndemapped != 1)
		panic("hv_pci_iommu_demap: err=%d", err);
}

static int
iommu_strbuf_flush_done(struct strbuf_ctl *sb)
{
	struct iommu_state *is = sb->sb_is;
	struct timeval cur, flushtimeout;

#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
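
/*
 * BUMPTIME(t, usec) advances *t by `usec' microseconds, carrying any
 * overflow of tv_usec into tv_sec; it assumes usec < 1000000, so a
 * single carry is enough (the only caller below passes 500000).
 */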

	if (!sb->sb_flush)
		return (0);

	/*
	 * Streaming buffer flushes:
	 *
	 *   1 Tell strbuf to flush by storing va to strbuf_pgflush.  If
	 *     we're not on a cache line boundary (64-bits):
	 *   2 Store 0 in flag
	 *   3 Store pointer to flag in flushsync
	 *   4 wait till flushsync becomes 0x1
	 *
	 * If it takes more than .5 sec, something
	 * went wrong.
	 */

	*sb->sb_flush = 0;
	bus_space_write_8(is->is_bustag, sb->sb_sb,
	    STRBUFREG(strbuf_flushsync), sb->sb_flushpa);

	microtime(&flushtimeout);
	cur = flushtimeout;
	BUMPTIME(&flushtimeout, 500000); /* 1/2 sec */

	DPRINTF(IDB_IOMMU, ("%s: flush = %lx at va = %lx pa = %lx now="
	    "%"PRIx64":%"PRIx32" until = %"PRIx64":%"PRIx32"\n", __func__,
	    (long)*sb->sb_flush, (long)sb->sb_flush, (long)sb->sb_flushpa,
	    cur.tv_sec, cur.tv_usec,
	    flushtimeout.tv_sec, flushtimeout.tv_usec));

	/* Bypass non-coherent D$ */
	while ((!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) &&
	    timercmp(&cur, &flushtimeout, <=))
		microtime(&cur);

#ifdef DIAGNOSTIC
	if (!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) {
		printf("%s: flush timeout %p, at %p\n", __func__,
		    (void *)(u_long)*sb->sb_flush,
		    (void *)(u_long)sb->sb_flushpa); /* panic? */
#ifdef DDB
		Debugger();
#endif
	}
#endif
	DPRINTF(IDB_IOMMU, ("%s: flushed\n", __func__));
	return (*sb->sb_flush);
}

/*
 * IOMMU DVMA operations, common to SBUS and PCI.
 */
int
iommu_dvmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	int err, needsflush;
	bus_size_t sgsize;
	paddr_t curaddr;
	u_long dvmaddr, sgstart, sgend, bmask;
	bus_size_t align, boundary, len;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	struct pmap *pmap;
	int slot;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load(): error %d > %d -- "
		     "map size exceeded!\n", (int)buflen, (int)map->_dm_size));
		return (EINVAL);
	}

	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
		boundary = map->_dm_boundary;
	align = max(map->dm_segs[0]._ds_align, PAGE_SIZE);

	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	KASSERT(is->is_dvmamap);
	mutex_enter(&is->is_lock);
	err = extent_alloc(is->is_dvmamap, sgsize, align,
	    (sgsize > boundary) ? 0 : boundary,
	    EX_NOWAIT|EX_BOUNDZERO, &dvmaddr);
	mutex_exit(&is->is_lock);

#ifdef DEBUG
	if (err || (dvmaddr == (u_long)-1)) {
		printf("iommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (err != 0)
		return (err);

	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	/*
	 * Now split the DVMA range into segments, not crossing
	 * the boundary.
	 */
	seg = 0;
	sgstart = dvmaddr + (vaddr & PGOFSET);
	sgend = sgstart + buflen - 1;
	map->dm_segs[seg].ds_addr = sgstart;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: boundary %lx boundary - 1 %lx "
	    "~(boundary - 1) %lx\n", (long)boundary, (long)(boundary - 1),
	    (long)~(boundary - 1)));
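	/*
	 * bus_dma boundaries are required to be powers of two, so
	 * ~(boundary - 1) masks off the offset within a boundary-sized
	 * window; two addresses fall in the same window exactly when
	 * their masked values are equal.
	 */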
	bmask = ~(boundary - 1);
	while ((sgstart & bmask) != (sgend & bmask) ||
	    sgend - sgstart + 1 > map->dm_maxsegsz) {
		/* Oops.  We crossed a boundary or large seg.  Split the xfer. */
		len = map->dm_maxsegsz;
		if ((sgstart & bmask) != (sgend & bmask))
			len = min(len, boundary - (sgstart & (boundary - 1)));
		map->dm_segs[seg].ds_len = len;
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
		    "seg %d start %lx size %lx\n", seg,
		    (long)map->dm_segs[seg].ds_addr,
		    (long)map->dm_segs[seg].ds_len));
		if (++seg >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
			    "too many segments %d\n", seg));
			mutex_enter(&is->is_lock);
			err = extent_free(is->is_dvmamap,
			    dvmaddr, sgsize, EX_NOWAIT);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			mutex_exit(&is->is_lock);
			if (err != 0)
				printf("warning: %s: %" PRId64
				    " of DVMA space lost\n", __func__, sgsize);
			return (EFBIG);
		}
		sgstart += len;
		map->dm_segs[seg].ds_addr = sgstart;
	}
	map->dm_segs[seg].ds_len = sgend - sgstart + 1;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
	    "seg %d start %lx size %lx\n", seg,
	    (long)map->dm_segs[seg].ds_addr, (long)map->dm_segs[seg].ds_len));
	map->dm_nsegs = seg + 1;
	map->dm_mapsize = buflen;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	needsflush = 0;
	for (; buflen > 0; ) {

		/*
		 * Get the physical address for this page.
		 */
		if (pmap_extract(pmap, (vaddr_t)vaddr, &curaddr) == FALSE) {
#ifdef DIAGNOSTIC
			printf("iommu_dvmamap_load: pmap_extract failed %lx\n", vaddr);
#endif
			bus_dmamap_unload(t, map);
			return (-1);
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load: map %p loading va %p "
		     "dva %lx at pa %lx\n",
		     map, (void *)vaddr, (long)dvmaddr,
		     (long)trunc_page(curaddr)));
		iommu_enter(sb, trunc_page(dvmaddr), trunc_page(curaddr),
		    flags | IOTTE_DEBUG(0x4000));
		needsflush = 1;

		vaddr += sgsize;
		buflen -= sgsize;

		/* Flush cache if necessary. */
		slot = IOTSBSLOT(trunc_page(dvmaddr), is->is_tsbsize);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
		    (buflen <= 0 || (slot % 8) == 7))
			IOMMUREG_WRITE(is, iommu_cache_flush,
			    is->is_ptsb + slot * 8);

		dvmaddr += PAGE_SIZE;
	}
	if (needsflush)
		iommu_strbuf_flush_done(sb);
#ifdef DIAGNOSTIC
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
		    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
			    seg, (long)map->dm_segs[seg].ds_addr,
			    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
			Debugger();
#endif
		}
	}
#endif
	return (0);
}
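
/*
 * A hypothetical sketch of the calling sequence a driver goes through
 * to reach the routines in this file (via the bus_dma(9) interface;
 * the buffer and softc names are made up):
 *
 *	bus_dmamap_create(sc->sc_dmatag, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(sc->sc_dmatag, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dmatag, map, 0, len, BUS_DMASYNC_PREREAD);
 *	-- start the DMA, wait for completion --
 *	bus_dmamap_sync(sc->sc_dmatag, map, 0, len, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dmatag, map);
 */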


void
iommu_dvmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	int error;
	bus_size_t sgsize = map->_dm_dvmasize;

	/* Flush the iommu */
#ifdef DEBUG
	if (!map->_dm_dvmastart) {
		printf("iommu_dvmamap_unload: _dm_dvmastart is zero\n");
#ifdef DDB
		Debugger();
#endif
	}
#endif
	iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);

	/* Flush the caches */
	bus_dmamap_unload(t->_parent, map);

	mutex_enter(&is->is_lock);
	error = extent_free(is->is_dvmamap, map->_dm_dvmastart,
	    map->_dm_dvmasize, EX_NOWAIT);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
	mutex_exit(&is->is_lock);
	if (error != 0)
		printf("warning: %s: %" PRId64 " of DVMA space lost\n",
		    __func__, sgsize);

	/* Clear the map */
}


int
iommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	struct vm_page *pg;
	int i, j;
	int left;
	int err, needsflush;
	bus_size_t sgsize;
	paddr_t pa;
	bus_size_t boundary, align;
	u_long dvmaddr, sgstart, sgend, bmask;
	struct pglist *pglist;
	const int pagesz = PAGE_SIZE;
	int slot;
#ifdef DEBUG
	int npg = 0;
#endif

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load_raw: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = segs[0]._ds_boundary) == 0)
		boundary = map->_dm_boundary;

	align = max(segs[0]._ds_align, pagesz);

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	/* Count up the total number of pages we need */
	pa = trunc_page(segs[0].ds_addr);
	sgsize = 0;
	left = size;
	for (i = 0; left > 0 && i < nsegs; i++) {
		if (round_page(pa) != round_page(segs[i].ds_addr))
			sgsize = round_page(sgsize) +
			    (segs[i].ds_addr & PGOFSET);
		sgsize += min(left, segs[i].ds_len);
		left -= segs[i].ds_len;
		pa = segs[i].ds_addr + segs[i].ds_len;
	}
	sgsize = round_page(sgsize);

	mutex_enter(&is->is_lock);
	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc(is->is_dvmamap, sgsize, align,
	    (sgsize > boundary) ? 0 : boundary,
	    ((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT) |
	    EX_BOUNDZERO, &dvmaddr);
	mutex_exit(&is->is_lock);

	if (err != 0)
		return (err);

#ifdef DEBUG
	if (dvmaddr == (u_long)-1)
	{
		printf("iommu_dvmamap_load_raw(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	bmask = ~(boundary - 1);
	if ((pglist = segs[0]._ds_mlist) == NULL) {
		u_long prev_va = 0UL, last_va = dvmaddr;
		paddr_t prev_pa = 0;
		int end = 0, offset;
		bus_size_t len = size;

		/*
		 * This segs is made up of individual physical
		 * segments, probably by _bus_dmamap_load_uio() or
		 * _bus_dmamap_load_mbuf().  Ignore the mlist and
		 * load each one individually.
		 */
		j = 0;
		needsflush = 0;
		for (i = 0; i < nsegs ; i++) {

			pa = segs[i].ds_addr;
			offset = (pa & PGOFSET);
			pa = trunc_page(pa);
			dvmaddr = trunc_page(dvmaddr);
			left = min(len, segs[i].ds_len);

			DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: converting "
			    "physseg %d start %lx size %lx\n", i,
			    (long)segs[i].ds_addr, (long)segs[i].ds_len));

			if ((pa == prev_pa) &&
			    ((offset != 0) || (end != offset))) {
				/* We can re-use this mapping */
				dvmaddr = prev_va;
			}

			sgstart = dvmaddr + offset;
			sgend = sgstart + left - 1;

			/* Are the segments virtually adjacent? */
			if ((j > 0) && (end == offset) &&
			    ((offset == 0) || (pa == prev_pa)) &&
			    (map->dm_segs[j-1].ds_len + left <=
			     map->dm_maxsegsz)) {
				/* Just append to the previous segment. */
				map->dm_segs[--j].ds_len += left;
				/* Restore sgstart for boundary check */
				sgstart = map->dm_segs[j].ds_addr;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "appending seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			} else {
				if (j >= map->_dm_segcnt) {
					iommu_remove(is, map->_dm_dvmastart,
					    last_va - map->_dm_dvmastart);
					goto fail;
				}
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = left;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			}
			end = (offset + left) & PGOFSET;

			/* Check for boundary issues */
			while ((sgstart & bmask) != (sgend & bmask)) {
				/* Need a new segment. */
				map->dm_segs[j].ds_len =
				    boundary - (sgstart & (boundary - 1));
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
				if (++j >= map->_dm_segcnt) {
					iommu_remove(is, map->_dm_dvmastart,
					    last_va - map->_dm_dvmastart);
					goto fail;
				}
				sgstart += map->dm_segs[j-1].ds_len;
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = sgend - sgstart + 1;
			}

			if (sgsize == 0)
				panic("iommu_dmamap_load_raw: size botch");

			/* Now map a series of pages. */
			while (dvmaddr <= sgend) {
				DPRINTF(IDB_BUSDMA,
				    ("iommu_dvmamap_load_raw: map %p "
				     "loading va %lx at pa %lx\n",
				     map, (long)dvmaddr,
				     (long)(pa)));
				/* Enter it if we haven't before. */
				if (prev_va != dvmaddr) {
					iommu_enter(sb, prev_va = dvmaddr,
					    prev_pa = pa,
					    flags | IOTTE_DEBUG(++npg << 12));
					needsflush = 1;

					/* Flush cache if necessary. */
					slot = IOTSBSLOT(trunc_page(dvmaddr), is->is_tsbsize);
					if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
					    ((dvmaddr + pagesz) > sgend || (slot % 8) == 7))
						IOMMUREG_WRITE(is, iommu_cache_flush,
						    is->is_ptsb + slot * 8);
				}

				dvmaddr += pagesz;
				pa += pagesz;
				last_va = dvmaddr;
			}

			len -= left;
			++j;
		}
		if (needsflush)
			iommu_strbuf_flush_done(sb);

		map->dm_mapsize = size;
		map->dm_nsegs = j;
#ifdef DIAGNOSTIC
		{ int seg;
		for (seg = 0; seg < map->dm_nsegs; seg++) {
			if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
			    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
				printf("seg %d dvmaddr %lx out of range %x - %x\n",
				    seg, (long)map->dm_segs[seg].ds_addr,
				    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
				Debugger();
#endif
			}
		}
		}
#endif
		return (0);
	}

	/*
	 * This was allocated with bus_dmamem_alloc.
	 * The pages are on a `pglist'.
	 */
	i = 0;
	sgstart = dvmaddr;
	sgend = sgstart + size - 1;
	map->dm_segs[i].ds_addr = sgstart;
	while ((sgstart & bmask) != (sgend & bmask)) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		map->dm_segs[i].ds_len = boundary - (sgstart & (boundary - 1));
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
		    "seg %d start %lx size %lx\n", i,
		    (long)map->dm_segs[i].ds_addr,
		    (long)map->dm_segs[i].ds_len));
		if (++i >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			goto fail;
		}
		sgstart += map->dm_segs[i-1].ds_len;
		map->dm_segs[i].ds_addr = sgstart;
	}
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
	    "seg %d start %lx size %lx\n", i,
	    (long)map->dm_segs[i].ds_addr, (long)map->dm_segs[i].ds_len));
	map->dm_segs[i].ds_len = sgend - sgstart + 1;

	needsflush = 0;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(pg);

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load_raw: map %p loading va %lx at pa %lx\n",
		     map, (long)dvmaddr, (long)(pa)));
		iommu_enter(sb, dvmaddr, pa, flags | IOTTE_DEBUG(0x8000));
		needsflush = 1;

		sgsize -= pagesz;

		/* Flush cache if necessary. */
		slot = IOTSBSLOT(trunc_page(dvmaddr), is->is_tsbsize);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
		    (sgsize == 0 || (slot % 8) == 7))
			IOMMUREG_WRITE(is, iommu_cache_flush,
			    is->is_ptsb + slot * 8);

		dvmaddr += pagesz;
	}
	if (needsflush)
		iommu_strbuf_flush_done(sb);
	map->dm_mapsize = size;
	map->dm_nsegs = i+1;
#ifdef DIAGNOSTIC
	{ int seg;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
		    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
			    seg, (long)map->dm_segs[seg].ds_addr,
			    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
			Debugger();
#endif
		}
	}
	}
#endif
	return (0);

fail:
	mutex_enter(&is->is_lock);
	err = extent_free(is->is_dvmamap, map->_dm_dvmastart, sgsize,
	    EX_NOWAIT);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
	mutex_exit(&is->is_lock);
	if (err != 0)
		printf("warning: %s: %" PRId64 " of DVMA space lost\n",
		    __func__, sgsize);
	return (EFBIG);
}


/*
 * Flush an individual dma segment, returns non-zero if the streaming buffers
 * need flushing afterwards.
 */
static int
iommu_dvmamap_sync_range(struct strbuf_ctl *sb, vaddr_t va, bus_size_t len)
{
	vaddr_t vaend;
	struct iommu_state *is = sb->sb_is;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("invalid va: %llx", (long long)va);
#endif

	if ((is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] & IOTTE_STREAM) == 0) {
		DPRINTF(IDB_SYNC,
		    ("iommu_dvmamap_sync_range: attempting to flush "
		     "non-streaming entry\n"));
		return (0);
	}

	vaend = round_page(va + len) - 1;
	va = trunc_page(va);

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || vaend > is->is_dvmaend)
		panic("invalid va range: %llx to %llx (%x to %x)",
		    (long long)va, (long long)vaend,
		    is->is_dvmabase,
		    is->is_dvmaend);
#endif

	for ( ; va <= vaend; va += PAGE_SIZE) {
		DPRINTF(IDB_SYNC,
		    ("iommu_dvmamap_sync_range: flushing va %p\n",
		     (void *)(u_long)va));
		iommu_strbuf_flush(sb, va);
	}

	return (1);
}
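
/*
 * Note that iommu_dvmamap_sync_range() samples only the first page's
 * IOTTE to decide whether the range is streaming; iommu_enter() gives
 * every page of a map the same IOTTE_STREAM setting, so one sample is
 * representative of the whole range.
 */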

static void
_iommu_dvmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	bus_size_t count;
	int i, needsflush = 0;

	if (!sb->sb_flush)
		return;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (offset < map->dm_segs[i].ds_len)
			break;
		offset -= map->dm_segs[i].ds_len;
	}

	if (i == map->dm_nsegs)
		panic("%s: segment too short %llu", __func__,
		    (unsigned long long)offset);

	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTWRITE)) {
		/* Nothing to do */;
	}

	if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE)) {

		for (; len > 0 && i < map->dm_nsegs; i++) {
			count = MIN(map->dm_segs[i].ds_len - offset, len);
			if (count > 0 &&
			    iommu_dvmamap_sync_range(sb,
				map->dm_segs[i].ds_addr + offset, count))
				needsflush = 1;
			offset = 0;
			len -= count;
		}
#ifdef DIAGNOSTIC
		if (i == map->dm_nsegs && len > 0)
			panic("%s: leftover %llu", __func__,
			    (unsigned long long)len);
#endif

		if (needsflush)
			iommu_strbuf_flush_done(sb);
	}
}

void
iommu_dvmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

	/* If len is 0, then there is nothing to do */
	if (len == 0)
		return;

	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) {
		/* Flush the CPU then the IOMMU */
		bus_dmamap_sync(t->_parent, map, offset, len, ops);
		_iommu_dvmamap_sync(t, map, offset, len, ops);
	}
	if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) {
		/* Flush the IOMMU then the CPU */
		_iommu_dvmamap_sync(t, map, offset, len, ops);
		bus_dmamap_sync(t->_parent, map, offset, len, ops);
	}
}

int
iommu_dvmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_alloc: sz %llx align %llx bound %llx "
	    "segp %p flags %d\n", (unsigned long long)size,
	    (unsigned long long)alignment, (unsigned long long)boundary,
	    segs, flags));
	return (bus_dmamem_alloc(t->_parent, size, alignment, boundary,
	    segs, nsegs, rsegs, flags|BUS_DMA_DVMA));
}

void
iommu_dvmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_free: segp %p nsegs %d\n",
	    segs, nsegs));
	bus_dmamem_free(t->_parent, segs, nsegs);
}

/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
iommu_dvmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	struct vm_page *pg;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *pglist;
	int cbit;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: segp %p nsegs %d size %lx\n",
	    segs, nsegs, size));

	/*
	 * Allocate some space in the kernel map, and then map these pages
	 * into this space.
	 */
	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	/*
	 * digest flags:
	 */
	cbit = 0;
	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
		cbit |= PMAP_NVC;
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
		cbit |= PMAP_NC;

	/*
	 * Now take this and map it into the CPU.
	 */
	pglist = segs[0]._ds_mlist;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
#ifdef DIAGNOSTIC
		if (size == 0)
			panic("iommu_dvmamem_map: size botch");
#endif
		addr = VM_PAGE_TO_PHYS(pg);
		DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: "
		    "mapping va %lx at %llx\n", va, (unsigned long long)addr | cbit));
		pmap_kenter_pa(va, addr | cbit,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (0);
}

/*
 * Unmap DVMA mappings from kernel
 */
void
iommu_dvmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_unmap: kvm %p size %lx\n",
	    kva, size));

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("iommu_dvmamem_unmap");
#endif

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}