/*	$NetBSD: iommu.c,v 1.89 2009/12/05 16:48:26 jdc Exp $	*/

/*
 * Copyright (c) 1999, 2000 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001, 2002 Eduardo Horvath
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * UltraSPARC IOMMU support; used by both the sbus and pci code.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iommu.c,v 1.89 2009/12/05 16:48:26 jdc Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <sparc64/dev/iommureg.h>
#include <sparc64/dev/iommuvar.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>

#ifdef DEBUG
#define IDB_BUSDMA	0x1
#define IDB_IOMMU	0x2
#define IDB_INFO	0x4
#define IDB_SYNC	0x8
int iommudebug = 0x0;
#define DPRINTF(l, s)	do { if (iommudebug & l) printf s; } while (0)
#else
#define DPRINTF(l, s)
#endif

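/*
 * Flush one page (DVMA address `v') from the streaming buffer belonging
 * to strbuf_ctl `i'; a no-op when the strbuf has no flush page.
 */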
#define iommu_strbuf_flush(i, v)	do {				\
	if ((i)->sb_flush)						\
		bus_space_write_8((i)->sb_is->is_bustag, (i)->sb_sb,	\
		    STRBUFREG(strbuf_pgflush), (v));			\
	} while (0)

static int iommu_strbuf_flush_done(struct strbuf_ctl *);
static void _iommu_dvmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	bus_size_t, int);

/*
 * initialise the UltraSPARC IOMMU (SBUS or PCI):
 *	- allocate and setup the iotsb.
 *	- enable the IOMMU
 *	- initialise the streaming buffers (if they exist)
 *	- create a private DVMA map.
 */
void
iommu_init(char *name, struct iommu_state *is, int tsbsize, uint32_t iovabase)
{
	psize_t size;
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;
	struct pglist pglist;

	/*
	 * Setup the iommu.
	 *
	 * The sun4u iommu is part of the SBUS or PCI controller so we
	 * deal with it here.
	 *
	 * For sysio and psycho/psycho+ the IOMMU address space always ends at
	 * 0xffffe000, but the starting address depends on the size of the
	 * map.  The map size is 1024 * 2 ^ is->is_tsbsize entries, where each
	 * entry is 8 bytes.  The start of the map can be calculated by
	 * (0xffffe000 << (8 + is->is_tsbsize)).
	 *
	 * But sabre and hummingbird use a different scheme that seems to
	 * be hard-wired, so we read the start and size from the PROM and
	 * just use those values.
	 */
	is->is_cr = (tsbsize << 16) | IOMMUCR_EN;
	is->is_tsbsize = tsbsize;
	if (iovabase == -1) {
		is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize);
		is->is_dvmaend = IOTSB_VEND;
	} else {
		is->is_dvmabase = iovabase;
		is->is_dvmaend = iovabase + IOTSB_VSIZE(tsbsize);
	}

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */

	size = PAGE_SIZE << is->is_tsbsize;
	if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
		(paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0)
		panic("iommu_init: no memory");

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
	if (va == 0)
		panic("iommu_init: no memory");
	is->is_tsb = (int64_t *)va;

	is->is_ptsb = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));

	/* Map the pages */
	TAILQ_FOREACH(pg, &pglist, pageq.queue) {
		pa = VM_PAGE_TO_PHYS(pg);
		pmap_kenter_pa(va, pa | PMAP_NVC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	memset(is->is_tsb, 0, size);

#ifdef DEBUG
	if (iommudebug & IDB_INFO)
	{
		/* Probe the iommu */

		printf("iommu regs at: cr=%lx tsb=%lx flush=%lx\n",
		    (u_long)bus_space_read_8(is->is_bustag, is->is_iommu,
			offsetof (struct iommureg, iommu_cr)),
		    (u_long)bus_space_read_8(is->is_bustag, is->is_iommu,
			offsetof (struct iommureg, iommu_tsb)),
		    (u_long)bus_space_read_8(is->is_bustag, is->is_iommu,
			offsetof (struct iommureg, iommu_flush)));
		printf("iommu cr=%llx tsb=%llx\n",
		    (unsigned long long)bus_space_read_8(is->is_bustag,
			is->is_iommu,
			offsetof (struct iommureg, iommu_cr)),
		    (unsigned long long)bus_space_read_8(is->is_bustag,
			is->is_iommu,
			offsetof (struct iommureg, iommu_tsb)));
		printf("TSB base %p phys %llx\n", (void *)is->is_tsb,
		    (unsigned long long)is->is_ptsb);
		delay(1000000); /* 1 s */
	}
#endif

	/*
	 * now actually start up the IOMMU
	 */
	iommu_reset(is);

	/*
	 * Now that the hardware is working, we need to allocate a DVMA map.
	 */
	printf("DVMA map: %x to %x\n",
	    (unsigned int)is->is_dvmabase,
	    (unsigned int)is->is_dvmaend);
	printf("IOTSB: %llx to %llx\n",
	    (unsigned long long)is->is_ptsb,
	    (unsigned long long)(is->is_ptsb + size));
	is->is_dvmamap = extent_create(name,
	    is->is_dvmabase, is->is_dvmaend - PAGE_SIZE,
	    M_DEVBUF, 0, 0, EX_NOWAIT);
}

/*
 * Streaming buffers don't exist on the UltraSPARC IIi; we should have
 * detected that already and disabled them.  If not, we will notice that
 * they aren't there when the STRBUF_EN bit does not remain.
 */
void
iommu_reset(struct iommu_state *is)
{
	int i;
	struct strbuf_ctl *sb;

	/* Need to do 64-bit stores */
	bus_space_write_8(is->is_bustag, is->is_iommu, IOMMUREG(iommu_tsb),
	    is->is_ptsb);

	/* Enable IOMMU in diagnostic mode */
	bus_space_write_8(is->is_bustag, is->is_iommu, IOMMUREG(iommu_cr),
	    is->is_cr|IOMMUCR_DE);

	for (i = 0; i < 2; i++) {
		if ((sb = is->is_sb[i])) {

			/* Enable diagnostics mode? */
			bus_space_write_8(is->is_bustag, is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl), STRBUF_EN);

			/* No streaming buffers? Disable them */
			if (bus_space_read_8(is->is_bustag,
			    is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl)) == 0) {
				is->is_sb[i]->sb_flush = NULL;
			} else {

				/*
				 * locate the pa of the flush buffer.
				 */
				(void)pmap_extract(pmap_kernel(),
				    (vaddr_t)is->is_sb[i]->sb_flush,
				    &is->is_sb[i]->sb_flushpa);
			}
		}
	}
}

/*
 * Here are the iommu control routines.
 */
void
iommu_enter(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags)
{
	struct iommu_state *is = sb->sb_is;
	int strbuf = (flags & BUS_DMA_STREAMING);
	int64_t tte;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_enter: va %#lx not in DVMA space", va);
#endif

	/* Is the streamcache flush really needed? */
	if (sb->sb_flush) {
		iommu_strbuf_flush(sb, va);
		iommu_strbuf_flush_done(sb);
	} else
		/* If we can't flush the strbuf don't enable it. */
		strbuf = 0;

	tte = MAKEIOTTE(pa, !(flags & BUS_DMA_NOWRITE),
	    !(flags & BUS_DMA_NOCACHE), (strbuf));
#ifdef DEBUG
	tte |= (flags & 0xff000LL)<<(4*8);
#endif

	DPRINTF(IDB_IOMMU, ("Clearing TSB slot %d for va %p\n",
	    (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va));
	is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] = tte;
	bus_space_write_8(is->is_bustag, is->is_iommu,
	    IOMMUREG(iommu_flush), va);
	DPRINTF(IDB_IOMMU, ("iommu_enter: va %lx pa %lx TSB[%lx]@%p=%lx\n",
	    va, (long)pa, (u_long)IOTSBSLOT(va,is->is_tsbsize),
	    (void *)(u_long)&is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
	    (u_long)tte));
}

/*
 * Find the value of a DVMA address (debug routine).
 */
paddr_t
iommu_extract(struct iommu_state *is, vaddr_t dva)
{
	int64_t tte = 0;

	if (dva >= is->is_dvmabase && dva < is->is_dvmaend)
		tte = is->is_tsb[IOTSBSLOT(dva, is->is_tsbsize)];

	if ((tte & IOTTE_V) == 0)
		return ((paddr_t)-1L);
	return (tte & IOTTE_PAMASK);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 *
 * Only demap from IOMMU if flag is set.
 *
 * XXX: this function needs better internal error checking.
 */
void
iommu_remove(struct iommu_state *is, vaddr_t va, size_t len)
{

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
	if ((long)(va + len) < (long)va)
		panic("iommu_remove: va 0x%lx + len 0x%lx wraps",
		    (long) va, (long) len);
	if (len & ~0xfffffff)
		panic("iommu_remove: ridiculous len 0x%lx", (u_long)len);
#endif

	va = trunc_page(va);
	DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx TSB[%lx]@%p\n",
	    va, (u_long)IOTSBSLOT(va, is->is_tsbsize),
	    &is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)]));
	while (len > 0) {
		DPRINTF(IDB_IOMMU, ("iommu_remove: clearing TSB slot %d "
		    "for va %p size %lx\n",
		    (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va,
		    (u_long)len));
		if (len <= PAGE_SIZE)
			len = 0;
		else
			len -= PAGE_SIZE;

		/* XXX Zero-ing the entry would not require RMW */
		is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] &= ~IOTTE_V;
		bus_space_write_8(is->is_bustag, is->is_iommu,
		    IOMMUREG(iommu_flush), va);
		va += PAGE_SIZE;
	}
}

static int
iommu_strbuf_flush_done(struct strbuf_ctl *sb)
{
	struct iommu_state *is = sb->sb_is;
	struct timeval cur, flushtimeout;

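/*
 * Advance the timeval pointed to by `t' by `usec' microseconds, carrying
 * any overflow past one second into tv_sec.
 */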
#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}

	if (!sb->sb_flush)
		return (0);

	/*
	 * Streaming buffer flushes:
	 *
	 *   1 Tell strbuf to flush by storing va to strbuf_pgflush.  If
	 *     we're not on a cache line boundary (64-bits):
	 *   2 Store 0 in flag
	 *   3 Store pointer to flag in flushsync
	 *   4 wait till flushsync becomes 0x1
	 *
	 * If it takes more than .5 sec, something
	 * went wrong.
	 */

	*sb->sb_flush = 0;
	bus_space_write_8(is->is_bustag, sb->sb_sb,
	    STRBUFREG(strbuf_flushsync), sb->sb_flushpa);

	microtime(&flushtimeout);
	cur = flushtimeout;
	BUMPTIME(&flushtimeout, 500000); /* 1/2 sec */

	DPRINTF(IDB_IOMMU, ("iommu_strbuf_flush_done: flush = %lx "
	    "at va = %lx pa = %lx now=%"PRIx64":%"PRIx32" until = %"PRIx64":%"PRIx32"\n",
	    (long)*sb->sb_flush, (long)sb->sb_flush, (long)sb->sb_flushpa,
	    cur.tv_sec, cur.tv_usec,
	    flushtimeout.tv_sec, flushtimeout.tv_usec));

	/* Bypass non-coherent D$ */
	while ((!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) &&
	    timercmp(&cur, &flushtimeout, <=))
		microtime(&cur);

#ifdef DIAGNOSTIC
	if (!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) {
		printf("iommu_strbuf_flush_done: flush timeout %p, at %p\n",
		    (void *)(u_long)*sb->sb_flush,
		    (void *)(u_long)sb->sb_flushpa); /* panic? */
#ifdef DDB
		Debugger();
#endif
	}
#endif
	DPRINTF(IDB_IOMMU, ("iommu_strbuf_flush_done: flushed\n"));
	return (*sb->sb_flush);
}

/*
 * IOMMU DVMA operations, common to SBUS and PCI.
 */
int
iommu_dvmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
	bus_size_t buflen, struct proc *p, int flags)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	int s;
	int err;
	bus_size_t sgsize;
	paddr_t curaddr;
	u_long dvmaddr, sgstart, sgend;
	bus_size_t align, boundary, len;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	struct pmap *pmap;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	if (buflen > map->_dm_size) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load(): error %d > %d -- "
		     "map size exceeded!\n", (int)buflen, (int)map->_dm_size));
		return (EINVAL);
	}

	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
		boundary = map->_dm_boundary;
	align = max(map->dm_segs[0]._ds_align, PAGE_SIZE);

	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	s = splhigh();
	err = extent_alloc(is->is_dvmamap, sgsize, align,
	    (sgsize > boundary) ? 0 : boundary,
	    EX_NOWAIT|EX_BOUNDZERO, &dvmaddr);
	splx(s);

#ifdef DEBUG
	if (err || (dvmaddr == (u_long)-1)) {
		printf("iommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (err != 0)
		return (err);

	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	/*
	 * Now split the DVMA range into segments, not crossing
	 * the boundary.
	 */
	seg = 0;
	sgstart = dvmaddr + (vaddr & PGOFSET);
	sgend = sgstart + buflen - 1;
	map->dm_segs[seg].ds_addr = sgstart;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: boundary %lx boundary - 1 %lx "
	    "~(boundary - 1) %lx\n", (long)boundary, (long)(boundary - 1),
	    (long)~(boundary - 1)));
	while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		len = boundary - (sgstart & (boundary - 1));
		map->dm_segs[seg].ds_len = len;
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
		    "seg %d start %lx size %lx\n", seg,
		    (long)map->dm_segs[seg].ds_addr,
		    (long)map->dm_segs[seg].ds_len));
		if (++seg >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
			    "too many segments %d\n", seg));
			s = splhigh();
			/* How can this fail?  And if it does what can we do? */
			err = extent_free(is->is_dvmamap,
			    dvmaddr, sgsize, EX_NOWAIT);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			splx(s);
			return (EFBIG);
		}
		sgstart += len;
		map->dm_segs[seg].ds_addr = sgstart;
	}
	map->dm_segs[seg].ds_len = sgend - sgstart + 1;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
	    "seg %d start %lx size %lx\n", seg,
	    (long)map->dm_segs[seg].ds_addr, (long)map->dm_segs[seg].ds_len));
	map->dm_nsegs = seg + 1;
	map->dm_mapsize = buflen;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

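	/*
	 * Walk the buffer one page at a time, entering each physical page
	 * into the IOMMU at the DVMA addresses allocated above.
	 */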
	for (; buflen > 0; ) {

		/*
		 * Get the physical address for this page.
		 */
		if (pmap_extract(pmap, (vaddr_t)vaddr, &curaddr) == FALSE) {
#ifdef DIAGNOSTIC
			printf("iommu_dvmamap_load: pmap_extract failed %lx\n", vaddr);
#endif
			bus_dmamap_unload(t, map);
			return (-1);
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load: map %p loading va %p "
		     "dva %lx at pa %lx\n",
		     map, (void *)vaddr, (long)dvmaddr,
		     (long)trunc_page(curaddr)));
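		/*
		 * Note: 0x4000 falls in the (flags & 0xff000) range that
		 * iommu_enter() folds into the TTE when DEBUG is defined;
		 * it appears to act as a call-site marker.
		 */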
		iommu_enter(sb, trunc_page(dvmaddr), trunc_page(curaddr),
		    flags|0x4000);

		dvmaddr += PAGE_SIZE;
		vaddr += sgsize;
		buflen -= sgsize;
	}
#ifdef DIAGNOSTIC
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
		    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
			    seg, (long)map->dm_segs[seg].ds_addr,
			    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
			Debugger();
#endif
		}
	}
#endif
	return (0);
}


void
iommu_dvmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	int error, s;
	bus_size_t sgsize = map->_dm_dvmasize;

	/* Flush the iommu */
#ifdef DEBUG
	if (!map->_dm_dvmastart) {
		printf("iommu_dvmamap_unload: _dm_dvmastart is zero\n");
#ifdef DDB
		Debugger();
#endif
	}
#endif
	iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);

	/* Flush the caches */
	bus_dmamap_unload(t->_parent, map);

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	s = splhigh();
	error = extent_free(is->is_dvmamap, map->_dm_dvmastart,
	    map->_dm_dvmasize, EX_NOWAIT);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
	splx(s);
	if (error != 0)
		printf("warning: %qd of DVMA space lost\n", (long long)sgsize);

	/* Clear the map */
}


int
iommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
	bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	struct vm_page *pg;
	int i, j, s;
	int left;
	int err;
	bus_size_t sgsize;
	paddr_t pa;
	bus_size_t boundary, align;
	u_long dvmaddr, sgstart, sgend;
	struct pglist *pglist;
	int pagesz = PAGE_SIZE;
	int npg = 0; /* DEBUG */

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load_raw: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = segs[0]._ds_boundary) == 0)
		boundary = map->_dm_boundary;

	align = max(segs[0]._ds_align, pagesz);

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	/* Count up the total number of pages we need */
	pa = segs[0].ds_addr;
	sgsize = 0;
	left = size;
	for (i = 0; left && i < nsegs; i++) {
		if (round_page(pa) != round_page(segs[i].ds_addr))
			sgsize = round_page(sgsize);
		sgsize += min(left, segs[i].ds_len);
		left -= segs[i].ds_len;
		pa = segs[i].ds_addr + segs[i].ds_len;
	}
	sgsize = round_page(sgsize) + PAGE_SIZE;	/* XXX reserve extra dvma page */

	s = splhigh();
	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc(is->is_dvmamap, sgsize, align,
	    (sgsize > boundary) ? 0 : boundary,
	    ((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT) |
	    EX_BOUNDZERO, &dvmaddr);
	splx(s);

	if (err != 0)
		return (err);

#ifdef DEBUG
	if (dvmaddr == (u_long)-1)
	{
		printf("iommu_dvmamap_load_raw(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	if ((pglist = segs[0]._ds_mlist) == NULL) {
		u_long prev_va = 0UL;
		paddr_t prev_pa = 0;
		int end = 0, offset;

		/*
		 * This segment list is made up of individual physical
		 * segments, probably by _bus_dmamap_load_uio() or
		 * _bus_dmamap_load_mbuf().  Ignore the mlist and
		 * load each one individually.
		 */
		map->dm_mapsize = size;

		j = 0;
		for (i = 0; i < nsegs ; i++) {

			pa = segs[i].ds_addr;
			offset = (pa & PGOFSET);
			pa = trunc_page(pa);
			dvmaddr = trunc_page(dvmaddr);
			left = min(size, segs[i].ds_len);

			DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: converting "
			    "physseg %d start %lx size %lx\n", i,
			    (long)segs[i].ds_addr, (long)segs[i].ds_len));

			if ((pa == prev_pa) &&
			    ((offset != 0) || (end != offset))) {
				/* We can re-use this mapping */
				dvmaddr = prev_va;
			}

			sgstart = dvmaddr + offset;
			sgend = sgstart + left - 1;

			/* Are the segments virtually adjacent? */
			if ((j > 0) && (end == offset) &&
			    ((offset == 0) || (pa == prev_pa))) {
				/* Just append to the previous segment. */
				map->dm_segs[--j].ds_len += left;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "appending seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			} else {
				if (j >= map->_dm_segcnt) {
					iommu_dvmamap_unload(t, map);
					return (EFBIG);
				}
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = left;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			}
			end = (offset + left) & PGOFSET;

			/* Check for boundary issues */
			while ((sgstart & ~(boundary - 1)) !=
			    (sgend & ~(boundary - 1))) {
				/* Need a new segment. */
				map->dm_segs[j].ds_len =
				    boundary - (sgstart & (boundary - 1));
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
				if (++j >= map->_dm_segcnt) {
					iommu_dvmamap_unload(t, map);
					return (EFBIG);
				}
				sgstart = roundup(sgstart, boundary);
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = sgend - sgstart + 1;
			}

			if (sgsize == 0)
				panic("iommu_dmamap_load_raw: size botch");

			/* Now map a series of pages. */
			while (dvmaddr <= sgend) {
				DPRINTF(IDB_BUSDMA,
				    ("iommu_dvmamap_load_raw: map %p "
				     "loading va %lx at pa %lx\n",
				     map, (long)dvmaddr,
				     (long)(pa)));
				/* Enter it if we haven't before. */
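				/*
				 * The (++npg << 12) below appears to be a
				 * per-page debugging tag: iommu_enter()
				 * folds (flags & 0xff000) into the TTE when
				 * DEBUG is defined.
				 */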
				if (prev_va != dvmaddr)
					iommu_enter(sb, prev_va = dvmaddr,
					    prev_pa = pa,
					    flags | (++npg << 12));
				dvmaddr += pagesz;
				pa += pagesz;
			}

			size -= left;
			++j;
		}

		map->dm_nsegs = j;
#ifdef DIAGNOSTIC
		{ int seg;
		for (seg = 0; seg < map->dm_nsegs; seg++) {
			if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
			    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
				printf("seg %d dvmaddr %lx out of range %x - %x\n",
				    seg, (long)map->dm_segs[seg].ds_addr,
				    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
				Debugger();
#endif
			}
		}
		}
#endif
		return (0);
	}

	/*
	 * This was allocated with bus_dmamem_alloc.
	 * The pages are on a `pglist'.
	 */
	map->dm_mapsize = size;
	i = 0;
	sgstart = dvmaddr;
	sgend = sgstart + size - 1;
	map->dm_segs[i].ds_addr = sgstart;
	while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		map->dm_segs[i].ds_len = boundary - (sgstart & (boundary - 1));
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
		    "seg %d start %lx size %lx\n", i,
		    (long)map->dm_segs[i].ds_addr,
		    (long)map->dm_segs[i].ds_len));
		if (++i >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			s = splhigh();
			/* How can this fail?  And if it does what can we do? */
			err = extent_free(is->is_dvmamap,
			    dvmaddr, sgsize, EX_NOWAIT);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			splx(s);
			return (EFBIG);
		}
		sgstart = roundup(sgstart, boundary);
		map->dm_segs[i].ds_addr = sgstart;
	}
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
	    "seg %d start %lx size %lx\n", i,
	    (long)map->dm_segs[i].ds_addr, (long)map->dm_segs[i].ds_len));
	map->dm_segs[i].ds_len = sgend - sgstart + 1;

	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(pg);

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load_raw: map %p loading va %lx at pa %lx\n",
		     map, (long)dvmaddr, (long)(pa)));
		iommu_enter(sb, dvmaddr, pa, flags|0x8000);

		dvmaddr += pagesz;
		sgsize -= pagesz;
	}
	map->dm_mapsize = size;
	map->dm_nsegs = i+1;
#ifdef DIAGNOSTIC
	{ int seg;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
		    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
			    seg, (long)map->dm_segs[seg].ds_addr,
			    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
			Debugger();
#endif
		}
	}
	}
#endif
	return (0);
}


/*
 * Flush an individual dma segment, returns non-zero if the streaming buffers
 * need flushing afterwards.
 */
static int
iommu_dvmamap_sync_range(struct strbuf_ctl *sb, vaddr_t va, bus_size_t len)
{
	vaddr_t vaend;
	struct iommu_state *is = sb->sb_is;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("invalid va: %llx", (long long)va);
#endif

	if ((is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] & IOTTE_STREAM) == 0) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_sync_range: attempting to flush "
		     "non-streaming entry\n"));
		return (0);
	}

	vaend = round_page(va + len);
	va = trunc_page(va);

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || vaend > is->is_dvmaend)
		panic("invalid va range: %llx to %llx (%x to %x)",
		    (long long)va, (long long)vaend,
		    is->is_dvmabase,
		    is->is_dvmaend);
#endif

	for ( ; va <= vaend; va += PAGE_SIZE) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_sync_range: flushing va %p\n",
		     (void *)(u_long)va));
		iommu_strbuf_flush(sb, va);
	}

	return (1);
}

static void
_iommu_dvmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
	bus_size_t len, int ops)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	bus_size_t count;
	int i, needsflush = 0;

	if (!sb->sb_flush)
		return;

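	/* Find the map segment that contains the starting offset. */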
	for (i = 0; i < map->dm_nsegs; i++) {
		if (offset < map->dm_segs[i].ds_len)
			break;
		offset -= map->dm_segs[i].ds_len;
	}

	if (i == map->dm_nsegs)
		panic("iommu_dvmamap_sync: segment too short %llu",
		    (unsigned long long)offset);

	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTWRITE)) {
		/* Nothing to do */;
	}

	if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE)) {

		for (; len > 0 && i < map->dm_nsegs; i++) {
			count = MIN(map->dm_segs[i].ds_len - offset, len);
			if (count > 0 &&
			    iommu_dvmamap_sync_range(sb,
				map->dm_segs[i].ds_addr + offset, count))
				needsflush = 1;
			offset = 0;
			len -= count;
		}
#ifdef DIAGNOSTIC
		if (i == map->dm_nsegs && len > 0)
			panic("iommu_dvmamap_sync: leftover %llu",
			    (unsigned long long)len);
#endif

		if (needsflush)
			iommu_strbuf_flush_done(sb);
	}
}

void
iommu_dvmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
	bus_size_t len, int ops)
{

	/* If len is 0, then there is nothing to do */
	if (len == 0)
		return;

	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) {
		/* Flush the CPU then the IOMMU */
		bus_dmamap_sync(t->_parent, map, offset, len, ops);
		_iommu_dvmamap_sync(t, map, offset, len, ops);
	}
	if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) {
		/* Flush the IOMMU then the CPU */
		_iommu_dvmamap_sync(t, map, offset, len, ops);
		bus_dmamap_sync(t->_parent, map, offset, len, ops);
	}
}

int
iommu_dvmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_alloc: sz %llx align %llx bound %llx "
	    "segp %p flags %d\n", (unsigned long long)size,
	    (unsigned long long)alignment, (unsigned long long)boundary,
	    segs, flags));
	return (bus_dmamem_alloc(t->_parent, size, alignment, boundary,
	    segs, nsegs, rsegs, flags|BUS_DMA_DVMA));
}

void
iommu_dvmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_free: segp %p nsegs %d\n",
	    segs, nsegs));
	bus_dmamem_free(t->_parent, segs, nsegs);
}

/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
iommu_dvmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	struct vm_page *pg;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *pglist;
	int cbit;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: segp %p nsegs %d size %lx\n",
	    segs, nsegs, size));

	/*
	 * Allocate some space in the kernel map, and then map these pages
	 * into this space.
	 */
	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	/*
	 * digest flags:
	 */
	cbit = 0;
	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
		cbit |= PMAP_NVC;
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
		cbit |= PMAP_NC;

	/*
	 * Now take this and map it into the CPU.
	 */
	pglist = segs[0]._ds_mlist;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
#ifdef DIAGNOSTIC
		if (size == 0)
			panic("iommu_dvmamem_map: size botch");
#endif
		addr = VM_PAGE_TO_PHYS(pg);
		DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: "
		    "mapping va %lx at %llx\n", va, (unsigned long long)addr | cbit));
		pmap_kenter_pa(va, addr | cbit,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (0);
}

/*
 * Unmap DVMA mappings from kernel
 */
void
iommu_dvmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_unmap: kvm %p size %lx\n",
	    kva, size));

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("iommu_dvmamem_unmap");
#endif

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}