/*	$NetBSD: iommu.c,v 1.87 2009/10/24 14:52:19 nakayama Exp $	*/

/*
 * Copyright (c) 1999, 2000 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001, 2002 Eduardo Horvath
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * UltraSPARC IOMMU support; used by both the sbus and pci code.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iommu.c,v 1.87 2009/10/24 14:52:19 nakayama Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <sparc64/dev/iommureg.h>
#include <sparc64/dev/iommuvar.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>

#ifdef DEBUG
#define IDB_BUSDMA	0x1
#define IDB_IOMMU	0x2
#define IDB_INFO	0x4
#define IDB_SYNC	0x8
int iommudebug = 0x0;
#define DPRINTF(l, s)	do { if (iommudebug & l) printf s; } while (0)
#else
#define DPRINTF(l, s)
#endif

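/*
 * Queue a streaming-buffer flush for the page containing DVMA address "v"
 * by writing it to the strbuf page-flush register.  This is a no-op when
 * iommu_reset() found no working streaming buffer (sb_flush == NULL).
 */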
#define iommu_strbuf_flush(i, v)	do {				\
	if ((i)->sb_flush)						\
		bus_space_write_8((i)->sb_is->is_bustag, (i)->sb_sb,	\
		    STRBUFREG(strbuf_pgflush), (v));			\
	} while (0)

static	int iommu_strbuf_flush_done(struct strbuf_ctl *);
static	void _iommu_dvmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	bus_size_t, int);

/*
 * initialise the UltraSPARC IOMMU (SBUS or PCI):
 *	- allocate and setup the iotsb.
 *	- enable the IOMMU
 *	- initialise the streaming buffers (if they exist)
 *	- create a private DVMA map.
 */
void
iommu_init(char *name, struct iommu_state *is, int tsbsize, uint32_t iovabase)
{
	psize_t size;
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;
	struct pglist pglist;

	/*
	 * Setup the iommu.
	 *
	 * The sun4u iommu is part of the SBUS or PCI controller, so we
	 * deal with it here.
	 *
	 * For sysio and psycho/psycho+ the IOMMU address space always ends at
	 * 0xffffe000, but the starting address depends on the size of the
	 * map.  The map size is 1024 * 2 ^ is->is_tsbsize entries, where each
	 * entry is 8 bytes.  The start of the map can be calculated by
	 * (0xffffe000 << (8 + is->is_tsbsize)).
	 *
	 * But sabre and hummingbird use a different scheme that seems to
	 * be hard-wired, so we read the start and size from the PROM and
	 * just use those values.
	 */
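	/*
	 * Worked example (illustrative only, derived from the description
	 * above and the 8KB sparc64 page size): with tsbsize == 0 the TSB
	 * holds 1024 eight-byte entries, i.e. one page of pagetable, and
	 * each entry maps one 8KB I/O page, so the DVMA window spans about
	 * 8MB ending at 0xffffe000.  Each increment of tsbsize doubles both
	 * the TSB and the DVMA window.
	 */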
	is->is_cr = (tsbsize << 16) | IOMMUCR_EN;
	is->is_tsbsize = tsbsize;
	if (iovabase == -1) {
		is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize);
		is->is_dvmaend = IOTSB_VEND;
	} else {
		is->is_dvmabase = iovabase;
		is->is_dvmaend = iovabase + IOTSB_VSIZE(tsbsize);
	}

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */

	size = PAGE_SIZE << is->is_tsbsize;
	if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
	    (paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0)
		panic("iommu_init: no memory");

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
	if (va == 0)
		panic("iommu_init: no memory");
	is->is_tsb = (int64_t *)va;

	is->is_ptsb = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));

	/* Map the pages */
	TAILQ_FOREACH(pg, &pglist, pageq.queue) {
		pa = VM_PAGE_TO_PHYS(pg);
		pmap_kenter_pa(va, pa | PMAP_NVC, VM_PROT_READ | VM_PROT_WRITE);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	memset(is->is_tsb, 0, size);

#ifdef DEBUG
	if (iommudebug & IDB_INFO)
	{
		/* Probe the iommu */

		printf("iommu regs at: cr=%lx tsb=%lx flush=%lx\n",
		    (u_long)bus_space_read_8(is->is_bustag, is->is_iommu,
			offsetof (struct iommureg, iommu_cr)),
		    (u_long)bus_space_read_8(is->is_bustag, is->is_iommu,
			offsetof (struct iommureg, iommu_tsb)),
		    (u_long)bus_space_read_8(is->is_bustag, is->is_iommu,
			offsetof (struct iommureg, iommu_flush)));
		printf("iommu cr=%llx tsb=%llx\n",
		    (unsigned long long)bus_space_read_8(is->is_bustag,
			is->is_iommu,
			offsetof (struct iommureg, iommu_cr)),
		    (unsigned long long)bus_space_read_8(is->is_bustag,
			is->is_iommu,
			offsetof (struct iommureg, iommu_tsb)));
		printf("TSB base %p phys %llx\n", (void *)is->is_tsb,
		    (unsigned long long)is->is_ptsb);
		delay(1000000); /* 1 s */
	}
#endif

	/*
	 * now actually start up the IOMMU
	 */
	iommu_reset(is);

	/*
	 * Now that all the hardware is working, we need to allocate a DVMA map.
	 */
	printf("DVMA map: %x to %x\n",
	    (unsigned int)is->is_dvmabase,
	    (unsigned int)is->is_dvmaend);
	printf("IOTSB: %llx to %llx\n",
	    (unsigned long long)is->is_ptsb,
	    (unsigned long long)(is->is_ptsb + size));
	is->is_dvmamap = extent_create(name,
	    is->is_dvmabase, is->is_dvmaend - PAGE_SIZE,
	    M_DEVBUF, 0, 0, EX_NOWAIT);
}

/*
 * Streaming buffers don't exist on the UltraSPARC IIi; we should have
 * detected that already and disabled them.  If not, we will notice that
 * they aren't there when the STRBUF_EN bit does not remain.
 */
void
iommu_reset(struct iommu_state *is)
{
	int i;
	struct strbuf_ctl *sb;

	/* Need to do 64-bit stores */
	bus_space_write_8(is->is_bustag, is->is_iommu, IOMMUREG(iommu_tsb),
	    is->is_ptsb);

	/* Enable IOMMU in diagnostic mode */
	bus_space_write_8(is->is_bustag, is->is_iommu, IOMMUREG(iommu_cr),
	    is->is_cr|IOMMUCR_DE);

	for (i = 0; i < 2; i++) {
		if ((sb = is->is_sb[i])) {

			/* Enable diagnostics mode? */
			bus_space_write_8(is->is_bustag, is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl), STRBUF_EN);

			/* No streaming buffers? Disable them */
			if (bus_space_read_8(is->is_bustag,
			    is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl)) == 0) {
				is->is_sb[i]->sb_flush = NULL;
			} else {

				/*
				 * locate the pa of the flush buffer.
				 */
				(void)pmap_extract(pmap_kernel(),
				    (vaddr_t)is->is_sb[i]->sb_flush,
				    &is->is_sb[i]->sb_flushpa);
			}
		}
	}
}

/*
 * Here are the iommu control routines.
 */
void
iommu_enter(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags)
{
	struct iommu_state *is = sb->sb_is;
	int strbuf = (flags & BUS_DMA_STREAMING);
	int64_t tte;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_enter: va %#lx not in DVMA space", va);
#endif

	/* Is the streamcache flush really needed? */
	if (sb->sb_flush) {
		iommu_strbuf_flush(sb, va);
		iommu_strbuf_flush_done(sb);
	} else
		/* If we can't flush the strbuf don't enable it. */
		strbuf = 0;

	tte = MAKEIOTTE(pa, !(flags & BUS_DMA_NOWRITE),
	    !(flags & BUS_DMA_NOCACHE), (strbuf));
#ifdef DEBUG
	tte |= (flags & 0xff000LL)<<(4*8);
#endif

	DPRINTF(IDB_IOMMU, ("Clearing TSB slot %d for va %p\n",
	    (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va));
	is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] = tte;
	bus_space_write_8(is->is_bustag, is->is_iommu,
	    IOMMUREG(iommu_flush), va);
	DPRINTF(IDB_IOMMU, ("iommu_enter: va %lx pa %lx TSB[%lx]@%p=%lx\n",
	    va, (long)pa, (u_long)IOTSBSLOT(va,is->is_tsbsize),
	    (void *)(u_long)&is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)],
	    (u_long)tte));
}

/*
 * Find the value of a DVMA address (debug routine).
 */
paddr_t
iommu_extract(struct iommu_state *is, vaddr_t dva)
{
	int64_t tte = 0;

	if (dva >= is->is_dvmabase && dva < is->is_dvmaend)
		tte = is->is_tsb[IOTSBSLOT(dva, is->is_tsbsize)];

	if ((tte & IOTTE_V) == 0)
		return ((paddr_t)-1L);
	return (tte & IOTTE_PAMASK);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 *
 * Only demap from IOMMU if flag is set.
 *
 * XXX: this function needs better internal error checking.
 */
void
iommu_remove(struct iommu_state *is, vaddr_t va, size_t len)
{

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
	if ((long)(va + len) < (long)va)
		panic("iommu_remove: va 0x%lx + len 0x%lx wraps",
		    (long) va, (long) len);
	if (len & ~0xfffffff)
		panic("iommu_remove: ridiculous len 0x%lx", (u_long)len);
#endif

	va = trunc_page(va);
	DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx TSB[%lx]@%p\n",
	    va, (u_long)IOTSBSLOT(va, is->is_tsbsize),
	    &is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)]));
	while (len > 0) {
		DPRINTF(IDB_IOMMU, ("iommu_remove: clearing TSB slot %d "
		    "for va %p size %lx\n",
		    (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va,
		    (u_long)len));
		if (len <= PAGE_SIZE)
			len = 0;
		else
			len -= PAGE_SIZE;

		/* XXX Zero-ing the entry would not require RMW */
		is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] &= ~IOTTE_V;
		bus_space_write_8(is->is_bustag, is->is_iommu,
		    IOMMUREG(iommu_flush), va);
		va += PAGE_SIZE;
	}
}

static int
iommu_strbuf_flush_done(struct strbuf_ctl *sb)
{
	struct iommu_state *is = sb->sb_is;
	struct timeval cur, flushtimeout;

#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}

	if (!sb->sb_flush)
		return (0);

	/*
	 * Streaming buffer flushes:
	 *
	 *   1 Tell strbuf to flush by storing va to strbuf_pgflush.  If
	 *     we're not on a cache line boundary (64-bits):
	 *   2 Store 0 in flag
	 *   3 Store pointer to flag in flushsync
	 *   4 wait till flushsync becomes 0x1
	 *
	 * If it takes more than .5 sec, something
	 * went wrong.
	 */

	*sb->sb_flush = 0;
	bus_space_write_8(is->is_bustag, sb->sb_sb,
	    STRBUFREG(strbuf_flushsync), sb->sb_flushpa);

	microtime(&flushtimeout);
	cur = flushtimeout;
	BUMPTIME(&flushtimeout, 500000); /* 1/2 sec */

	DPRINTF(IDB_IOMMU, ("iommu_strbuf_flush_done: flush = %lx "
	    "at va = %lx pa = %lx now=%"PRIx64":%"PRIx32" until = %"PRIx64":%"PRIx32"\n",
	    (long)*sb->sb_flush, (long)sb->sb_flush, (long)sb->sb_flushpa,
	    cur.tv_sec, cur.tv_usec,
	    flushtimeout.tv_sec, flushtimeout.tv_usec));

	/* Bypass non-coherent D$ */
	while ((!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) &&
	    timercmp(&cur, &flushtimeout, <=))
		microtime(&cur);

#ifdef DIAGNOSTIC
	if (!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) {
		printf("iommu_strbuf_flush_done: flush timeout %p, at %p\n",
		    (void *)(u_long)*sb->sb_flush,
		    (void *)(u_long)sb->sb_flushpa); /* panic? */
#ifdef DDB
		Debugger();
#endif
	}
#endif
	DPRINTF(IDB_IOMMU, ("iommu_strbuf_flush_done: flushed\n"));
	return (*sb->sb_flush);
}

/*
 * IOMMU DVMA operations, common to SBUS and PCI.
 */
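/*
 * iommu_dvmamap_load(): load a linear buffer into the map.  In outline:
 * carve a suitably aligned chunk of DVMA space out of the is_dvmamap
 * extent, split it into bus_dma segments wherever it would cross the
 * map's boundary constraint, then walk the buffer page by page,
 * translating each virtual page through the process (or kernel) pmap
 * and entering the resulting physical page into the IOMMU TSB.
 */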
int
iommu_dvmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
	bus_size_t buflen, struct proc *p, int flags)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	int s;
	int err;
	bus_size_t sgsize;
	paddr_t curaddr;
	u_long dvmaddr, sgstart, sgend;
	bus_size_t align, boundary, len;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	struct pmap *pmap;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	if (buflen > map->_dm_size) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load(): error %d > %d -- "
		     "map size exceeded!\n", (int)buflen, (int)map->_dm_size));
		return (EINVAL);
	}

	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
		boundary = map->_dm_boundary;
	align = max(map->dm_segs[0]._ds_align, PAGE_SIZE);

	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	s = splhigh();
	err = extent_alloc(is->is_dvmamap, sgsize, align,
	    (sgsize > boundary) ? 0 : boundary,
	    EX_NOWAIT|EX_BOUNDZERO, &dvmaddr);
	splx(s);

#ifdef DEBUG
	if (err || (dvmaddr == (u_long)-1)) {
		printf("iommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (err != 0)
		return (err);

	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	/*
	 * Now split the DVMA range into segments, not crossing
	 * the boundary.
	 */
	seg = 0;
	sgstart = dvmaddr + (vaddr & PGOFSET);
	sgend = sgstart + buflen - 1;
	map->dm_segs[seg].ds_addr = sgstart;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: boundary %lx boundary - 1 %lx "
	    "~(boundary - 1) %lx\n", (long)boundary, (long)(boundary - 1),
	    (long)~(boundary - 1)));
	while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		len = boundary - (sgstart & (boundary - 1));
		map->dm_segs[seg].ds_len = len;
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
		    "seg %d start %lx size %lx\n", seg,
		    (long)map->dm_segs[seg].ds_addr,
		    (long)map->dm_segs[seg].ds_len));
		if (++seg >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
			    "too many segments %d\n", seg));
			s = splhigh();
			/* How can this fail?  And if it does what can we do? */
			err = extent_free(is->is_dvmamap,
			    dvmaddr, sgsize, EX_NOWAIT);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			splx(s);
			return (EFBIG);
		}
		sgstart += len;
		map->dm_segs[seg].ds_addr = sgstart;
	}
	map->dm_segs[seg].ds_len = sgend - sgstart + 1;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
	    "seg %d start %lx size %lx\n", seg,
	    (long)map->dm_segs[seg].ds_addr, (long)map->dm_segs[seg].ds_len));
	map->dm_nsegs = seg + 1;
	map->dm_mapsize = buflen;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; buflen > 0; ) {

		/*
		 * Get the physical address for this page.
		 */
		if (pmap_extract(pmap, (vaddr_t)vaddr, &curaddr) == FALSE) {
#ifdef DIAGNOSTIC
			printf("iommu_dvmamap_load: pmap_extract failed %lx\n", vaddr);
#endif
			bus_dmamap_unload(t, map);
			return (-1);
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load: map %p loading va %p "
		     "dva %lx at pa %lx\n",
		     map, (void *)vaddr, (long)dvmaddr,
		     (long)trunc_page(curaddr)));
		iommu_enter(sb, trunc_page(dvmaddr), trunc_page(curaddr),
		    flags|0x4000);

		dvmaddr += PAGE_SIZE;
		vaddr += sgsize;
		buflen -= sgsize;
	}
#ifdef DIAGNOSTIC
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
		    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
			    seg, (long)map->dm_segs[seg].ds_addr,
			    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
			Debugger();
#endif
		}
	}
#endif
	return (0);
}


void
iommu_dvmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	int error, s;
	bus_size_t sgsize = map->_dm_dvmasize;

	/* Flush the iommu */
#ifdef DEBUG
	if (!map->_dm_dvmastart) {
		printf("iommu_dvmamap_unload: dvmastart is zero\n");
#ifdef DDB
		Debugger();
#endif
	}
#endif
	iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);

	/* Flush the caches */
	bus_dmamap_unload(t->_parent, map);

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	s = splhigh();
	error = extent_free(is->is_dvmamap, map->_dm_dvmastart,
	    map->_dm_dvmasize, EX_NOWAIT);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
	splx(s);
	if (error != 0)
		printf("warning: %qd of DVMA space lost\n", (long long)sgsize);

	/* Clear the map */
}


int
iommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
	bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	struct vm_page *pg;
	int i, j, s;
	int left;
	int err;
	bus_size_t sgsize;
	paddr_t pa;
	bus_size_t boundary, align;
	u_long dvmaddr, sgstart, sgend;
	struct pglist *pglist;
	int pagesz = PAGE_SIZE;
	int npg = 0; /* DEBUG */

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load_raw: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = segs[0]._ds_boundary) == 0)
		boundary = map->_dm_boundary;

	align = max(segs[0]._ds_align, pagesz);

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	/* Count up the total number of pages we need */
	pa = segs[0].ds_addr;
	sgsize = 0;
	left = size;
	for (i = 0; left && i < nsegs; i++) {
		if (round_page(pa) != round_page(segs[i].ds_addr))
			sgsize = round_page(sgsize);
		sgsize += min(left, segs[i].ds_len);
		left -= segs[i].ds_len;
		pa = segs[i].ds_addr + segs[i].ds_len;
	}
	sgsize = round_page(sgsize) + PAGE_SIZE;	/* XXX reserve extra dvma page */

	s = splhigh();
	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc(is->is_dvmamap, sgsize, align,
	    (sgsize > boundary) ? 0 : boundary,
	    ((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT) |
	    EX_BOUNDZERO, &dvmaddr);
	splx(s);

	if (err != 0)
		return (err);

#ifdef DEBUG
	if (dvmaddr == (u_long)-1)
	{
		printf("iommu_dvmamap_load_raw(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	if ((pglist = segs[0]._ds_mlist) == NULL) {
		u_long prev_va = 0UL;
		paddr_t prev_pa = 0;
		int end = 0, offset;

		/*
		 * This segment list is made up of individual physical
		 * segments, probably built by _bus_dmamap_load_uio() or
		 * _bus_dmamap_load_mbuf().  Ignore the mlist and load
		 * each one individually.
		 */
		map->dm_mapsize = size;

		j = 0;
		for (i = 0; i < nsegs ; i++) {

			pa = segs[i].ds_addr;
			offset = (pa & PGOFSET);
			pa = trunc_page(pa);
			dvmaddr = trunc_page(dvmaddr);
			left = min(size, segs[i].ds_len);

			DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: converting "
			    "physseg %d start %lx size %lx\n", i,
			    (long)segs[i].ds_addr, (long)segs[i].ds_len));

			if ((pa == prev_pa) &&
			    ((offset != 0) || (end != offset))) {
				/* We can re-use this mapping */
				dvmaddr = prev_va;
			}

			sgstart = dvmaddr + offset;
			sgend = sgstart + left - 1;

			/* Are the segments virtually adjacent? */
			if ((j > 0) && (end == offset) &&
			    ((offset == 0) || (pa == prev_pa))) {
				/* Just append to the previous segment. */
				map->dm_segs[--j].ds_len += left;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "appending seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			} else {
				if (j >= map->_dm_segcnt) {
					iommu_dvmamap_unload(t, map);
					return (EFBIG);
				}
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = left;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			}
			end = (offset + left) & PGOFSET;

			/* Check for boundary issues */
			while ((sgstart & ~(boundary - 1)) !=
			    (sgend & ~(boundary - 1))) {
				/* Need a new segment. */
				map->dm_segs[j].ds_len =
				    boundary - (sgstart & (boundary - 1));
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
				if (++j >= map->_dm_segcnt) {
					iommu_dvmamap_unload(t, map);
					return (EFBIG);
				}
				sgstart = roundup(sgstart, boundary);
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = sgend - sgstart + 1;
			}

			if (sgsize == 0)
				panic("iommu_dmamap_load_raw: size botch");

			/* Now map a series of pages. */
			while (dvmaddr <= sgend) {
				DPRINTF(IDB_BUSDMA,
				    ("iommu_dvmamap_load_raw: map %p "
				     "loading va %lx at pa %lx\n",
				     map, (long)dvmaddr,
				     (long)(pa)));
				/* Enter it if we haven't before. */
				if (prev_va != dvmaddr)
					iommu_enter(sb, prev_va = dvmaddr,
					    prev_pa = pa,
					    flags | (++npg << 12));
				dvmaddr += pagesz;
				pa += pagesz;
			}

			size -= left;
			++j;
		}

		map->dm_nsegs = j;
#ifdef DIAGNOSTIC
		{ int seg;
		for (seg = 0; seg < map->dm_nsegs; seg++) {
			if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
			    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
				printf("seg %d dvmaddr %lx out of range %x - %x\n",
				    seg, (long)map->dm_segs[seg].ds_addr,
				    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
				Debugger();
#endif
			}
		}
		}
#endif
		return (0);
	}

	/*
	 * This was allocated with bus_dmamem_alloc.
	 * The pages are on a `pglist'.
	 */
	map->dm_mapsize = size;
	i = 0;
	sgstart = dvmaddr;
	sgend = sgstart + size - 1;
	map->dm_segs[i].ds_addr = sgstart;
	while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		map->dm_segs[i].ds_len = boundary - (sgstart & (boundary - 1));
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
		    "seg %d start %lx size %lx\n", i,
		    (long)map->dm_segs[i].ds_addr,
		    (long)map->dm_segs[i].ds_len));
		if (++i >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			s = splhigh();
			/* How can this fail?  And if it does what can we do? */
			err = extent_free(is->is_dvmamap,
			    dvmaddr, sgsize, EX_NOWAIT);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			splx(s);
			return (EFBIG);
		}
		sgstart = roundup(sgstart, boundary);
		map->dm_segs[i].ds_addr = sgstart;
	}
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
	    "seg %d start %lx size %lx\n", i,
	    (long)map->dm_segs[i].ds_addr, (long)map->dm_segs[i].ds_len));
	map->dm_segs[i].ds_len = sgend - sgstart + 1;

	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(pg);

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load_raw: map %p loading va %lx at pa %lx\n",
		     map, (long)dvmaddr, (long)(pa)));
		iommu_enter(sb, dvmaddr, pa, flags|0x8000);

		dvmaddr += pagesz;
		sgsize -= pagesz;
	}
	map->dm_mapsize = size;
	map->dm_nsegs = i+1;
#ifdef DIAGNOSTIC
	{ int seg;
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
		    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
			    seg, (long)map->dm_segs[seg].ds_addr,
			    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
			Debugger();
#endif
		}
	}
	}
#endif
	return (0);
}


/*
 * Flush an individual dma segment, returns non-zero if the streaming buffers
 * need flushing afterwards.
 */
static int
iommu_dvmamap_sync_range(struct strbuf_ctl *sb, vaddr_t va, bus_size_t len)
{
	vaddr_t vaend;
	struct iommu_state *is = sb->sb_is;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("invalid va: %llx", (long long)va);
#endif

	if ((is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] & IOTTE_STREAM) == 0) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_sync_range: attempting to flush "
		     "non-streaming entry\n"));
		return (0);
	}

	vaend = round_page(va + len);
	va = trunc_page(va);

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || vaend > is->is_dvmaend)
		panic("invalid va range: %llx to %llx (%x to %x)",
		    (long long)va, (long long)vaend,
		    is->is_dvmabase,
		    is->is_dvmaend);
#endif

	for ( ; va <= vaend; va += PAGE_SIZE) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_sync_range: flushing va %p\n",
		     (void *)(u_long)va));
		iommu_strbuf_flush(sb, va);
	}

	return (1);
}

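/*
 * Internal sync helper: locate the segment of map->dm_segs that contains
 * "offset", then run iommu_dvmamap_sync_range() over each segment the
 * requested range touches.  Only the POSTREAD and PREWRITE cases require
 * a streaming-buffer flush; PREREAD and POSTWRITE need no work here.
 */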
static void
_iommu_dvmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
	bus_size_t len, int ops)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	bus_size_t count;
	int i, needsflush = 0;

	if (!sb->sb_flush)
		return;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (offset < map->dm_segs[i].ds_len)
			break;
		offset -= map->dm_segs[i].ds_len;
	}

	if (i == map->dm_nsegs)
		panic("iommu_dvmamap_sync: segment too short %llu",
		    (unsigned long long)offset);

	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTWRITE)) {
		/* Nothing to do */;
	}

	if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE)) {

		for (; len > 0 && i < map->dm_nsegs; i++) {
			count = MIN(map->dm_segs[i].ds_len - offset, len);
			if (count > 0 &&
			    iommu_dvmamap_sync_range(sb,
				map->dm_segs[i].ds_addr + offset, count))
				needsflush = 1;
			offset = 0;
			len -= count;
		}
#ifdef DIAGNOSTIC
		if (i == map->dm_nsegs && len > 0)
			panic("iommu_dvmamap_sync: leftover %llu",
			    (unsigned long long)len);
#endif

		if (needsflush)
			iommu_strbuf_flush_done(sb);
	}
}

void
iommu_dvmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
	bus_size_t len, int ops)
{

	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) {
		/* Flush the CPU then the IOMMU */
		bus_dmamap_sync(t->_parent, map, offset, len, ops);
		_iommu_dvmamap_sync(t, map, offset, len, ops);
	}
	if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) {
		/* Flush the IOMMU then the CPU */
		_iommu_dvmamap_sync(t, map, offset, len, ops);
		bus_dmamap_sync(t->_parent, map, offset, len, ops);
	}
}
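
/*
 * Illustrative driver-side usage of the sync entry point above (a sketch
 * only; "sc", its members and the transfer length are hypothetical):
 *
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_PREREAD);
 *	... start the device DMA into the buffer ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_POSTREAD);
 *
 * The PRE op is routed CPU-first then IOMMU, and the POST op IOMMU-first
 * then CPU, matching the ordering implemented above.
 */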

int
iommu_dvmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_alloc: sz %llx align %llx bound %llx "
	    "segp %p flags %d\n", (unsigned long long)size,
	    (unsigned long long)alignment, (unsigned long long)boundary,
	    segs, flags));
	return (bus_dmamem_alloc(t->_parent, size, alignment, boundary,
	    segs, nsegs, rsegs, flags|BUS_DMA_DVMA));
}

void
iommu_dvmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_free: segp %p nsegs %d\n",
	    segs, nsegs));
	bus_dmamem_free(t->_parent, segs, nsegs);
}

/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
iommu_dvmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	struct vm_page *pg;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *pglist;
	int cbit;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: segp %p nsegs %d size %lx\n",
	    segs, nsegs, size));

	/*
	 * Allocate some space in the kernel map, and then map these pages
	 * into this space.
	 */
	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	/*
	 * digest flags:
	 */
	cbit = 0;
	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
		cbit |= PMAP_NVC;
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
		cbit |= PMAP_NC;

	/*
	 * Now take this and map it into the CPU.
	 */
	pglist = segs[0]._ds_mlist;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
#ifdef DIAGNOSTIC
		if (size == 0)
			panic("iommu_dvmamem_map: size botch");
#endif
		addr = VM_PAGE_TO_PHYS(pg);
		DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: "
		    "mapping va %lx at %llx\n", va, (unsigned long long)addr | cbit));
		pmap_kenter_pa(va, addr | cbit, VM_PROT_READ | VM_PROT_WRITE);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (0);
}

/*
 * Unmap DVMA mappings from kernel
 */
void
iommu_dvmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_unmap: kvm %p size %lx\n",
	    kva, size));

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("iommu_dvmamem_unmap");
#endif

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}