/*	$NetBSD: sbus.c,v 1.17 1999/06/05 05:30:43 mrg Exp $ */

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sbus.c	8.1 (Berkeley) 6/11/93
 */

/*
 * Sbus stuff.
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <vm/vm.h>

#include <machine/bus.h>
#include <sparc64/sparc64/vaddrs.h>
#include <sparc64/dev/iommureg.h>
#include <sparc64/dev/iommuvar.h>
#include <sparc64/dev/sbusreg.h>
#include <dev/sbus/sbusvar.h>

#include <machine/autoconf.h>
#include <machine/ctlreg.h>
#include <machine/cpu.h>
#include <machine/sparc64.h>

#ifdef DEBUG
#define SDB_DVMA	0x1
#define SDB_INTR	0x2
int sbusdebug = 0;
#endif

void sbusreset __P((int));
int sbus_flush __P((struct sbus_softc *));

static bus_space_tag_t sbus_alloc_bustag __P((struct sbus_softc *));
static bus_dma_tag_t sbus_alloc_dmatag __P((struct sbus_softc *));
static int sbus_get_intr __P((struct sbus_softc *, int,
			      struct sbus_intr **, int *));
static int sbus_bus_mmap __P((bus_space_tag_t, bus_type_t, bus_addr_t,
			      int, bus_space_handle_t *));
static int _sbus_bus_map __P((
		bus_space_tag_t,
		bus_type_t,
		bus_addr_t,		/*offset*/
		bus_size_t,		/*size*/
		int,			/*flags*/
		vaddr_t,		/*preferred virtual address */
		bus_space_handle_t *));
static void *sbus_intr_establish __P((
		bus_space_tag_t,
		int,			/*level*/
		int,			/*flags*/
		int (*) __P((void *)),	/*handler*/
		void *));		/*handler arg*/


/* autoconfiguration driver */
int sbus_match __P((struct device *, struct cfdata *, void *));
void sbus_attach __P((struct device *, struct device *, void *));


struct cfattach sbus_ca = {
	sizeof(struct sbus_softc), sbus_match, sbus_attach
};

extern struct cfdriver sbus_cd;

/*
 * DVMA routines
 */
void sbus_enter __P((struct sbus_softc *, vaddr_t, int64_t, int));
void sbus_remove __P((struct sbus_softc *, vaddr_t, size_t));
int sbus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
			  bus_size_t, struct proc *, int));
void sbus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void sbus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
			   bus_size_t, int));
int sbus_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
			   bus_size_t alignment, bus_size_t boundary,
			   bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
void sbus_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
			   int nsegs));
int sbus_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
			 int nsegs, size_t size, caddr_t *kvap, int flags));
void sbus_dmamem_unmap __P((bus_dma_tag_t tag, caddr_t kva,
			    size_t size));


/*
 * Child devices receive the Sbus interrupt level in their attach
 * arguments. We translate these to CPU IPLs using the following
 * tables. Note: obio bus interrupt levels are identical to the
 * processor IPL.
 *
 * The second set of tables is used when the Sbus interrupt level
 * cannot be had from the PROM as an `interrupt' property. We then
 * fall back on the `intr' property which contains the CPU IPL.
 */

/* Translate Sbus interrupt level to processor IPL */
static int intr_sbus2ipl_4c[] = {
	0, 1, 2, 3, 5, 7, 8, 9
};
static int intr_sbus2ipl_4m[] = {
	0, 2, 3, 5, 7, 9, 11, 13
};

/*
 * This value is or'ed into the attach args' interrupt level cookie
 * if the interrupt level comes from an `intr' property, i.e. it is
 * not an Sbus interrupt level.
 */
#define SBUS_INTR_COMPAT	0x80000000


/*
 * Print the location of some sbus-attached device (called just
 * before attaching that device).  If `sbus' is not NULL, the
 * device was found but not configured; print the sbus as well.
 * Return UNCONF (config_find ignores this if the device was configured).
 */
int
sbus_print(args, busname)
	void *args;
	const char *busname;
{
	struct sbus_attach_args *sa = args;
	int i;

	if (busname)
		printf("%s at %s", sa->sa_name, busname);
	printf(" slot %ld offset 0x%lx", (long)sa->sa_slot,
	       (u_long)sa->sa_offset);
	for (i = 0; i < sa->sa_nintr; i++) {
		struct sbus_intr *sbi = &sa->sa_intr[i];

		printf(" vector %lx ipl %ld",
		       (u_long)sbi->sbi_vec,
		       (long)INTLEV(sbi->sbi_pri));
	}
	return (UNCONF);
}

int
sbus_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct mainbus_attach_args *ma = aux;

	return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
}

/*
 * Attach an Sbus.
 */
void
sbus_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
	struct sbus_softc *sc = (struct sbus_softc *)self;
	struct mainbus_attach_args *ma = aux;
	int node = ma->ma_node;

	int node0, error;
	bus_space_tag_t sbt;
	struct sbus_attach_args sa;
	char *busname = "sbus";
	struct bootpath *bp = ma->ma_bp;


	sc->sc_bustag = ma->ma_bustag;
	sc->sc_dmatag = ma->ma_dmatag;
	sc->sc_sysio = (struct sysioreg *)(u_long)ma->ma_address[0];	/* Use prom mapping for sysio. */
	sc->sc_ign = ma->ma_interrupts[0] & INTMAP_IGN;	/* Find interrupt group no */

	/* Setup interrupt translation tables */
	sc->sc_intr2ipl = CPU_ISSUN4C
		? intr_sbus2ipl_4c
		: intr_sbus2ipl_4m;

	/*
	 * Record clock frequency for synchronous SCSI.
	 * IS THIS THE CORRECT DEFAULT??
	 */
	sc->sc_clockfreq = getpropint(node, "clock-frequency", 25*1000*1000);
	printf(": clock = %s MHz\n", clockfreq(sc->sc_clockfreq));

	sbt = sbus_alloc_bustag(sc);
	sc->sc_dmatag = sbus_alloc_dmatag(sc);

	/*
	 * Get the SBus burst transfer size if burst transfers are supported
	 */
	sc->sc_burst = getpropint(node, "burst-sizes", 0);

	/* Propagate bootpath */
	if (bp != NULL && strcmp(bp->name, busname) == 0)
		bp++;
	else
		bp = NULL;

	/*
	 * Collect address translations from the OBP.
	 */
	error = getprop(node, "ranges", sizeof(struct sbus_range),
			&sc->sc_nrange, (void **)&sc->sc_range);
	if (error)
		panic("%s: error getting ranges property", sc->sc_dev.dv_xname);

	/* initialise the IOMMU */

	/* punch in our copies */
	sc->sc_is.is_bustag = sc->sc_bustag;
	sc->sc_is.is_iommu = &sc->sc_sysio->sys_iommu;
	sc->sc_is.is_sb = &sc->sc_sysio->sys_strbuf;

#ifdef DEBUG
	if (sbusdebug & SDB_DVMA)
		printf("sysio base %lx phys %lx\n",
		       (long)sc->sc_sysio,
		       (long)pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_sysio));
#endif

	/* XXX should have instance number */
	iommu_init("SBus dvma", &sc->sc_is, 0);

	/*
	 * Loop through ROM children, fixing any relative addresses
	 * and then configuring each device.
	 */
	node0 = firstchild(node);
	for (node = node0; node; node = nextsibling(node)) {
		char *name = getpropstring(node, "name");

		if (sbus_setup_attach_args(sc, sbt, sc->sc_dmatag,
					   node, bp, &sa) != 0) {
			printf("sbus_attach: %s: incomplete\n", name);
			continue;
		}
		(void) config_found(&sc->sc_dev, (void *)&sa, sbus_print);
		sbus_destroy_attach_args(&sa);
	}
}

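/*
 * Gather the standard OBP properties (`name', `reg', `interrupts'/`intr'
 * and `address') for one child node into an sbus_attach_args structure.
 * The property buffers are allocated by getprop() and must be released
 * again with sbus_destroy_attach_args().
 */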
int
sbus_setup_attach_args(sc, bustag, dmatag, node, bp, sa)
	struct sbus_softc *sc;
	bus_space_tag_t bustag;
	bus_dma_tag_t dmatag;
	int node;
	struct bootpath *bp;
	struct sbus_attach_args *sa;
{
	/*struct sbus_reg sbusreg;*/
	/*int base;*/
	int error;
	int n;

	bzero(sa, sizeof(struct sbus_attach_args));
	error = getprop(node, "name", 1, &n, (void **)&sa->sa_name);
	if (error != 0)
		return (error);
	sa->sa_name[n] = '\0';

	sa->sa_bustag = bustag;
	sa->sa_dmatag = dmatag;
	sa->sa_node = node;
	sa->sa_bp = bp;

	error = getprop(node, "reg", sizeof(struct sbus_reg),
			&sa->sa_nreg, (void **)&sa->sa_reg);
	if (error != 0) {
		char buf[32];
		if (error != ENOENT ||
		    !node_has_property(node, "device_type") ||
		    strcmp(getpropstringA(node, "device_type", buf),
			   "hierarchical") != 0)
			return (error);
	}
	for (n = 0; n < sa->sa_nreg; n++) {
		/* Convert to relative addressing, if necessary */
		u_int32_t base = sa->sa_reg[n].sbr_offset;
		if (SBUS_ABS(base)) {
			sa->sa_reg[n].sbr_slot = SBUS_ABS_TO_SLOT(base);
			sa->sa_reg[n].sbr_offset = SBUS_ABS_TO_OFFSET(base);
		}
	}

	if ((error = sbus_get_intr(sc, node, &sa->sa_intr, &sa->sa_nintr)) != 0)
		return (error);

	error = getprop(node, "address", sizeof(u_int32_t),
			&sa->sa_npromvaddrs, (void **)&sa->sa_promvaddrs);
	if (error != 0 && error != ENOENT)
		return (error);

	return (0);
}

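/*
 * Release the property buffers allocated by sbus_setup_attach_args().
 */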
void
sbus_destroy_attach_args(sa)
	struct sbus_attach_args *sa;
{
	if (sa->sa_name != NULL)
		free(sa->sa_name, M_DEVBUF);

	if (sa->sa_nreg != 0)
		free(sa->sa_reg, M_DEVBUF);

	if (sa->sa_intr)
		free(sa->sa_intr, M_DEVBUF);

	if (sa->sa_promvaddrs)
		free((void *)sa->sa_promvaddrs, M_DEVBUF);

	bzero(sa, sizeof(struct sbus_attach_args)); /*DEBUG*/
}

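/*
 * Map a (slot, offset) pair into the parent's bus space: look up the
 * matching entry in the `ranges' property and hand the resulting
 * physical address to the parent bus tag.
 */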
int
_sbus_bus_map(t, btype, offset, size, flags, vaddr, hp)
	bus_space_tag_t t;
	bus_type_t btype;
	bus_addr_t offset;
	bus_size_t size;
	int flags;
	vaddr_t vaddr;
	bus_space_handle_t *hp;
{
	struct sbus_softc *sc = t->cookie;
	int64_t slot = btype;
	int i;

	for (i = 0; i < sc->sc_nrange; i++) {
		bus_addr_t paddr;

		if (sc->sc_range[i].cspace != slot)
			continue;

		/* We've found the connection to the parent bus */
		paddr = sc->sc_range[i].poffset + offset;
		paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
#ifdef DEBUG
		if (sbusdebug & SDB_DVMA)
			printf("\n_sbus_bus_map: mapping paddr slot %lx offset %lx poffset %lx paddr %lx\n",
			       (long)slot, (long)offset,
			       (long)sc->sc_range[i].poffset, (long)paddr);
#endif
		return (bus_space_map2(sc->sc_bustag, 0, paddr,
				       size, flags, vaddr, hp));
	}

	return (EINVAL);
}

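/*
 * mmap() support: the Sbus slot is taken from the upper 32 bits of
 * `paddr' and the address is translated through the `ranges' property,
 * as in _sbus_bus_map() above.
 */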
int
sbus_bus_mmap(t, btype, paddr, flags, hp)
	bus_space_tag_t t;
	bus_type_t btype;
	bus_addr_t paddr;
	int flags;
	bus_space_handle_t *hp;
{
	bus_addr_t offset = paddr;
	int slot = (paddr>>32);
	struct sbus_softc *sc = t->cookie;
	int i;

	for (i = 0; i < sc->sc_nrange; i++) {
		bus_addr_t paddr;

		if (sc->sc_range[i].cspace != slot)
			continue;

		paddr = sc->sc_range[i].poffset + offset;
		paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
		return (bus_space_mmap(sc->sc_bustag, 0, paddr,
				       flags, hp));
	}

	return (-1);
}


/*
 * Each attached device calls sbus_establish after it initializes
 * its sbusdev portion.
 */
void
sbus_establish(sd, dev)
	register struct sbusdev *sd;
	register struct device *dev;
{
	register struct sbus_softc *sc;
	register struct device *curdev;

	/*
	 * We have to look for the sbus by name, since it is not necessarily
	 * our immediate parent (i.e. sun4m /iommu/sbus/espdma/esp)
	 * We don't just use the device structure of the above-attached
	 * sbus, since we might (in the future) support multiple sbus's.
	 */
	for (curdev = dev->dv_parent; ; curdev = curdev->dv_parent) {
		if (!curdev || !curdev->dv_xname)
			panic("sbus_establish: can't find sbus parent for %s",
			      sd->sd_dev->dv_xname
			      ? sd->sd_dev->dv_xname
			      : "<unknown>");

		if (strncmp(curdev->dv_xname, "sbus", 4) == 0)
			break;
	}
	sc = (struct sbus_softc *) curdev;

	sd->sd_dev = dev;
	sd->sd_bchain = sc->sc_sbdev;
	sc->sc_sbdev = sd;
}

/*
 * Reset the given sbus. (???)
 */
void
sbusreset(sbus)
	int sbus;
{
	register struct sbusdev *sd;
	struct sbus_softc *sc = sbus_cd.cd_devs[sbus];
	struct device *dev;

	printf("reset %s:", sc->sc_dev.dv_xname);
	for (sd = sc->sc_sbdev; sd != NULL; sd = sd->sd_bchain) {
		if (sd->sd_reset) {
			dev = sd->sd_dev;
			(*sd->sd_reset)(dev);
			printf(" %s", dev->dv_xname);
		}
	}
	/* Reload iommu regs */
	iommu_reset(&sc->sc_is);
}

/*
 * Here are the iommu control routines.
 */
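/*
 * sbus_enter: install one page translation into the IOMMU TSB.
 * The streaming buffer entry for the page is flushed first, then the
 * new TTE is written and the IOMMU TLB entry for `va' is invalidated.
 */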
void
sbus_enter(sc, va, pa, flags)
	struct sbus_softc *sc;
	vaddr_t va;
	int64_t pa;
	int flags;
{
	int64_t tte;

#ifdef DIAGNOSTIC
	if (va < sc->sc_is.is_dvmabase)
		panic("sbus_enter: va 0x%lx not in DVMA space", va);
#endif

	tte = MAKEIOTTE(pa, !(flags&BUS_DMA_NOWRITE), !(flags&BUS_DMA_NOCACHE),
			!(flags&BUS_DMA_COHERENT));

	/* Is the streamcache flush really needed? */
	bus_space_write_8(sc->sc_bustag, &sc->sc_is.is_sb->strbuf_pgflush,
			  0, va);
	sbus_flush(sc);
#ifdef DEBUG
	if (sbusdebug & SDB_DVMA)
		printf("Clearing TSB slot %d for va %lx\n",
		       (int)IOTSBSLOT(va,sc->sc_is.is_tsbsize), (long)va);
#endif
	sc->sc_is.is_tsb[IOTSBSLOT(va,sc->sc_is.is_tsbsize)] = tte;
	bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_flush,
			  0, va);
#ifdef DEBUG
	if (sbusdebug & SDB_DVMA)
		printf("sbus_enter: va %lx pa %lx TSB[%lx]@%p=%lx\n",
		       va, (long)pa, (long)IOTSBSLOT(va,sc->sc_is.is_tsbsize),
		       &sc->sc_is.is_tsb[IOTSBSLOT(va,sc->sc_is.is_tsbsize)],
		       (long)tte);
#endif
}

/*
 * sbus_remove: remove the IOMMU mappings for [va, va+len) that were
 * created by sbus_enter(), flushing the streaming buffer as we go.
 */
void
sbus_remove(sc, va, len)
	struct sbus_softc *sc;
	vaddr_t va;
	size_t len;
{

#ifdef DIAGNOSTIC
	if (va < sc->sc_is.is_dvmabase)
		panic("sbus_remove: va 0x%lx not in DVMA space", (long)va);
	if ((long)(va + len) < (long)va)
		panic("sbus_remove: va 0x%lx + len 0x%lx wraps",
		      (long) va, (long) len);
	if (len & ~0xfffffff)
		panic("sbus_remove: ridiculous len 0x%lx", (long)len);
#endif

	va = trunc_page(va);
	while (len > 0) {

		/*
		 * Streaming buffer flushes:
		 *
		 *   1 Tell strbuf to flush by storing va to strbuf_pgflush
		 * If we're not on a cache line boundary (64-bits):
		 *   2 Store 0 in flag
		 *   3 Store pointer to flag in flushsync
		 *   4 wait till flushsync becomes 0x1
		 *
		 * If it takes more than .5 sec, something went wrong.
		 */
#ifdef DEBUG
		if (sbusdebug & SDB_DVMA)
			printf("sbus_remove: flushing va %lx TSB[%lx]@%p=%lx, %lu bytes left\n",
			       (long)va, (long)IOTSBSLOT(va,sc->sc_is.is_tsbsize),
			       &sc->sc_is.is_tsb[IOTSBSLOT(va,sc->sc_is.is_tsbsize)],
			       (long)(sc->sc_is.is_tsb[IOTSBSLOT(va,sc->sc_is.is_tsbsize)]),
			       (u_long)len);
#endif
		bus_space_write_8(sc->sc_bustag, &sc->sc_is.is_sb->strbuf_pgflush, 0, va);
		if (len <= NBPG) {
			sbus_flush(sc);
			len = 0;
		} else
			len -= NBPG;
#ifdef DEBUG
		if (sbusdebug & SDB_DVMA)
			printf("sbus_remove: flushed va %lx TSB[%lx]@%p=%lx, %lu bytes left\n",
			       (long)va, (long)IOTSBSLOT(va,sc->sc_is.is_tsbsize),
			       &sc->sc_is.is_tsb[IOTSBSLOT(va,sc->sc_is.is_tsbsize)],
			       (long)(sc->sc_is.is_tsb[IOTSBSLOT(va,sc->sc_is.is_tsbsize)]),
			       (u_long)len);
#endif
		sc->sc_is.is_tsb[IOTSBSLOT(va,sc->sc_is.is_tsbsize)] = 0;
		bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_flush, 0, va);
		va += NBPG;
	}
}

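/*
 * Synchronize the streaming buffer: clear the flush flag, ask the
 * hardware to write its completion status to the flag's physical
 * address via strbuf_flushsync, then poll the flag (bypassing the
 * D$ with ldxa) until it becomes non-zero or about half a second
 * has elapsed.
 */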
int
sbus_flush(sc)
	struct sbus_softc *sc;
{
	struct timeval cur, flushtimeout;
	struct iommu_state *is = &sc->sc_is;

#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}

	is->is_flush = 0;
	membar_sync();
	bus_space_write_8(sc->sc_bustag, &is->is_sb->strbuf_flushsync, 0, is->is_flushpa);
	membar_sync();

	microtime(&flushtimeout);
	cur = flushtimeout;
	BUMPTIME(&flushtimeout, 500000); /* 1/2 sec */

#ifdef DEBUG
	if (sbusdebug & SDB_DVMA)
		printf("sbus_flush: flush = %lx at va = %lx pa = %lx now=%lx:%lx until = %lx:%lx\n",
		       (long)is->is_flush, (long)&is->is_flush,
		       (long)is->is_flushpa, cur.tv_sec, cur.tv_usec,
		       flushtimeout.tv_sec, flushtimeout.tv_usec);
#endif
	/* Bypass non-coherent D$ */
	while (!ldxa(is->is_flushpa, ASI_PHYS_CACHED) &&
	       (cur.tv_sec < flushtimeout.tv_sec ||
		(cur.tv_sec == flushtimeout.tv_sec &&
		 cur.tv_usec <= flushtimeout.tv_usec)))
		microtime(&cur);

#ifdef DIAGNOSTIC
	if (!is->is_flush) {
		printf("sbus_flush: flush timeout %lx at %lx\n",
		       (long)is->is_flush,
		       (long)is->is_flushpa); /* panic? */
#ifdef DDB
		Debugger();
#endif
	}
#endif
#ifdef DEBUG
	if (sbusdebug & SDB_DVMA)
		printf("sbus_flush: flushed\n");
#endif
	return (is->is_flush);
}

/*
 * Get interrupt attributes for an Sbus device.
 */
int
sbus_get_intr(sc, node, ipp, np)
	struct sbus_softc *sc;
	int node;
	struct sbus_intr **ipp;
	int *np;
{
	int *ipl;
	int i, n, error;
	char buf[32];

	/*
	 * The `interrupts' property contains the Sbus interrupt level.
	 */
	ipl = NULL;
	if (getprop(node, "interrupts", sizeof(int), np, (void **)&ipl) == 0) {
		/* Change format to an `struct sbus_intr' array */
		struct sbus_intr *ip;
		/* Default to interrupt level 2 -- otherwise unused */
		int pri = INTLEVENCODE(2);
		ip = malloc(*np * sizeof(struct sbus_intr), M_DEVBUF, M_NOWAIT);
		if (ip == NULL)
			return (ENOMEM);
		/* Now things get ugly.  We need to take this value which is
		 * the interrupt vector number and encode the IPL into it
		 * somehow. Luckily, the interrupt vector has lots of free
		 * space and we can easily stuff the IPL in there for a while.
		 */
		getpropstringA(node, "device_type", buf);
		if (!buf[0]) {
			getpropstringA(node, "name", buf);
		}
		for (i = 0; intrmap[i].in_class; i++) {
			if (strcmp(intrmap[i].in_class, buf) == 0) {
				pri = INTLEVENCODE(intrmap[i].in_lev);
				break;
			}
		}
		for (n = 0; n < *np; n++) {
			/*
			 * We encode vector and priority into sbi_pri so we
			 * can pass them as a unit.  This will go away if
			 * sbus_establish ever takes an sbus_intr instead
			 * of an integer level.
			 * Stuff the real vector in sbi_vec.
			 */
			ip[n].sbi_pri = pri|ipl[n];
			ip[n].sbi_vec = ipl[n];
		}
		free(ipl, M_DEVBUF);
		*ipp = ip;
		return (0);
	}

	/* We really don't support the following */
	/* printf("\nWARNING: sbus_get_intr() \"interrupts\" not found -- using \"intr\"\n"); */
	/* And some devices don't even have interrupts */
	/*
	 * Fall back on `intr' property.
	 */
	*ipp = NULL;
	error = getprop(node, "intr", sizeof(struct sbus_intr),
			np, (void **)ipp);
	switch (error) {
	case 0:
		for (n = *np; n-- > 0;) {
			/*
			 * Move the interrupt vector into place.
			 * We could remap the level, but the SBUS priorities
			 * are probably good enough.
			 */
			(*ipp)[n].sbi_vec = (*ipp)[n].sbi_pri;
			(*ipp)[n].sbi_pri |= INTLEVENCODE((*ipp)[n].sbi_pri);
		}
		break;
	case ENOENT:
		error = 0;
		break;
	}

	return (error);
}


/*
 * Install an interrupt handler for an Sbus device.
 */
void *
sbus_intr_establish(t, level, flags, handler, arg)
	bus_space_tag_t t;
	int level;
	int flags;
	int (*handler) __P((void *));
	void *arg;
{
	struct sbus_softc *sc = t->cookie;
	struct intrhand *ih;
	int ipl;
	long vec = level;

	ih = (struct intrhand *)
		malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	if ((flags & BUS_INTR_ESTABLISH_SOFTINTR) != 0)
		ipl = vec;
	else if ((vec & SBUS_INTR_COMPAT) != 0)
		ipl = vec & ~SBUS_INTR_COMPAT;
	else {
		/* Decode and remove IPL */
		ipl = INTLEV(vec);
		vec = INTVEC(vec);
#ifdef DEBUG
		if (sbusdebug & SDB_INTR) {
			printf("\nsbus: intr[%ld]%lx: %lx\n", (long)ipl, (long)vec,
			       intrlev[vec]);
			printf("Hunting for IRQ...\n");
		}
#endif
		if ((vec & INTMAP_OBIO) == 0) {
			/* We're in an SBUS slot */
			/* Register the map and clear intr registers */
#ifdef DEBUG
			if (sbusdebug & SDB_INTR) {
				int64_t *intrptr = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(vec)];
				int64_t intrmap = *intrptr;

				printf("Found SBUS %lx IRQ as %lx in slot %ld\n",
				       (long)vec, (long)intrmap,
				       (long)INTSLOT(vec));
			}
#endif
			ih->ih_map = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(vec)];
			ih->ih_clr = &sc->sc_sysio->sbus0_clr_int[INTVEC(vec)];
			/* Enable the interrupt */
			vec |= INTMAP_V;
			/* Insert IGN */
			vec |= sc->sc_ign;
			bus_space_write_8(sc->sc_bustag, ih->ih_map, 0, vec);
		} else {
			int64_t *intrptr = &sc->sc_sysio->scsi_int_map;
			int64_t intrmap = 0;
			int i;

			/* Insert IGN */
			vec |= sc->sc_ign;
			for (i = 0;
			     &intrptr[i] <= (int64_t *)&sc->sc_sysio->reserved_int_map &&
			     INTVEC(intrmap = intrptr[i]) != INTVEC(vec);
			     i++)
				;
			if (INTVEC(intrmap) == INTVEC(vec)) {
#ifdef DEBUG
				if (sbusdebug & SDB_INTR)
					printf("Found OBIO %lx IRQ as %lx in slot %d\n",
					       vec, (long)intrmap, i);
#endif
				/* Register the map and clear intr registers */
				ih->ih_map = &intrptr[i];
				intrptr = (int64_t *)&sc->sc_sysio->scsi_clr_int;
				ih->ih_clr = &intrptr[i];
				/* Enable the interrupt */
				intrmap |= INTMAP_V;
				bus_space_write_8(sc->sc_bustag, ih->ih_map, 0, (u_long)intrmap);
			} else
				panic("IRQ not found!");
		}
	}
#ifdef DEBUG
	if (sbusdebug & SDB_INTR) { long i; for (i = 0; i < 1400000000; i++); }
#endif

	ih->ih_fun = handler;
	ih->ih_arg = arg;
	ih->ih_number = vec;
	ih->ih_pil = (1<<ipl);
	if ((flags & BUS_INTR_ESTABLISH_FASTTRAP) != 0)
		intr_fasttrap(ipl, (void (*)__P((void)))handler);
	else
		intr_establish(ipl, ih);
	return (ih);
}

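/*
 * Build the bus space tag handed to our children; it points back at
 * our softc and supplies the Sbus-specific map, mmap and
 * interrupt-establish methods.
 */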
static bus_space_tag_t
sbus_alloc_bustag(sc)
	struct sbus_softc *sc;
{
	bus_space_tag_t sbt;

	sbt = (bus_space_tag_t)
		malloc(sizeof(struct sparc_bus_space_tag), M_DEVBUF, M_NOWAIT);
	if (sbt == NULL)
		return (NULL);

	bzero(sbt, sizeof *sbt);
	sbt->cookie = sc;
	sbt->parent = sc->sc_bustag;
	sbt->type = SBUS_BUS_SPACE;
	sbt->sparc_bus_map = _sbus_bus_map;
	sbt->sparc_bus_mmap = sbus_bus_mmap;
	sbt->sparc_intr_establish = sbus_intr_establish;
	return (sbt);
}

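/*
 * Build the DMA tag for our children: inherit most methods from the
 * parent tag, but override the map load/unload/sync and dmamem
 * routines so that transfers are routed through the IOMMU (DVMA).
 */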
static bus_dma_tag_t
sbus_alloc_dmatag(sc)
	struct sbus_softc *sc;
{
	bus_dma_tag_t sdt, psdt = sc->sc_dmatag;

	sdt = (bus_dma_tag_t)
		malloc(sizeof(struct sparc_bus_dma_tag), M_DEVBUF, M_NOWAIT);
	if (sdt == NULL)
		/* Panic? */
		return (psdt);

	sdt->_cookie = sc;
	sdt->_parent = psdt;
#define PCOPY(x)	sdt->x = psdt->x
	PCOPY(_dmamap_create);
	PCOPY(_dmamap_destroy);
	sdt->_dmamap_load = sbus_dmamap_load;
	PCOPY(_dmamap_load_mbuf);
	PCOPY(_dmamap_load_uio);
	PCOPY(_dmamap_load_raw);
	sdt->_dmamap_unload = sbus_dmamap_unload;
	sdt->_dmamap_sync = sbus_dmamap_sync;
	sdt->_dmamem_alloc = sbus_dmamem_alloc;
	sdt->_dmamem_free = sbus_dmamem_free;
	sdt->_dmamem_map = sbus_dmamem_map;
	sdt->_dmamem_unmap = sbus_dmamem_unmap;
	PCOPY(_dmamem_mmap);
#undef PCOPY
	sc->sc_dmatag = sdt;
	return (sdt);
}

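/*
 * Load a buffer into a DVMA map: carve a range out of the DVMA extent
 * map, then enter an IOMMU translation for each page backing the
 * buffer.  The result is always a single DMA segment.
 */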
int
sbus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	int err, s;
	bus_size_t sgsize;
	paddr_t curaddr;
	u_long dvmaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	pmap_t pmap;
	struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("sbus_dmamap_load: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}
#if 1
	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
#ifdef DEBUG
	{
		printf("sbus_dmamap_load(): error %ld > %ld -- map size exceeded!\n",
		       (long)buflen, (long)map->_dm_size);
		Debugger();
		return (EINVAL);
	}
#else
		return (EINVAL);
#endif
	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * XXX Need to implement "don't dma across this boundary".
	 */

	s = splhigh();
	err = extent_alloc(sc->sc_is.is_dvmamap, sgsize, NBPG,
			   map->_dm_boundary, EX_NOWAIT, (u_long *)&dvmaddr);
	splx(s);

	if (err != 0)
		return (err);

#ifdef DEBUG
	if (dvmaddr == (bus_addr_t)-1)
	{
		printf("sbus_dmamap_load(): dvmamap_alloc(%ld, %x) failed!\n",
		       (long)sgsize, flags);
		Debugger();
	}
#endif
	if (dvmaddr == (bus_addr_t)-1)
		return (ENOMEM);

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	map->dm_segs[0].ds_addr = dvmaddr + (vaddr & PGOFSET);
	map->dm_segs[0].ds_len = sgsize;

#else
	if ((err = bus_dmamap_load(t->_parent, map, buf, buflen, p, flags)))
		return (err);
#endif
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	dvmaddr = trunc_page(map->dm_segs[0].ds_addr);
	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
	for (; buflen > 0; ) {
		/*
		 * Get the physical address for this page.
		 */
		if ((curaddr = (bus_addr_t)pmap_extract(pmap, (vaddr_t)vaddr)) == 0) {
			bus_dmamap_unload(t, map);
			return (-1);
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

#ifdef DEBUG
		if (sbusdebug & SDB_DVMA)
			printf("sbus_dmamap_load: map %p loading va %lx at pa %lx\n",
			       map, (long)dvmaddr, (long)(curaddr & ~(NBPG-1)));
#endif
		sbus_enter(sc, trunc_page(dvmaddr), trunc_page(curaddr), flags);

		dvmaddr += PAGE_SIZE;
		vaddr += sgsize;
		buflen -= sgsize;
	}
	return (0);
}

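/*
 * Tear down a DVMA map: remove the IOMMU translations, return the
 * DVMA range to the extent map and flush the cache.
 */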
void
sbus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	vaddr_t addr;
	int len, error, s;
	bus_addr_t dvmaddr;
	bus_size_t sgsize;
	struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;

	if (map->dm_nsegs != 1)
		panic("sbus_dmamap_unload: nsegs = %d", map->dm_nsegs);

	addr = trunc_page(map->dm_segs[0].ds_addr);
	len = map->dm_segs[0].ds_len;

#ifdef DEBUG
	if (sbusdebug & SDB_DVMA)
		printf("sbus_dmamap_unload: map %p removing va %lx size %lx\n",
		       map, (long)addr, (long)len);
#endif
	sbus_remove(sc, addr, len);
#if 1
	dvmaddr = (map->dm_segs[0].ds_addr & ~PGOFSET);
	sgsize = map->dm_segs[0].ds_len;

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/* Unmapping is bus dependent */
	s = splhigh();
	error = extent_free(sc->sc_is.is_dvmamap, dvmaddr, sgsize, EX_NOWAIT);
	splx(s);
	if (error != 0)
		printf("warning: %ld of DVMA space lost\n", (long)sgsize);

	cache_flush((caddr_t)dvmaddr, (u_int) sgsize);
#else
	bus_dmamap_unload(t->_parent, map);
#endif
}

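/*
 * DMA map synchronization.  Only BUS_DMASYNC_POSTREAD requires real
 * work here: the IOMMU streaming buffer is flushed page by page before
 * the CPU looks at data the device has written.  The parent tag's sync
 * method is called last.
 */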
void
sbus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
	vaddr_t va = map->dm_segs[0].ds_addr + offset;

	/*
	 * We only support one DMA segment; supporting more makes this code
	 * too unwieldy.
	 */

	if (ops&BUS_DMASYNC_PREREAD) {
#ifdef DEBUG
		if (sbusdebug & SDB_DVMA)
			printf("sbus_dmamap_sync: syncing va %lx len %lu BUS_DMASYNC_PREREAD\n",
			       (long)va, (u_long)len);
#endif

		/* Nothing to do */;
	}
	if (ops&BUS_DMASYNC_POSTREAD) {
		/*
		 * We should sync the IOMMU streaming caches here first.
		 */
#ifdef DEBUG
		if (sbusdebug & SDB_DVMA)
			printf("sbus_dmamap_sync: syncing va %lx len %lu BUS_DMASYNC_POSTREAD\n",
			       (long)va, (u_long)len);
#endif
		while (len > 0) {

			/*
			 * Streaming buffer flushes:
			 *
			 *   1 Tell strbuf to flush by storing va to strbuf_pgflush
			 * If we're not on a cache line boundary (64-bits):
			 *   2 Store 0 in flag
			 *   3 Store pointer to flag in flushsync
			 *   4 wait till flushsync becomes 0x1
			 *
			 * If it takes more than .5 sec, something went wrong.
			 */
#ifdef DEBUG
			if (sbusdebug & SDB_DVMA)
				printf("sbus_dmamap_sync: flushing va %lx, %lu bytes left\n",
				       (long)va, (u_long)len);
#endif
			bus_space_write_8(sc->sc_bustag, &sc->sc_is.is_sb->strbuf_pgflush, 0, va);
			if (len <= NBPG) {
				sbus_flush(sc);
				len = 0;
			} else
				len -= NBPG;
			va += NBPG;
		}
	}
	if (ops&BUS_DMASYNC_PREWRITE) {
#ifdef DEBUG
		if (sbusdebug & SDB_DVMA)
			printf("sbus_dmamap_sync: syncing va %lx len %lu BUS_DMASYNC_PREWRITE\n",
			       (long)va, (u_long)len);
#endif
		/* Nothing to do */;
	}
	if (ops&BUS_DMASYNC_POSTWRITE) {
#ifdef DEBUG
		if (sbusdebug & SDB_DVMA)
			printf("sbus_dmamap_sync: syncing va %lx len %lu BUS_DMASYNC_POSTWRITE\n",
			       (long)va, (u_long)len);
#endif
		/* Nothing to do */;
	}
	bus_dmamap_sync(t->_parent, map, offset, len, ops);
}

/*
 * Take memory allocated by our parent bus and generate DVMA mappings for it.
 */
int
sbus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	paddr_t curaddr;
	u_long dvmaddr;
	vm_page_t m;
	struct pglist *mlist;
	int error;
	int n, s;
	struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;

	if ((error = bus_dmamem_alloc(t->_parent, size, alignment,
				      boundary, segs, nsegs, rsegs, flags)))
		return (error);

	/*
	 * Allocate a DVMA mapping for our new memory.
	 */
	for (n = 0; n < *rsegs; n++) {
#if 1
		s = splhigh();
		if (extent_alloc(sc->sc_is.is_dvmamap, segs[0].ds_len, alignment,
				 boundary, EX_NOWAIT, (u_long *)&dvmaddr)) {
			splx(s);
			/* Free what we got and exit */
			bus_dmamem_free(t->_parent, segs, nsegs);
			return (ENOMEM);
		}
		splx(s);
#else
		dvmaddr = dvmamap_alloc(segs[0].ds_len, flags);
		if (dvmaddr == (bus_addr_t)-1) {
			/* Free what we got and exit */
			bus_dmamem_free(t->_parent, segs, nsegs);
			return (ENOMEM);
		}
#endif
		segs[n].ds_addr = dvmaddr;
		size = segs[n].ds_len;
		mlist = segs[n]._ds_mlist;

		/* Map memory into DVMA space */
		for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {
			curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DEBUG
			if (sbusdebug & SDB_DVMA)
				printf("sbus_dmamem_alloc: page %p loading va %lx at pa %lx\n",
				       m, (long)dvmaddr, (long)(curaddr & ~(NBPG-1)));
#endif
			sbus_enter(sc, dvmaddr, curaddr, flags);
			dvmaddr += PAGE_SIZE;
		}
	}
	return (0);
}

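/*
 * Undo sbus_dmamem_alloc(): remove the DVMA mappings, release the
 * DVMA ranges and free the underlying memory through the parent tag.
 */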
void
sbus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	vaddr_t addr;
	int len;
	int n, s, error;
	struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;


	for (n = 0; n < nsegs; n++) {
		addr = segs[n].ds_addr;
		len = segs[n].ds_len;
		sbus_remove(sc, addr, len);
#if 1
		s = splhigh();
		error = extent_free(sc->sc_is.is_dvmamap, addr, len, EX_NOWAIT);
		splx(s);
		if (error != 0)
			printf("warning: %ld of DVMA space lost\n", (long)len);
#else
		dvmamap_free(addr, len);
#endif
	}
	bus_dmamem_free(t->_parent, segs, nsegs);
}

/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
sbus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vm_page_t m;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *mlist;
	int cbit;

	/*
	 * digest flags:
	 */
	cbit = 0;
	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
		cbit |= PMAP_NVC;
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
		cbit |= PMAP_NC;
	/*
	 * Now take this and map it into the CPU since it should already
	 * be in the IOMMU.
	 */
	va = segs[0].ds_addr;
	*kvap = (caddr_t)va;
	mlist = segs[0]._ds_mlist;
	for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {

		if (size == 0)
			panic("sbus_dmamem_map: size botch");

		addr = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), va, addr | cbit,
			   VM_PROT_READ | VM_PROT_WRITE, TRUE,
			   VM_PROT_READ | VM_PROT_WRITE);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	return (0);
}

/*
 * Unmap DVMA mappings from kernel
 */
void
sbus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("sbus_dmamem_unmap");
#endif

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, size);
}