/* $NetBSD: sbus.c,v 1.8 1998/09/05 23:57:24 eeh Exp $ */

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sbus.c	8.1 (Berkeley) 6/11/93
 */

/*
 * Sbus stuff.
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <vm/vm.h>

#include <machine/bus.h>
#include <sparc64/sparc64/vaddrs.h>
#include <sparc64/dev/sbusreg.h>
#include <dev/sbus/sbusvar.h>

#include <machine/autoconf.h>
#include <machine/ctlreg.h>
#include <machine/cpu.h>
#include <machine/sparc64.h>

/* XXXXX -- Needed to allow dvma_mapin to work -- need to switch to bus_dma_* */
struct sbus_softc *sbus0;

#ifdef DEBUG
#define SDB_DVMA	0x1
#define SDB_INTR	0x2
int sbusdebug = 0;
#endif

void sbusreset __P((int));
int sbus_flush __P((struct sbus_softc *));

static bus_space_tag_t sbus_alloc_bustag __P((struct sbus_softc *));
static bus_dma_tag_t sbus_alloc_dmatag __P((struct sbus_softc *));
static int sbus_get_intr __P((struct sbus_softc *, int,
    struct sbus_intr **, int *));
static int sbus_bus_mmap __P((bus_space_tag_t, bus_type_t, bus_addr_t,
    int, bus_space_handle_t *));
static int _sbus_bus_map __P((
        bus_space_tag_t,
        bus_type_t,
        bus_addr_t,			/*offset*/
        bus_size_t,			/*size*/
        int,				/*flags*/
        vaddr_t,			/*preferred virtual address */
        bus_space_handle_t *));
static void *sbus_intr_establish __P((
        bus_space_tag_t,
        int,				/*level*/
        int,				/*flags*/
        int (*) __P((void *)),		/*handler*/
        void *));			/*handler arg*/


/* autoconfiguration driver */
int sbus_match __P((struct device *, struct cfdata *, void *));
void sbus_attach __P((struct device *, struct device *, void *));


struct cfattach sbus_ca = {
        sizeof(struct sbus_softc), sbus_match, sbus_attach
};

extern struct cfdriver sbus_cd;

/*
 * DVMA routines
 */
void sbus_enter __P((struct sbus_softc *, vaddr_t, int64_t, int));
void sbus_remove __P((struct sbus_softc *, vaddr_t, size_t));
int sbus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
    bus_size_t, struct proc *, int));
void sbus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void sbus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    bus_size_t, int));
int sbus_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary,
    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
void sbus_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
    int nsegs));
int sbus_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
    int nsegs, size_t size, caddr_t *kvap, int flags));
void sbus_dmamem_unmap __P((bus_dma_tag_t tag, caddr_t kva,
    size_t size));


/*
 * Child devices receive the Sbus interrupt level in their attach
 * arguments. We translate these to CPU IPLs using the following
 * tables. Note: obio bus interrupt levels are identical to the
 * processor IPL.
 *
 * The second set of tables is used when the Sbus interrupt level
 * cannot be had from the PROM as an `interrupt' property. We then
 * fall back on the `intr' property which contains the CPU IPL.
 */

/* Translate Sbus interrupt level to processor IPL */
static int intr_sbus2ipl_4c[] = {
        0, 1, 2, 3, 5, 7, 8, 9
};
static int intr_sbus2ipl_4m[] = {
        0, 2, 3, 5, 7, 9, 11, 13
};

/*
 * This value is or'ed into the attach args' interrupt level cookie
 * if the interrupt level comes from an `intr' property, i.e. it is
 * not an Sbus interrupt level.
 */
#define SBUS_INTR_COMPAT	0x80000000
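
/*
 * Note: sbus_intr_establish() checks for SBUS_INTR_COMPAT in the cookie
 * it is handed.  When the bit is set, the remaining bits are taken to be
 * a CPU IPL and used directly; otherwise the cookie is decoded with
 * INTLEV()/INTVEC() into a priority and an interrupt vector.
 */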


/*
 * Print the location of some sbus-attached device (called just
 * before attaching that device). If `sbus' is not NULL, the
 * device was found but not configured; print the sbus as well.
 * Return UNCONF (config_find ignores this if the device was configured).
 */
int
sbus_print(args, busname)
        void *args;
        const char *busname;
{
        struct sbus_attach_args *sa = args;
        int i;

        if (busname)
                printf("%s at %s", sa->sa_name, busname);
        printf(" slot %ld offset 0x%lx", (long)sa->sa_slot,
            (u_long)sa->sa_offset);
        for (i=0; i<sa->sa_nintr; i++) {
                struct sbus_intr *sbi = &sa->sa_intr[i];

                printf(" vector %lx ipl %ld",
                    (u_long)sbi->sbi_vec,
                    (long)INTLEV(sbi->sbi_pri));
        }
        return (UNCONF);
}

int
sbus_match(parent, cf, aux)
        struct device *parent;
        struct cfdata *cf;
        void *aux;
{
        struct mainbus_attach_args *ma = aux;

        return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
}

/*
 * Attach an Sbus.
 */
void
sbus_attach(parent, self, aux)
        struct device *parent;
        struct device *self;
        void *aux;
{
        struct sbus_softc *sc = sbus0 = (struct sbus_softc *)self;
        struct mainbus_attach_args *ma = aux;
        int node = ma->ma_node;

        int node0, error;
        bus_space_tag_t sbt;
        struct sbus_attach_args sa;
        char *busname = "sbus";
        struct bootpath *bp = ma->ma_bp;


        sc->sc_bustag = ma->ma_bustag;
        sc->sc_dmatag = ma->ma_dmatag;
        sc->sc_sysio = (struct sysioreg*)(u_long)ma->ma_address[0];	/* Use prom mapping for sysio. */
        sc->sc_ign = ma->ma_interrupts[0] & INTMAP_IGN;	/* Find interrupt group no */

        /* Setup interrupt translation tables */
        sc->sc_intr2ipl = CPU_ISSUN4C
            ? intr_sbus2ipl_4c
            : intr_sbus2ipl_4m;

        /*
         * Record clock frequency for synchronous SCSI.
         * IS THIS THE CORRECT DEFAULT??
         */
        sc->sc_clockfreq = getpropint(node, "clock-frequency", 25*1000*1000);
        printf(": clock = %s MHz\n", clockfreq(sc->sc_clockfreq));

        sbt = sbus_alloc_bustag(sc);
        sc->sc_dmatag = sbus_alloc_dmatag(sc);

        /*
         * Get the SBus burst transfer size if burst transfers are supported
         */
        sc->sc_burst = getpropint(node, "burst-sizes", 0);

        /* Propagate bootpath */
        if (bp != NULL && strcmp(bp->name, busname) == 0)
                bp++;
        else
                bp = NULL;

        /*
         * Collect address translations from the OBP.
         */
        error = getprop(node, "ranges", sizeof(struct sbus_range),
            &sc->sc_nrange, (void **)&sc->sc_range);
        switch (error) {
        case 0:
                break;
#if 0
        case ENOENT:
                /* Fall back to our own `range' construction */
                sc->sc_range = sbus_translations;
                sc->sc_nrange =
                    sizeof(sbus_translations)/sizeof(sbus_translations[0]);
                break;
#endif
        default:
                panic("%s: error getting ranges property", sc->sc_dev.dv_xname);
        }


        /*
         * Setup the iommu.
         *
         * The sun4u iommu is part of the SBUS controller so we will
         * deal with it here. We could try to fake a device node so
         * we can eventually share it with the PCI bus run by psycho,
         * but I don't want to get into that sort of cruft.
         */

        /*
         * All IOMMUs will share the same TSB which is allocated in pmap_bootstrap.
         *
         * This makes device management easier.
         */
        {
                extern int64_t *iotsb;
                extern paddr_t iotsbp;
                extern int iotsbsize;

                sc->sc_tsbsize = iotsbsize;
                sc->sc_tsb = iotsb;
                sc->sc_ptsb = iotsbp;
        }
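        /*
         * The two stores below program the IOMMU: the control register
         * write sets IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN (1K-entry TSB,
         * 8K pages, translation enabled) and the TSB base register is
         * pointed at the physical address of the shared TSB grabbed above.
         */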
#if 1
        /* Need to do 64-bit stores */
        bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_cr,
            0, (IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
        bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_tsb,
            0, sc->sc_ptsb);
#else
        stxa(&sc->sc_sysio->sys_iommu.iommu_cr,ASI_NUCLEUS,(IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
        stxa(&sc->sc_sysio->sys_iommu.iommu_tsb,ASI_NUCLEUS,sc->sc_ptsb);
#endif
#ifdef DEBUG
        if (sbusdebug & SDB_DVMA)
        {
                /* Probe the iommu */
                int64_t cr, tsb;

                printf("iommu regs at: cr=%lx tsb=%lx flush=%lx\n",
                    &sc->sc_sysio->sys_iommu.iommu_cr,
                    &sc->sc_sysio->sys_iommu.iommu_tsb,
                    &sc->sc_sysio->sys_iommu.iommu_flush);
                cr = sc->sc_sysio->sys_iommu.iommu_cr;
                tsb = sc->sc_sysio->sys_iommu.iommu_tsb;
                printf("iommu cr=%lx tsb=%lx\n", (long)cr, (long)tsb);
                printf("sysio base %p phys %p TSB base %p phys %p",
                    (long)sc->sc_sysio,
                    (long)pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_sysio),
                    (long)sc->sc_tsb, (long)sc->sc_ptsb);
                delay(1000000);	/* 1 s */
        }
#endif

        /*
         * Initialize streaming buffer.
         */
        sc->sc_flushpa = pmap_extract(pmap_kernel(), (vaddr_t)&sc->sc_flush);
#if 1
        bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_ctl,
            0, STRBUF_EN);	/* Enable diagnostics mode? */
#else
        stxa(&sc->sc_sysio->sys_strbuf.strbuf_ctl,ASI_NUCLEUS,STRBUF_EN);
#endif
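        /*
         * sc_flushpa is the physical address of the in-memory flush flag;
         * sbus_flush() hands it to the streaming buffer's flushsync
         * register and then polls the flag (bypassing the D$ with ldxa
         * from ASI_PHYS_CACHED) until the hardware sets it.
         */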

        /*
         * Loop through ROM children, fixing any relative addresses
         * and then configuring each device.
         */
        node0 = firstchild(node);
        for (node = node0; node; node = nextsibling(node)) {
                char *name = getpropstring(node, "name");

                if (sbus_setup_attach_args(sc, sbt, sc->sc_dmatag,
                    node, bp, &sa) != 0) {
                        printf("sbus_attach: %s: incomplete\n", name);
                        continue;
                }
                (void) config_found(&sc->sc_dev, (void *)&sa, sbus_print);
                sbus_destroy_attach_args(&sa);
        }
}

int
sbus_setup_attach_args(sc, bustag, dmatag, node, bp, sa)
        struct sbus_softc *sc;
        bus_space_tag_t bustag;
        bus_dma_tag_t dmatag;
        int node;
        struct bootpath *bp;
        struct sbus_attach_args *sa;
{
        /*struct sbus_reg sbusreg;*/
        /*int base;*/
        int error;
        int n;

        bzero(sa, sizeof(struct sbus_attach_args));
        error = getprop(node, "name", 1, &n, (void **)&sa->sa_name);
        if (error != 0)
                return (error);
        sa->sa_name[n] = '\0';

        sa->sa_bustag = bustag;
        sa->sa_dmatag = dmatag;
        sa->sa_node = node;
        sa->sa_bp = bp;

        error = getprop(node, "reg", sizeof(struct sbus_reg),
            &sa->sa_nreg, (void **)&sa->sa_reg);
        if (error != 0) {
                char buf[32];
                if (error != ENOENT ||
                    !node_has_property(node, "device_type") ||
                    strcmp(getpropstringA(node, "device_type", buf),
                        "hierarchical") != 0)
                        return (error);
        }
        for (n = 0; n < sa->sa_nreg; n++) {
                /* Convert to relative addressing, if necessary */
                u_int32_t base = sa->sa_reg[n].sbr_offset;
                if (SBUS_ABS(base)) {
                        sa->sa_reg[n].sbr_slot = SBUS_ABS_TO_SLOT(base);
                        sa->sa_reg[n].sbr_offset = SBUS_ABS_TO_OFFSET(base);
                }
        }

        if ((error = sbus_get_intr(sc, node, &sa->sa_intr, &sa->sa_nintr)) != 0)
                return (error);

        error = getprop(node, "address", sizeof(u_int32_t),
            &sa->sa_npromvaddrs, (void **)&sa->sa_promvaddrs);
        if (error != 0 && error != ENOENT)
                return (error);

        return (0);
}

void
sbus_destroy_attach_args(sa)
        struct sbus_attach_args *sa;
{
        if (sa->sa_name != NULL)
                free(sa->sa_name, M_DEVBUF);

        if (sa->sa_nreg != 0)
                free(sa->sa_reg, M_DEVBUF);

        if (sa->sa_intr)
                free(sa->sa_intr, M_DEVBUF);

        if (sa->sa_promvaddrs)
                free((void *)sa->sa_promvaddrs, M_DEVBUF);

        bzero(sa, sizeof(struct sbus_attach_args));	/*DEBUG*/
}


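/*
 * Translate a (slot, offset) pair from a child's `reg' property into a
 * parent bus address using the `ranges' entries collected at attach time:
 * the matching range supplies poffset (added to the offset) and pspace
 * (placed in the upper 32 bits), and the result is handed to the parent's
 * bus_space_map2().
 */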
int
_sbus_bus_map(t, btype, offset, size, flags, vaddr, hp)
        bus_space_tag_t t;
        bus_type_t btype;
        bus_addr_t offset;
        bus_size_t size;
        int flags;
        vaddr_t vaddr;
        bus_space_handle_t *hp;
{
        struct sbus_softc *sc = t->cookie;
        int64_t slot = btype;
        int i;

        for (i = 0; i < sc->sc_nrange; i++) {
                bus_addr_t paddr;

                if (sc->sc_range[i].cspace != slot)
                        continue;

                /* We've found the connection to the parent bus */
                paddr = sc->sc_range[i].poffset + offset;
                paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
#ifdef DEBUG
                if (sbusdebug & SDB_DVMA)
                        printf("\n_sbus_bus_map: mapping paddr slot %lx offset %lx poffset %lx paddr %lx\n",
                            (long)slot, (long)offset,
                            (long)sc->sc_range[i].poffset, (long)paddr);
#endif
                return (bus_space_map2(sc->sc_bustag, 0, paddr,
                    size, flags, vaddr, hp));
        }

        return (EINVAL);
}

int
sbus_bus_mmap(t, btype, paddr, flags, hp)
        bus_space_tag_t t;
        bus_type_t btype;
        bus_addr_t paddr;
        int flags;
        bus_space_handle_t *hp;
{
        bus_addr_t offset = paddr;
        int slot = (paddr>>32);
        struct sbus_softc *sc = t->cookie;
        int i;

        for (i = 0; i < sc->sc_nrange; i++) {
                bus_addr_t paddr;

                if (sc->sc_range[i].cspace != slot)
                        continue;

                paddr = sc->sc_range[i].poffset + offset;
                paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
                return (bus_space_mmap(sc->sc_bustag, 0, paddr,
                    flags, hp));
        }

        return (-1);
}


/*
 * Each attached device calls sbus_establish after it initializes
 * its sbusdev portion.
 */
void
sbus_establish(sd, dev)
        register struct sbusdev *sd;
        register struct device *dev;
{
        register struct sbus_softc *sc;
        register struct device *curdev;

        /*
         * We have to look for the sbus by name, since it is not necessarily
         * our immediate parent (i.e. sun4m /iommu/sbus/espdma/esp)
         * We don't just use the device structure of the above-attached
         * sbus, since we might (in the future) support multiple sbus's.
         */
        for (curdev = dev->dv_parent; ; curdev = curdev->dv_parent) {
                if (!curdev || !curdev->dv_xname)
                        panic("sbus_establish: can't find sbus parent for %s",
                            sd->sd_dev->dv_xname
                                ? sd->sd_dev->dv_xname
                                : "<unknown>");

                if (strncmp(curdev->dv_xname, "sbus", 4) == 0)
                        break;
        }
        sc = (struct sbus_softc *) curdev;

        sd->sd_dev = dev;
        sd->sd_bchain = sc->sc_sbdev;
        sc->sc_sbdev = sd;
}

/*
 * Reset the given sbus. (???)
 */
void
sbusreset(sbus)
        int sbus;
{
        register struct sbusdev *sd;
        struct sbus_softc *sc = sbus_cd.cd_devs[sbus];
        struct device *dev;

        printf("reset %s:", sc->sc_dev.dv_xname);
        for (sd = sc->sc_sbdev; sd != NULL; sd = sd->sd_bchain) {
                if (sd->sd_reset) {
                        dev = sd->sd_dev;
                        (*sd->sd_reset)(dev);
                        printf(" %s", dev->dv_xname);
                }
        }
#if 1
        /* Reload iommu regs */
        bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_cr,
            0, (IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
        bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_tsb,
            0, sc->sc_ptsb);
        bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_ctl,
            0, STRBUF_EN);	/* Enable diagnostics mode? */
#else
        /* Reload iommu regs */
        stxa(&sc->sc_sysio->sys_iommu.iommu_cr,ASI_NUCLEUS,(IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
        stxa(&sc->sc_sysio->sys_iommu.iommu_tsb,ASI_NUCLEUS,sc->sc_ptsb);
        stxa(&sc->sc_sysio->sys_strbuf.strbuf_ctl,ASI_NUCLEUS,STRBUF_EN);
#endif
}

/*
 * Here are the iommu control routines.
 */
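
/*
 * sbus_enter: map one page into DVMA space.  A TTE is built with
 * MAKEIOTTE() from the physical address and the BUS_DMA_* flags, the
 * streaming buffer entry for the virtual address is flushed, the TTE is
 * stored in the shared TSB slot for that address, and the corresponding
 * IOMMU TLB entry is then invalidated via the flush register.
 */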
void
sbus_enter(sc, va, pa, flags)
        struct sbus_softc *sc;
        vaddr_t va;
        int64_t pa;
        int flags;
{
        int64_t tte;

#ifdef DIAGNOSTIC
        if (va < sc->sc_dvmabase)
                panic("sbus_enter: va 0x%lx not in DVMA space", va);
#endif

        tte = MAKEIOTTE(pa, !(flags&BUS_DMA_NOWRITE), !(flags&BUS_DMA_NOCACHE),
            !(flags&BUS_DMA_COHERENT));

        /* Is the streamcache flush really needed? */
#if 1
        bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush,
            0, va);
#else
        stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
#endif
        sbus_flush(sc);
        sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)] = tte;
#if 1
        bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_flush,
            0, va);
#else
        stxa(&sc->sc_sysio->sys_iommu.iommu_flush,ASI_NUCLEUS,va);
#endif
#ifdef DEBUG
        if (sbusdebug & SDB_DVMA)
                printf("sbus_enter: va %lx pa %lx TSB[%lx]@%p=%lx\n",
                    va, (long)pa, IOTSBSLOT(va,sc->sc_tsbsize),
                    &sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
                    (long)tte);
#endif
}

/*
 * sbus_remove: remove mappings created by sbus_enter, flushing the
 * streaming buffer and the IOMMU TLB for each page as it goes.
 */
void
sbus_remove(sc, va, len)
        struct sbus_softc *sc;
        vaddr_t va;
        size_t len;
{

#ifdef DIAGNOSTIC
        if (va < sc->sc_dvmabase)
                panic("sbus_remove: va 0x%lx not in DVMA space", (long)va);
        if ((long)(va + len) < (long)va)
                panic("sbus_remove: va 0x%lx + len 0x%lx wraps",
                    (long) va, (long) len);
        if (len & ~0xfffffff)
                panic("sbus_remove: ridiculous len 0x%lx", (long)len);
#endif

        va = trunc_page(va);
        while (len > 0) {

                /*
                 * Streaming buffer flushes:
                 *
                 *   1 Tell strbuf to flush by storing va to strbuf_pgflush
                 * If we're not on a cache line boundary (64-bits):
                 *   2 Store 0 in flag
                 *   3 Store pointer to flag in flushsync
                 *   4 wait till flushsync becomes 0x1
                 *
                 * If it takes more than .5 sec, something went wrong.
                 */
#ifdef DEBUG
                if (sbusdebug & SDB_DVMA)
                        printf("sbus_remove: flushing va %p TSB[%lx]@%p=%lx, %lu bytes left\n",
                            (long)va, (long)IOTSBSLOT(va,sc->sc_tsbsize),
                            (long)&sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
                            (long)(sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)]),
                            (u_long)len);
#endif
#if 1
                bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush, 0, va);
#else
                stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
#endif
                if (len <= NBPG) {
                        sbus_flush(sc);
                        len = 0;
                } else
                        len -= NBPG;
#ifdef DEBUG
                if (sbusdebug & SDB_DVMA)
                        printf("sbus_remove: flushed va %p TSB[%lx]@%p=%lx, %lu bytes left\n",
                            (long)va, (long)IOTSBSLOT(va,sc->sc_tsbsize),
                            (long)&sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
                            (long)(sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)]),
                            (u_long)len);
#endif
                sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)] = 0;
#if 1
                bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_flush, 0, va);
#else
                stxa(&sc->sc_sysio->sys_iommu.iommu_flush, ASI_NUCLEUS, va);
#endif
                va += NBPG;
        }
}

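/*
 * sbus_flush: synchronize the streaming buffer.  The in-memory flush
 * flag is cleared, its physical address is written to the flushsync
 * register, and we then spin (reading the flag with ldxa from
 * ASI_PHYS_CACHED to bypass the data cache) until the hardware sets it
 * or the timeout is exhausted.
 */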
int
sbus_flush(sc)
        struct sbus_softc *sc;
{
        extern u_int64_t cpu_clockrate;
        u_int64_t flushtimeout;

        sc->sc_flush = 0;
        membar_sync();
#if 1
        bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_flushsync, 0, sc->sc_flushpa);
#else
        stxa(&sc->sc_sysio->sys_strbuf.strbuf_flushsync, ASI_NUCLEUS, sc->sc_flushpa);
#endif
        membar_sync();
        flushtimeout = tick() + cpu_clockrate/2;	/* .5 sec after *now* */
#ifdef DEBUG
        if (sbusdebug & SDB_DVMA)
                printf("sbus_flush: flush = %lx at va = %lx pa = %lx now=%lx until = %lx\n",
                    (long)sc->sc_flush, (long)&sc->sc_flush,
                    (long)sc->sc_flushpa, (long)tick(), flushtimeout);
#endif
        /* Bypass non-coherent D$ */
#if 0
        while( !ldxa(sc->sc_flushpa, ASI_PHYS_CACHED) && flushtimeout > tick()) membar_sync();
#else
        { int i; for(i=140000000/2; !ldxa(sc->sc_flushpa, ASI_PHYS_CACHED) && i; i--) membar_sync(); }
#endif
#ifdef DIAGNOSTIC
        if( !sc->sc_flush ) {
                printf("sbus_flush: flush timeout %p at %p\n", (long)sc->sc_flush,
                    (long)sc->sc_flushpa);	/* panic? */
#ifdef DDB
                Debugger();
#endif
        }
#endif
#ifdef DEBUG
        if (sbusdebug & SDB_DVMA)
                printf("sbus_flush: flushed\n");
#endif
        return (sc->sc_flush);
}
/*
 * Get interrupt attributes for an Sbus device.
 */
int
sbus_get_intr(sc, node, ipp, np)
        struct sbus_softc *sc;
        int node;
        struct sbus_intr **ipp;
        int *np;
{
        int *ipl;
        int i, n, error;
        char buf[32];

        /*
         * The `interrupts' property contains the Sbus interrupt level.
         */
        ipl = NULL;
        if (getprop(node, "interrupts", sizeof(int), np, (void **)&ipl) == 0) {
                /* Change format to a `struct sbus_intr' array */
                struct sbus_intr *ip;
                int pri = 0;
                ip = malloc(*np * sizeof(struct sbus_intr), M_DEVBUF, M_NOWAIT);
                if (ip == NULL)
                        return (ENOMEM);
                /* Now things get ugly. We need to take this value which is
                 * the interrupt vector number and encode the IPL into it
                 * somehow. Luckily, the interrupt vector has lots of free
                 * space and we can easily stuff the IPL in there for a while.
                 */
                getpropstringA(node, "device_type", buf);
                for (i=0; intrmap[i].in_class; i++) {
                        if (strcmp(intrmap[i].in_class, buf) == 0) {
                                pri = INTLEVENCODE(intrmap[i].in_lev);
                                break;
                        }
                }
                for (n = 0; n < *np; n++) {
                        /*
                         * We encode vector and priority into sbi_pri so we
                         * can pass them as a unit. This will go away if
                         * sbus_establish ever takes an sbus_intr instead
                         * of an integer level.
                         * Stuff the real vector in sbi_vec.
                         */
                        ip[n].sbi_pri = pri|ipl[n];
                        ip[n].sbi_vec = ipl[n];
                }
                free(ipl, M_DEVBUF);
                *ipp = ip;
                return (0);
        }

        /* We really don't support the following */
        /* printf("\nWARNING: sbus_get_intr() \"interrupts\" not found -- using \"intr\"\n"); */
        /* And some devices don't even have interrupts */
        /*
         * Fall back on `intr' property.
         */
        *ipp = NULL;
        error = getprop(node, "intr", sizeof(struct sbus_intr),
            np, (void **)ipp);
        switch (error) {
        case 0:
                for (n = *np; n-- > 0;) {
                        /*
                         * Move the interrupt vector into place.
                         * We could remap the level, but the SBUS priorities
                         * are probably good enough.
                         */
                        (*ipp)[n].sbi_vec = (*ipp)[n].sbi_pri;
                        (*ipp)[n].sbi_pri |= INTLEVENCODE((*ipp)[n].sbi_pri);
                }
                break;
        case ENOENT:
                error = 0;
                break;
        }

        return (error);
}

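/*
 * The cookie handed to sbus_intr_establish() below is either a raw CPU
 * IPL (soft interrupts and SBUS_INTR_COMPAT cookies) or the combined
 * priority/vector produced by sbus_get_intr().  In the latter case the
 * matching SYSIO interrupt map register is located (per SBus slot, or by
 * searching the OBIO map registers), recorded in the handler, and its
 * valid bit (INTMAP_V) is set to enable the interrupt.
 */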
/*
 * Install an interrupt handler for an Sbus device.
 */
void *
sbus_intr_establish(t, level, flags, handler, arg)
        bus_space_tag_t t;
        int level;
        int flags;
        int (*handler) __P((void *));
        void *arg;
{
        struct sbus_softc *sc = t->cookie;
        struct intrhand *ih;
        int ipl;
        long vec = level;

        ih = (struct intrhand *)
            malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
        if (ih == NULL)
                return (NULL);

        if ((flags & BUS_INTR_ESTABLISH_SOFTINTR) != 0)
                ipl = vec;
        else if ((vec & SBUS_INTR_COMPAT) != 0)
                ipl = vec & ~SBUS_INTR_COMPAT;
        else {
                /* Decode and remove IPL */
                ipl = INTLEV(vec);
                vec = INTVEC(vec);
#ifdef DEBUG
                if (sbusdebug & SDB_INTR) {
                        printf("\nsbus: intr[%ld]%lx: %lx\n", (long)ipl, (long)vec,
                            intrlev[vec]);
                        printf("Hunting for IRQ...\n");
                }
#endif
                if ((vec & INTMAP_OBIO) == 0) {
                        /* We're in an SBUS slot */
                        /* Register the map and clear intr registers */
#ifdef DEBUG
                        if (sbusdebug & SDB_INTR) {
                                int64_t *intrptr = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(vec)];
                                int64_t intrmap = *intrptr;

                                printf("Found SBUS %lx IRQ as %llx in slot %ld\n",
                                    (long)vec, (long)intrmap,
                                    (long)INTSLOT(vec));
                        }
#endif
                        ih->ih_map = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(vec)];
                        ih->ih_clr = &sc->sc_sysio->sbus0_clr_int[INTVEC(vec)];
                        /* Enable the interrupt */
                        vec |= INTMAP_V;
                        bus_space_write_8(sc->sc_bustag, ih->ih_map, 0, vec);
                } else {
                        int64_t *intrptr = &sc->sc_sysio->scsi_int_map;
                        int64_t intrmap = 0;
                        int i;

                        /* Insert IGN */
                        vec |= sc->sc_ign;
                        for (i=0;
                            &intrptr[i] <= (int64_t *)&sc->sc_sysio->reserved_int_map &&
                            INTVEC(intrmap=intrptr[i]) != INTVEC(vec);
                            i++);
                        if (INTVEC(intrmap) == INTVEC(vec)) {
#ifdef DEBUG
                                if (sbusdebug & SDB_INTR)
                                        printf("Found OBIO %lx IRQ as %lx in slot %d\n",
                                            vec, (long)intrmap, i);
#endif
                                /* Register the map and clear intr registers */
                                ih->ih_map = &intrptr[i];
                                intrptr = (int64_t *)&sc->sc_sysio->scsi_clr_int;
                                ih->ih_clr = &intrptr[i];
                                /* Enable the interrupt */
                                intrmap |= INTMAP_V;
                                bus_space_write_8(sc->sc_bustag, ih->ih_map, 0, (u_long)intrmap);
                        } else panic("IRQ not found!");
                }
        }
#ifdef DEBUG
        if (sbusdebug & SDB_INTR) { long i; for (i=0; i<1400000000; i++); }
#endif

        ih->ih_fun = handler;
        ih->ih_arg = arg;
        ih->ih_number = vec;
        ih->ih_pil = (1<<ipl);
        if ((flags & BUS_INTR_ESTABLISH_FASTTRAP) != 0)
                intr_fasttrap(ipl, (void (*)__P((void)))handler);
        else
                intr_establish(ipl, ih);
        return (ih);
}

static bus_space_tag_t
sbus_alloc_bustag(sc)
        struct sbus_softc *sc;
{
        bus_space_tag_t sbt;

        sbt = (bus_space_tag_t)
            malloc(sizeof(struct sparc_bus_space_tag), M_DEVBUF, M_NOWAIT);
        if (sbt == NULL)
                return (NULL);

        bzero(sbt, sizeof *sbt);
        sbt->cookie = sc;
        sbt->parent = sc->sc_bustag;
        sbt->type = ASI_PRIMARY;
        sbt->sparc_bus_map = _sbus_bus_map;
        sbt->sparc_bus_mmap = sbus_bus_mmap;
        sbt->sparc_intr_establish = sbus_intr_establish;
        return (sbt);
}

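/*
 * The DMA tag built below inherits most operations from the parent tag
 * via PCOPY() and overrides only the map load/unload, sync and the
 * dmamem alloc/free/map/unmap entry points, so that DVMA mappings for
 * this bus are routed through the IOMMU code above.
 */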

static bus_dma_tag_t
sbus_alloc_dmatag(sc)
        struct sbus_softc *sc;
{
        bus_dma_tag_t sdt, psdt = sc->sc_dmatag;

        sdt = (bus_dma_tag_t)
            malloc(sizeof(struct sparc_bus_dma_tag), M_DEVBUF, M_NOWAIT);
        if (sdt == NULL)
                /* Panic? */
                return (psdt);

        sdt->_cookie = sc;
        sdt->_parent = psdt;
#define PCOPY(x)	sdt->x = psdt->x
        PCOPY(_dmamap_create);
        PCOPY(_dmamap_destroy);
        sdt->_dmamap_load = sbus_dmamap_load;
        PCOPY(_dmamap_load_mbuf);
        PCOPY(_dmamap_load_uio);
        PCOPY(_dmamap_load_raw);
        sdt->_dmamap_unload = sbus_dmamap_unload;
        sdt->_dmamap_sync = sbus_dmamap_sync;
        sdt->_dmamem_alloc = sbus_dmamem_alloc;
        sdt->_dmamem_free = sbus_dmamem_free;
        sdt->_dmamem_map = sbus_dmamem_map;
        sdt->_dmamem_unmap = sbus_dmamem_unmap;
        PCOPY(_dmamem_mmap);
#undef PCOPY
        sc->sc_dmatag = sdt;
        return (sdt);
}

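/*
 * sbus_dmamap_load relies on the parent tag's load routine to have set
 * up dm_segs[0] with a DVMA address for the buffer; each page of the
 * buffer is then looked up with pmap_extract() and entered into the
 * IOMMU with sbus_enter() so the device sees a contiguous DVMA mapping.
 */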
int
sbus_dmamap_load(t, map, buf, buflen, p, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        void *buf;
        bus_size_t buflen;
        struct proc *p;
        int flags;
{
        int err;
        bus_size_t sgsize;
        paddr_t curaddr;
        vaddr_t dvmaddr, vaddr = (vaddr_t)buf;
        pmap_t pmap;
        struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;

        if (map->dm_nsegs) {
                /* Already in use?? */
#ifdef DIAGNOSTIC
                printf("sbus_dmamap_load: map still in use\n");
#endif
                bus_dmamap_unload(t, map);
        }
        if ((err = bus_dmamap_load(t->_parent, map, buf, buflen, p, flags)))
                return (err);

        if (p != NULL)
                pmap = p->p_vmspace->vm_map.pmap;
        else
                pmap = pmap_kernel();

        dvmaddr = trunc_page(map->dm_segs[0].ds_addr);
        sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
        for (; buflen > 0; ) {
                /*
                 * Get the physical address for this page.
                 */
                if ((curaddr = (bus_addr_t)pmap_extract(pmap, (vaddr_t)vaddr)) == NULL) {
                        bus_dmamap_unload(t, map);
                        return (-1);
                }

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = NBPG - ((u_long)vaddr & PGOFSET);
                if (buflen < sgsize)
                        sgsize = buflen;

#ifdef DEBUG
                if (sbusdebug & SDB_DVMA)
                        printf("sbus_dmamap_load: map %p loading va %lx at pa %lx\n",
                            map, (long)dvmaddr, (long)(curaddr & ~(NBPG-1)));
#endif
                sbus_enter(sc, trunc_page(dvmaddr), trunc_page(curaddr), flags);

                dvmaddr += PAGE_SIZE;
                vaddr += sgsize;
                buflen -= sgsize;
        }
        return (0);
}

void
sbus_dmamap_unload(t, map)
        bus_dma_tag_t t;
        bus_dmamap_t map;
{
        vaddr_t addr;
        int len;
        struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;

        if (map->dm_nsegs != 1)
                panic("_sbus_dmamap_unload: nsegs = %d", map->dm_nsegs);

        addr = trunc_page(map->dm_segs[0].ds_addr);
        len = map->dm_segs[0].ds_len;

#ifdef DEBUG
        if (sbusdebug & SDB_DVMA)
                printf("sbus_dmamap_unload: map %p removing va %lx size %lx\n",
                    map, (long)addr, (long)len);
#endif
        sbus_remove(sc, addr, len);
        bus_dmamap_unload(t->_parent, map);
}


void
sbus_dmamap_sync(t, map, offset, len, ops)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        bus_addr_t offset;
        bus_size_t len;
        int ops;
{
        struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
        vaddr_t va = map->dm_segs[0].ds_addr + offset;

        /*
         * We only support one DMA segment; supporting more makes this code
         * too unwieldy.
         */

        if (ops&BUS_DMASYNC_PREREAD) {
#ifdef DEBUG
                if (sbusdebug & SDB_DVMA)
                        printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_PREREAD\n",
                            (long)va, (u_long)len);
#endif

                /* Nothing to do */;
        }
        if (ops&BUS_DMASYNC_POSTREAD) {
                /*
                 * We should sync the IOMMU streaming caches here first.
                 */
#ifdef DEBUG
                if (sbusdebug & SDB_DVMA)
                        printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_POSTREAD\n",
                            (long)va, (u_long)len);
#endif
                while (len > 0) {

                        /*
                         * Streaming buffer flushes:
                         *
                         *   1 Tell strbuf to flush by storing va to strbuf_pgflush
                         * If we're not on a cache line boundary (64-bits):
                         *   2 Store 0 in flag
                         *   3 Store pointer to flag in flushsync
                         *   4 wait till flushsync becomes 0x1
                         *
                         * If it takes more than .5 sec, something went wrong.
                         */
#ifdef DEBUG
                        if (sbusdebug & SDB_DVMA)
                                printf("sbus_dmamap_sync: flushing va %p, %lu bytes left\n",
                                    (long)va, (u_long)len);
#endif
#if 1
                        bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush, 0, va);
#else
                        stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
#endif
                        if (len <= NBPG) {
                                sbus_flush(sc);
                                len = 0;
                        } else
                                len -= NBPG;
                        va += NBPG;
                }
        }
        if (ops&BUS_DMASYNC_PREWRITE) {
#ifdef DEBUG
                if (sbusdebug & SDB_DVMA)
                        printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_PREWRITE\n",
                            (long)va, (u_long)len);
#endif
                /* Nothing to do */;
        }
        if (ops&BUS_DMASYNC_POSTWRITE) {
#ifdef DEBUG
                if (sbusdebug & SDB_DVMA)
                        printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_POSTWRITE\n",
                            (long)va, (u_long)len);
#endif
                /* Nothing to do */;
        }
        bus_dmamap_sync(t->_parent, map, offset, len, ops);
}


/*
 * Take memory allocated by our parent bus and generate DVMA mappings for it.
 */
int
sbus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
        bus_dma_tag_t t;
        bus_size_t size, alignment, boundary;
        bus_dma_segment_t *segs;
        int nsegs;
        int *rsegs;
        int flags;
{
        paddr_t curaddr;
        bus_addr_t dvmaddr;
        vm_page_t m;
        struct pglist *mlist;
        int error;
        int n;
        struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;

        if ((error = bus_dmamem_alloc(t->_parent, size, alignment,
            boundary, segs, nsegs, rsegs, flags)))
                return (error);

        /*
         * Allocate a DVMA mapping for our new memory.
         */
        for (n=0; n<*rsegs; n++) {
                dvmaddr = dvmamap_alloc(segs[0].ds_len, flags);
                if (dvmaddr == (bus_addr_t)-1) {
                        /* Free what we got and exit */
                        bus_dmamem_free(t->_parent, segs, nsegs);
                        return (ENOMEM);
                }
                segs[n].ds_addr = dvmaddr;
                size = segs[n].ds_len;
                mlist = segs[n]._ds_mlist;

                /* Map memory into DVMA space */
                for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {
                        curaddr = VM_PAGE_TO_PHYS(m);
                        sbus_enter(sc, dvmaddr, curaddr, flags);
                        dvmaddr += PAGE_SIZE;
                }
        }
        return (0);
}

void
sbus_dmamem_free(t, segs, nsegs)
        bus_dma_tag_t t;
        bus_dma_segment_t *segs;
        int nsegs;
{
        vaddr_t addr;
        int len;
        int n;
        struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;


        for (n=0; n<nsegs; n++) {
                addr = segs[n].ds_addr;
                len = segs[n].ds_len;
                sbus_remove(sc, addr, len);
                dvmamap_free(addr, len);
        }
        bus_dmamem_free(t->_parent, segs, nsegs);
}

/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
sbus_dmamem_map(t, segs, nsegs, size, kvap, flags)
        bus_dma_tag_t t;
        bus_dma_segment_t *segs;
        int nsegs;
        size_t size;
        caddr_t *kvap;
        int flags;
{
        vm_page_t m;
        vaddr_t va;
        bus_addr_t addr;
        struct pglist *mlist;
        struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
        int cbit;

        /*
         * digest flags:
         */
        cbit = 0;
        if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
                cbit |= PMAP_NVC;
        if (flags & BUS_DMA_NOCACHE)	/* side effects */
                cbit |= PMAP_NC;
        /*
         * Now take this and map it into the CPU since it should already
         * be in the IOMMU.
         */
        *kvap = (caddr_t)(va = segs[0].ds_addr);
        mlist = segs[0]._ds_mlist;
        for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {

                if (size == 0)
                        panic("_bus_dmamem_map: size botch");

                addr = VM_PAGE_TO_PHYS(m);
                pmap_enter(pmap_kernel(), va, addr | cbit,
                    VM_PROT_READ | VM_PROT_WRITE, TRUE);
                va += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        return (0);
}

/*
 * Unmap DVMA mappings from kernel
 */
void
sbus_dmamem_unmap(t, kva, size)
        bus_dma_tag_t t;
        caddr_t kva;
        size_t size;
{
        struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;

#ifdef DIAGNOSTIC
        if ((u_long)kva & PGOFSET)
                panic("_bus_dmamem_unmap");
#endif

        size = round_page(size);
        pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
}