1 /*	$NetBSD: sbus.c,v 1.11 1999/03/26 23:41:36 mycroft Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1992, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * This software was developed by the Computer Systems Engineering group
44 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
45 * contributed to Berkeley.
46 *
47 * All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by the University of
50 * California, Lawrence Berkeley Laboratory.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 * must display the following acknowledgement:
62 * This product includes software developed by the University of
63 * California, Berkeley and its contributors.
64 * 4. Neither the name of the University nor the names of its contributors
65 * may be used to endorse or promote products derived from this software
66 * without specific prior written permission.
67 *
68 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
69 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
70 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
71 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
72 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
73 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
74 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
75 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
76 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
77 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
78 * SUCH DAMAGE.
79 *
80 * @(#)sbus.c 8.1 (Berkeley) 6/11/93
81 */
82
83 /*
84 * Sbus stuff.
85 */
86 #include "opt_ddb.h"
87
88 #include <sys/param.h>
89 #include <sys/malloc.h>
90 #include <sys/systm.h>
91 #include <sys/device.h>
92 #include <vm/vm.h>
93
94 #include <machine/bus.h>
95 #include <sparc64/sparc64/vaddrs.h>
96 #include <sparc64/dev/sbusreg.h>
97 #include <dev/sbus/sbusvar.h>
98
99 #include <machine/autoconf.h>
100 #include <machine/ctlreg.h>
101 #include <machine/cpu.h>
102 #include <machine/sparc64.h>
103
104 #ifdef DEBUG
105 #define SDB_DVMA 0x1
106 #define SDB_INTR 0x2
107 int sbusdebug = 0;
108 #endif
109
110 void sbusreset __P((int));
111 int sbus_flush __P((struct sbus_softc *));
112
113 static bus_space_tag_t sbus_alloc_bustag __P((struct sbus_softc *));
114 static bus_dma_tag_t sbus_alloc_dmatag __P((struct sbus_softc *));
115 static int sbus_get_intr __P((struct sbus_softc *, int,
116 struct sbus_intr **, int *));
117 static int sbus_bus_mmap __P((bus_space_tag_t, bus_type_t, bus_addr_t,
118 int, bus_space_handle_t *));
119 static int _sbus_bus_map __P((
120 bus_space_tag_t,
121 bus_type_t,
122 bus_addr_t, /*offset*/
123 bus_size_t, /*size*/
124 int, /*flags*/
125 vaddr_t, /*preferred virtual address */
126 bus_space_handle_t *));
127 static void *sbus_intr_establish __P((
128 bus_space_tag_t,
129 int, /*level*/
130 int, /*flags*/
131 int (*) __P((void *)), /*handler*/
132 void *)); /*handler arg*/
133
134
135 /* autoconfiguration driver */
136 int sbus_match __P((struct device *, struct cfdata *, void *));
137 void sbus_attach __P((struct device *, struct device *, void *));
138
139
140 struct cfattach sbus_ca = {
141 sizeof(struct sbus_softc), sbus_match, sbus_attach
142 };
143
144 extern struct cfdriver sbus_cd;
145
146 /*
147 * DVMA routines
148 */
149 void sbus_enter __P((struct sbus_softc *, vaddr_t, int64_t, int));
150 void sbus_remove __P((struct sbus_softc *, vaddr_t, size_t));
151 int sbus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
152 bus_size_t, struct proc *, int));
153 void sbus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
154 void sbus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
155 bus_size_t, int));
156 int sbus_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
157 bus_size_t alignment, bus_size_t boundary,
158 bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
159 void sbus_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
160 int nsegs));
161 int sbus_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
162 int nsegs, size_t size, caddr_t *kvap, int flags));
163 void sbus_dmamem_unmap __P((bus_dma_tag_t tag, caddr_t kva,
164 size_t size));
165
166
167 /*
168 * Child devices receive the Sbus interrupt level in their attach
169 * arguments. We translate these to CPU IPLs using the following
170 * tables. Note: obio bus interrupt levels are identical to the
171 * processor IPL.
172 *
173 * The second set of tables is used when the Sbus interrupt level
174 * cannot be had from the PROM as an `interrupt' property. We then
175 * fall back on the `intr' property which contains the CPU IPL.
176 */
177
178 /* Translate Sbus interrupt level to processor IPL */
179 static int intr_sbus2ipl_4c[] = {
180 0, 1, 2, 3, 5, 7, 8, 9
181 };
182 static int intr_sbus2ipl_4m[] = {
183 0, 2, 3, 5, 7, 9, 11, 13
184 };
185
186 /*
187 * This value is or'ed into the attach args' interrupt level cookie
188 * if the interrupt level comes from an `intr' property, i.e. it is
189 * not an Sbus interrupt level.
190 */
191 #define SBUS_INTR_COMPAT 0x80000000
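/*
 * Illustrative sketch only (not compiled): how sbus_intr_establish()
 * below interprets the interrupt cookie a child passes in.  The local
 * names `cookie' and `ipl' are hypothetical.
 */
#if 0
	if (cookie & SBUS_INTR_COMPAT)
		ipl = cookie & ~SBUS_INTR_COMPAT;	/* `intr' property: already a CPU IPL */
	else
		ipl = INTLEV(cookie);			/* Sbus level encoded by sbus_get_intr() */
#endif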
192
193
194 /*
195 * Print the location of some sbus-attached device (called just
196 * before attaching that device). If `sbus' is not NULL, the
197 * device was found but not configured; print the sbus as well.
198 * Return UNCONF (config_find ignores this if the device was configured).
199 */
200 int
201 sbus_print(args, busname)
202 void *args;
203 const char *busname;
204 {
205 struct sbus_attach_args *sa = args;
206 int i;
207
208 if (busname)
209 printf("%s at %s", sa->sa_name, busname);
210 printf(" slot %ld offset 0x%lx", (long)sa->sa_slot,
211 (u_long)sa->sa_offset);
212 for (i=0; i<sa->sa_nintr; i++) {
213 struct sbus_intr *sbi = &sa->sa_intr[i];
214
215 printf(" vector %lx ipl %ld",
216 (u_long)sbi->sbi_vec,
217 (long)INTLEV(sbi->sbi_pri));
218 }
219 return (UNCONF);
220 }
221
222 int
223 sbus_match(parent, cf, aux)
224 struct device *parent;
225 struct cfdata *cf;
226 void *aux;
227 {
228 struct mainbus_attach_args *ma = aux;
229
230 return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
231 }
232
233 /*
234 * Attach an Sbus.
235 */
236 void
237 sbus_attach(parent, self, aux)
238 struct device *parent;
239 struct device *self;
240 void *aux;
241 {
242 struct sbus_softc *sc = (struct sbus_softc *)self;
243 struct mainbus_attach_args *ma = aux;
244 int node = ma->ma_node;
245
246 int node0, error;
247 bus_space_tag_t sbt;
248 struct sbus_attach_args sa;
249 char *busname = "sbus";
250 struct bootpath *bp = ma->ma_bp;
251
252
253 sc->sc_bustag = ma->ma_bustag;
254 sc->sc_dmatag = ma->ma_dmatag;
255 sc->sc_sysio = (struct sysioreg*)(u_long)ma->ma_address[0]; /* Use prom mapping for sysio. */
256 sc->sc_ign = ma->ma_interrupts[0] & INTMAP_IGN; /* Find interrupt group no */
257
258 /* Setup interrupt translation tables */
259 sc->sc_intr2ipl = CPU_ISSUN4C
260 ? intr_sbus2ipl_4c
261 : intr_sbus2ipl_4m;
262
263 /*
264 * Record clock frequency for synchronous SCSI.
265 * IS THIS THE CORRECT DEFAULT??
266 */
267 sc->sc_clockfreq = getpropint(node, "clock-frequency", 25*1000*1000);
268 printf(": clock = %s MHz\n", clockfreq(sc->sc_clockfreq));
269
270 sbt = sbus_alloc_bustag(sc);
271 sc->sc_dmatag = sbus_alloc_dmatag(sc);
272
273 /*
274 * Get the SBus burst transfer size if burst transfers are supported
275 */
276 sc->sc_burst = getpropint(node, "burst-sizes", 0);
277
278 /* Propagate bootpath */
279 if (bp != NULL && strcmp(bp->name, busname) == 0)
280 bp++;
281 else
282 bp = NULL;
283
284 /*
285 * Collect address translations from the OBP.
286 */
287 error = getprop(node, "ranges", sizeof(struct sbus_range),
288 &sc->sc_nrange, (void **)&sc->sc_range);
289 switch (error) {
290 case 0:
291 break;
292 #if 0
293 case ENOENT:
294 /* Fall back to our own `range' construction */
295 sc->sc_range = sbus_translations;
296 sc->sc_nrange =
297 sizeof(sbus_translations)/sizeof(sbus_translations[0]);
298 break;
299 #endif
300 default:
301 panic("%s: error getting ranges property", sc->sc_dev.dv_xname);
302 }
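
	/*
	 * Illustrative sketch only (not compiled): how a `ranges' entry
	 * collected above is applied later in _sbus_bus_map().  An Sbus
	 * (slot, offset) pair is translated to a parent bus address by
	 * adding the entry's physical offset and or'ing in its physical
	 * space tag.
	 */
#if 0
	if (sc->sc_range[i].cspace == slot) {
		paddr = sc->sc_range[i].poffset + offset;
		paddr |= ((bus_addr_t)sc->sc_range[i].pspace << 32);
	}
#endif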
303
304
305 /*
306 * Setup the iommu.
307 *
308 * The sun4u iommu is part of the SBUS controller so we will
309 * deal with it here. We could try to fake a device node so
310 * we can eventually share it with the PCI bus run by psycho,
311 * but I don't want to get into that sort of cruft.
312 */
313
314 /*
315 * All IOMMUs will share the same TSB which is allocated in pmap_bootstrap.
316 *
317 * This makes device management easier.
318 */
319 {
320 extern int64_t *iotsb;
321 extern paddr_t iotsbp;
322 extern int iotsbsize;
323
324 sc->sc_tsbsize = iotsbsize;
325 sc->sc_tsb = iotsb;
326 sc->sc_ptsb = iotsbp;
327 }
328 #if 1
329 /* Need to do 64-bit stores */
330 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_cr,
331 0, (IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
332 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_tsb,
333 0, sc->sc_ptsb);
334 #else
335 stxa(&sc->sc_sysio->sys_iommu.iommu_cr,ASI_NUCLEUS,(IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
336 stxa(&sc->sc_sysio->sys_iommu.iommu_tsb,ASI_NUCLEUS,sc->sc_ptsb);
337 #endif
338 #ifdef DEBUG
339 if (sbusdebug & SDB_DVMA)
340 {
341 /* Probe the iommu */
342 int64_t cr, tsb;
343
344 printf("iommu regs at: cr=%p tsb=%p flush=%p\n", &sc->sc_sysio->sys_iommu.iommu_cr,
345 &sc->sc_sysio->sys_iommu.iommu_tsb, &sc->sc_sysio->sys_iommu.iommu_flush);
346 cr = sc->sc_sysio->sys_iommu.iommu_cr;
347 tsb = sc->sc_sysio->sys_iommu.iommu_tsb;
348 printf("iommu cr=%lx tsb=%lx\n", (long)cr, (long)tsb);
349 printf("sysio base %lx phys %lx TSB base %lx phys %lx\n",
350 (long)sc->sc_sysio, (long)pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_sysio),
351 (long)sc->sc_tsb, (long)sc->sc_ptsb);
352 delay(1000000); /* 1 s */
353 }
354 #endif
355
356 /*
357 * Initialize streaming buffer.
358 */
359 sc->sc_flushpa = pmap_extract(pmap_kernel(), (vaddr_t)&sc->sc_flush);
360 #if 1
361 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_ctl,
362 0, STRBUF_EN); /* Enable diagnostics mode? */
363 #else
364 stxa(&sc->sc_sysio->sys_strbuf.strbuf_ctl,ASI_NUCLEUS,STRBUF_EN);
365 #endif
366
367 /*
368 * Loop through ROM children, fixing any relative addresses
369 * and then configuring each device.
370 * `specials' is an array of device names that are treated
371 * specially:
372 */
373 node0 = firstchild(node);
374 for (node = node0; node; node = nextsibling(node)) {
375 char *name = getpropstring(node, "name");
376
377 if (sbus_setup_attach_args(sc, sbt, sc->sc_dmatag,
378 node, bp, &sa) != 0) {
379 printf("sbus_attach: %s: incomplete\n", name);
380 continue;
381 }
382 (void) config_found(&sc->sc_dev, (void *)&sa, sbus_print);
383 sbus_destroy_attach_args(&sa);
384 }
385 }
386
387 int
388 sbus_setup_attach_args(sc, bustag, dmatag, node, bp, sa)
389 struct sbus_softc *sc;
390 bus_space_tag_t bustag;
391 bus_dma_tag_t dmatag;
392 int node;
393 struct bootpath *bp;
394 struct sbus_attach_args *sa;
395 {
396 /*struct sbus_reg sbusreg;*/
397 /*int base;*/
398 int error;
399 int n;
400
401 bzero(sa, sizeof(struct sbus_attach_args));
402 error = getprop(node, "name", 1, &n, (void **)&sa->sa_name);
403 if (error != 0)
404 return (error);
405 sa->sa_name[n] = '\0';
406
407 sa->sa_bustag = bustag;
408 sa->sa_dmatag = dmatag;
409 sa->sa_node = node;
410 sa->sa_bp = bp;
411
412 error = getprop(node, "reg", sizeof(struct sbus_reg),
413 &sa->sa_nreg, (void **)&sa->sa_reg);
414 if (error != 0) {
415 char buf[32];
416 if (error != ENOENT ||
417 !node_has_property(node, "device_type") ||
418 strcmp(getpropstringA(node, "device_type", buf),
419 "hierarchical") != 0)
420 return (error);
421 }
422 for (n = 0; n < sa->sa_nreg; n++) {
423 /* Convert to relative addressing, if necessary */
424 u_int32_t base = sa->sa_reg[n].sbr_offset;
425 if (SBUS_ABS(base)) {
426 sa->sa_reg[n].sbr_slot = SBUS_ABS_TO_SLOT(base);
427 sa->sa_reg[n].sbr_offset = SBUS_ABS_TO_OFFSET(base);
428 }
429 }
430
431 if ((error = sbus_get_intr(sc, node, &sa->sa_intr, &sa->sa_nintr)) != 0)
432 return (error);
433
434 error = getprop(node, "address", sizeof(u_int32_t),
435 &sa->sa_npromvaddrs, (void **)&sa->sa_promvaddrs);
436 if (error != 0 && error != ENOENT)
437 return (error);
438
439 return (0);
440 }
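
/*
 * Illustrative sketch only (not compiled): what a hypothetical child
 * driver's attach routine might do with the sbus_attach_args built
 * above.  `size' and `bh' are made-up locals; the map request is
 * routed through the tag's sparc_bus_map hook (_sbus_bus_map below).
 */
#if 0
	struct sbus_attach_args *sa = aux;
	bus_space_handle_t bh;

	if (bus_space_map2(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
	    size, 0, 0, &bh) != 0)
		printf(": cannot map registers\n");
#endif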
441
442 void
443 sbus_destroy_attach_args(sa)
444 struct sbus_attach_args *sa;
445 {
446 if (sa->sa_name != NULL)
447 free(sa->sa_name, M_DEVBUF);
448
449 if (sa->sa_nreg != 0)
450 free(sa->sa_reg, M_DEVBUF);
451
452 if (sa->sa_intr)
453 free(sa->sa_intr, M_DEVBUF);
454
455 if (sa->sa_promvaddrs)
456 free((void *)sa->sa_promvaddrs, M_DEVBUF);
457
458 bzero(sa, sizeof(struct sbus_attach_args));/*DEBUG*/
459 }
460
461
462 int
463 _sbus_bus_map(t, btype, offset, size, flags, vaddr, hp)
464 bus_space_tag_t t;
465 bus_type_t btype;
466 bus_addr_t offset;
467 bus_size_t size;
468 int flags;
469 vaddr_t vaddr;
470 bus_space_handle_t *hp;
471 {
472 struct sbus_softc *sc = t->cookie;
473 int64_t slot = btype;
474 int i;
475
476 for (i = 0; i < sc->sc_nrange; i++) {
477 bus_addr_t paddr;
478
479 if (sc->sc_range[i].cspace != slot)
480 continue;
481
482 /* We've found the connection to the parent bus */
483 paddr = sc->sc_range[i].poffset + offset;
484 paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
485 #ifdef DEBUG
486 if (sbusdebug & SDB_DVMA)
487 printf("\n_sbus_bus_map: mapping paddr slot %lx offset %lx poffset %lx paddr %lx\n",
488 (long)slot, (long)offset, (long)sc->sc_range[i].poffset, (long)paddr);
489 #endif
490 return (bus_space_map2(sc->sc_bustag, 0, paddr,
491 size, flags, vaddr, hp));
492 }
493
494 return (EINVAL);
495 }
496
497 int
498 sbus_bus_mmap(t, btype, paddr, flags, hp)
499 bus_space_tag_t t;
500 bus_type_t btype;
501 bus_addr_t paddr;
502 int flags;
503 bus_space_handle_t *hp;
504 {
505 bus_addr_t offset = paddr;
506 int slot = (paddr>>32);
507 struct sbus_softc *sc = t->cookie;
508 int i;
509
510 for (i = 0; i < sc->sc_nrange; i++) {
511 bus_addr_t paddr;
512
513 if (sc->sc_range[i].cspace != slot)
514 continue;
515
516 paddr = sc->sc_range[i].poffset + offset;
517 paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
518 return (bus_space_mmap(sc->sc_bustag, 0, paddr,
519 flags, hp));
520 }
521
522 return (-1);
523 }
524
525
526 /*
527 * Each attached device calls sbus_establish after it initializes
528 * its sbusdev portion.
529 */
530 void
531 sbus_establish(sd, dev)
532 register struct sbusdev *sd;
533 register struct device *dev;
534 {
535 register struct sbus_softc *sc;
536 register struct device *curdev;
537
538 /*
539 * We have to look for the sbus by name, since it is not necessarily
540 * our immediate parent (i.e. sun4m /iommu/sbus/espdma/esp)
541 * We don't just use the device structure of the above-attached
542 * sbus, since we might (in the future) support multiple sbus's.
543 */
544 for (curdev = dev->dv_parent; ; curdev = curdev->dv_parent) {
545 if (!curdev || !curdev->dv_xname)
546 panic("sbus_establish: can't find sbus parent for %s",
547 sd->sd_dev->dv_xname
548 ? sd->sd_dev->dv_xname
549 : "<unknown>" );
550
551 if (strncmp(curdev->dv_xname, "sbus", 4) == 0)
552 break;
553 }
554 sc = (struct sbus_softc *) curdev;
555
556 sd->sd_dev = dev;
557 sd->sd_bchain = sc->sc_sbdev;
558 sc->sc_sbdev = sd;
559 }
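
/*
 * Illustrative sketch only (not compiled): a hypothetical child driver
 * registering itself so sbusreset() can call it back.  `sc' is the
 * child's softc with an embedded struct sbusdev sc_sd, and `xxreset'
 * is its reset handler; both names are made up.
 */
#if 0
	sc->sc_sd.sd_reset = xxreset;
	sbus_establish(&sc->sc_sd, &sc->sc_dev);
#endif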
560
561 /*
562 * Reset the given sbus. (???)
563 */
564 void
565 sbusreset(sbus)
566 int sbus;
567 {
568 register struct sbusdev *sd;
569 struct sbus_softc *sc = sbus_cd.cd_devs[sbus];
570 struct device *dev;
571
572 printf("reset %s:", sc->sc_dev.dv_xname);
573 for (sd = sc->sc_sbdev; sd != NULL; sd = sd->sd_bchain) {
574 if (sd->sd_reset) {
575 dev = sd->sd_dev;
576 (*sd->sd_reset)(dev);
577 printf(" %s", dev->dv_xname);
578 }
579 }
580 #if 1
581 /* Reload iommu regs */
582 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_cr,
583 0, (IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
584 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_tsb,
585 0, sc->sc_ptsb);
586 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_ctl,
587 0, STRBUF_EN); /* Enable diagnostics mode? */
588 #else
589 /* Reload iommu regs */
590 stxa(&sc->sc_sysio->sys_iommu.iommu_cr,ASI_NUCLEUS,(IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
591 stxa(&sc->sc_sysio->sys_iommu.iommu_tsb,ASI_NUCLEUS,sc->sc_ptsb);
592 stxa(&sc->sc_sysio->sys_strbuf.strbuf_ctl,ASI_NUCLEUS,STRBUF_EN);
593 #endif
594 }
595
596 /*
597 * Here are the iommu control routines.
598 */
599 void
600 sbus_enter(sc, va, pa, flags)
601 struct sbus_softc *sc;
602 vaddr_t va;
603 int64_t pa;
604 int flags;
605 {
606 int64_t tte;
607
608 #ifdef DIAGNOSTIC
609 if (va < sc->sc_dvmabase)
610 panic("sbus_enter: va 0x%lx not in DVMA space",va);
611 #endif
612
613 tte = MAKEIOTTE(pa, !(flags&BUS_DMA_NOWRITE), !(flags&BUS_DMA_NOCACHE),
614 !(flags&BUS_DMA_COHERENT));
615
616 /* Is the streamcache flush really needed? */
617 #if 1
618 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush,
619 0, va);
620 #else
621 stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
622 #endif
623 sbus_flush(sc);
624 sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)] = tte;
625 #if 1
626 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_flush,
627 0, va);
628 #else
629 stxa(&sc->sc_sysio->sys_iommu.iommu_flush,ASI_NUCLEUS,va);
630 #endif
631 #ifdef DEBUG
632 if (sbusdebug & SDB_DVMA)
633 printf("sbus_enter: va %lx pa %lx TSB[%lx]@%p=%lx\n",
634 va, (long)pa, IOTSBSLOT(va,sc->sc_tsbsize),
635 &sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
636 (long)tte);
637 #endif
638 }
639
640 /*
641 * sbus_remove: removes mappings created by sbus_enter.
642 *
643 * The corresponding IOMMU TTEs are cleared and the streaming buffer is flushed.
644 */
645 void
646 sbus_remove(sc, va, len)
647 struct sbus_softc *sc;
648 vaddr_t va;
649 size_t len;
650 {
651
652 #ifdef DIAGNOSTIC
653 if (va < sc->sc_dvmabase)
654 panic("sbus_remove: va 0x%lx not in DVMA space", (long)va);
655 if ((long)(va + len) < (long)va)
656 panic("sbus_remove: va 0x%lx + len 0x%lx wraps",
657 (long) va, (long) len);
658 if (len & ~0xfffffff)
659 panic("sbus_remove: ridiculous len 0x%lx", (long)len);
660 #endif
661
662 va = trunc_page(va);
663 while (len > 0) {
664
665 /*
666 * Streaming buffer flushes:
667 *
668 * 1 Tell strbuf to flush by storing va to strbuf_pgflush
669 * If we're not on a cache line boundary (64-bits):
670 * 2 Store 0 in flag
671 * 3 Store pointer to flag in flushsync
672 * 4 wait till flushsync becomes 0x1
673 *
674 * If it takes more than .5 sec, something went wrong.
675 */
676 #ifdef DEBUG
677 if (sbusdebug & SDB_DVMA)
678 printf("sbus_remove: flushing va %lx TSB[%lx]@%lx=%lx, %lu bytes left\n",
679 (long)va, (long)IOTSBSLOT(va,sc->sc_tsbsize),
680 (long)&sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
681 (long)(sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)]),
682 (u_long)len);
683 #endif
684 #if 1
685 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush, 0, va);
686 #else
687 stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
688 #endif
689 if (len <= NBPG) {
690 sbus_flush(sc);
691 len = 0;
692 } else len -= NBPG;
693 #ifdef DEBUG
694 if (sbusdebug & SDB_DVMA)
695 printf("sbus_remove: flushed va %lx TSB[%lx]@%lx=%lx, %lu bytes left\n",
696 (long)va, (long)IOTSBSLOT(va,sc->sc_tsbsize),
697 (long)&sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
698 (long)(sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)]),
699 (u_long)len);
700 #endif
701 sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)] = 0;
702 #if 1
703 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_flush, 0, va);
704 #else
705 stxa(&sc->sc_sysio->sys_iommu.iommu_flush, ASI_NUCLEUS, va);
706 #endif
707 va += NBPG;
708 }
709 }
710
711 int
712 sbus_flush(sc)
713 struct sbus_softc *sc;
714 {
715 extern u_int64_t cpu_clockrate;
716 u_int64_t flushtimeout;
717
718 sc->sc_flush = 0;
719 membar_sync();
720 #if 1
721 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_flushsync, 0, sc->sc_flushpa);
722 #else
723 stxa(&sc->sc_sysio->sys_strbuf.strbuf_flushsync, ASI_NUCLEUS, sc->sc_flushpa);
724 #endif
725 membar_sync();
726 flushtimeout = tick() + cpu_clockrate/2; /* .5 sec after *now* */
727 #ifdef DEBUG
728 if (sbusdebug & SDB_DVMA)
729 printf("sbus_flush: flush = %lx at va = %lx pa = %lx now=%lx until = %lx\n",
730 (long)sc->sc_flush, (long)&sc->sc_flush,
731 (long)sc->sc_flushpa, (long)tick(), flushtimeout);
732 #endif
733 /* Bypass non-coherent D$ */
734 #if 0
735 while( !ldxa(sc->sc_flushpa, ASI_PHYS_CACHED) && flushtimeout > tick()) membar_sync();
736 #else
737 { int i; for(i=140000000/2; !ldxa(sc->sc_flushpa, ASI_PHYS_CACHED) && i; i--) membar_sync(); }
738 #endif
739 #ifdef DIAGNOSTIC
740 if( !sc->sc_flush ) {
741 printf("sbus_flush: flush timeout %lx at %lx\n", (long)sc->sc_flush,
742 (long)sc->sc_flushpa); /* panic? */
743 #ifdef DDB
744 Debugger();
745 #endif
746 }
747 #endif
748 #ifdef DEBUG
749 if (sbusdebug & SDB_DVMA)
750 printf("sbus_flush: flushed\n");
751 #endif
752 return (sc->sc_flush);
753 }
754 /*
755 * Get interrupt attributes for an Sbus device.
756 */
757 int
758 sbus_get_intr(sc, node, ipp, np)
759 struct sbus_softc *sc;
760 int node;
761 struct sbus_intr **ipp;
762 int *np;
763 {
764 int *ipl;
765 int i, n, error;
766 char buf[32];
767
768 /*
769 * The `interrupts' property contains the Sbus interrupt level.
770 */
771 ipl = NULL;
772 if (getprop(node, "interrupts", sizeof(int), np, (void **)&ipl) == 0) {
773 /* Change the format to a `struct sbus_intr' array */
774 struct sbus_intr *ip;
775 /* Default to interrupt level 2 -- otherwise unused */
776 int pri = INTLEVENCODE(2);
777 ip = malloc(*np * sizeof(struct sbus_intr), M_DEVBUF, M_NOWAIT);
778 if (ip == NULL)
779 return (ENOMEM);
780 /* Now things get ugly. We need to take this value which is
781 * the interrupt vector number and encode the IPL into it
782 * somehow. Luckily, the interrupt vector has lots of free
783 * space and we can easily stuff the IPL in there for a while.
784 */
785 getpropstringA(node, "device_type", buf);
786 if (!buf[0]) {
787 getpropstringA(node, "name", buf);
788 }
789 for (i=0; intrmap[i].in_class; i++) {
790 if (strcmp(intrmap[i].in_class, buf) == 0) {
791 pri = INTLEVENCODE(intrmap[i].in_lev);
792 break;
793 }
794 }
795 for (n = 0; n < *np; n++) {
796 /*
797 * We encode vector and priority into sbi_pri so we
798 * can pass them as a unit. This will go away if
799 * sbus_establish ever takes an sbus_intr instead
800 * of an integer level.
801 * Stuff the real vector in sbi_vec.
802 */
803 ip[n].sbi_pri = pri|ipl[n];
804 ip[n].sbi_vec = ipl[n];
805 }
806 free(ipl, M_DEVBUF);
807 *ipp = ip;
808 return (0);
809 }
810
811 /* We really don't support the following */
812 /* printf("\nWARNING: sbus_get_intr() \"interrupts\" not found -- using \"intr\"\n"); */
813 /* And some devices don't even have interrupts */
814 /*
815 * Fall back on `intr' property.
816 */
817 *ipp = NULL;
818 error = getprop(node, "intr", sizeof(struct sbus_intr),
819 np, (void **)ipp);
820 switch (error) {
821 case 0:
822 for (n = *np; n-- > 0;) {
823 /*
824 * Move the interrupt vector into place.
825 * We could remap the level, but the SBUS priorities
826 * are probably good enough.
827 */
828 (*ipp)[n].sbi_vec = (*ipp)[n].sbi_pri;
829 (*ipp)[n].sbi_pri |= INTLEVENCODE((*ipp)[n].sbi_pri);
830 }
831 break;
832 case ENOENT:
833 error = 0;
834 break;
835 }
836
837 return (error);
838 }
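
/*
 * Illustrative sketch only (not compiled): the encoding produced above.
 * For an `interrupts' property, sbi_pri carries both the class-based
 * IPL and the vector while sbi_vec carries the bare vector, so
 * sbus_intr_establish() can recover each half with INTLEV()/INTVEC().
 */
#if 0
	ipl = INTLEV(sa->sa_intr[0].sbi_pri);	/* CPU interrupt level */
	vec = INTVEC(sa->sa_intr[0].sbi_pri);	/* interrupt vector number */
#endif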
839
840
841 /*
842 * Install an interrupt handler for an Sbus device.
843 */
844 void *
845 sbus_intr_establish(t, level, flags, handler, arg)
846 bus_space_tag_t t;
847 int level;
848 int flags;
849 int (*handler) __P((void *));
850 void *arg;
851 {
852 struct sbus_softc *sc = t->cookie;
853 struct intrhand *ih;
854 int ipl;
855 long vec = level;
856
857 ih = (struct intrhand *)
858 malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
859 if (ih == NULL)
860 return (NULL);
861
862 if ((flags & BUS_INTR_ESTABLISH_SOFTINTR) != 0)
863 ipl = vec;
864 else if ((vec & SBUS_INTR_COMPAT) != 0)
865 ipl = vec & ~SBUS_INTR_COMPAT;
866 else {
867 /* Decode and remove IPL */
868 ipl = INTLEV(vec);
869 vec = INTVEC(vec);
870 #ifdef DEBUG
871 if (sbusdebug & SDB_INTR) {
872 printf("\nsbus: intr[%ld]%lx: %lx\n", (long)ipl, (long)vec,
873 intrlev[vec]);
874 printf("Hunting for IRQ...\n");
875 }
876 #endif
877 if ((vec & INTMAP_OBIO) == 0) {
878 /* We're in an SBUS slot */
879 /* Register the map and clear intr registers */
880 #ifdef DEBUG
881 if (sbusdebug & SDB_INTR) {
882 int64_t *intrptr = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(vec)];
883 int64_t intrmap = *intrptr;
884
885 printf("Found SBUS %lx IRQ as %lx in slot %ld\n",
886 (long)vec, (long)intrmap,
887 (long)INTSLOT(vec));
888 }
889 #endif
890 ih->ih_map = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(vec)];
891 ih->ih_clr = &sc->sc_sysio->sbus0_clr_int[INTVEC(vec)];
892 /* Enable the interrupt */
893 vec |= INTMAP_V;
894 /* Insert IGN */
895 vec |= sc->sc_ign;
896 bus_space_write_8(sc->sc_bustag, ih->ih_map, 0, vec);
897 } else {
898 int64_t *intrptr = &sc->sc_sysio->scsi_int_map;
899 int64_t intrmap = 0;
900 int i;
901
902 /* Insert IGN */
903 vec |= sc->sc_ign;
904 for (i=0;
905 &intrptr[i] <= (int64_t *)&sc->sc_sysio->reserved_int_map &&
906 INTVEC(intrmap=intrptr[i]) != INTVEC(vec);
907 i++);
908 if (INTVEC(intrmap) == INTVEC(vec)) {
909 #ifdef DEBUG
910 if (sbusdebug & SDB_INTR)
911 printf("Found OBIO %lx IRQ as %lx in slot %d\n",
912 vec, (long)intrmap, i);
913 #endif
914 /* Register the map and clear intr registers */
915 ih->ih_map = &intrptr[i];
916 intrptr = (int64_t *)&sc->sc_sysio->scsi_clr_int;
917 ih->ih_clr = &intrptr[i];
918 /* Enable the interrupt */
919 intrmap |= INTMAP_V;
920 bus_space_write_8(sc->sc_bustag, ih->ih_map, 0, (u_long)intrmap);
921 } else panic("IRQ not found!");
922 }
923 }
924 #ifdef DEBUG
925 if (sbusdebug & SDB_INTR) { long i; for (i=0; i<1400000000; i++); }
926 #endif
927
928 ih->ih_fun = handler;
929 ih->ih_arg = arg;
930 ih->ih_number = vec;
931 ih->ih_pil = (1<<ipl);
932 if ((flags & BUS_INTR_ESTABLISH_FASTTRAP) != 0)
933 intr_fasttrap(ipl, (void (*)__P((void)))handler);
934 else
935 intr_establish(ipl, ih);
936 return (ih);
937 }
938
939 static bus_space_tag_t
940 sbus_alloc_bustag(sc)
941 struct sbus_softc *sc;
942 {
943 bus_space_tag_t sbt;
944
945 sbt = (bus_space_tag_t)
946 malloc(sizeof(struct sparc_bus_space_tag), M_DEVBUF, M_NOWAIT);
947 if (sbt == NULL)
948 return (NULL);
949
950 bzero(sbt, sizeof *sbt);
951 sbt->cookie = sc;
952 sbt->parent = sc->sc_bustag;
953 sbt->type = ASI_PRIMARY;
954 sbt->sparc_bus_map = _sbus_bus_map;
955 sbt->sparc_bus_mmap = sbus_bus_mmap;
956 sbt->sparc_intr_establish = sbus_intr_establish;
957 return (sbt);
958 }
959
960
961 static bus_dma_tag_t
962 sbus_alloc_dmatag(sc)
963 struct sbus_softc *sc;
964 {
965 bus_dma_tag_t sdt, psdt = sc->sc_dmatag;
966
967 sdt = (bus_dma_tag_t)
968 malloc(sizeof(struct sparc_bus_dma_tag), M_DEVBUF, M_NOWAIT);
969 if (sdt == NULL)
970 /* Panic? */
971 return (psdt);
972
973 sdt->_cookie = sc;
974 sdt->_parent = psdt;
975 #define PCOPY(x) sdt->x = psdt->x
976 PCOPY(_dmamap_create);
977 PCOPY(_dmamap_destroy);
978 sdt->_dmamap_load = sbus_dmamap_load;
979 PCOPY(_dmamap_load_mbuf);
980 PCOPY(_dmamap_load_uio);
981 PCOPY(_dmamap_load_raw);
982 sdt->_dmamap_unload = sbus_dmamap_unload;
983 sdt->_dmamap_sync = sbus_dmamap_sync;
984 sdt->_dmamem_alloc = sbus_dmamem_alloc;
985 sdt->_dmamem_free = sbus_dmamem_free;
986 sdt->_dmamem_map = sbus_dmamem_map;
987 sdt->_dmamem_unmap = sbus_dmamem_unmap;
988 PCOPY(_dmamem_mmap);
989 #undef PCOPY
990 sc->sc_dmatag = sdt;
991 return (sdt);
992 }
993
994 int
995 sbus_dmamap_load(t, map, buf, buflen, p, flags)
996 bus_dma_tag_t t;
997 bus_dmamap_t map;
998 void *buf;
999 bus_size_t buflen;
1000 struct proc *p;
1001 int flags;
1002 {
1003 int err;
1004 bus_size_t sgsize;
1005 paddr_t curaddr;
1006 vaddr_t dvmaddr, vaddr = (vaddr_t)buf;
1007 pmap_t pmap;
1008 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1009
1010 if (map->dm_nsegs) {
1011 /* Already in use?? */
1012 #ifdef DIAGNOSTIC
1013 printf("sbus_dmamap_load: map still in use\n");
1014 #endif
1015 bus_dmamap_unload(t, map);
1016 }
1017 if ((err = bus_dmamap_load(t->_parent, map, buf, buflen, p, flags)))
1018 return (err);
1019
1020 if (p != NULL)
1021 pmap = p->p_vmspace->vm_map.pmap;
1022 else
1023 pmap = pmap_kernel();
1024
1025 dvmaddr = trunc_page(map->dm_segs[0].ds_addr);
1026 sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
1027 for (; buflen > 0; ) {
1028 /*
1029 * Get the physical address for this page.
1030 */
1031 if ((curaddr = pmap_extract(pmap, vaddr)) == 0) {
1032 bus_dmamap_unload(t, map);
1033 return (-1);
1034 }
1035
1036 /*
1037 * Compute the segment size, and adjust counts.
1038 */
1039 sgsize = NBPG - ((u_long)vaddr & PGOFSET);
1040 if (buflen < sgsize)
1041 sgsize = buflen;
1042
1043 #ifdef DEBUG
1044 if (sbusdebug & SDB_DVMA)
1045 printf("sbus_dmamap_load: map %p loading va %lx at pa %lx\n",
1046 map, (long)dvmaddr, (long)(curaddr & ~(NBPG-1)));
1047 #endif
1048 sbus_enter(sc, trunc_page(dvmaddr), trunc_page(curaddr), flags);
1049
1050 dvmaddr += PAGE_SIZE;
1051 vaddr += sgsize;
1052 buflen -= sgsize;
1053 }
1054 return (0);
1055 }
1056
1057 void
1058 sbus_dmamap_unload(t, map)
1059 bus_dma_tag_t t;
1060 bus_dmamap_t map;
1061 {
1062 vaddr_t addr;
1063 int len;
1064 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1065
1066 if (map->dm_nsegs != 1)
1067 panic("sbus_dmamap_unload: nsegs = %d", map->dm_nsegs);
1068
1069 addr = trunc_page(map->dm_segs[0].ds_addr);
1070 len = map->dm_segs[0].ds_len;
1071
1072 #ifdef DEBUG
1073 if (sbusdebug & SDB_DVMA)
1074 printf("sbus_dmamap_unload: map %p removing va %lx size %lx\n",
1075 map, (long)addr, (long)len);
1076 #endif
1077 sbus_remove(sc, addr, len);
1078 bus_dmamap_unload(t->_parent, map);
1079 }
1080
1081
1082 void
1083 sbus_dmamap_sync(t, map, offset, len, ops)
1084 bus_dma_tag_t t;
1085 bus_dmamap_t map;
1086 bus_addr_t offset;
1087 bus_size_t len;
1088 int ops;
1089 {
1090 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1091 vaddr_t va = map->dm_segs[0].ds_addr + offset;
1092
1093 /*
1094 * We only support one DMA segment; supporting more makes this code
1095 * too unwieldy.
1096 */
1097
1098 if (ops&BUS_DMASYNC_PREREAD) {
1099 #ifdef DEBUG
1100 if (sbusdebug & SDB_DVMA)
1101 printf("sbus_dmamap_sync: syncing va %lx len %lu BUS_DMASYNC_PREREAD\n",
1102 (long)va, (u_long)len);
1103 #endif
1104
1105 /* Nothing to do */;
1106 }
1107 if (ops&BUS_DMASYNC_POSTREAD) {
1108 /*
1109 * We should sync the IOMMU streaming caches here first.
1110 */
1111 #ifdef DEBUG
1112 if (sbusdebug & SDB_DVMA)
1113 printf("sbus_dmamap_sync: syncing va %lx len %lu BUS_DMASYNC_POSTREAD\n",
1114 (long)va, (u_long)len);
1115 #endif
1116 while (len > 0) {
1117
1118 /*
1119 * Streaming buffer flushes:
1120 *
1121 * 1 Tell strbuf to flush by storing va to strbuf_pgflush
1122 * If we're not on a cache line boundary (64-bits):
1123 * 2 Store 0 in flag
1124 * 3 Store pointer to flag in flushsync
1125 * 4 wait till flushsync becomes 0x1
1126 *
1127 * If it takes more than .5 sec, something went wrong.
1128 */
1129 #ifdef DEBUG
1130 if (sbusdebug & SDB_DVMA)
1131 printf("sbus_dmamap_sync: flushing va %lx, %lu bytes left\n",
1132 (long)va, (u_long)len);
1133 #endif
1134 #if 1
1135 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush, 0, va);
1136 #else
1137 stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
1138 #endif
1139 if (len <= NBPG) {
1140 sbus_flush(sc);
1141 len = 0;
1142 } else
1143 len -= NBPG;
1144 va += NBPG;
1145 }
1146 }
1147 if (ops&BUS_DMASYNC_PREWRITE) {
1148 #ifdef DEBUG
1149 if (sbusdebug & SDB_DVMA)
1150 printf("sbus_dmamap_sync: syncing va %lx len %lu BUS_DMASYNC_PREWRITE\n",
1151 (long)va, (u_long)len);
1152 #endif
1153 /* Nothing to do */;
1154 }
1155 if (ops&BUS_DMASYNC_POSTWRITE) {
1156 #ifdef DEBUG
1157 if (sbusdebug & SDB_DVMA)
1158 printf("sbus_dmamap_sync: syncing va %lx len %lu BUS_DMASYNC_POSTWRITE\n",
1159 (long)va, (u_long)len);
1160 #endif
1161 /* Nothing to do */;
1162 }
1163 bus_dmamap_sync(t->_parent, map, offset, len, ops);
1164 }
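
/*
 * Illustrative sketch only (not compiled): the DMA map life cycle a
 * child driver would drive through the tag built by sbus_alloc_dmatag().
 * The create/destroy and load-mbuf/uio/raw methods are inherited from
 * the parent tag; load, sync and unload come back to the sbus_dmamap_*
 * routines above.  `size', `buf' and `map' are made-up locals.
 */
#if 0
	bus_dmamap_create(sa->sa_dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &map);
	bus_dmamap_load(sa->sa_dmatag, map, buf, size, NULL, BUS_DMA_NOWAIT);
	/* ... start the transfer ... */
	bus_dmamap_sync(sa->sa_dmatag, map, 0, size, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sa->sa_dmatag, map);
	bus_dmamap_destroy(sa->sa_dmatag, map);
#endif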
1165
1166
1167 /*
1168 * Take memory allocated by our parent bus and generate DVMA mappings for it.
1169 */
1170 int
1171 sbus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
1172 bus_dma_tag_t t;
1173 bus_size_t size, alignment, boundary;
1174 bus_dma_segment_t *segs;
1175 int nsegs;
1176 int *rsegs;
1177 int flags;
1178 {
1179 paddr_t curaddr;
1180 bus_addr_t dvmaddr;
1181 vm_page_t m;
1182 struct pglist *mlist;
1183 int error;
1184 int n;
1185 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1186
1187 if ((error = bus_dmamem_alloc(t->_parent, size, alignment,
1188 boundary, segs, nsegs, rsegs, flags)))
1189 return (error);
1190
1191 /*
1192 * Allocate a DVMA mapping for our new memory.
1193 */
1194 for (n=0; n<*rsegs; n++) {
1195 dvmaddr = dvmamap_alloc(segs[0].ds_len, flags);
1196 if (dvmaddr == (bus_addr_t)-1) {
1197 /* Free what we got and exit */
1198 bus_dmamem_free(t->_parent, segs, nsegs);
1199 return (ENOMEM);
1200 }
1201 segs[n].ds_addr = dvmaddr;
1202 size = segs[n].ds_len;
1203 mlist = segs[n]._ds_mlist;
1204
1205 /* Map memory into DVMA space */
1206 for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {
1207 curaddr = VM_PAGE_TO_PHYS(m);
1208 sbus_enter(sc, dvmaddr, curaddr, flags);
1209 dvmaddr += PAGE_SIZE;
1210 }
1211 }
1212 return (0);
1213 }
1214
1215 void
1216 sbus_dmamem_free(t, segs, nsegs)
1217 bus_dma_tag_t t;
1218 bus_dma_segment_t *segs;
1219 int nsegs;
1220 {
1221 vaddr_t addr;
1222 int len;
1223 int n;
1224 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1225
1226
1227 for (n=0; n<nsegs; n++) {
1228 addr = segs[n].ds_addr;
1229 len = segs[n].ds_len;
1230 sbus_remove(sc, addr, len);
1231 dvmamap_free(addr, len);
1232 }
1233 bus_dmamem_free(t->_parent, segs, nsegs);
1234 }
1235
1236 /*
1237 * Map the DVMA mappings into the kernel pmap.
1238 * Check the flags to see whether we're streaming or coherent.
1239 */
1240 int
1241 sbus_dmamem_map(t, segs, nsegs, size, kvap, flags)
1242 bus_dma_tag_t t;
1243 bus_dma_segment_t *segs;
1244 int nsegs;
1245 size_t size;
1246 caddr_t *kvap;
1247 int flags;
1248 {
1249 vm_page_t m;
1250 vaddr_t va;
1251 bus_addr_t addr;
1252 struct pglist *mlist;
1253 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1254 int cbit;
1255
1256 /*
1257 * digest flags:
1258 */
1259 cbit = 0;
1260 if (flags & BUS_DMA_COHERENT) /* Disable vcache */
1261 cbit |= PMAP_NVC;
1262 if (flags & BUS_DMA_NOCACHE) /* side effects */
1263 cbit |= PMAP_NC;
1264 /*
1265 * Now take this and map it into the CPU since it should already
1266 * be in the IOMMU.
1267 */
1268 *kvap = (caddr_t)(va = segs[0].ds_addr);
1269 mlist = segs[0]._ds_mlist;
1270 for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {
1271
1272 if (size == 0)
1273 panic("sbus_dmamem_map: size botch");
1274
1275 addr = VM_PAGE_TO_PHYS(m);
1276 pmap_enter(pmap_kernel(), va, addr | cbit,
1277 VM_PROT_READ | VM_PROT_WRITE, TRUE, 0);
1278 va += PAGE_SIZE;
1279 size -= PAGE_SIZE;
1280 }
1281
1282 return (0);
1283 }
1284
1285 /*
1286 * Unmap DVMA mappings from kernel
1287 */
1288 void
1289 sbus_dmamem_unmap(t, kva, size)
1290 bus_dma_tag_t t;
1291 caddr_t kva;
1292 size_t size;
1293 {
1294 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1295
1296 #ifdef DIAGNOSTIC
1297 if ((u_long)kva & PGOFSET)
1298 panic("sbus_dmamem_unmap");
1299 #endif
1300
1301 size = round_page(size);
1302 pmap_remove(pmap_kernel(), (vaddr_t)kva, size);
1303 }
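
/*
 * Illustrative sketch only (not compiled): the corresponding DVMA
 * memory life cycle using the routines above.  `size', `seg', `rseg'
 * and `kva' are made-up locals.
 */
#if 0
	bus_dmamem_alloc(sa->sa_dmatag, size, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	bus_dmamem_map(sa->sa_dmatag, &seg, rseg, size, &kva, BUS_DMA_NOWAIT);
	/* ... use the buffer ... */
	bus_dmamem_unmap(sa->sa_dmatag, kva, size);
	bus_dmamem_free(sa->sa_dmatag, &seg, rseg);
#endif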
1304