/*	$NetBSD: sbus.c,v 1.6 1998/09/05 16:23:09 pk Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1992, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * This software was developed by the Computer Systems Engineering group
44 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
45 * contributed to Berkeley.
46 *
47 * All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by the University of
50 * California, Lawrence Berkeley Laboratory.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 * must display the following acknowledgement:
62 * This product includes software developed by the University of
63 * California, Berkeley and its contributors.
64 * 4. Neither the name of the University nor the names of its contributors
65 * may be used to endorse or promote products derived from this software
66 * without specific prior written permission.
67 *
68 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
69 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
70 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
71 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
72 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
73 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
74 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
75 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
76 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
77 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
78 * SUCH DAMAGE.
79 *
80 * @(#)sbus.c 8.1 (Berkeley) 6/11/93
81 */
82
/*
 * Sbus autoconfiguration, interrupt and DVMA (IOMMU) support.
 */
86
87 #include <sys/param.h>
88 #include <sys/malloc.h>
89 #include <sys/systm.h>
90 #include <sys/device.h>
91 #include <vm/vm.h>
92
93 #include <machine/bus.h>
94 #include <sparc64/sparc64/vaddrs.h>
95 #include <sparc64/dev/sbusreg.h>
96 #include <sparc64/dev/sbusvar.h>
97 #include <sparc64/sparc64/asm.h>
98
99 #include <machine/autoconf.h>
100 #include <machine/ctlreg.h>
101 #include <machine/cpu.h>
102
103 /* XXXXX -- Needed to allow dvma_mapin to work -- need to switch to bus_dma_* */
104 struct sbus_softc *sbus0;
105
106 #ifdef DEBUG
107 #define SDB_DVMA 0x1
108 #define SDB_INTR 0x2
109 int sbusdebug = 0;
110 #endif
111
112 void sbusreset __P((int));
113 int sbus_flush __P((struct sbus_softc *));
114
115 static bus_space_tag_t sbus_alloc_bustag __P((struct sbus_softc *));
116 static bus_dma_tag_t sbus_alloc_dmatag __P((struct sbus_softc *));
117 static int sbus_get_intr __P((struct sbus_softc *, int,
118 struct sbus_intr **, int *));
119 static int sbus_bus_mmap __P((bus_space_tag_t, bus_type_t, bus_addr_t,
120 int, bus_space_handle_t *));
121 static int _sbus_bus_map __P((
122 bus_space_tag_t,
123 bus_type_t,
124 bus_addr_t, /*offset*/
125 bus_size_t, /*size*/
126 int, /*flags*/
127 vaddr_t, /*preferred virtual address */
128 bus_space_handle_t *));
129 static void *sbus_intr_establish __P((
130 bus_space_tag_t,
131 int, /*level*/
132 int, /*flags*/
133 int (*) __P((void *)), /*handler*/
134 void *)); /*handler arg*/
135
136
137 /* autoconfiguration driver */
138 int sbus_match __P((struct device *, struct cfdata *, void *));
139 void sbus_attach __P((struct device *, struct device *, void *));
140
141
142 struct cfattach sbus_ca = {
143 sizeof(struct sbus_softc), sbus_match, sbus_attach
144 };
145
146 extern struct cfdriver sbus_cd;
147
148 /*
149 * DVMA routines
150 */
151 void sbus_enter __P((struct sbus_softc *, vaddr_t, int64_t, int));
152 void sbus_remove __P((struct sbus_softc *, vaddr_t, int));
153 int sbus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
154 bus_size_t, struct proc *, int));
155 void sbus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
156 void sbus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
157 bus_size_t, int));
158 int sbus_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
159 bus_size_t alignment, bus_size_t boundary,
160 bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
161 void sbus_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
162 int nsegs));
163 int sbus_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
164 int nsegs, size_t size, caddr_t *kvap, int flags));
165 void sbus_dmamem_unmap __P((bus_dma_tag_t tag, caddr_t kva,
166 size_t size));
167
168
/*
 * Child devices receive the Sbus interrupt level in their attach
 * arguments.  We translate these to CPU IPLs using the tables below.
 * Note: obio bus interrupt levels are identical to the processor IPL.
 *
 * When the Sbus interrupt level cannot be had from the PROM as an
 * `interrupts' property, we fall back on the `intr' property, which
 * contains the CPU IPL directly (see SBUS_INTR_COMPAT below).
 */
179
180 /* Translate Sbus interrupt level to processor IPL */
181 static int intr_sbus2ipl_4c[] = {
182 0, 1, 2, 3, 5, 7, 8, 9
183 };
184 static int intr_sbus2ipl_4m[] = {
185 0, 2, 3, 5, 7, 9, 11, 13
186 };
187
188 /*
189 * This value is or'ed into the attach args' interrupt level cookie
190 * if the interrupt level comes from an `intr' property, i.e. it is
191 * not an Sbus interrupt level.
192 */
193 #define SBUS_INTR_COMPAT 0x80000000
194
195
/*
 * Print the location of some sbus-attached device (called just
 * before attaching that device).  If `busname' is not NULL, the
 * device was found but not configured; print the busname as well.
 * Return UNCONF (config_find ignores this if the device was configured).
 */
202 int
203 sbus_print(args, busname)
204 void *args;
205 const char *busname;
206 {
207 struct sbus_attach_args *sa = args;
208 int i;
209
210 if (busname)
211 printf("%s at %s", sa->sa_name, busname);
212 printf(" slot %d offset 0x%x", sa->sa_slot, sa->sa_offset);
213 for (i=0; i<sa->sa_nintr; i++) {
214 struct sbus_intr *sbi = &sa->sa_intr[i];
215
216 printf(" vector %x ipl %d", (int)sbi->sbi_vec, (int)INTLEV(sbi->sbi_pri));
217 }
218 return (UNCONF);
219 }
220
221 int
222 sbus_match(parent, cf, aux)
223 struct device *parent;
224 struct cfdata *cf;
225 void *aux;
226 {
227 struct mainbus_attach_args *ma = aux;
228
229 return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
230 }
231
232 /*
233 * Attach an Sbus.
234 */
235 void
236 sbus_attach(parent, self, aux)
237 struct device *parent;
238 struct device *self;
239 void *aux;
240 {
241 struct sbus_softc *sc = sbus0 = (struct sbus_softc *)self;
242 struct mainbus_attach_args *ma = aux;
243 int node = ma->ma_node;
244
245 int node0, error;
246 bus_space_tag_t sbt;
247 struct sbus_attach_args sa;
248 char *busname = "sbus";
249 struct bootpath *bp = ma->ma_bp;
250
251
252 sc->sc_bustag = ma->ma_bustag;
253 sc->sc_dmatag = ma->ma_dmatag;
254 sc->sc_sysio = (struct sysioreg*)(long)ma->ma_address[0]; /* Use prom mapping for sysio. */
255 sc->sc_ign = ma->ma_interrupts[0] & INTMAP_IGN; /* Find interrupt group no */
256
257 /* Setup interrupt translation tables */
258 sc->sc_intr2ipl = CPU_ISSUN4C
259 ? intr_sbus2ipl_4c
260 : intr_sbus2ipl_4m;
261
262 /*
263 * Record clock frequency for synchronous SCSI.
264 * IS THIS THE CORRECT DEFAULT??
265 */
266 sc->sc_clockfreq = getpropint(node, "clock-frequency", 25*1000*1000);
267 printf(": clock = %s MHz\n", clockfreq(sc->sc_clockfreq));
268
269 sbt = sbus_alloc_bustag(sc);
270 sc->sc_dmatag = sbus_alloc_dmatag(sc);
271
272 /*
273 * Get the SBus burst transfer size if burst transfers are supported
274 */
275 sc->sc_burst = getpropint(node, "burst-sizes", 0);
276
277 /* Propagate bootpath */
278 if (bp != NULL && strcmp(bp->name, busname) == 0)
279 bp++;
280 else
281 bp = NULL;
282
283 /*
284 * Collect address translations from the OBP.
285 */
286 error = getprop(node, "ranges", sizeof(struct sbus_range),
287 &sc->sc_nrange, (void **)&sc->sc_range);
288 switch (error) {
289 case 0:
290 break;
291 #if 0
292 case ENOENT:
293 /* Fall back to our own `range' construction */
294 sc->sc_range = sbus_translations;
295 sc->sc_nrange =
296 sizeof(sbus_translations)/sizeof(sbus_translations[0]);
297 break;
298 #endif
299 default:
300 panic("%s: error getting ranges property", sc->sc_dev.dv_xname);
301 }
302
303
304 /*
305 * Setup the iommu.
306 *
307 * The sun4u iommu is part of the SBUS controller so we will
308 * deal with it here. We could try to fake a device node so
309 * we can eventually share it with the PCI bus run by psyco,
310 * but I don't want to get into that sort of cruft.
311 */
312
313 /*
314 * All IOMMUs will share the same TSB which is allocated in pmap_bootstrap.
315 *
316 * This makes device management easier.
317 */
318 {
319 extern int64_t *iotsb;
320 extern paddr_t iotsbp;
321 extern int iotsbsize;
322
323 sc->sc_tsbsize = iotsbsize;
324 sc->sc_tsb = iotsb;
325 sc->sc_ptsb = iotsbp;
326 }
327 #if 0
328 /* Need to do 64-bit stores */
329 sc->sc_sysio->sys_iommu.iommu_cr = (IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN);
330 sc->sc_sysio->sys_iommu.iommu_tsb = sc->sc_ptsb;
331 #else
332 stxa(&sc->sc_sysio->sys_iommu.iommu_cr,ASI_NUCLEUS,(IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
333 stxa(&sc->sc_sysio->sys_iommu.iommu_tsb,ASI_NUCLEUS,sc->sc_ptsb);
334 #endif
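	/*
	 * The stores above enable the IOMMU (judging by the IOMMUCR_*
	 * bit names: 8K I/O pages, 1K-entry TSB) and point the TSB
	 * base register at the shared TSB set up in pmap_bootstrap().
	 */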
335 #ifdef DEBUG
336 if (sbusdebug & SDB_DVMA)
337 {
338 /* Probe the iommu */
339 int64_t cr, tsb;
340
341 printf("iommu regs at: cr=%x tsb=%x flush=%x\n", &sc->sc_sysio->sys_iommu.iommu_cr,
342 &sc->sc_sysio->sys_iommu.iommu_tsb, &sc->sc_sysio->sys_iommu.iommu_flush);
343 cr = sc->sc_sysio->sys_iommu.iommu_cr;
344 tsb = sc->sc_sysio->sys_iommu.iommu_tsb;
345 printf("iommu cr=%x:%x tsb=%x:%x\n", (long)(cr>>32), (long)cr, (long)(tsb>>32), (long)tsb);
346 printf("sysio base %p phys %p TSB base %p phys %p",
347 (long)sc->sc_sysio, (long)pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_sysio),
348 (long)sc->sc_tsb, (long)sc->sc_ptsb);
349 delay(1000000); /* 1 s */
350 }
351 #endif
352
353 /*
354 * Initialize streaming buffer.
355 */
356 sc->sc_flushpa = pmap_extract(pmap_kernel(), (vaddr_t)&sc->sc_flush);
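	/*
	 * sc_flushpa is the physical address of the sc_flush flag;
	 * sbus_flush() below hands it to the streaming buffer's
	 * flush-sync register, and the hardware apparently writes a
	 * nonzero value there once the flush has completed.
	 */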
357 #if 0
358 sc->sc_sysio->sys_strbuf.strbuf_ctl = STRBUF_EN; /* Enable diagnostics mode? */
359 #else
360 stxa(&sc->sc_sysio->sys_strbuf.strbuf_ctl,ASI_NUCLEUS,STRBUF_EN);
361 #endif
362
363 /*
364 * Loop through ROM children, fixing any relative addresses
365 * and then configuring each device.
366 * `specials' is an array of device names that are treated
367 * specially:
368 */
369 node0 = firstchild(node);
370 for (node = node0; node; node = nextsibling(node)) {
371 char *name = getpropstring(node, "name");
372
373 if (sbus_setup_attach_args(sc, sbt, sc->sc_dmatag,
374 node, bp, &sa) != 0) {
375 printf("sbus_attach: %s: incomplete\n", name);
376 continue;
377 }
378 (void) config_found(&sc->sc_dev, (void *)&sa, sbus_print);
379 sbus_destroy_attach_args(&sa);
380 }
381 }
382
383 int
384 sbus_setup_attach_args(sc, bustag, dmatag, node, bp, sa)
385 struct sbus_softc *sc;
386 bus_space_tag_t bustag;
387 bus_dma_tag_t dmatag;
388 int node;
389 struct bootpath *bp;
390 struct sbus_attach_args *sa;
391 {
392 /*struct sbus_reg sbusreg;*/
393 /*int base;*/
394 int error;
395 int n;
396
397 bzero(sa, sizeof(struct sbus_attach_args));
398 error = getprop(node, "name", 1, &n, (void **)&sa->sa_name);
399 if (error != 0)
400 return (error);
401 sa->sa_name[n] = '\0';
402
403 sa->sa_bustag = bustag;
404 sa->sa_dmatag = dmatag;
405 sa->sa_node = node;
406 sa->sa_bp = bp;
407
408 error = getprop(node, "reg", sizeof(struct sbus_reg),
409 &sa->sa_nreg, (void **)&sa->sa_reg);
410 if (error != 0) {
411 char buf[32];
412 if (error != ENOENT ||
413 !node_has_property(node, "device_type") ||
414 strcmp(getpropstringA(node, "device_type", buf),
415 "hierarchical") != 0)
416 return (error);
417 }
418 for (n = 0; n < sa->sa_nreg; n++) {
419 /* Convert to relative addressing, if necessary */
420 u_int32_t base = sa->sa_reg[n].sbr_offset;
421 if (SBUS_ABS(base)) {
422 sa->sa_reg[n].sbr_slot = SBUS_ABS_TO_SLOT(base);
423 sa->sa_reg[n].sbr_offset = SBUS_ABS_TO_OFFSET(base);
424 }
425 }
426
427 if ((error = sbus_get_intr(sc, node, &sa->sa_intr, &sa->sa_nintr)) != 0)
428 return (error);
429
430 error = getprop(node, "address", sizeof(u_int32_t),
431 &sa->sa_npromvaddrs, (void **)&sa->sa_promvaddrs);
432 if (error != 0 && error != ENOENT)
433 return (error);
434
435 return (0);
436 }
437
438 void
439 sbus_destroy_attach_args(sa)
440 struct sbus_attach_args *sa;
441 {
442 if (sa->sa_name != NULL)
443 free(sa->sa_name, M_DEVBUF);
444
445 if (sa->sa_nreg != 0)
446 free(sa->sa_reg, M_DEVBUF);
447
448 if (sa->sa_intr)
449 free(sa->sa_intr, M_DEVBUF);
450
451 if (sa->sa_promvaddrs)
452 free(sa->sa_promvaddrs, M_DEVBUF);
453
454 bzero(sa, sizeof(struct sbus_attach_args));/*DEBUG*/
455 }
456
457
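/*
 * Translate an Sbus (slot, offset) pair through the `ranges' property:
 * the range entry whose child space matches the slot yields a 64-bit
 * parent address, (pspace << 32) | (poffset + offset), which is then
 * mapped by the parent bus tag.
 */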
458 int
459 _sbus_bus_map(t, btype, offset, size, flags, vaddr, hp)
460 bus_space_tag_t t;
461 bus_type_t btype;
462 bus_addr_t offset;
463 bus_size_t size;
464 int flags;
465 vaddr_t vaddr;
466 bus_space_handle_t *hp;
467 {
468 struct sbus_softc *sc = t->cookie;
469 int64_t slot = btype;
470 int i;
471
472 for (i = 0; i < sc->sc_nrange; i++) {
473 bus_addr_t paddr;
474
475 if (sc->sc_range[i].cspace != slot)
476 continue;
477
478 /* We've found the connection to the parent bus */
479 paddr = sc->sc_range[i].poffset + offset;
480 paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
481 #ifdef DEBUG
482 if (sbusdebug & SDB_DVMA)
483 printf("\n_sbus_bus_map: mapping paddr slot %x offset %x:%x poffset %x paddr %x:%x\n",
484 (int)slot, (int)(offset>>32), (int)offset, (int)sc->sc_range[i].poffset, (int)(paddr>>32), (int)paddr);
485 #endif
486 return (bus_space_map2(sc->sc_bustag, 0, paddr,
487 size, flags, vaddr, hp));
488 }
489
490 return (EINVAL);
491 }
492
493 int
494 sbus_bus_mmap(t, btype, paddr, flags, hp)
495 bus_space_tag_t t;
496 bus_type_t btype;
497 bus_addr_t paddr;
498 int flags;
499 bus_space_handle_t *hp;
500 {
501 bus_addr_t offset = paddr;
502 int slot = (paddr>>32);
503 struct sbus_softc *sc = t->cookie;
504 int i;
505
506 for (i = 0; i < sc->sc_nrange; i++) {
507 bus_addr_t paddr;
508
509 if (sc->sc_range[i].cspace != slot)
510 continue;
511
512 paddr = sc->sc_range[i].poffset + offset;
513 paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
514 return (bus_space_mmap(sc->sc_bustag, 0, paddr,
515 flags, hp));
516 }
517
518 return (-1);
519 }
520
521
522 /*
523 * Each attached device calls sbus_establish after it initializes
524 * its sbusdev portion.
525 */
526 void
527 sbus_establish(sd, dev)
528 register struct sbusdev *sd;
529 register struct device *dev;
530 {
531 register struct sbus_softc *sc;
532 register struct device *curdev;
533
534 /*
535 * We have to look for the sbus by name, since it is not necessarily
536 * our immediate parent (i.e. sun4m /iommu/sbus/espdma/esp)
537 * We don't just use the device structure of the above-attached
538 * sbus, since we might (in the future) support multiple sbus's.
539 */
540 for (curdev = dev->dv_parent; ; curdev = curdev->dv_parent) {
541 if (!curdev || !curdev->dv_xname)
542 panic("sbus_establish: can't find sbus parent for %s",
543 sd->sd_dev->dv_xname
544 ? sd->sd_dev->dv_xname
545 : "<unknown>" );
546
547 if (strncmp(curdev->dv_xname, "sbus", 4) == 0)
548 break;
549 }
550 sc = (struct sbus_softc *) curdev;
551
552 sd->sd_dev = dev;
553 sd->sd_bchain = sc->sc_sbdev;
554 sc->sc_sbdev = sd;
555 }
556
557 /*
558 * Reset the given sbus. (???)
559 */
560 void
561 sbusreset(sbus)
562 int sbus;
563 {
564 register struct sbusdev *sd;
565 struct sbus_softc *sc = sbus_cd.cd_devs[sbus];
566 struct device *dev;
567
568 printf("reset %s:", sc->sc_dev.dv_xname);
569 for (sd = sc->sc_sbdev; sd != NULL; sd = sd->sd_bchain) {
570 if (sd->sd_reset) {
571 dev = sd->sd_dev;
572 (*sd->sd_reset)(dev);
573 printf(" %s", dev->dv_xname);
574 }
575 }
576 #if 0
577 /* Reload iommu regs */
578 sc->sc_sysio->sys_iommu.iommu_cr = (IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN);
579 sc->sc_sysio->sys_iommu.iommu_tsb = sc->sc_ptsb;
580 sc->sc_sysio->sys_strbuf.strbuf_ctl = STRBUF_EN; /* Enable diagnostics mode? */
581 #else
582 /* Reload iommu regs */
583 stxa(&sc->sc_sysio->sys_iommu.iommu_cr,ASI_NUCLEUS,(IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
584 stxa(&sc->sc_sysio->sys_iommu.iommu_tsb,ASI_NUCLEUS,sc->sc_ptsb);
585 stxa(&sc->sc_sysio->sys_strbuf.strbuf_ctl,ASI_NUCLEUS,STRBUF_EN);
586 #endif
587 }
588
589 /*
590 * Here are the iommu control routines.
591 */
592 void
593 sbus_enter(sc, va, pa, flags)
594 struct sbus_softc *sc;
595 vaddr_t va;
596 int64_t pa;
597 int flags;
598 {
599 int64_t tte;
600
601 #ifdef DIAGNOSTIC
602 if (va < sc->sc_dvmabase)
603 panic("sbus_enter: va 0x%x not in DVMA space",va);
604 #endif
605
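	/*
	 * Build the IOTSB entry: judging by the flags passed to
	 * MAKEIOTTE(), the page is writable unless BUS_DMA_NOWRITE,
	 * cacheable unless BUS_DMA_NOCACHE, and streamable unless
	 * BUS_DMA_COHERENT was requested.
	 */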
606 tte = MAKEIOTTE(pa, !(flags&BUS_DMA_NOWRITE), !(flags&BUS_DMA_NOCACHE),
607 !(flags&BUS_DMA_COHERENT));
608
609 /* Is the streamcache flush really needed? */
610 #if 0
611 sc->sc_sysio->sys_strbuf.strbuf_pgflush = va;
612 #else
613 stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
614 #endif
615 sbus_flush(sc);
616 sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)] = tte;
617 #if 0
618 sc->sc_sysio->sys_iommu.iommu_flush = va;
619 #else
620 stxa(&sc->sc_sysio->sys_iommu.iommu_flush,ASI_NUCLEUS,va);
621 #endif
622 #ifdef DEBUG
623 if (sbusdebug & SDB_DVMA)
624 printf("sbus_enter: va %x pa %x:%x TSB[%x]@%p=%x:%x\n",
625 va, (int)(pa>>32), (int)pa, IOTSBSLOT(va,sc->sc_tsbsize),
626 &sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
627 (int)(tte>>32), (int)tte);
628 #endif
629 }
630
/*
 * sbus_remove: remove mappings created by sbus_enter.
 *
 * The streaming buffer is flushed and the IOTSB entry cleared (and the
 * IOMMU TLB flushed) for each page in the range.
 */
636 void
637 sbus_remove(sc, va, len)
638 struct sbus_softc *sc;
639 vaddr_t va;
640 int len;
641 {
642
643 #ifdef DIAGNOSTIC
644 if (va < sc->sc_dvmabase)
645 panic("sbus_remove: va 0x%x not in DVMA space", (int)va);
646 if ((int)(va + len) < (int)va)
647 panic("sbus_remove: va 0x%x + len 0x%x wraps",
648 (int) va, (int) len);
649 #endif
650
651 va = trunc_page(va);
652 while (len > 0) {
653
654 /*
655 * Streaming buffer flushes:
656 *
657 * 1 Tell strbuf to flush by storing va to strbuf_pgflush
658 * If we're not on a cache line boundary (64-bits):
659 * 2 Store 0 in flag
660 * 3 Store pointer to flag in flushsync
661 * 4 wait till flushsync becomes 0x1
662 *
663 * If it takes more than .5 sec, something went wrong.
664 */
665 #if 0
666 sc->sc_sysio->sys_strbuf.strbuf_pgflush = va;
667 #else
668 stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
669 #endif
670 if (len <= NBPG) {
671 sbus_flush(sc);
672 }
673 #ifdef DEBUG
674 if (sbusdebug & SDB_DVMA)
675 printf("sbus_remove: flushed va %p TSB[%x]@%p=%lx:%lx\n",
676 (long)va, (int)IOTSBSLOT(va,sc->sc_tsbsize),
677 (long)&sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
678 (long)((sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)])>>32),
679 (long)(sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)]));
680 #endif
681 sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)] = 0;
682 #if 0
683 sc->sc_sysio->sys_iommu.iommu_flush = va;
684 #else
685 stxa(&sc->sc_sysio->sys_iommu.iommu_flush, ASI_NUCLEUS, va);
686 #endif
687 len -= NBPG;
688 va += NBPG;
689 }
690 }
691
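/*
 * Wait for a posted streaming-buffer flush to complete: clear sc_flush,
 * store its physical address in the flush-sync register, and poll until
 * the hardware sets the flag, giving up after roughly half a second.
 * Returns the flag value, i.e. nonzero on success and 0 on timeout.
 */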
692 int
693 sbus_flush(sc)
694 struct sbus_softc *sc;
695 {
696 extern u_int64_t cpu_clockrate;
697 u_int64_t flushtimeout;
698
699 sc->sc_flush = 0;
	/*
	 * KLUGE ALERT KLUGE ALERT
	 *
	 * In order not to bother with pmap_extract() to do the vtop
	 * translation on every flush, we poll on sc_flush and hand the
	 * hardware its physical address, sc_flushpa, which was recorded
	 * once at attach time.  This means that this routine is NOT
	 * re-entrant.  Since we're single-threaded and poll on this
	 * value, this is currently not a problem.
	 */
#ifdef NOTDEF_DEBUG
	if (sbusdebug & SDB_DVMA)
		printf("sbus_flush: flush = %lx at va = %p pa = %lx:%lx\n",
		    (long)sc->sc_flush, &sc->sc_flush,
		    (long)(sc->sc_flushpa>>32), (long)sc->sc_flushpa);
#endif
715 #if 0
716 sc->sc_sysio->sys_strbuf.strbuf_flushsync = sc->sc_flushpa;
717 #else
718 stxa(&sc->sc_sysio->sys_strbuf.strbuf_flushsync, ASI_NUCLEUS, sc->sc_flushpa);
719 #endif
720 membar_sync();
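	/*
	 * tick() reads the %tick register, which presumably advances at
	 * cpu_clockrate Hz, so cpu_clockrate/2 ticks below amounts to
	 * about half a second of polling.
	 */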
721 flushtimeout = tick() + cpu_clockrate/2; /* .5 sec after *now* */
722 while( !sc->sc_flush && flushtimeout > tick()) membar_sync();
#ifdef DIAGNOSTIC
	if (!sc->sc_flush)
		printf("sbus_flush: flush timeout %lx at %lx\n",
		    (long)sc->sc_flush, (long)sc->sc_flushpa); /* panic? */
#endif
728 return (sc->sc_flush);
729 }
730 /*
731 * Get interrupt attributes for an Sbus device.
732 */
733 int
734 sbus_get_intr(sc, node, ipp, np)
735 struct sbus_softc *sc;
736 int node;
737 struct sbus_intr **ipp;
738 int *np;
739 {
740 int *ipl;
741 int i, n, error;
742 char buf[32];
743
744 /*
745 * The `interrupts' property contains the Sbus interrupt level.
746 */
747 ipl = NULL;
748 if (getprop(node, "interrupts", sizeof(int), np, (void **)&ipl) == 0) {
		/* Change format to a `struct sbus_intr' array */
750 struct sbus_intr *ip;
751 int pri = 0;
752 ip = malloc(*np * sizeof(struct sbus_intr), M_DEVBUF, M_NOWAIT);
753 if (ip == NULL)
754 return (ENOMEM);
755 /* Now things get ugly. We need to take this value which is
756 * the interrupt vector number and encode the IPL into it
757 * somehow. Luckily, the interrupt vector has lots of free
758 * space and we can easily stuff the IPL in there for a while.
759 */
760 getpropstringA(node, "device_type", buf);
761 for (i=0; intrmap[i].in_class; i++) {
762 if (strcmp(intrmap[i].in_class, buf) == 0) {
763 pri = INTLEVENCODE(intrmap[i].in_lev);
764 break;
765 }
766 }
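		/*
		 * The loop above mapped the PROM `device_type' class to
		 * a default interrupt level via intrmap[]; INTLEVENCODE()
		 * packed that level so it can be or'ed with the vector
		 * below without clobbering it.
		 */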
767 for (n = 0; n < *np; n++) {
768 /*
769 * We encode vector and priority into sbi_pri so we
770 * can pass them as a unit. This will go away if
			 * sbus_intr_establish ever takes an sbus_intr instead
772 * of an integer level.
773 * Stuff the real vector in sbi_vec.
774 */
775 ip[n].sbi_pri = pri|ipl[n];
776 ip[n].sbi_vec = ipl[n];
777 }
778 free(ipl, M_DEVBUF);
779 *ipp = ip;
780 return (0);
781 }
782
783 /* We really don't support the following */
784 /* printf("\nWARNING: sbus_get_intr() \"interrupts\" not found -- using \"intr\"\n"); */
785 /* And some devices don't even have interrupts */
786 /*
787 * Fall back on `intr' property.
788 */
789 *ipp = NULL;
790 error = getprop(node, "intr", sizeof(struct sbus_intr),
791 np, (void **)ipp);
792 switch (error) {
793 case 0:
794 for (n = *np; n-- > 0;) {
795 /*
796 * Move the interrupt vector into place.
797 * We could remap the level, but the SBUS priorities
798 * are probably good enough.
799 */
800 (*ipp)[n].sbi_vec = (*ipp)[n].sbi_pri;
801 (*ipp)[n].sbi_pri |= INTLEVENCODE((*ipp)[n].sbi_pri);
802 }
803 break;
804 case ENOENT:
805 error = 0;
806 break;
807 }
808
809 return (error);
810 }
811
812
813 /*
814 * Install an interrupt handler for an Sbus device.
815 */
816 void *
817 sbus_intr_establish(t, level, flags, handler, arg)
818 bus_space_tag_t t;
819 int level;
820 int flags;
821 int (*handler) __P((void *));
822 void *arg;
823 {
824 struct sbus_softc *sc = t->cookie;
825 struct intrhand *ih;
826 int ipl;
827
828 ih = (struct intrhand *)
829 malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
830 if (ih == NULL)
831 return (NULL);
832
833 if ((flags & BUS_INTR_ESTABLISH_SOFTINTR) != 0)
834 ipl = level;
835 else if ((level & SBUS_INTR_COMPAT) != 0)
836 ipl = level & ~SBUS_INTR_COMPAT;
837 else {
838 /* Decode and remove IPL */
839 ipl = INTLEV(level);
840 level = INTVEC(level);
841 #ifdef DEBUG
842 if (sbusdebug & SDB_INTR) {
843 printf("\nsbus: intr[%d]%x: %x\n", ipl, level,
844 intrlev[level]);
845 printf("Hunting for IRQ...\n");
846 }
847 #endif
848 if ((level & INTMAP_OBIO) == 0) {
849 /* We're in an SBUS slot */
850 /* Register the map and clear intr registers */
851 #ifdef DEBUG
852 if (sbusdebug & SDB_INTR) {
853 int64_t *intrptr = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(level)];
854 int64_t intrmap = *intrptr;
855
856 printf("Found SBUS %x IRQ as %x:%x in slot %d\n",
857 level, (int)(intrmap>>32), (int)intrmap,
858 INTSLOT(level));
859 }
860 #endif
861 ih->ih_map = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(level)];
862 ih->ih_clr = &sc->sc_sysio->sbus0_clr_int[INTVEC(level)];
863 /* Enable the interrupt */
864 level |= INTMAP_V;
865 stxa(ih->ih_map, ASI_NUCLEUS, level);
866 } else {
867 int64_t *intrptr = &sc->sc_sysio->scsi_int_map;
868 int64_t intrmap = 0;
869 int i;
870
871 /* Insert IGN */
872 level |= sc->sc_ign;
873 for (i=0;
874 &intrptr[i] <= (int64_t *)&sc->sc_sysio->reserved_int_map &&
875 INTVEC(intrmap=intrptr[i]) != INTVEC(level);
876 i++);
877 if (INTVEC(intrmap) == INTVEC(level)) {
878 #ifdef DEBUG
879 if (sbusdebug & SDB_INTR)
880 printf("Found OBIO %x IRQ as %x:%x in slot %d\n",
881 level, (int)(intrmap>>32), (int)intrmap,
882 i);
883 #endif
884 /* Register the map and clear intr registers */
885 ih->ih_map = &intrptr[i];
886 intrptr = (int64_t *)&sc->sc_sysio->scsi_clr_int;
887 ih->ih_clr = &intrptr[i];
888 /* Enable the interrupt */
889 intrmap |= INTMAP_V;
890 stxa(ih->ih_map, ASI_NUCLEUS, intrmap);
891 } else panic("IRQ not found!");
892 }
893 }
894 #ifdef DEBUG
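	/* Crude spin delay so the preceding debug output can be read. */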
895 if (sbusdebug & SDB_INTR) { int i; for (i=0; i<140000000; i++); }
896 #endif
897
898 ih->ih_fun = handler;
899 ih->ih_arg = arg;
900 ih->ih_number = level;
901 ih->ih_pil = (1<<ipl);
902 if ((flags & BUS_INTR_ESTABLISH_FASTTRAP) != 0)
903 intr_fasttrap(ipl, (void (*)__P((void)))handler);
904 else
905 intr_establish(ipl, ih);
906 return (ih);
907 }
908
909 static bus_space_tag_t
910 sbus_alloc_bustag(sc)
911 struct sbus_softc *sc;
912 {
913 bus_space_tag_t sbt;
914
915 sbt = (bus_space_tag_t)
916 malloc(sizeof(struct sparc_bus_space_tag), M_DEVBUF, M_NOWAIT);
917 if (sbt == NULL)
918 return (NULL);
919
920 bzero(sbt, sizeof *sbt);
921 sbt->cookie = sc;
922 sbt->parent = sc->sc_bustag;
923 sbt->type = ASI_PRIMARY;
924 sbt->sparc_bus_map = _sbus_bus_map;
925 sbt->sparc_bus_mmap = sbus_bus_mmap;
926 sbt->sparc_intr_establish = sbus_intr_establish;
927 return (sbt);
928 }
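/*
 * A rough sketch of how a child driver consumes the tag built above
 * (hypothetical `xx' driver, XX_REGSIZE and xsc; assumes the
 * bus_space_map2() and bus_intr_establish() wrappers from
 * <machine/bus.h> simply invoke the sparc_bus_map/sparc_intr_establish
 * methods installed here):
 *
 *	struct sbus_attach_args *sa = aux;
 *	bus_space_handle_t bh;
 *
 *	if (bus_space_map2(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
 *	    XX_REGSIZE, 0, 0, &bh) != 0)
 *		return;
 *	if (sa->sa_nintr != 0)
 *		(void)bus_intr_establish(sa->sa_bustag,
 *		    sa->sa_intr[0].sbi_pri, 0, xx_intr, xsc);
 */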
929
930
931 static bus_dma_tag_t
932 sbus_alloc_dmatag(sc)
933 struct sbus_softc *sc;
934 {
935 bus_dma_tag_t sdt, psdt = sc->sc_dmatag;
936
937 sdt = (bus_dma_tag_t)
938 malloc(sizeof(struct sparc_bus_dma_tag), M_DEVBUF, M_NOWAIT);
939 if (sdt == NULL)
940 /* Panic? */
941 return (psdt);
942
943 sdt->_cookie = sc;
944 sdt->_parent = psdt;
945 #define PCOPY(x) sdt->x = psdt->x
946 PCOPY(_dmamap_create);
947 PCOPY(_dmamap_destroy);
948 sdt->_dmamap_load = sbus_dmamap_load;
949 PCOPY(_dmamap_load_mbuf);
950 PCOPY(_dmamap_load_uio);
951 PCOPY(_dmamap_load_raw);
952 sdt->_dmamap_unload = sbus_dmamap_unload;
953 sdt->_dmamap_sync = sbus_dmamap_sync;
954 sdt->_dmamem_alloc = sbus_dmamem_alloc;
955 sdt->_dmamem_free = sbus_dmamem_free;
956 sdt->_dmamem_map = sbus_dmamem_map;
957 sdt->_dmamem_unmap = sbus_dmamem_unmap;
958 PCOPY(_dmamem_mmap);
959 #undef PCOPY
960 sc->sc_dmatag = sdt;
961 return (sdt);
962 }
963
964 int
965 sbus_dmamap_load(t, map, buf, buflen, p, flags)
966 bus_dma_tag_t t;
967 bus_dmamap_t map;
968 void *buf;
969 bus_size_t buflen;
970 struct proc *p;
971 int flags;
972 {
973 int err;
974 bus_size_t sgsize;
975 paddr_t curaddr;
976 vaddr_t dvmaddr, vaddr = (vaddr_t)buf;
977 pmap_t pmap;
978 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
979
980 if (map->dm_nsegs) {
981 /* Already in use?? */
982 #ifdef DIAGNOSTIC
983 printf("sbus_dmamap_load: map still in use\n");
984 #endif
985 bus_dmamap_unload(t, map);
986 }
987 if ((err = bus_dmamap_load(t->_parent, map, buf, buflen, p, flags)))
988 return (err);
989
990 if (p != NULL)
991 pmap = p->p_vmspace->vm_map.pmap;
992 else
993 pmap = pmap_kernel();
994
995 dvmaddr = trunc_page(map->dm_segs[0].ds_addr);
996 sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
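	/*
	 * Walk the buffer a page at a time: look up each physical page
	 * and enter it into the IOTSB at the successive DVMA addresses
	 * that the parent map assigned above.
	 */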
997 for (; buflen > 0; ) {
998 /*
999 * Get the physical address for this page.
1000 */
		if ((curaddr = (bus_addr_t)pmap_extract(pmap, (vaddr_t)vaddr)) == 0) {
1002 bus_dmamap_unload(t, map);
1003 return (-1);
1004 }
1005
1006 /*
1007 * Compute the segment size, and adjust counts.
1008 */
1009 sgsize = NBPG - ((u_long)vaddr & PGOFSET);
1010 if (buflen < sgsize)
1011 sgsize = buflen;
1012
1013 #ifdef DEBUG
1014 if (sbusdebug & SDB_DVMA)
1015 printf("sbus_dmamap_load: map %p loading va %x at pa %x\n",
1016 map, (int)dvmaddr, (int)(curaddr & ~(NBPG-1)));
1017 #endif
1018 sbus_enter(sc, trunc_page(dvmaddr), trunc_page(curaddr), flags);
1019
1020 dvmaddr += PAGE_SIZE;
1021 vaddr += sgsize;
1022 buflen -= sgsize;
1023 }
1024 return (0);
1025 }
1026
1027 void
1028 sbus_dmamap_unload(t, map)
1029 bus_dma_tag_t t;
1030 bus_dmamap_t map;
1031 {
1032 vaddr_t addr;
1033 int len;
1034 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1035
1036 if (map->dm_nsegs != 1)
1037 panic("_sbus_dmamap_unload: nsegs = %d", map->dm_nsegs);
1038
1039 addr = trunc_page(map->dm_segs[0].ds_addr);
1040 len = map->dm_segs[0].ds_len;
1041
1042 #ifdef DEBUG
1043 if (sbusdebug & SDB_DVMA)
1044 printf("sbus_dmamap_unload: map %p removing va %x size %x\n",
1045 map, (int)addr, (int)len);
1046 #endif
1047 sbus_remove(sc, addr, len);
1048 bus_dmamap_unload(t->_parent, map);
1049 }
1050
1051
1052 void
1053 sbus_dmamap_sync(t, map, offset, len, ops)
1054 bus_dma_tag_t t;
1055 bus_dmamap_t map;
1056 bus_addr_t offset;
1057 bus_size_t len;
1058 int ops;
1059 {
1060 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1061 vaddr_t va = map->dm_segs[0].ds_addr + offset;
1062
1063
1064 /*
1065 * We only support one DMA segment; supporting more makes this code
 * too unwieldy.
1067 */
1068
1069 if (ops&BUS_DMASYNC_PREREAD)
1070 /* Nothing to do */;
1071 if (ops&BUS_DMASYNC_POSTREAD) {
1072 /*
1073 * We should sync the IOMMU streaming caches here first.
1074 */
1075 while (len > 0) {
1076
1077 /*
1078 * Streaming buffer flushes:
1079 *
1080 * 1 Tell strbuf to flush by storing va to strbuf_pgflush
1081 * If we're not on a cache line boundary (64-bits):
1082 * 2 Store 0 in flag
1083 * 3 Store pointer to flag in flushsync
1084 * 4 wait till flushsync becomes 0x1
1085 *
1086 * If it takes more than .5 sec, something went wrong.
1087 */
1088 #if 0
1089 sc->sc_sysio->sys_strbuf.strbuf_pgflush = va;
1090 #else
1091 stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
1092 #endif
1093 if (len <= NBPG) {
1094 sbus_flush(sc);
1095 }
1096 len -= NBPG;
1097 va += NBPG;
1098 }
1099 }
1100 if (ops&BUS_DMASYNC_PREWRITE)
1101 /* Nothing to do */;
1102 if (ops&BUS_DMASYNC_POSTWRITE)
1103 /* Nothing to do */;
1104 bus_dmamap_sync(t->_parent, map, offset, len, ops);
1105 }
1106
1107 int
1108 sbus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
1109 bus_dma_tag_t t;
1110 bus_size_t size, alignment, boundary;
1111 bus_dma_segment_t *segs;
1112 int nsegs;
1113 int *rsegs;
1114 int flags;
1115 {
1116 paddr_t curaddr;
1117 bus_addr_t dvmaddr;
1118 vm_page_t m;
1119 struct pglist *mlist;
1120 int error;
1121 int n;
1122 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1123
1124 if ((error = bus_dmamem_alloc(t->_parent, size, alignment,
1125 boundary, segs, nsegs, rsegs, flags)))
1126 return (error);
1127
1128 for (n=0; n<*rsegs; n++) {
1129 dvmaddr = segs[n].ds_addr;
1130 size = segs[n].ds_len;
1131 mlist = segs[n]._ds_mlist;
1132
1133 /* Map memory into DVMA space */
1134 for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {
1135 curaddr = VM_PAGE_TO_PHYS(m);
1136 sbus_enter(sc, dvmaddr, curaddr, flags);
1137 dvmaddr += PAGE_SIZE;
1138 }
1139 }
1140 return (0);
1141 }
1142
1143 void
1144 sbus_dmamem_free(t, segs, nsegs)
1145 bus_dma_tag_t t;
1146 bus_dma_segment_t *segs;
1147 int nsegs;
1148 {
1149 vaddr_t addr;
1150 int len;
1151 int n;
1152 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1153
1154
1155 for (n=0; n<nsegs; n++) {
1156 addr = segs[n].ds_addr;
1157 len = segs[n].ds_len;
1158 sbus_remove(sc, addr, len);
1159 }
1160 bus_dmamem_free(t->_parent, segs, nsegs);
1161 }
1162
1163 /*
1164 * Call bus_dmamem_map() to map it into the kernel, then map it into the IOTSB.
1165 * Check the flags to see whether we're streaming or coherent.
1166 */
1167 int
1168 sbus_dmamem_map(t, segs, nsegs, size, kvap, flags)
1169 bus_dma_tag_t t;
1170 bus_dma_segment_t *segs;
1171 int nsegs;
1172 size_t size;
1173 caddr_t *kvap;
1174 int flags;
1175 {
1176 vm_page_t m;
1177 vaddr_t va;
1178 bus_addr_t addr;
1179 struct pglist *mlist;
1180 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1181 int cbit;
1182 int rval;
1183
1184 /*
1185 * First have the parent driver allocate some address space in DVMA space.
1186 */
1187 if ((rval = bus_dmamem_map(t->_parent, segs, nsegs, size, kvap, flags)))
1188 return (rval);
1189
1190 /*
1191 * digest flags:
1192 */
1193 cbit = 0;
1194 if (flags & BUS_DMA_COHERENT) /* Disable vcache */
1195 cbit |= PMAP_NVC;
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
1197 cbit |= PMAP_NC;
1198 /*
1199 * Now take this and map it both into the CPU and into the IOMMU.
1200 */
1201 va = (vaddr_t)*kvap;
1202 mlist = segs[0]._ds_mlist;
1203 for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {
1204
1205 if (size == 0)
1206 panic("_bus_dmamem_map: size botch");
1207
1208 addr = VM_PAGE_TO_PHYS(m);
1209 pmap_enter(pmap_kernel(), va, addr | cbit,
1210 VM_PROT_READ | VM_PROT_WRITE, TRUE);
1211 sbus_enter(sc, va, addr, flags);
1212 va += PAGE_SIZE;
1213 size -= PAGE_SIZE;
1214 }
1215
1216 return (0);
1217 }
1218
1219 /*
1220 * Common function for unmapping DMA-safe memory. May be called by
1221 * bus-specific DMA memory unmapping functions.
1222 */
1223 void
1224 sbus_dmamem_unmap(t, kva, size)
1225 bus_dma_tag_t t;
1226 caddr_t kva;
1227 size_t size;
1228 {
1229 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1230
1231 #ifdef DIAGNOSTIC
1232 if ((u_long)kva & PGOFSET)
1233 panic("_bus_dmamem_unmap");
1234 #endif
1235
1236 size = round_page(size);
1237 sbus_remove(sc, (vaddr_t)kva, size);
1238 bus_dmamem_unmap(t->_parent, kva, size);
1239 }
1240