/*	$NetBSD: sbus.c,v 1.12 1999/05/22 20:33:56 eeh Exp $	*/
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1992, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * This software was developed by the Computer Systems Engineering group
44 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
45 * contributed to Berkeley.
46 *
47 * All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by the University of
50 * California, Lawrence Berkeley Laboratory.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 * must display the following acknowledgement:
62 * This product includes software developed by the University of
63 * California, Berkeley and its contributors.
64 * 4. Neither the name of the University nor the names of its contributors
65 * may be used to endorse or promote products derived from this software
66 * without specific prior written permission.
67 *
68 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
69 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
70 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
71 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
72 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
73 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
74 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
75 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
76 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
77 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
78 * SUCH DAMAGE.
79 *
80 * @(#)sbus.c 8.1 (Berkeley) 6/11/93
81 */
82
83 /*
 * Sbus autoconfiguration, interrupt and IOMMU/DVMA support.
85 */
86 #include "opt_ddb.h"
87
88 #include <sys/param.h>
89 #include <sys/extent.h>
90 #include <sys/malloc.h>
91 #include <sys/systm.h>
92 #include <sys/device.h>
93 #include <vm/vm.h>
94
95 #include <machine/bus.h>
96 #include <sparc64/sparc64/vaddrs.h>
97 #include <sparc64/dev/sbusreg.h>
98 #include <dev/sbus/sbusvar.h>
99
100 #include <machine/autoconf.h>
101 #include <machine/ctlreg.h>
102 #include <machine/cpu.h>
103 #include <machine/sparc64.h>
104
105 #ifdef DEBUG
106 #define SDB_DVMA 0x1
107 #define SDB_INTR 0x2
108 int sbusdebug = 0;
109 #endif
110
111 void sbusreset __P((int));
112 int sbus_flush __P((struct sbus_softc *));
113
114 static bus_space_tag_t sbus_alloc_bustag __P((struct sbus_softc *));
115 static bus_dma_tag_t sbus_alloc_dmatag __P((struct sbus_softc *));
116 static int sbus_get_intr __P((struct sbus_softc *, int,
117 struct sbus_intr **, int *));
118 static int sbus_bus_mmap __P((bus_space_tag_t, bus_type_t, bus_addr_t,
119 int, bus_space_handle_t *));
120 static int _sbus_bus_map __P((
121 bus_space_tag_t,
122 bus_type_t,
123 bus_addr_t, /*offset*/
124 bus_size_t, /*size*/
125 int, /*flags*/
126 vaddr_t, /*preferred virtual address */
127 bus_space_handle_t *));
128 static void *sbus_intr_establish __P((
129 bus_space_tag_t,
130 int, /*level*/
131 int, /*flags*/
132 int (*) __P((void *)), /*handler*/
133 void *)); /*handler arg*/
134
135
136 /* autoconfiguration driver */
137 int sbus_match __P((struct device *, struct cfdata *, void *));
138 void sbus_attach __P((struct device *, struct device *, void *));
139
140
141 struct cfattach sbus_ca = {
142 sizeof(struct sbus_softc), sbus_match, sbus_attach
143 };
144
145 extern struct cfdriver sbus_cd;
146
147 /*
148 * DVMA routines
149 */
150 void sbus_enter __P((struct sbus_softc *, vaddr_t, int64_t, int));
151 void sbus_remove __P((struct sbus_softc *, vaddr_t, size_t));
152 int sbus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
153 bus_size_t, struct proc *, int));
154 void sbus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
155 void sbus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
156 bus_size_t, int));
157 int sbus_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
158 bus_size_t alignment, bus_size_t boundary,
159 bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
160 void sbus_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
161 int nsegs));
162 int sbus_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
163 int nsegs, size_t size, caddr_t *kvap, int flags));
164 void sbus_dmamem_unmap __P((bus_dma_tag_t tag, caddr_t kva,
165 size_t size));
166
167
168 /*
169 * Child devices receive the Sbus interrupt level in their attach
170 * arguments. We translate these to CPU IPLs using the following
171 * tables. Note: obio bus interrupt levels are identical to the
172 * processor IPL.
173 *
174 * The second set of tables is used when the Sbus interrupt level
175 * cannot be had from the PROM as an `interrupt' property. We then
176 * fall back on the `intr' property which contains the CPU IPL.
177 */
178
179 /* Translate Sbus interrupt level to processor IPL */
180 static int intr_sbus2ipl_4c[] = {
181 0, 1, 2, 3, 5, 7, 8, 9
182 };
183 static int intr_sbus2ipl_4m[] = {
184 0, 2, 3, 5, 7, 9, 11, 13
185 };
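
/*
 * Example: an Sbus device interrupting at Sbus level 3 is delivered at
 * CPU IPL 3 on a sun4c (intr_sbus2ipl_4c[3]) and at IPL 5 on a sun4m
 * (intr_sbus2ipl_4m[3]).
 */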
186
187 /*
188 * This value is or'ed into the attach args' interrupt level cookie
189 * if the interrupt level comes from an `intr' property, i.e. it is
190 * not an Sbus interrupt level.
191 */
192 #define SBUS_INTR_COMPAT 0x80000000
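
/*
 * For example, a cookie of (SBUS_INTR_COMPAT | 7) is decoded directly to
 * CPU IPL 7 by sbus_intr_establish() below, bypassing the Sbus-to-IPL
 * translation tables above.
 */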
193
194
195 /*
196 * Print the location of some sbus-attached device (called just
197 * before attaching that device). If `sbus' is not NULL, the
198 * device was found but not configured; print the sbus as well.
199 * Return UNCONF (config_find ignores this if the device was configured).
200 */
201 int
202 sbus_print(args, busname)
203 void *args;
204 const char *busname;
205 {
206 struct sbus_attach_args *sa = args;
207 int i;
208
209 if (busname)
210 printf("%s at %s", sa->sa_name, busname);
211 printf(" slot %ld offset 0x%lx", (long)sa->sa_slot,
212 (u_long)sa->sa_offset);
213 for (i=0; i<sa->sa_nintr; i++) {
214 struct sbus_intr *sbi = &sa->sa_intr[i];
215
216 printf(" vector %lx ipl %ld",
217 (u_long)sbi->sbi_vec,
218 (long)INTLEV(sbi->sbi_pri));
219 }
220 return (UNCONF);
221 }
222
223 int
224 sbus_match(parent, cf, aux)
225 struct device *parent;
226 struct cfdata *cf;
227 void *aux;
228 {
229 struct mainbus_attach_args *ma = aux;
230
231 return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
232 }
233
234 /*
235 * Attach an Sbus.
236 */
237 void
238 sbus_attach(parent, self, aux)
239 struct device *parent;
240 struct device *self;
241 void *aux;
242 {
243 struct sbus_softc *sc = (struct sbus_softc *)self;
244 struct mainbus_attach_args *ma = aux;
245 int node = ma->ma_node;
246
247 int node0, error;
248 bus_space_tag_t sbt;
249 struct sbus_attach_args sa;
250 char *busname = "sbus";
251 struct bootpath *bp = ma->ma_bp;
252
253
254 sc->sc_bustag = ma->ma_bustag;
255 sc->sc_dmatag = ma->ma_dmatag;
256 sc->sc_sysio = (struct sysioreg*)(u_long)ma->ma_address[0]; /* Use prom mapping for sysio. */
257 sc->sc_ign = ma->ma_interrupts[0] & INTMAP_IGN; /* Find interrupt group no */
258
259 /* Setup interrupt translation tables */
260 sc->sc_intr2ipl = CPU_ISSUN4C
261 ? intr_sbus2ipl_4c
262 : intr_sbus2ipl_4m;
263
264 /*
265 * Record clock frequency for synchronous SCSI.
266 * IS THIS THE CORRECT DEFAULT??
267 */
268 sc->sc_clockfreq = getpropint(node, "clock-frequency", 25*1000*1000);
269 printf(": clock = %s MHz\n", clockfreq(sc->sc_clockfreq));
270
271 sbt = sbus_alloc_bustag(sc);
272 sc->sc_dmatag = sbus_alloc_dmatag(sc);
273
274 /*
275 * Get the SBus burst transfer size if burst transfers are supported
276 */
277 sc->sc_burst = getpropint(node, "burst-sizes", 0);
278
279 /* Propagate bootpath */
280 if (bp != NULL && strcmp(bp->name, busname) == 0)
281 bp++;
282 else
283 bp = NULL;
284
285 /*
286 * Collect address translations from the OBP.
287 */
288 error = getprop(node, "ranges", sizeof(struct sbus_range),
289 &sc->sc_nrange, (void **)&sc->sc_range);
290 switch (error) {
291 case 0:
292 break;
293 #if 0
294 case ENOENT:
295 /* Fall back to our own `range' construction */
296 sc->sc_range = sbus_translations;
297 sc->sc_nrange =
298 sizeof(sbus_translations)/sizeof(sbus_translations[0]);
299 break;
300 #endif
301 default:
302 panic("%s: error getting ranges property", sc->sc_dev.dv_xname);
303 }
304
305
306 /*
307 * Setup the iommu.
308 *
309 * The sun4u iommu is part of the SBUS controller so we will
310 * deal with it here. We could try to fake a device node so
311 * we can eventually share it with the PCI bus run by psycho,
312 * but I don't want to get into that sort of cruft.
313 *
314 * First we need to allocate a IOTSB. Problem is that the IOMMU
315 * can only access the IOTSB by physical address, so all the
316 * pages must be contiguous. Luckily, the smallest IOTSB size
317 * is one 8K page.
318 */
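	/*
	 * Note: with sc_tsbsize = 0 the IOTSB holds 1024 8-byte entries
	 * (one 8K page, hence the single-NBPG malloc below), and since each
	 * entry maps one 8K page this gives roughly 8MB of DVMA space.
	 */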
319 #if 1
320 sc->sc_tsbsize = 0;
321 sc->sc_tsb = malloc(NBPG, M_DMAMAP, M_WAITOK);
322 sc->sc_ptsb = pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_tsb);
323 #else
324
325 /*
326 * All IOMMUs will share the same TSB which is allocated in pmap_bootstrap.
327 *
328 * This makes device management easier.
329 */
330 {
331 extern int64_t *iotsb;
332 extern paddr_t iotsbp;
333 extern int iotsbsize;
334
335 sc->sc_tsbsize = iotsbsize;
336 sc->sc_tsb = iotsb;
337 sc->sc_ptsb = iotsbp;
338 }
339 #endif
340 #if 1
341 /* Need to do 64-bit stores */
342 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_cr,
343 0, (IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
344 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_tsb,
345 0, sc->sc_ptsb);
346 #else
347 stxa(&sc->sc_sysio->sys_iommu.iommu_cr,ASI_NUCLEUS,(IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
348 stxa(&sc->sc_sysio->sys_iommu.iommu_tsb,ASI_NUCLEUS,sc->sc_ptsb);
349 #endif
350 #ifdef DEBUG
351 if (sbusdebug & SDB_DVMA)
352 {
353 /* Probe the iommu */
354 int64_t cr, tsb;
355
		printf("iommu regs at: cr=%p tsb=%p flush=%p\n",
		    &sc->sc_sysio->sys_iommu.iommu_cr,
		    &sc->sc_sysio->sys_iommu.iommu_tsb,
		    &sc->sc_sysio->sys_iommu.iommu_flush);
358 cr = sc->sc_sysio->sys_iommu.iommu_cr;
359 tsb = sc->sc_sysio->sys_iommu.iommu_tsb;
360 printf("iommu cr=%lx tsb=%lx\n", (long)cr, (long)tsb);
		printf("sysio base %p phys %lx TSB base %p phys %lx\n",
		    sc->sc_sysio,
		    (u_long)pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_sysio),
		    sc->sc_tsb, (u_long)sc->sc_ptsb);
364 delay(1000000); /* 1 s */
365 }
366 #endif
367
368 /*
369 * Initialize streaming buffer.
370 */
371 sc->sc_flushpa = pmap_extract(pmap_kernel(), (vaddr_t)&sc->sc_flush);
372 #if 1
373 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_ctl,
374 0, STRBUF_EN); /* Enable diagnostics mode? */
375 #else
376 stxa(&sc->sc_sysio->sys_strbuf.strbuf_ctl,ASI_NUCLEUS,STRBUF_EN);
377 #endif
378
379 /*
380 * Now all the hardware's working we need to allocate a dvma map.
381 *
	 * The IOMMU address space always ends at 0xffffe000, but the starting
	 * address depends on the size of the map.  The map has 1024 * 2 ^
	 * sc->sc_tsbsize entries, where each 8-byte entry maps one 8K page;
	 * the start of the map is therefore given by IOTSB_VSTART(sc->sc_tsbsize).
386 *
387 * Note: the stupid IOMMU ignores the high bits of an address, so a
388 * NULL DMA pointer will be translated by the first page of the IOTSB.
389 * To trap bugs we'll skip the first entry in the IOTSB.
390 */
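	/*
	 * With the 1K-entry TSB set up above, the extent below therefore
	 * covers (roughly) the 8MB of DVMA space ending at IOTSB_VEND,
	 * minus the first page we skip.
	 */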
391 sc->sc_dvmamap = extent_create("SBus dvma", /* XXXX should have instance number */
392 IOTSB_VSTART(sc->sc_tsbsize) + NBPG, IOTSB_VEND,
393 M_DEVBUF, 0, 0, EX_NOWAIT);
394
395 /*
396 * Loop through ROM children, fixing any relative addresses
397 * and then configuring each device.
	 */
401 node0 = firstchild(node);
402 for (node = node0; node; node = nextsibling(node)) {
403 char *name = getpropstring(node, "name");
404
405 if (sbus_setup_attach_args(sc, sbt, sc->sc_dmatag,
406 node, bp, &sa) != 0) {
407 printf("sbus_attach: %s: incomplete\n", name);
408 continue;
409 }
410 (void) config_found(&sc->sc_dev, (void *)&sa, sbus_print);
411 sbus_destroy_attach_args(&sa);
412 }
413 }
414
415 int
416 sbus_setup_attach_args(sc, bustag, dmatag, node, bp, sa)
417 struct sbus_softc *sc;
418 bus_space_tag_t bustag;
419 bus_dma_tag_t dmatag;
420 int node;
421 struct bootpath *bp;
422 struct sbus_attach_args *sa;
423 {
424 /*struct sbus_reg sbusreg;*/
425 /*int base;*/
426 int error;
427 int n;
428
429 bzero(sa, sizeof(struct sbus_attach_args));
430 error = getprop(node, "name", 1, &n, (void **)&sa->sa_name);
431 if (error != 0)
432 return (error);
433 sa->sa_name[n] = '\0';
434
435 sa->sa_bustag = bustag;
436 sa->sa_dmatag = dmatag;
437 sa->sa_node = node;
438 sa->sa_bp = bp;
439
440 error = getprop(node, "reg", sizeof(struct sbus_reg),
441 &sa->sa_nreg, (void **)&sa->sa_reg);
442 if (error != 0) {
443 char buf[32];
444 if (error != ENOENT ||
445 !node_has_property(node, "device_type") ||
446 strcmp(getpropstringA(node, "device_type", buf),
447 "hierarchical") != 0)
448 return (error);
449 }
450 for (n = 0; n < sa->sa_nreg; n++) {
451 /* Convert to relative addressing, if necessary */
452 u_int32_t base = sa->sa_reg[n].sbr_offset;
453 if (SBUS_ABS(base)) {
454 sa->sa_reg[n].sbr_slot = SBUS_ABS_TO_SLOT(base);
455 sa->sa_reg[n].sbr_offset = SBUS_ABS_TO_OFFSET(base);
456 }
457 }
458
459 if ((error = sbus_get_intr(sc, node, &sa->sa_intr, &sa->sa_nintr)) != 0)
460 return (error);
461
462 error = getprop(node, "address", sizeof(u_int32_t),
463 &sa->sa_npromvaddrs, (void **)&sa->sa_promvaddrs);
464 if (error != 0 && error != ENOENT)
465 return (error);
466
467 return (0);
468 }
469
470 void
471 sbus_destroy_attach_args(sa)
472 struct sbus_attach_args *sa;
473 {
474 if (sa->sa_name != NULL)
475 free(sa->sa_name, M_DEVBUF);
476
477 if (sa->sa_nreg != 0)
478 free(sa->sa_reg, M_DEVBUF);
479
480 if (sa->sa_intr)
481 free(sa->sa_intr, M_DEVBUF);
482
483 if (sa->sa_promvaddrs)
484 free((void *)sa->sa_promvaddrs, M_DEVBUF);
485
486 bzero(sa, sizeof(struct sbus_attach_args));/*DEBUG*/
487 }
488
489
490 int
491 _sbus_bus_map(t, btype, offset, size, flags, vaddr, hp)
492 bus_space_tag_t t;
493 bus_type_t btype;
494 bus_addr_t offset;
495 bus_size_t size;
496 int flags;
497 vaddr_t vaddr;
498 bus_space_handle_t *hp;
499 {
500 struct sbus_softc *sc = t->cookie;
501 int64_t slot = btype;
502 int i;
503
504 for (i = 0; i < sc->sc_nrange; i++) {
505 bus_addr_t paddr;
506
507 if (sc->sc_range[i].cspace != slot)
508 continue;
509
510 /* We've found the connection to the parent bus */
511 paddr = sc->sc_range[i].poffset + offset;
512 paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
513 #ifdef DEBUG
514 if (sbusdebug & SDB_DVMA)
515 printf("\n_sbus_bus_map: mapping paddr slot %lx offset %lx poffset %lx paddr %lx\n",
516 (long)slot, (long)offset, (long)sc->sc_range[i].poffset, (long)paddr);
517 #endif
518 return (bus_space_map2(sc->sc_bustag, 0, paddr,
519 size, flags, vaddr, hp));
520 }
521
522 return (EINVAL);
523 }
524
525 int
526 sbus_bus_mmap(t, btype, paddr, flags, hp)
527 bus_space_tag_t t;
528 bus_type_t btype;
529 bus_addr_t paddr;
530 int flags;
531 bus_space_handle_t *hp;
532 {
533 bus_addr_t offset = paddr;
534 int slot = (paddr>>32);
535 struct sbus_softc *sc = t->cookie;
536 int i;
537
538 for (i = 0; i < sc->sc_nrange; i++) {
539 bus_addr_t paddr;
540
541 if (sc->sc_range[i].cspace != slot)
542 continue;
543
544 paddr = sc->sc_range[i].poffset + offset;
545 paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
546 return (bus_space_mmap(sc->sc_bustag, 0, paddr,
547 flags, hp));
548 }
549
550 return (-1);
551 }
552
553
554 /*
555 * Each attached device calls sbus_establish after it initializes
556 * its sbusdev portion.
557 */
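/*
 * A typical caller (sketch only; the sc_sd softc member and xxreset are
 * placeholder names) would do, from its attach function:
 *
 *	sc->sc_sd.sd_reset = xxreset;
 *	sbus_establish(&sc->sc_sd, &sc->sc_dev);
 *
 * which links the device onto sc_sbdev so sbusreset() can reach it.
 */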
558 void
559 sbus_establish(sd, dev)
560 register struct sbusdev *sd;
561 register struct device *dev;
562 {
563 register struct sbus_softc *sc;
564 register struct device *curdev;
565
566 /*
567 * We have to look for the sbus by name, since it is not necessarily
568 * our immediate parent (i.e. sun4m /iommu/sbus/espdma/esp)
569 * We don't just use the device structure of the above-attached
570 * sbus, since we might (in the future) support multiple sbus's.
571 */
572 for (curdev = dev->dv_parent; ; curdev = curdev->dv_parent) {
573 if (!curdev || !curdev->dv_xname)
574 panic("sbus_establish: can't find sbus parent for %s",
575 sd->sd_dev->dv_xname
576 ? sd->sd_dev->dv_xname
577 : "<unknown>" );
578
579 if (strncmp(curdev->dv_xname, "sbus", 4) == 0)
580 break;
581 }
582 sc = (struct sbus_softc *) curdev;
583
584 sd->sd_dev = dev;
585 sd->sd_bchain = sc->sc_sbdev;
586 sc->sc_sbdev = sd;
587 }
588
589 /*
590 * Reset the given sbus. (???)
591 */
592 void
593 sbusreset(sbus)
594 int sbus;
595 {
596 register struct sbusdev *sd;
597 struct sbus_softc *sc = sbus_cd.cd_devs[sbus];
598 struct device *dev;
599
600 printf("reset %s:", sc->sc_dev.dv_xname);
601 for (sd = sc->sc_sbdev; sd != NULL; sd = sd->sd_bchain) {
602 if (sd->sd_reset) {
603 dev = sd->sd_dev;
604 (*sd->sd_reset)(dev);
605 printf(" %s", dev->dv_xname);
606 }
607 }
608 #if 1
609 /* Reload iommu regs */
610 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_cr,
611 0, (IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
612 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_tsb,
613 0, sc->sc_ptsb);
614 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_ctl,
615 0, STRBUF_EN); /* Enable diagnostics mode? */
616 #else
617 /* Reload iommu regs */
618 stxa(&sc->sc_sysio->sys_iommu.iommu_cr,ASI_NUCLEUS,(IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
619 stxa(&sc->sc_sysio->sys_iommu.iommu_tsb,ASI_NUCLEUS,sc->sc_ptsb);
620 stxa(&sc->sc_sysio->sys_strbuf.strbuf_ctl,ASI_NUCLEUS,STRBUF_EN);
621 #endif
622 }
623
624 /*
625 * Here are the iommu control routines.
626 */
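/*
 * sbus_enter() installs a single 8K IOTTE for a DVMA address,
 * sbus_remove() tears mappings down again page by page, and
 * sbus_flush() synchronizes with the streaming buffer; all three poke
 * the sysio registers through sc_bustag.
 */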
627 void
628 sbus_enter(sc, va, pa, flags)
629 struct sbus_softc *sc;
630 vaddr_t va;
631 int64_t pa;
632 int flags;
633 {
634 int64_t tte;
635
636 #ifdef DIAGNOSTIC
637 if (va < sc->sc_dvmabase)
638 panic("sbus_enter: va 0x%lx not in DVMA space",va);
639 #endif
640
641 tte = MAKEIOTTE(pa, !(flags&BUS_DMA_NOWRITE), !(flags&BUS_DMA_NOCACHE),
642 !(flags&BUS_DMA_COHERENT));
643
644 /* Is the streamcache flush really needed? */
645 #if 1
646 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush,
647 0, va);
648 #else
649 stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
650 #endif
651 sbus_flush(sc);
652 #ifdef DEBUG
653 if (sbusdebug & SDB_DVMA)
		printf("sbus_enter: setting TSB slot %d for va %lx\n",
		    (int)IOTSBSLOT(va,sc->sc_tsbsize), (u_long)va);
655 #endif
656 sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)] = tte;
657 #if 1
658 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_flush,
659 0, va);
660 #else
661 stxa(&sc->sc_sysio->sys_iommu.iommu_flush,ASI_NUCLEUS,va);
662 #endif
663 #ifdef DEBUG
664 if (sbusdebug & SDB_DVMA)
665 printf("sbus_enter: va %lx pa %lx TSB[%lx]@%p=%lx\n",
666 va, (long)pa, IOTSBSLOT(va,sc->sc_tsbsize),
667 &sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
668 (long)tte);
669 #endif
670 }
671
/*
 * sbus_remove: tear down mappings created by sbus_enter, flushing the
 * streaming buffer and the IOMMU TLB for each page as we go.
 */
677 void
678 sbus_remove(sc, va, len)
679 struct sbus_softc *sc;
680 vaddr_t va;
681 size_t len;
682 {
683
684 #ifdef DIAGNOSTIC
685 if (va < sc->sc_dvmabase)
686 panic("sbus_remove: va 0x%lx not in DVMA space", (long)va);
687 if ((long)(va + len) < (long)va)
688 panic("sbus_remove: va 0x%lx + len 0x%lx wraps",
689 (long) va, (long) len);
690 if (len & ~0xfffffff)
		panic("sbus_remove: ridiculous len 0x%lx", (long)len);
692 #endif
693
694 va = trunc_page(va);
695 while (len > 0) {
696
697 /*
698 * Streaming buffer flushes:
699 *
700 * 1 Tell strbuf to flush by storing va to strbuf_pgflush
701 * If we're not on a cache line boundary (64-bits):
702 * 2 Store 0 in flag
703 * 3 Store pointer to flag in flushsync
704 * 4 wait till flushsync becomes 0x1
705 *
706 * If it takes more than .5 sec, something went wrong.
707 */
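		/*
		 * Step 1 is done here; steps 2-4 are performed by
		 * sbus_flush() once we reach the last page of the range.
		 */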
708 #ifdef DEBUG
709 if (sbusdebug & SDB_DVMA)
			printf("sbus_remove: flushing va %lx TSB[%lx]@%p=%lx, %lu bytes left\n",
			       (u_long)va, (u_long)IOTSBSLOT(va,sc->sc_tsbsize),
			       &sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
			       (u_long)(sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)]),
			       (u_long)len);
715 #endif
716 #if 1
717 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush, 0, va);
718 #else
719 stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
720 #endif
721 if (len <= NBPG) {
722 sbus_flush(sc);
723 len = 0;
724 } else len -= NBPG;
725 #ifdef DEBUG
726 if (sbusdebug & SDB_DVMA)
			printf("sbus_remove: flushed va %lx TSB[%lx]@%p=%lx, %lu bytes left\n",
			       (u_long)va, (u_long)IOTSBSLOT(va,sc->sc_tsbsize),
			       &sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
			       (u_long)(sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)]),
			       (u_long)len);
732 #endif
733 sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)] = 0;
734 #if 1
735 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_flush, 0, va);
736 #else
737 stxa(&sc->sc_sysio->sys_iommu.iommu_flush, ASI_NUCLEUS, va);
738 #endif
739 va += NBPG;
740 }
741 }
742
743 int
744 sbus_flush(sc)
745 struct sbus_softc *sc;
746 {
747 extern u_int64_t cpu_clockrate;
748 u_int64_t flushtimeout;
749
750 sc->sc_flush = 0;
751 membar_sync();
752 #if 1
753 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_flushsync, 0, sc->sc_flushpa);
754 #else
755 stxa(&sc->sc_sysio->sys_strbuf.strbuf_flushsync, ASI_NUCLEUS, sc->sc_flushpa);
756 #endif
757 membar_sync();
758 flushtimeout = tick() + cpu_clockrate/2; /* .5 sec after *now* */
759 #ifdef DEBUG
760 if (sbusdebug & SDB_DVMA)
761 printf("sbus_flush: flush = %lx at va = %lx pa = %lx now=%lx until = %lx\n",
762 (long)sc->sc_flush, (long)&sc->sc_flush,
763 (long)sc->sc_flushpa, (long)tick(), flushtimeout);
764 #endif
765 /* Bypass non-coherent D$ */
766 #if 0
767 while( !ldxa(sc->sc_flushpa, ASI_PHYS_CACHED) && flushtimeout > tick()) membar_sync();
768 #else
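	/*
	 * Poll the flush flag through its physical address (bypassing the
	 * non-coherent D$, as noted above); the fixed iteration count is a
	 * crude stand-in for the .5 sec flushtimeout computed above.
	 */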
769 { int i; for(i=140000000/2; !ldxa(sc->sc_flushpa, ASI_PHYS_CACHED) && i; i--) membar_sync(); }
770 #endif
771 #ifdef DIAGNOSTIC
	if (!sc->sc_flush) {
		printf("sbus_flush: flush timeout %lx at pa %lx\n",
		    (long)sc->sc_flush, (long)sc->sc_flushpa); /* panic? */
775 #ifdef DDB
776 Debugger();
777 #endif
778 }
779 #endif
780 #ifdef DEBUG
781 if (sbusdebug & SDB_DVMA)
782 printf("sbus_flush: flushed\n");
783 #endif
784 return (sc->sc_flush);
785 }
786 /*
787 * Get interrupt attributes for an Sbus device.
788 */
789 int
790 sbus_get_intr(sc, node, ipp, np)
791 struct sbus_softc *sc;
792 int node;
793 struct sbus_intr **ipp;
794 int *np;
795 {
796 int *ipl;
797 int i, n, error;
798 char buf[32];
799
800 /*
801 * The `interrupts' property contains the Sbus interrupt level.
802 */
803 ipl = NULL;
804 if (getprop(node, "interrupts", sizeof(int), np, (void **)&ipl) == 0) {
		/* Change format to a `struct sbus_intr' array */
806 struct sbus_intr *ip;
807 /* Default to interrupt level 2 -- otherwise unused */
808 int pri = INTLEVENCODE(2);
809 ip = malloc(*np * sizeof(struct sbus_intr), M_DEVBUF, M_NOWAIT);
810 if (ip == NULL)
811 return (ENOMEM);
		/*
		 * Now things get ugly.  We need to take this value which is
		 * the interrupt vector number and encode the IPL into it
		 * somehow.  Luckily, the interrupt vector has lots of free
		 * space and we can easily stuff the IPL in there for a while.
		 */
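		/*
		 * E.g. (hypothetical values) a device whose `interrupts'
		 * property reads 0x24 and whose class maps to IPL 6 ends up
		 * with sbi_pri = INTLEVENCODE(6) | 0x24 and sbi_vec = 0x24.
		 */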
817 getpropstringA(node, "device_type", buf);
818 if (!buf[0]) {
819 getpropstringA(node, "name", buf);
820 }
821 for (i=0; intrmap[i].in_class; i++) {
822 if (strcmp(intrmap[i].in_class, buf) == 0) {
823 pri = INTLEVENCODE(intrmap[i].in_lev);
824 break;
825 }
826 }
827 for (n = 0; n < *np; n++) {
828 /*
829 * We encode vector and priority into sbi_pri so we
830 * can pass them as a unit. This will go away if
831 * sbus_establish ever takes an sbus_intr instead
832 * of an integer level.
833 * Stuff the real vector in sbi_vec.
834 */
835 ip[n].sbi_pri = pri|ipl[n];
836 ip[n].sbi_vec = ipl[n];
837 }
838 free(ipl, M_DEVBUF);
839 *ipp = ip;
840 return (0);
841 }
842
843 /* We really don't support the following */
844 /* printf("\nWARNING: sbus_get_intr() \"interrupts\" not found -- using \"intr\"\n"); */
845 /* And some devices don't even have interrupts */
846 /*
847 * Fall back on `intr' property.
848 */
849 *ipp = NULL;
850 error = getprop(node, "intr", sizeof(struct sbus_intr),
851 np, (void **)ipp);
852 switch (error) {
853 case 0:
854 for (n = *np; n-- > 0;) {
855 /*
856 * Move the interrupt vector into place.
857 * We could remap the level, but the SBUS priorities
858 * are probably good enough.
859 */
860 (*ipp)[n].sbi_vec = (*ipp)[n].sbi_pri;
861 (*ipp)[n].sbi_pri |= INTLEVENCODE((*ipp)[n].sbi_pri);
862 }
863 break;
864 case ENOENT:
865 error = 0;
866 break;
867 }
868
869 return (error);
870 }
871
872
873 /*
874 * Install an interrupt handler for an Sbus device.
875 */
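/*
 * Child drivers normally reach this through the machine-dependent
 * bus_intr_establish() wrapper with the cookie from their attach args,
 * roughly (sketch only; xxintr and xxsc are placeholders, and the exact
 * wrapper signature is assumed to match sparc_intr_establish):
 *
 *	(void)bus_intr_establish(sa->sa_bustag, sa->sa_intr[0].sbi_pri,
 *	    0, xxintr, xxsc);
 *
 * The sbi_pri cookie carries both the IPL and the interrupt vector,
 * which are separated again below with INTLEV()/INTVEC().
 */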
876 void *
877 sbus_intr_establish(t, level, flags, handler, arg)
878 bus_space_tag_t t;
879 int level;
880 int flags;
881 int (*handler) __P((void *));
882 void *arg;
883 {
884 struct sbus_softc *sc = t->cookie;
885 struct intrhand *ih;
886 int ipl;
887 long vec = level;
888
889 ih = (struct intrhand *)
890 malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
891 if (ih == NULL)
892 return (NULL);
893
894 if ((flags & BUS_INTR_ESTABLISH_SOFTINTR) != 0)
895 ipl = vec;
896 else if ((vec & SBUS_INTR_COMPAT) != 0)
897 ipl = vec & ~SBUS_INTR_COMPAT;
898 else {
899 /* Decode and remove IPL */
900 ipl = INTLEV(vec);
901 vec = INTVEC(vec);
902 #ifdef DEBUG
903 if (sbusdebug & SDB_INTR) {
904 printf("\nsbus: intr[%ld]%lx: %lx\n", (long)ipl, (long)vec,
905 intrlev[vec]);
906 printf("Hunting for IRQ...\n");
907 }
908 #endif
909 if ((vec & INTMAP_OBIO) == 0) {
910 /* We're in an SBUS slot */
911 /* Register the map and clear intr registers */
912 #ifdef DEBUG
913 if (sbusdebug & SDB_INTR) {
914 int64_t *intrptr = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(vec)];
915 int64_t intrmap = *intrptr;
916
				printf("Found SBUS %lx IRQ as %llx in slot %ld\n",
				       (long)vec, (long long)intrmap,
				       (long)INTSLOT(vec));
920 }
921 #endif
922 ih->ih_map = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(vec)];
923 ih->ih_clr = &sc->sc_sysio->sbus0_clr_int[INTVEC(vec)];
924 /* Enable the interrupt */
925 vec |= INTMAP_V;
926 /* Insert IGN */
927 vec |= sc->sc_ign;
928 bus_space_write_8(sc->sc_bustag, ih->ih_map, 0, vec);
929 } else {
930 int64_t *intrptr = &sc->sc_sysio->scsi_int_map;
931 int64_t intrmap = 0;
932 int i;
933
934 /* Insert IGN */
935 vec |= sc->sc_ign;
936 for (i=0;
937 &intrptr[i] <= (int64_t *)&sc->sc_sysio->reserved_int_map &&
938 INTVEC(intrmap=intrptr[i]) != INTVEC(vec);
939 i++);
940 if (INTVEC(intrmap) == INTVEC(vec)) {
941 #ifdef DEBUG
942 if (sbusdebug & SDB_INTR)
943 printf("Found OBIO %lx IRQ as %lx in slot %d\n",
944 vec, (long)intrmap, i);
945 #endif
946 /* Register the map and clear intr registers */
947 ih->ih_map = &intrptr[i];
948 intrptr = (int64_t *)&sc->sc_sysio->scsi_clr_int;
949 ih->ih_clr = &intrptr[i];
950 /* Enable the interrupt */
951 intrmap |= INTMAP_V;
952 bus_space_write_8(sc->sc_bustag, ih->ih_map, 0, (u_long)intrmap);
953 } else panic("IRQ not found!");
954 }
955 }
956 #ifdef DEBUG
957 if (sbusdebug & SDB_INTR) { long i; for (i=0; i<1400000000; i++); }
958 #endif
959
960 ih->ih_fun = handler;
961 ih->ih_arg = arg;
962 ih->ih_number = vec;
963 ih->ih_pil = (1<<ipl);
964 if ((flags & BUS_INTR_ESTABLISH_FASTTRAP) != 0)
965 intr_fasttrap(ipl, (void (*)__P((void)))handler);
966 else
967 intr_establish(ipl, ih);
968 return (ih);
969 }
970
971 static bus_space_tag_t
972 sbus_alloc_bustag(sc)
973 struct sbus_softc *sc;
974 {
975 bus_space_tag_t sbt;
976
977 sbt = (bus_space_tag_t)
978 malloc(sizeof(struct sparc_bus_space_tag), M_DEVBUF, M_NOWAIT);
979 if (sbt == NULL)
980 return (NULL);
981
982 bzero(sbt, sizeof *sbt);
983 sbt->cookie = sc;
984 sbt->parent = sc->sc_bustag;
985 sbt->type = SBUS_BUS_SPACE;
986 sbt->sparc_bus_map = _sbus_bus_map;
987 sbt->sparc_bus_mmap = sbus_bus_mmap;
988 sbt->sparc_intr_establish = sbus_intr_establish;
989 return (sbt);
990 }
991
992
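/*
 * Build a DMA tag for our children: clone the parent tag, overriding the
 * map load/unload/sync and memory alloc/free/map/unmap methods with the
 * DVMA-aware versions in this file and delegating the rest upward.
 */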
993 static bus_dma_tag_t
994 sbus_alloc_dmatag(sc)
995 struct sbus_softc *sc;
996 {
997 bus_dma_tag_t sdt, psdt = sc->sc_dmatag;
998
999 sdt = (bus_dma_tag_t)
1000 malloc(sizeof(struct sparc_bus_dma_tag), M_DEVBUF, M_NOWAIT);
1001 if (sdt == NULL)
1002 /* Panic? */
1003 return (psdt);
1004
1005 sdt->_cookie = sc;
1006 sdt->_parent = psdt;
1007 #define PCOPY(x) sdt->x = psdt->x
1008 PCOPY(_dmamap_create);
1009 PCOPY(_dmamap_destroy);
1010 sdt->_dmamap_load = sbus_dmamap_load;
1011 PCOPY(_dmamap_load_mbuf);
1012 PCOPY(_dmamap_load_uio);
1013 PCOPY(_dmamap_load_raw);
1014 sdt->_dmamap_unload = sbus_dmamap_unload;
1015 sdt->_dmamap_sync = sbus_dmamap_sync;
1016 sdt->_dmamem_alloc = sbus_dmamem_alloc;
1017 sdt->_dmamem_free = sbus_dmamem_free;
1018 sdt->_dmamem_map = sbus_dmamem_map;
1019 sdt->_dmamem_unmap = sbus_dmamem_unmap;
1020 PCOPY(_dmamem_mmap);
1021 #undef PCOPY
1022 sc->sc_dmatag = sdt;
1023 return (sdt);
1024 }
1025
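/*
 * Load a DMA map: carve a chunk of DVMA space out of sc_dvmamap large
 * enough for the buffer, then walk the buffer a page at a time entering
 * each physical page into the IOMMU with sbus_enter().  Only a single
 * segment is ever produced.
 */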
1026 int
1027 sbus_dmamap_load(t, map, buf, buflen, p, flags)
1028 bus_dma_tag_t t;
1029 bus_dmamap_t map;
1030 void *buf;
1031 bus_size_t buflen;
1032 struct proc *p;
1033 int flags;
1034 {
1035 int err, s;
1036 bus_size_t sgsize;
1037 paddr_t curaddr;
1038 u_long dvmaddr;
1039 vaddr_t vaddr = (vaddr_t)buf;
1040 pmap_t pmap;
1041 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1042
1043 if (map->dm_nsegs) {
1044 /* Already in use?? */
1045 #ifdef DIAGNOSTIC
1046 printf("sbus_dmamap_load: map still in use\n");
1047 #endif
1048 bus_dmamap_unload(t, map);
1049 }
1050 #if 1
1051 /*
1052 * Make sure that on error condition we return "no valid mappings".
1053 */
1054 map->dm_nsegs = 0;
1055
1056 if (buflen > map->_dm_size)
1057 #ifdef DEBUG
1058 {
		printf("sbus_dmamap_load(): buflen %ld > map size %ld -- map size exceeded!\n",
		    (long)buflen, (long)map->_dm_size);
1060 Debugger();
1061 return (EINVAL);
1062 }
1063 #else
1064 return (EINVAL);
1065 #endif
1066
1067 sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
1068
1069 /*
	 * XXX Need to implement "don't dma across this boundary".
1071 */
1072
1073 s = splhigh();
1074 err = extent_alloc(sc->sc_dvmamap, sgsize, NBPG,
1075 map->_dm_boundary, EX_NOWAIT, (u_long *)&dvmaddr);
1076 splx(s);
1077
1078 if (err != 0)
1079 return (err);
1080
1081 #ifdef DEBUG
1082 if (dvmaddr == (bus_addr_t)-1)
1083 {
		printf("sbus_dmamap_load(): extent_alloc(%ld, %x) failed!\n",
		    (long)sgsize, flags);
1085 Debugger();
1086 }
1087 #endif
1088 if (dvmaddr == (bus_addr_t)-1)
1089 return (ENOMEM);
1090
1091 /*
1092 * We always use just one segment.
1093 */
1094 map->dm_mapsize = buflen;
1095 map->dm_nsegs = 1;
1096 map->dm_segs[0].ds_addr = dvmaddr + (vaddr & PGOFSET);
1097 map->dm_segs[0].ds_len = sgsize;
1098
1099 #else
1100 if ((err = bus_dmamap_load(t->_parent, map, buf, buflen, p, flags)))
1101 return (err);
1102 #endif
1103 if (p != NULL)
1104 pmap = p->p_vmspace->vm_map.pmap;
1105 else
1106 pmap = pmap_kernel();
1107
1108 dvmaddr = trunc_page(map->dm_segs[0].ds_addr);
1109 sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
1110 for (; buflen > 0; ) {
1111 /*
1112 * Get the physical address for this page.
1113 */
		if ((curaddr = (bus_addr_t)pmap_extract(pmap, vaddr)) == 0) {
1115 bus_dmamap_unload(t, map);
1116 return (-1);
1117 }
1118
1119 /*
1120 * Compute the segment size, and adjust counts.
1121 */
1122 sgsize = NBPG - ((u_long)vaddr & PGOFSET);
1123 if (buflen < sgsize)
1124 sgsize = buflen;
1125
1126 #ifdef DEBUG
1127 if (sbusdebug & SDB_DVMA)
1128 printf("sbus_dmamap_load: map %p loading va %lx at pa %lx\n",
1129 map, (long)dvmaddr, (long)(curaddr & ~(NBPG-1)));
1130 #endif
1131 sbus_enter(sc, trunc_page(dvmaddr), trunc_page(curaddr), flags);
1132
1133 dvmaddr += PAGE_SIZE;
1134 vaddr += sgsize;
1135 buflen -= sgsize;
1136 }
1137 return (0);
1138 }
1139
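/*
 * Unload a DMA map: flush and remove the IOMMU mappings with
 * sbus_remove(), then give the DVMA range back to the extent map.
 */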
1140 void
1141 sbus_dmamap_unload(t, map)
1142 bus_dma_tag_t t;
1143 bus_dmamap_t map;
1144 {
1145 vaddr_t addr;
1146 int len, error, s;
1147 bus_addr_t dvmaddr;
1148 bus_size_t sgsize;
1149 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1150
1151 if (map->dm_nsegs != 1)
		panic("sbus_dmamap_unload: nsegs = %d", map->dm_nsegs);
1153
1154 addr = trunc_page(map->dm_segs[0].ds_addr);
1155 len = map->dm_segs[0].ds_len;
1156
1157 #ifdef DEBUG
1158 if (sbusdebug & SDB_DVMA)
1159 printf("sbus_dmamap_unload: map %p removing va %lx size %lx\n",
1160 map, (long)addr, (long)len);
1161 #endif
1162 sbus_remove(sc, addr, len);
1163 #if 1
1164 dvmaddr = (map->dm_segs[0].ds_addr & ~PGOFSET);
1165 sgsize = map->dm_segs[0].ds_len;
1166
1167 /* Mark the mappings as invalid. */
1168 map->dm_mapsize = 0;
1169 map->dm_nsegs = 0;
1170
1171 /* Unmapping is bus dependent */
1172 s = splhigh();
1173 error = extent_free(sc->sc_dvmamap, dvmaddr, sgsize, EX_NOWAIT);
1174 splx(s);
1175 if (error != 0)
		printf("warning: %ld bytes of DVMA space lost\n", (long)sgsize);
1177
1178 cache_flush((caddr_t)dvmaddr, (u_int) sgsize);
1179 #else
1180 bus_dmamap_unload(t->_parent, map);
1181 #endif
1182 }
1183
1184
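/*
 * Sync a DMA map.  Only BUS_DMASYNC_POSTREAD requires real work here:
 * the streaming buffer is flushed page by page before the CPU looks at
 * the data.  The remaining operations are no-ops beyond the pass-through
 * to the parent tag at the end.
 */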
1185 void
1186 sbus_dmamap_sync(t, map, offset, len, ops)
1187 bus_dma_tag_t t;
1188 bus_dmamap_t map;
1189 bus_addr_t offset;
1190 bus_size_t len;
1191 int ops;
1192 {
1193 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1194 vaddr_t va = map->dm_segs[0].ds_addr + offset;
1195
1196 /*
1197 * We only support one DMA segment; supporting more makes this code
 * too unwieldy.
1199 */
1200
1201 if (ops&BUS_DMASYNC_PREREAD) {
1202 #ifdef DEBUG
1203 if (sbusdebug & SDB_DVMA)
1204 printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_PREREAD\n",
1205 (long)va, (u_long)len);
1206 #endif
1207
1208 /* Nothing to do */;
1209 }
1210 if (ops&BUS_DMASYNC_POSTREAD) {
1211 /*
1212 * We should sync the IOMMU streaming caches here first.
1213 */
1214 #ifdef DEBUG
1215 if (sbusdebug & SDB_DVMA)
1216 printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_POSTREAD\n",
1217 (long)va, (u_long)len);
1218 #endif
1219 while (len > 0) {
1220
1221 /*
1222 * Streaming buffer flushes:
1223 *
1224 * 1 Tell strbuf to flush by storing va to strbuf_pgflush
1225 * If we're not on a cache line boundary (64-bits):
1226 * 2 Store 0 in flag
1227 * 3 Store pointer to flag in flushsync
1228 * 4 wait till flushsync becomes 0x1
1229 *
1230 * If it takes more than .5 sec, something went wrong.
1231 */
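			/*
			 * As in sbus_remove(), step 1 happens here and
			 * sbus_flush() performs steps 2-4 on the final page.
			 */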
1232 #ifdef DEBUG
1233 if (sbusdebug & SDB_DVMA)
1234 printf("sbus_dmamap_sync: flushing va %p, %lu bytes left\n",
1235 (long)va, (u_long)len);
1236 #endif
1237 #if 1
1238 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush, 0, va);
1239 #else
1240 stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
1241 #endif
1242 if (len <= NBPG) {
1243 sbus_flush(sc);
1244 len = 0;
1245 } else
1246 len -= NBPG;
1247 va += NBPG;
1248 }
1249 }
1250 if (ops&BUS_DMASYNC_PREWRITE) {
1251 #ifdef DEBUG
1252 if (sbusdebug & SDB_DVMA)
1253 printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_PREWRITE\n",
1254 (long)va, (u_long)len);
1255 #endif
1256 /* Nothing to do */;
1257 }
1258 if (ops&BUS_DMASYNC_POSTWRITE) {
1259 #ifdef DEBUG
1260 if (sbusdebug & SDB_DVMA)
1261 printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_POSTWRITE\n",
1262 (long)va, (u_long)len);
1263 #endif
1264 /* Nothing to do */;
1265 }
1266 bus_dmamap_sync(t->_parent, map, offset, len, ops);
1267 }
1268
1269
1270 /*
1271 * Take memory allocated by our parent bus and generate DVMA mappings for it.
1272 */
1273 int
1274 sbus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
1275 bus_dma_tag_t t;
1276 bus_size_t size, alignment, boundary;
1277 bus_dma_segment_t *segs;
1278 int nsegs;
1279 int *rsegs;
1280 int flags;
1281 {
1282 paddr_t curaddr;
1283 u_long dvmaddr;
1284 vm_page_t m;
1285 struct pglist *mlist;
1286 int error;
1287 int n, s;
1288 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1289
1290 if ((error = bus_dmamem_alloc(t->_parent, size, alignment,
1291 boundary, segs, nsegs, rsegs, flags)))
1292 return (error);
1293
1294 /*
1295 * Allocate a DVMA mapping for our new memory.
1296 */
1297 for (n=0; n<*rsegs; n++) {
1298 #if 1
1299 s = splhigh();
1300 if (extent_alloc(sc->sc_dvmamap, segs[0].ds_len, alignment,
1301 boundary, EX_NOWAIT, (u_long *)&dvmaddr)) {
1302 splx(s);
1303 /* Free what we got and exit */
1304 bus_dmamem_free(t->_parent, segs, nsegs);
1305 return (ENOMEM);
1306 }
1307 splx(s);
1308 #else
1309 dvmaddr = dvmamap_alloc(segs[0].ds_len, flags);
1310 if (dvmaddr == (bus_addr_t)-1) {
1311 /* Free what we got and exit */
1312 bus_dmamem_free(t->_parent, segs, nsegs);
1313 return (ENOMEM);
1314 }
1315 #endif
1316 segs[n].ds_addr = dvmaddr;
1317 size = segs[n].ds_len;
1318 mlist = segs[n]._ds_mlist;
1319
1320 /* Map memory into DVMA space */
1321 for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {
1322 curaddr = VM_PAGE_TO_PHYS(m);
1323 #ifdef DEBUG
1324 if (sbusdebug & SDB_DVMA)
1325 printf("sbus_dmamem_alloc: map %p loading va %lx at pa %lx\n",
1326 (long)m, (long)dvmaddr, (long)(curaddr & ~(NBPG-1)));
1327 #endif
1328 sbus_enter(sc, dvmaddr, curaddr, flags);
1329 dvmaddr += PAGE_SIZE;
1330 }
1331 }
1332 return (0);
1333 }
1334
1335 void
1336 sbus_dmamem_free(t, segs, nsegs)
1337 bus_dma_tag_t t;
1338 bus_dma_segment_t *segs;
1339 int nsegs;
1340 {
1341 vaddr_t addr;
1342 int len;
1343 int n, s, error;
1344 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1345
1346
1347 for (n=0; n<nsegs; n++) {
1348 addr = segs[n].ds_addr;
1349 len = segs[n].ds_len;
1350 sbus_remove(sc, addr, len);
1351 #if 1
1352 s = splhigh();
1353 error = extent_free(sc->sc_dvmamap, addr, len, EX_NOWAIT);
1354 splx(s);
1355 if (error != 0)
			printf("warning: %ld bytes of DVMA space lost\n", (long)len);
1357 #else
1358 dvmamap_free(addr, len);
1359 #endif
1360 }
1361 bus_dmamem_free(t->_parent, segs, nsegs);
1362 }
1363
1364 /*
1365 * Map the DVMA mappings into the kernel pmap.
1366 * Check the flags to see whether we're streaming or coherent.
1367 */
1368 int
1369 sbus_dmamem_map(t, segs, nsegs, size, kvap, flags)
1370 bus_dma_tag_t t;
1371 bus_dma_segment_t *segs;
1372 int nsegs;
1373 size_t size;
1374 caddr_t *kvap;
1375 int flags;
1376 {
1377 vm_page_t m;
1378 vaddr_t va;
1379 bus_addr_t addr;
1380 struct pglist *mlist;
1381 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1382 int cbit;
1383
1384 /*
1385 * digest flags:
1386 */
1387 cbit = 0;
1388 if (flags & BUS_DMA_COHERENT) /* Disable vcache */
1389 cbit |= PMAP_NVC;
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
1391 cbit |= PMAP_NC;
1392 /*
1393 * Now take this and map it into the CPU since it should already
	 * be in the IOMMU.
1395 */
	va = segs[0].ds_addr;
	*kvap = (caddr_t)va;
1397 mlist = segs[0]._ds_mlist;
1398 for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {
1399
1400 if (size == 0)
1401 panic("_bus_dmamem_map: size botch");
1402
1403 addr = VM_PAGE_TO_PHYS(m);
1404 pmap_enter(pmap_kernel(), va, addr | cbit,
1405 VM_PROT_READ | VM_PROT_WRITE, TRUE, 0);
1406 va += PAGE_SIZE;
1407 size -= PAGE_SIZE;
1408 }
1409
1410 return (0);
1411 }
1412
1413 /*
1414 * Unmap DVMA mappings from kernel
1415 */
1416 void
1417 sbus_dmamem_unmap(t, kva, size)
1418 bus_dma_tag_t t;
1419 caddr_t kva;
1420 size_t size;
1421 {
1422 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1423
1424 #ifdef DIAGNOSTIC
1425 if ((u_long)kva & PGOFSET)
		panic("sbus_dmamem_unmap");
1427 #endif
1428
1429 size = round_page(size);
1430 pmap_remove(pmap_kernel(), (vaddr_t)kva, size);
1431 }
1432