1 /*	$NetBSD: sbus.c,v 1.14 1999/05/25 23:14:08 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1992, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * This software was developed by the Computer Systems Engineering group
44 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
45 * contributed to Berkeley.
46 *
47 * All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by the University of
50 * California, Lawrence Berkeley Laboratory.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 * must display the following acknowledgement:
62 * This product includes software developed by the University of
63 * California, Berkeley and its contributors.
64 * 4. Neither the name of the University nor the names of its contributors
65 * may be used to endorse or promote products derived from this software
66 * without specific prior written permission.
67 *
68 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
69 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
70 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
71 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
72 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
73 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
74 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
75 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
76 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
77 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
78 * SUCH DAMAGE.
79 *
80 * @(#)sbus.c 8.1 (Berkeley) 6/11/93
81 */
82
83 /*
84 * Sbus stuff.
85 */
86 #include "opt_ddb.h"
87
88 #include <sys/param.h>
89 #include <sys/extent.h>
90 #include <sys/malloc.h>
91 #include <sys/systm.h>
92 #include <sys/device.h>
93 #include <vm/vm.h>
94
95 #include <machine/bus.h>
96 #include <sparc64/sparc64/vaddrs.h>
97 #include <sparc64/dev/iommureg.h>
98 #include <sparc64/dev/sbusreg.h>
99 #include <dev/sbus/sbusvar.h>
100
101 #include <machine/autoconf.h>
102 #include <machine/ctlreg.h>
103 #include <machine/cpu.h>
104 #include <machine/sparc64.h>
105
106 #ifdef DEBUG
107 #define SDB_DVMA 0x1
108 #define SDB_INTR 0x2
109 int sbusdebug = 0;
110 #endif
111
112 void sbusreset __P((int));
113 int sbus_flush __P((struct sbus_softc *));
114
115 static bus_space_tag_t sbus_alloc_bustag __P((struct sbus_softc *));
116 static bus_dma_tag_t sbus_alloc_dmatag __P((struct sbus_softc *));
117 static int sbus_get_intr __P((struct sbus_softc *, int,
118 struct sbus_intr **, int *));
119 static int sbus_bus_mmap __P((bus_space_tag_t, bus_type_t, bus_addr_t,
120 int, bus_space_handle_t *));
121 static int _sbus_bus_map __P((
122 bus_space_tag_t,
123 bus_type_t,
124 bus_addr_t, /*offset*/
125 bus_size_t, /*size*/
126 int, /*flags*/
127 vaddr_t, /*preferred virtual address */
128 bus_space_handle_t *));
129 static void *sbus_intr_establish __P((
130 bus_space_tag_t,
131 int, /*level*/
132 int, /*flags*/
133 int (*) __P((void *)), /*handler*/
134 void *)); /*handler arg*/
135
136
137 /* autoconfiguration driver */
138 int sbus_match __P((struct device *, struct cfdata *, void *));
139 void sbus_attach __P((struct device *, struct device *, void *));
140
141
142 struct cfattach sbus_ca = {
143 sizeof(struct sbus_softc), sbus_match, sbus_attach
144 };
145
146 extern struct cfdriver sbus_cd;
147
148 /*
149 * DVMA routines
150 */
151 void sbus_enter __P((struct sbus_softc *, vaddr_t, int64_t, int));
152 void sbus_remove __P((struct sbus_softc *, vaddr_t, size_t));
153 int sbus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
154 bus_size_t, struct proc *, int));
155 void sbus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
156 void sbus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
157 bus_size_t, int));
158 int sbus_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
159 bus_size_t alignment, bus_size_t boundary,
160 bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
161 void sbus_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
162 int nsegs));
163 int sbus_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
164 int nsegs, size_t size, caddr_t *kvap, int flags));
165 void sbus_dmamem_unmap __P((bus_dma_tag_t tag, caddr_t kva,
166 size_t size));
167
168
169 /*
170 * Child devices receive the Sbus interrupt level in their attach
171 * arguments. We translate these to CPU IPLs using the following
172 * tables. Note: obio bus interrupt levels are identical to the
173 * processor IPL.
174 *
175 * The second set of tables is used when the Sbus interrupt level
176 * cannot be had from the PROM as an `interrupt' property. We then
177 * fall back on the `intr' property which contains the CPU IPL.
178 */
179
180 /* Translate Sbus interrupt level to processor IPL */
181 static int intr_sbus2ipl_4c[] = {
182 0, 1, 2, 3, 5, 7, 8, 9
183 };
184 static int intr_sbus2ipl_4m[] = {
185 0, 2, 3, 5, 7, 9, 11, 13
186 };
187
188 /*
189 * This value is or'ed into the attach args' interrupt level cookie
190 * if the interrupt level comes from an `intr' property, i.e. it is
191 * not an Sbus interrupt level.
192 */
193 #define SBUS_INTR_COMPAT 0x80000000
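
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * how a consumer tells the two cookie encodings described above apart.
 * sbus_intr_establish() below performs the same test on its `level'
 * argument; the helper name here is hypothetical.
 */
#if 0
static int
sbus_example_cookie_to_ipl(long cookie)
{
	if (cookie & SBUS_INTR_COMPAT)
		/* From an `intr' property: the cookie already is the CPU IPL */
		return (cookie & ~SBUS_INTR_COMPAT);
	/* From an `interrupts' property: the IPL is encoded with the vector */
	return (INTLEV(cookie));
}
#endif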
194
195
196 /*
197 * Print the location of some sbus-attached device (called just
198 * before attaching that device). If `busname' is not NULL, the
199 * device was found but not configured; print the bus name as well.
200 * Return UNCONF (config_find ignores this if the device was configured).
201 */
202 int
203 sbus_print(args, busname)
204 void *args;
205 const char *busname;
206 {
207 struct sbus_attach_args *sa = args;
208 int i;
209
210 if (busname)
211 printf("%s at %s", sa->sa_name, busname);
212 printf(" slot %ld offset 0x%lx", (long)sa->sa_slot,
213 (u_long)sa->sa_offset);
214 for (i=0; i<sa->sa_nintr; i++) {
215 struct sbus_intr *sbi = &sa->sa_intr[i];
216
217 printf(" vector %lx ipl %ld",
218 (u_long)sbi->sbi_vec,
219 (long)INTLEV(sbi->sbi_pri));
220 }
221 return (UNCONF);
222 }
223
224 int
225 sbus_match(parent, cf, aux)
226 struct device *parent;
227 struct cfdata *cf;
228 void *aux;
229 {
230 struct mainbus_attach_args *ma = aux;
231
232 return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
233 }
234
235 /*
236 * Attach an Sbus.
237 */
238 void
239 sbus_attach(parent, self, aux)
240 struct device *parent;
241 struct device *self;
242 void *aux;
243 {
244 struct sbus_softc *sc = (struct sbus_softc *)self;
245 struct mainbus_attach_args *ma = aux;
246 int node = ma->ma_node;
247
248 int node0, error;
249 bus_space_tag_t sbt;
250 struct sbus_attach_args sa;
251 char *busname = "sbus";
252 struct bootpath *bp = ma->ma_bp;
253
254
255 sc->sc_bustag = ma->ma_bustag;
256 sc->sc_dmatag = ma->ma_dmatag;
257 sc->sc_sysio = (struct sysioreg*)(u_long)ma->ma_address[0]; /* Use prom mapping for sysio. */
258 sc->sc_ign = ma->ma_interrupts[0] & INTMAP_IGN; /* Find interrupt group no */
259
260 /* Setup interrupt translation tables */
261 sc->sc_intr2ipl = CPU_ISSUN4C
262 ? intr_sbus2ipl_4c
263 : intr_sbus2ipl_4m;
264
265 /*
266 * Record clock frequency for synchronous SCSI.
267 * IS THIS THE CORRECT DEFAULT??
268 */
269 sc->sc_clockfreq = getpropint(node, "clock-frequency", 25*1000*1000);
270 printf(": clock = %s MHz\n", clockfreq(sc->sc_clockfreq));
271
272 sbt = sbus_alloc_bustag(sc);
273 sc->sc_dmatag = sbus_alloc_dmatag(sc);
274
275 /*
276 * Get the SBus burst transfer size if burst transfers are supported
277 */
278 sc->sc_burst = getpropint(node, "burst-sizes", 0);
279
280 /* Propagate bootpath */
281 if (bp != NULL && strcmp(bp->name, busname) == 0)
282 bp++;
283 else
284 bp = NULL;
285
286 /*
287 * Collect address translations from the OBP.
288 */
289 error = getprop(node, "ranges", sizeof(struct sbus_range),
290 &sc->sc_nrange, (void **)&sc->sc_range);
291 switch (error) {
292 case 0:
293 break;
294 #if 0
295 case ENOENT:
296 /* Fall back to our own `range' construction */
297 sc->sc_range = sbus_translations;
298 sc->sc_nrange =
299 sizeof(sbus_translations)/sizeof(sbus_translations[0]);
300 break;
301 #endif
302 default:
303 panic("%s: error getting ranges property", sc->sc_dev.dv_xname);
304 }
305
306
307 /*
308 * Setup the iommu.
309 *
310 * The sun4u iommu is part of the SBUS controller so we will
311 * deal with it here. We could try to fake a device node so
312 * we can eventually share it with the PCI bus run by psycho,
313 * but I don't want to get into that sort of cruft.
314 *
315 * First we need to allocate an IOTSB. The problem is that the IOMMU
316 * can only access the IOTSB by physical address, so all the
317 * pages must be contiguous. Luckily, the smallest IOTSB size
318 * is one 8K page.
319 */
320 #if 1
321 sc->sc_tsbsize = 0;
322 sc->sc_tsb = malloc(NBPG, M_DMAMAP, M_WAITOK);
323 sc->sc_ptsb = pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_tsb);
324 #else
325
326 /*
327 * All IOMMUs will share the same TSB which is allocated in pmap_bootstrap.
328 *
329 * This makes device management easier.
330 */
331 {
332 extern int64_t *iotsb;
333 extern paddr_t iotsbp;
334 extern int iotsbsize;
335
336 sc->sc_tsbsize = iotsbsize;
337 sc->sc_tsb = iotsb;
338 sc->sc_ptsb = iotsbp;
339 }
340 #endif
341 #if 1
342 /* Need to do 64-bit stores */
343 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_cr,
344 0, (IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
345 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_tsb,
346 0, sc->sc_ptsb);
347 #else
348 stxa(&sc->sc_sysio->sys_iommu.iommu_cr,ASI_NUCLEUS,(IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
349 stxa(&sc->sc_sysio->sys_iommu.iommu_tsb,ASI_NUCLEUS,sc->sc_ptsb);
350 #endif
351 #ifdef DEBUG
352 if (sbusdebug & SDB_DVMA)
353 {
354 /* Probe the iommu */
355 int64_t cr, tsb;
356
357 printf("iommu regs at: cr=%lx tsb=%lx flush=%lx\n", &sc->sc_sysio->sys_iommu.iommu_cr,
358 &sc->sc_sysio->sys_iommu.iommu_tsb, &sc->sc_sysio->sys_iommu.iommu_flush);
359 cr = sc->sc_sysio->sys_iommu.iommu_cr;
360 tsb = sc->sc_sysio->sys_iommu.iommu_tsb;
361 printf("iommu cr=%lx tsb=%lx\n", (long)cr, (long)tsb);
362 printf("sysio base %p phys %p TSB base %p phys %p",
363 (long)sc->sc_sysio, (long)pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_sysio),
364 (long)sc->sc_tsb, (long)sc->sc_ptsb);
365 delay(1000000); /* 1 s */
366 }
367 #endif
368
369 /*
370 * Initialize streaming buffer.
371 */
372 sc->sc_flushpa = pmap_extract(pmap_kernel(), (vaddr_t)&sc->sc_flush);
373 #if 1
374 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_ctl,
375 0, STRBUF_EN); /* Enable diagnostics mode? */
376 #else
377 stxa(&sc->sc_sysio->sys_strbuf.strbuf_ctl,ASI_NUCLEUS,STRBUF_EN);
378 #endif
379
380 /*
381 * Now that all the hardware is working, we need to allocate a DVMA map.
382 *
383 * The IOMMU address space always ends at 0xffffe000, but the starting
384 * address depends on the size of the map. The map size is 1024 * 2 ^
385 * sc->sc_tsbsize entries, where each entry is 8 bytes. The start of
386 * the map can be calculated by (0xffffe000 << (8 + sc->sc_tsbsize)).
387 *
388 * Note: the stupid IOMMU ignores the high bits of an address, so a
389 * NULL DMA pointer will be translated by the first page of the IOTSB.
390 * To trap bugs we'll skip the first entry in the IOTSB.
391 */
392 sc->sc_dvmamap = extent_create("SBus dvma", /* XXXX should have instance number */
393 IOTSB_VSTART(sc->sc_tsbsize) + NBPG, IOTSB_VEND,
394 M_DEVBUF, 0, 0, EX_NOWAIT);
395
396 /*
397 * Loop through ROM children, fixing any relative addresses
398 * and then configuring each device.
401 */
402 node0 = firstchild(node);
403 for (node = node0; node; node = nextsibling(node)) {
404 char *name = getpropstring(node, "name");
405
406 if (sbus_setup_attach_args(sc, sbt, sc->sc_dmatag,
407 node, bp, &sa) != 0) {
408 printf("sbus_attach: %s: incomplete\n", name);
409 continue;
410 }
411 (void) config_found(&sc->sc_dev, (void *)&sa, sbus_print);
412 sbus_destroy_attach_args(&sa);
413 }
414 }
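
/*
 * Editor's sketches (illustrative only, not part of the original driver).
 * The first helper spells out the TSB sizing implied by the comments in
 * sbus_attach(): 1024 * 2^tsbsize entries of 8 bytes each, so tsbsize 0
 * is the single 8KB page allocated above.  The second shows a minimal
 * DVMA address allocate/free round trip against the sc_dvmamap extent,
 * mirroring what sbus_dmamap_load()/sbus_dmamap_unload() do below.
 * Both helper names are hypothetical.
 */
#if 0
static size_t
sbus_example_tsb_bytes(int tsbsize)
{
	size_t entries = 1024UL << tsbsize;	/* number of IOTTEs */

	return (entries * sizeof(int64_t));	/* 8 bytes per IOTTE */
}

static int
sbus_example_dvma_cycle(struct sbus_softc *sc, bus_size_t len)
{
	u_long dvmaddr;
	int error, s;

	s = splhigh();
	error = extent_alloc(sc->sc_dvmamap, round_page(len), NBPG,
	    0 /* no boundary */, EX_NOWAIT, &dvmaddr);
	splx(s);
	if (error != 0)
		return (error);

	/* ... enter IOMMU mappings with sbus_enter() and do the DMA ... */

	s = splhigh();
	error = extent_free(sc->sc_dvmamap, dvmaddr, round_page(len),
	    EX_NOWAIT);
	splx(s);
	return (error);
}
#endif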
415
416 int
417 sbus_setup_attach_args(sc, bustag, dmatag, node, bp, sa)
418 struct sbus_softc *sc;
419 bus_space_tag_t bustag;
420 bus_dma_tag_t dmatag;
421 int node;
422 struct bootpath *bp;
423 struct sbus_attach_args *sa;
424 {
425 /*struct sbus_reg sbusreg;*/
426 /*int base;*/
427 int error;
428 int n;
429
430 bzero(sa, sizeof(struct sbus_attach_args));
431 error = getprop(node, "name", 1, &n, (void **)&sa->sa_name);
432 if (error != 0)
433 return (error);
434 sa->sa_name[n] = '\0';
435
436 sa->sa_bustag = bustag;
437 sa->sa_dmatag = dmatag;
438 sa->sa_node = node;
439 sa->sa_bp = bp;
440
441 error = getprop(node, "reg", sizeof(struct sbus_reg),
442 &sa->sa_nreg, (void **)&sa->sa_reg);
443 if (error != 0) {
444 char buf[32];
445 if (error != ENOENT ||
446 !node_has_property(node, "device_type") ||
447 strcmp(getpropstringA(node, "device_type", buf),
448 "hierarchical") != 0)
449 return (error);
450 }
451 for (n = 0; n < sa->sa_nreg; n++) {
452 /* Convert to relative addressing, if necessary */
453 u_int32_t base = sa->sa_reg[n].sbr_offset;
454 if (SBUS_ABS(base)) {
455 sa->sa_reg[n].sbr_slot = SBUS_ABS_TO_SLOT(base);
456 sa->sa_reg[n].sbr_offset = SBUS_ABS_TO_OFFSET(base);
457 }
458 }
459
460 if ((error = sbus_get_intr(sc, node, &sa->sa_intr, &sa->sa_nintr)) != 0)
461 return (error);
462
463 error = getprop(node, "address", sizeof(u_int32_t),
464 &sa->sa_npromvaddrs, (void **)&sa->sa_promvaddrs);
465 if (error != 0 && error != ENOENT)
466 return (error);
467
468 return (0);
469 }
470
471 void
472 sbus_destroy_attach_args(sa)
473 struct sbus_attach_args *sa;
474 {
475 if (sa->sa_name != NULL)
476 free(sa->sa_name, M_DEVBUF);
477
478 if (sa->sa_nreg != 0)
479 free(sa->sa_reg, M_DEVBUF);
480
481 if (sa->sa_intr)
482 free(sa->sa_intr, M_DEVBUF);
483
484 if (sa->sa_promvaddrs)
485 free((void *)sa->sa_promvaddrs, M_DEVBUF);
486
487 bzero(sa, sizeof(struct sbus_attach_args));/*DEBUG*/
488 }
489
490
491 int
492 _sbus_bus_map(t, btype, offset, size, flags, vaddr, hp)
493 bus_space_tag_t t;
494 bus_type_t btype;
495 bus_addr_t offset;
496 bus_size_t size;
497 int flags;
498 vaddr_t vaddr;
499 bus_space_handle_t *hp;
500 {
501 struct sbus_softc *sc = t->cookie;
502 int64_t slot = btype;
503 int i;
504
505 for (i = 0; i < sc->sc_nrange; i++) {
506 bus_addr_t paddr;
507
508 if (sc->sc_range[i].cspace != slot)
509 continue;
510
511 /* We've found the connection to the parent bus */
512 paddr = sc->sc_range[i].poffset + offset;
513 paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
514 #ifdef DEBUG
515 if (sbusdebug & SDB_DVMA)
516 printf("\n_sbus_bus_map: mapping paddr slot %lx offset %lx poffset %lx paddr %lx\n",
517 (long)slot, (long)offset, (long)sc->sc_range[i].poffset, (long)paddr);
518 #endif
519 return (bus_space_map2(sc->sc_bustag, 0, paddr,
520 size, flags, vaddr, hp));
521 }
522
523 return (EINVAL);
524 }
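
/*
 * Editor's sketch (illustrative only): how a child driver's register window
 * ends up in the slot/offset -> parent-range translation above.  Real
 * drivers normally go through the machine-independent bus_space wrappers;
 * the direct call through the tag is shown here just to make the data flow
 * explicit.  The helper name is hypothetical.
 */
#if 0
static int
sbus_example_map_regs(struct sbus_attach_args *sa, bus_size_t size,
    bus_space_handle_t *hp)
{
	/* sa_slot selects the `ranges' entry; sa_offset is within the slot */
	return ((*sa->sa_bustag->sparc_bus_map)(sa->sa_bustag,
	    sa->sa_slot, sa->sa_offset, size, 0 /* flags */,
	    0 /* no preferred va */, hp));
}
#endif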
525
526 int
527 sbus_bus_mmap(t, btype, paddr, flags, hp)
528 bus_space_tag_t t;
529 bus_type_t btype;
530 bus_addr_t paddr;
531 int flags;
532 bus_space_handle_t *hp;
533 {
534 bus_addr_t offset = paddr;
535 int slot = (paddr>>32);
536 struct sbus_softc *sc = t->cookie;
537 int i;
538
539 for (i = 0; i < sc->sc_nrange; i++) {
540 bus_addr_t paddr;
541
542 if (sc->sc_range[i].cspace != slot)
543 continue;
544
545 paddr = sc->sc_range[i].poffset + offset;
546 paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
547 return (bus_space_mmap(sc->sc_bustag, 0, paddr,
548 flags, hp));
549 }
550
551 return (-1);
552 }
553
554
555 /*
556 * Each attached device calls sbus_establish after it initializes
557 * its sbusdev portion.
558 */
559 void
560 sbus_establish(sd, dev)
561 register struct sbusdev *sd;
562 register struct device *dev;
563 {
564 register struct sbus_softc *sc;
565 register struct device *curdev;
566
567 /*
568 * We have to look for the sbus by name, since it is not necessarily
569 * our immediate parent (e.g. sun4m /iommu/sbus/espdma/esp).
570 * We don't just use the device structure of the above-attached
571 * sbus, since we might (in the future) support multiple sbus's.
572 */
573 for (curdev = dev->dv_parent; ; curdev = curdev->dv_parent) {
574 if (!curdev || !curdev->dv_xname)
575 panic("sbus_establish: can't find sbus parent for %s",
576 sd->sd_dev->dv_xname
577 ? sd->sd_dev->dv_xname
578 : "<unknown>" );
579
580 if (strncmp(curdev->dv_xname, "sbus", 4) == 0)
581 break;
582 }
583 sc = (struct sbus_softc *) curdev;
584
585 sd->sd_dev = dev;
586 sd->sd_bchain = sc->sc_sbdev;
587 sc->sc_sbdev = sd;
588 }
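
/*
 * Editor's sketch (illustrative only): what a child driver would do with
 * sbus_establish().  The `xx' softc below is hypothetical; sd_reset is the
 * hook that sbusreset() invokes for each registered device, and
 * sbus_establish() itself fills in sd_dev and links sd into the chain.
 */
#if 0
struct xx_softc {
	struct device	sc_dev;
	struct sbusdev	sc_sd;
	/* ... device registers, dmamaps, etc. ... */
};

static void
xx_sbus_attach(struct device *parent, struct device *self, void *aux)
{
	struct xx_softc *sc = (struct xx_softc *)self;

	/* ... map registers, establish interrupts ... */

	sc->sc_sd.sd_reset = NULL;	/* or a pointer to a reset routine */
	sbus_establish(&sc->sc_sd, &sc->sc_dev);
}
#endif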
589
590 /*
591 * Reset the given sbus. (???)
592 */
593 void
594 sbusreset(sbus)
595 int sbus;
596 {
597 register struct sbusdev *sd;
598 struct sbus_softc *sc = sbus_cd.cd_devs[sbus];
599 struct device *dev;
600
601 printf("reset %s:", sc->sc_dev.dv_xname);
602 for (sd = sc->sc_sbdev; sd != NULL; sd = sd->sd_bchain) {
603 if (sd->sd_reset) {
604 dev = sd->sd_dev;
605 (*sd->sd_reset)(dev);
606 printf(" %s", dev->dv_xname);
607 }
608 }
609 #if 1
610 /* Reload iommu regs */
611 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_cr,
612 0, (IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
613 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_tsb,
614 0, sc->sc_ptsb);
615 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_ctl,
616 0, STRBUF_EN); /* Enable diagnostics mode? */
617 #else
618 /* Reload iommu regs */
619 stxa(&sc->sc_sysio->sys_iommu.iommu_cr,ASI_NUCLEUS,(IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
620 stxa(&sc->sc_sysio->sys_iommu.iommu_tsb,ASI_NUCLEUS,sc->sc_ptsb);
621 stxa(&sc->sc_sysio->sys_strbuf.strbuf_ctl,ASI_NUCLEUS,STRBUF_EN);
622 #endif
623 }
624
625 /*
626 * Here are the iommu control routines.
627 */
628 void
629 sbus_enter(sc, va, pa, flags)
630 struct sbus_softc *sc;
631 vaddr_t va;
632 int64_t pa;
633 int flags;
634 {
635 int64_t tte;
636
637 #ifdef DIAGNOSTIC
638 if (va < sc->sc_dvmabase)
639 panic("sbus_enter: va 0x%lx not in DVMA space",va);
640 #endif
641
642 tte = MAKEIOTTE(pa, !(flags&BUS_DMA_NOWRITE), !(flags&BUS_DMA_NOCACHE),
643 !(flags&BUS_DMA_COHERENT));
644
645 /* Is the streamcache flush really needed? */
646 #if 1
647 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush,
648 0, va);
649 #else
650 stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
651 #endif
652 sbus_flush(sc);
653 #ifdef DEBUG
654 if (sbusdebug & SDB_DVMA)
655 printf("Clearing TSB slot %d for va %p\n", (int)IOTSBSLOT(va,sc->sc_tsbsize), va);
656 #endif
657 sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)] = tte;
658 #if 1
659 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_flush,
660 0, va);
661 #else
662 stxa(&sc->sc_sysio->sys_iommu.iommu_flush,ASI_NUCLEUS,va);
663 #endif
664 #ifdef DEBUG
665 if (sbusdebug & SDB_DVMA)
666 printf("sbus_enter: va %lx pa %lx TSB[%lx]@%p=%lx\n",
667 va, (long)pa, IOTSBSLOT(va,sc->sc_tsbsize),
668 &sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
669 (long)tte);
670 #endif
671 }
672
673 /*
674 * sbus_remove: removes mappings created by sbus_enter.
675 *
676 * The corresponding streaming buffer and IOMMU TLB entries are flushed.
677 */
678 void
679 sbus_remove(sc, va, len)
680 struct sbus_softc *sc;
681 vaddr_t va;
682 size_t len;
683 {
684
685 #ifdef DIAGNOSTIC
686 if (va < sc->sc_dvmabase)
687 panic("sbus_remove: va 0x%lx not in DVMA space", (long)va);
688 if ((long)(va + len) < (long)va)
689 panic("sbus_remove: va 0x%lx + len 0x%lx wraps",
690 (long) va, (long) len);
691 if (len & ~0xfffffff)
692 panic("sbus_remove: rediculous len 0x%lx", (long)len);
693 #endif
694
695 va = trunc_page(va);
696 while (len > 0) {
697
698 /*
699 * Streaming buffer flushes:
700 *
701 * 1 Tell strbuf to flush by storing va to strbuf_pgflush
702 * If we're not on a cache line boundary (64-bits):
703 * 2 Store 0 in flag
704 * 3 Store pointer to flag in flushsync
705 * 4 wait till flushsync becomes 0x1
706 *
707 * If it takes more than .5 sec, something went wrong.
708 */
709 #ifdef DEBUG
710 if (sbusdebug & SDB_DVMA)
711 printf("sbus_remove: flushing va %p TSB[%lx]@%p=%lx, %lu bytes left\n",
712 (long)va, (long)IOTSBSLOT(va,sc->sc_tsbsize),
713 (long)&sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
714 (long)(sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)]),
715 (u_long)len);
716 #endif
717 #if 1
718 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush, 0, va);
719 #else
720 stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
721 #endif
722 if (len <= NBPG) {
723 sbus_flush(sc);
724 len = 0;
725 } else len -= NBPG;
726 #ifdef DEBUG
727 if (sbusdebug & SDB_DVMA)
728 printf("sbus_remove: flushed va %p TSB[%lx]@%p=%lx, %lu bytes left\n",
729 (long)va, (long)IOTSBSLOT(va,sc->sc_tsbsize),
730 (long)&sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
731 (long)(sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)]),
732 (u_long)len);
733 #endif
734 sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)] = 0;
735 #if 1
736 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_flush, 0, va);
737 #else
738 stxa(&sc->sc_sysio->sys_iommu.iommu_flush, ASI_NUCLEUS, va);
739 #endif
740 va += NBPG;
741 }
742 }
743
744 int
745 sbus_flush(sc)
746 struct sbus_softc *sc;
747 {
748 extern u_int64_t cpu_clockrate;
749 u_int64_t flushtimeout;
750
751 sc->sc_flush = 0;
752 membar_sync();
753 #if 1
754 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_flushsync, 0, sc->sc_flushpa);
755 #else
756 stxa(&sc->sc_sysio->sys_strbuf.strbuf_flushsync, ASI_NUCLEUS, sc->sc_flushpa);
757 #endif
758 membar_sync();
759 flushtimeout = tick() + cpu_clockrate/2; /* .5 sec after *now* */
760 #ifdef DEBUG
761 if (sbusdebug & SDB_DVMA)
762 printf("sbus_flush: flush = %lx at va = %lx pa = %lx now=%lx until = %lx\n",
763 (long)sc->sc_flush, (long)&sc->sc_flush,
764 (long)sc->sc_flushpa, (long)tick(), flushtimeout);
765 #endif
766 /* Bypass non-coherent D$ */
767 #if 0
768 while( !ldxa(sc->sc_flushpa, ASI_PHYS_CACHED) && flushtimeout > tick()) membar_sync();
769 #else
770 { int i; for(i=140000000/2; !ldxa(sc->sc_flushpa, ASI_PHYS_CACHED) && i; i--) membar_sync(); }
771 #endif
772 #ifdef DIAGNOSTIC
773 if( !sc->sc_flush ) {
774 printf("sbus_flush: flush timeout %p at %p\n", (long)sc->sc_flush,
775 (long)sc->sc_flushpa); /* panic? */
776 #ifdef DDB
777 Debugger();
778 #endif
779 }
780 #endif
781 #ifdef DEBUG
782 if (sbusdebug & SDB_DVMA)
783 printf("sbus_flush: flushed\n");
784 #endif
785 return (sc->sc_flush);
786 }
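
/*
 * Editor's sketch (illustrative only): the complete streaming-buffer flush
 * handshake described by the comments above, gathered in one place.  The
 * driver itself splits this work between sbus_remove()/sbus_dmamap_sync()
 * (step 1) and sbus_flush() (steps 2-4, plus a timeout).  The helper name
 * is hypothetical.
 */
#if 0
static void
sbus_example_strbuf_flush(struct sbus_softc *sc, vaddr_t va)
{
	/* 1. Ask the streaming buffer to flush the page containing `va' */
	bus_space_write_8(sc->sc_bustag,
	    &sc->sc_sysio->sys_strbuf.strbuf_pgflush, 0, va);

	/* 2. Clear the completion flag in memory */
	sc->sc_flush = 0;
	membar_sync();

	/* 3. Hand the flag's physical address to the flush-sync register */
	bus_space_write_8(sc->sc_bustag,
	    &sc->sc_sysio->sys_strbuf.strbuf_flushsync, 0, sc->sc_flushpa);
	membar_sync();

	/* 4. Wait for the hardware to set the flag (bypassing the D$) */
	while (ldxa(sc->sc_flushpa, ASI_PHYS_CACHED) == 0)
		membar_sync();
}
#endif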
787 /*
788 * Get interrupt attributes for an Sbus device.
789 */
790 int
791 sbus_get_intr(sc, node, ipp, np)
792 struct sbus_softc *sc;
793 int node;
794 struct sbus_intr **ipp;
795 int *np;
796 {
797 int *ipl;
798 int i, n, error;
799 char buf[32];
800
801 /*
802 * The `interrupts' property contains the Sbus interrupt level.
803 */
804 ipl = NULL;
805 if (getprop(node, "interrupts", sizeof(int), np, (void **)&ipl) == 0) {
806 /* Change format to an `struct sbus_intr' array */
807 struct sbus_intr *ip;
808 /* Default to interrupt level 2 -- otherwise unused */
809 int pri = INTLEVENCODE(2);
810 ip = malloc(*np * sizeof(struct sbus_intr), M_DEVBUF, M_NOWAIT);
811 if (ip == NULL)
812 return (ENOMEM);
813 /* Now things get ugly. We need to take this value which is
814 * the interrupt vector number and encode the IPL into it
815 * somehow. Luckily, the interrupt vector has lots of free
816 * space and we can easily stuff the IPL in there for a while.
817 */
818 getpropstringA(node, "device_type", buf);
819 if (!buf[0]) {
820 getpropstringA(node, "name", buf);
821 }
822 for (i=0; intrmap[i].in_class; i++) {
823 if (strcmp(intrmap[i].in_class, buf) == 0) {
824 pri = INTLEVENCODE(intrmap[i].in_lev);
825 break;
826 }
827 }
828 for (n = 0; n < *np; n++) {
829 /*
830 * We encode vector and priority into sbi_pri so we
831 * can pass them as a unit. This will go away if
832 * sbus_establish ever takes an sbus_intr instead
833 * of an integer level.
834 * Stuff the real vector in sbi_vec.
835 */
836 ip[n].sbi_pri = pri|ipl[n];
837 ip[n].sbi_vec = ipl[n];
838 }
839 free(ipl, M_DEVBUF);
840 *ipp = ip;
841 return (0);
842 }
843
844 /* We really don't support the following */
845 /* printf("\nWARNING: sbus_get_intr() \"interrupts\" not found -- using \"intr\"\n"); */
846 /* And some devices don't even have interrupts */
847 /*
848 * Fall back on `intr' property.
849 */
850 *ipp = NULL;
851 error = getprop(node, "intr", sizeof(struct sbus_intr),
852 np, (void **)ipp);
853 switch (error) {
854 case 0:
855 for (n = *np; n-- > 0;) {
856 /*
857 * Move the interrupt vector into place.
858 * We could remap the level, but the SBUS priorities
859 * are probably good enough.
860 */
861 (*ipp)[n].sbi_vec = (*ipp)[n].sbi_pri;
862 (*ipp)[n].sbi_pri |= INTLEVENCODE((*ipp)[n].sbi_pri);
863 }
864 break;
865 case ENOENT:
866 error = 0;
867 break;
868 }
869
870 return (error);
871 }
872
873
874 /*
875 * Install an interrupt handler for an Sbus device.
876 */
877 void *
878 sbus_intr_establish(t, level, flags, handler, arg)
879 bus_space_tag_t t;
880 int level;
881 int flags;
882 int (*handler) __P((void *));
883 void *arg;
884 {
885 struct sbus_softc *sc = t->cookie;
886 struct intrhand *ih;
887 int ipl;
888 long vec = level;
889
890 ih = (struct intrhand *)
891 malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
892 if (ih == NULL)
893 return (NULL);
894
895 if ((flags & BUS_INTR_ESTABLISH_SOFTINTR) != 0)
896 ipl = vec;
897 else if ((vec & SBUS_INTR_COMPAT) != 0)
898 ipl = vec & ~SBUS_INTR_COMPAT;
899 else {
900 /* Decode and remove IPL */
901 ipl = INTLEV(vec);
902 vec = INTVEC(vec);
903 #ifdef DEBUG
904 if (sbusdebug & SDB_INTR) {
905 printf("\nsbus: intr[%ld]%lx: %lx\n", (long)ipl, (long)vec,
906 intrlev[vec]);
907 printf("Hunting for IRQ...\n");
908 }
909 #endif
910 if ((vec & INTMAP_OBIO) == 0) {
911 /* We're in an SBUS slot */
912 /* Register the map and clear intr registers */
913 #ifdef DEBUG
914 if (sbusdebug & SDB_INTR) {
915 int64_t *intrptr = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(vec)];
916 int64_t intrmap = *intrptr;
917
918 printf("Found SBUS %lx IRQ as %llx in slot %ld\n",
919 (long)vec, (long)intrmap,
920 (long)INTSLOT(vec));
921 }
922 #endif
923 ih->ih_map = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(vec)];
924 ih->ih_clr = &sc->sc_sysio->sbus0_clr_int[INTVEC(vec)];
925 /* Enable the interrupt */
926 vec |= INTMAP_V;
927 /* Insert IGN */
928 vec |= sc->sc_ign;
929 bus_space_write_8(sc->sc_bustag, ih->ih_map, 0, vec);
930 } else {
931 int64_t *intrptr = &sc->sc_sysio->scsi_int_map;
932 int64_t intrmap = 0;
933 int i;
934
935 /* Insert IGN */
936 vec |= sc->sc_ign;
937 for (i=0;
938 &intrptr[i] <= (int64_t *)&sc->sc_sysio->reserved_int_map &&
939 INTVEC(intrmap=intrptr[i]) != INTVEC(vec);
940 i++);
941 if (INTVEC(intrmap) == INTVEC(vec)) {
942 #ifdef DEBUG
943 if (sbusdebug & SDB_INTR)
944 printf("Found OBIO %lx IRQ as %lx in slot %d\n",
945 vec, (long)intrmap, i);
946 #endif
947 /* Register the map and clear intr registers */
948 ih->ih_map = &intrptr[i];
949 intrptr = (int64_t *)&sc->sc_sysio->scsi_clr_int;
950 ih->ih_clr = &intrptr[i];
951 /* Enable the interrupt */
952 intrmap |= INTMAP_V;
953 bus_space_write_8(sc->sc_bustag, ih->ih_map, 0, (u_long)intrmap);
954 } else panic("IRQ not found!");
955 }
956 }
957 #ifdef DEBUG
958 if (sbusdebug & SDB_INTR) { long i; for (i=0; i<1400000000; i++); }
959 #endif
960
961 ih->ih_fun = handler;
962 ih->ih_arg = arg;
963 ih->ih_number = vec;
964 ih->ih_pil = (1<<ipl);
965 if ((flags & BUS_INTR_ESTABLISH_FASTTRAP) != 0)
966 intr_fasttrap(ipl, (void (*)__P((void)))handler);
967 else
968 intr_establish(ipl, ih);
969 return (ih);
970 }
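
/*
 * Editor's sketch (illustrative only): how a child driver would hook its
 * interrupt through this tag, using the cookie delivered in the attach
 * arguments (see sbus_get_intr() above).  Real drivers normally call the
 * machine-independent wrapper around sparc_intr_establish; the helper and
 * handler names are hypothetical.
 */
#if 0
static int xx_intr(void *);

static void *
sbus_example_establish_intr(struct sbus_attach_args *sa, void *softc)
{
	if (sa->sa_nintr == 0)
		return (NULL);		/* device has no interrupts */

	return ((*sa->sa_bustag->sparc_intr_establish)(sa->sa_bustag,
	    sa->sa_intr[0].sbi_pri, 0 /* flags */, xx_intr, softc));
}
#endif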
971
972 static bus_space_tag_t
973 sbus_alloc_bustag(sc)
974 struct sbus_softc *sc;
975 {
976 bus_space_tag_t sbt;
977
978 sbt = (bus_space_tag_t)
979 malloc(sizeof(struct sparc_bus_space_tag), M_DEVBUF, M_NOWAIT);
980 if (sbt == NULL)
981 return (NULL);
982
983 bzero(sbt, sizeof *sbt);
984 sbt->cookie = sc;
985 sbt->parent = sc->sc_bustag;
986 sbt->type = SBUS_BUS_SPACE;
987 sbt->sparc_bus_map = _sbus_bus_map;
988 sbt->sparc_bus_mmap = sbus_bus_mmap;
989 sbt->sparc_intr_establish = sbus_intr_establish;
990 return (sbt);
991 }
992
993
994 static bus_dma_tag_t
995 sbus_alloc_dmatag(sc)
996 struct sbus_softc *sc;
997 {
998 bus_dma_tag_t sdt, psdt = sc->sc_dmatag;
999
1000 sdt = (bus_dma_tag_t)
1001 malloc(sizeof(struct sparc_bus_dma_tag), M_DEVBUF, M_NOWAIT);
1002 if (sdt == NULL)
1003 /* Panic? */
1004 return (psdt);
1005
1006 sdt->_cookie = sc;
1007 sdt->_parent = psdt;
1008 #define PCOPY(x) sdt->x = psdt->x
1009 PCOPY(_dmamap_create);
1010 PCOPY(_dmamap_destroy);
1011 sdt->_dmamap_load = sbus_dmamap_load;
1012 PCOPY(_dmamap_load_mbuf);
1013 PCOPY(_dmamap_load_uio);
1014 PCOPY(_dmamap_load_raw);
1015 sdt->_dmamap_unload = sbus_dmamap_unload;
1016 sdt->_dmamap_sync = sbus_dmamap_sync;
1017 sdt->_dmamem_alloc = sbus_dmamem_alloc;
1018 sdt->_dmamem_free = sbus_dmamem_free;
1019 sdt->_dmamem_map = sbus_dmamem_map;
1020 sdt->_dmamem_unmap = sbus_dmamem_unmap;
1021 PCOPY(_dmamem_mmap);
1022 #undef PCOPY
1023 sc->sc_dmatag = sdt;
1024 return (sdt);
1025 }
1026
1027 int
1028 sbus_dmamap_load(t, map, buf, buflen, p, flags)
1029 bus_dma_tag_t t;
1030 bus_dmamap_t map;
1031 void *buf;
1032 bus_size_t buflen;
1033 struct proc *p;
1034 int flags;
1035 {
1036 int err, s;
1037 bus_size_t sgsize;
1038 paddr_t curaddr;
1039 u_long dvmaddr;
1040 vaddr_t vaddr = (vaddr_t)buf;
1041 pmap_t pmap;
1042 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1043
1044 if (map->dm_nsegs) {
1045 /* Already in use?? */
1046 #ifdef DIAGNOSTIC
1047 printf("sbus_dmamap_load: map still in use\n");
1048 #endif
1049 bus_dmamap_unload(t, map);
1050 }
1051 #if 1
1052 /*
1053 * Make sure that on error condition we return "no valid mappings".
1054 */
1055 map->dm_nsegs = 0;
1056
1057 if (buflen > map->_dm_size)
1058 #ifdef DEBUG
1059 {
1060 printf("_bus_dmamap_load(): error %d > %d -- map size exceeded!\n", buflen, map->_dm_size);
1061 Debugger();
1062 return (EINVAL);
1063 }
1064 #else
1065 return (EINVAL);
1066 #endif
1067
1068 sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
1069
1070 /*
1071 * XXX Need to implement "don't dma across this boundary".
1072 */
1073
1074 s = splhigh();
1075 err = extent_alloc(sc->sc_dvmamap, sgsize, NBPG,
1076 map->_dm_boundary, EX_NOWAIT, (u_long *)&dvmaddr);
1077 splx(s);
1078
1079 if (err != 0)
1080 return (err);
1081
1082 #ifdef DEBUG
1083 if (dvmaddr == (bus_addr_t)-1)
1084 {
1085 printf("_bus_dmamap_load(): dvmamap_alloc(%d, %x) failed!\n", sgsize, flags);
1086 Debugger();
1087 }
1088 #endif
1089 if (dvmaddr == (bus_addr_t)-1)
1090 return (ENOMEM);
1091
1092 /*
1093 * We always use just one segment.
1094 */
1095 map->dm_mapsize = buflen;
1096 map->dm_nsegs = 1;
1097 map->dm_segs[0].ds_addr = dvmaddr + (vaddr & PGOFSET);
1098 map->dm_segs[0].ds_len = sgsize;
1099
1100 #else
1101 if ((err = bus_dmamap_load(t->_parent, map, buf, buflen, p, flags)))
1102 return (err);
1103 #endif
1104 if (p != NULL)
1105 pmap = p->p_vmspace->vm_map.pmap;
1106 else
1107 pmap = pmap_kernel();
1108
1109 dvmaddr = trunc_page(map->dm_segs[0].ds_addr);
1110 sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
1111 for (; buflen > 0; ) {
1112 /*
1113 * Get the physical address for this page.
1114 */
1115 if ((curaddr = (bus_addr_t)pmap_extract(pmap, (vaddr_t)vaddr)) == NULL) {
1116 bus_dmamap_unload(t, map);
1117 return (-1);
1118 }
1119
1120 /*
1121 * Compute the segment size, and adjust counts.
1122 */
1123 sgsize = NBPG - ((u_long)vaddr & PGOFSET);
1124 if (buflen < sgsize)
1125 sgsize = buflen;
1126
1127 #ifdef DEBUG
1128 if (sbusdebug & SDB_DVMA)
1129 printf("sbus_dmamap_load: map %p loading va %lx at pa %lx\n",
1130 map, (long)dvmaddr, (long)(curaddr & ~(NBPG-1)));
1131 #endif
1132 sbus_enter(sc, trunc_page(dvmaddr), trunc_page(curaddr), flags);
1133
1134 dvmaddr += PAGE_SIZE;
1135 vaddr += sgsize;
1136 buflen -= sgsize;
1137 }
1138 return (0);
1139 }
1140
1141 void
1142 sbus_dmamap_unload(t, map)
1143 bus_dma_tag_t t;
1144 bus_dmamap_t map;
1145 {
1146 vaddr_t addr;
1147 int len, error, s;
1148 bus_addr_t dvmaddr;
1149 bus_size_t sgsize;
1150 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1151
1152 if (map->dm_nsegs != 1)
1153 panic("_sbus_dmamap_unload: nsegs = %d", map->dm_nsegs);
1154
1155 addr = trunc_page(map->dm_segs[0].ds_addr);
1156 len = map->dm_segs[0].ds_len;
1157
1158 #ifdef DEBUG
1159 if (sbusdebug & SDB_DVMA)
1160 printf("sbus_dmamap_unload: map %p removing va %lx size %lx\n",
1161 map, (long)addr, (long)len);
1162 #endif
1163 sbus_remove(sc, addr, len);
1164 #if 1
1165 dvmaddr = (map->dm_segs[0].ds_addr & ~PGOFSET);
1166 sgsize = map->dm_segs[0].ds_len;
1167
1168 /* Mark the mappings as invalid. */
1169 map->dm_mapsize = 0;
1170 map->dm_nsegs = 0;
1171
1172 /* Unmapping is bus dependent */
1173 s = splhigh();
1174 error = extent_free(sc->sc_dvmamap, dvmaddr, sgsize, EX_NOWAIT);
1175 splx(s);
1176 if (error != 0)
1177 printf("warning: %ld of DVMA space lost\n", (long)sgsize);
1178
1179 cache_flush((caddr_t)dvmaddr, (u_int) sgsize);
1180 #else
1181 bus_dmamap_unload(t->_parent, map);
1182 #endif
1183 }
1184
1185
1186 void
1187 sbus_dmamap_sync(t, map, offset, len, ops)
1188 bus_dma_tag_t t;
1189 bus_dmamap_t map;
1190 bus_addr_t offset;
1191 bus_size_t len;
1192 int ops;
1193 {
1194 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1195 vaddr_t va = map->dm_segs[0].ds_addr + offset;
1196
1197 /*
1198 * We only support one DMA segment; supporting more makes this code
1199 * too unwieldy.
1200 */
1201
1202 if (ops&BUS_DMASYNC_PREREAD) {
1203 #ifdef DEBUG
1204 if (sbusdebug & SDB_DVMA)
1205 printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_PREREAD\n",
1206 (long)va, (u_long)len);
1207 #endif
1208
1209 /* Nothing to do */;
1210 }
1211 if (ops&BUS_DMASYNC_POSTREAD) {
1212 /*
1213 * We should sync the IOMMU streaming caches here first.
1214 */
1215 #ifdef DEBUG
1216 if (sbusdebug & SDB_DVMA)
1217 printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_POSTREAD\n",
1218 (long)va, (u_long)len);
1219 #endif
1220 while (len > 0) {
1221
1222 /*
1223 * Streaming buffer flushes:
1224 *
1225 * 1 Tell strbuf to flush by storing va to strbuf_pgflush
1226 * If we're not on a cache line boundary (64-bits):
1227 * 2 Store 0 in flag
1228 * 3 Store pointer to flag in flushsync
1229 * 4 wait till flushsync becomes 0x1
1230 *
1231 * If it takes more than .5 sec, something went wrong.
1232 */
1233 #ifdef DEBUG
1234 if (sbusdebug & SDB_DVMA)
1235 printf("sbus_dmamap_sync: flushing va %p, %lu bytes left\n",
1236 (long)va, (u_long)len);
1237 #endif
1238 #if 1
1239 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush, 0, va);
1240 #else
1241 stxa(&(sc->sc_sysio->sys_strbuf.strbuf_pgflush), ASI_NUCLEUS, va);
1242 #endif
1243 if (len <= NBPG) {
1244 sbus_flush(sc);
1245 len = 0;
1246 } else
1247 len -= NBPG;
1248 va += NBPG;
1249 }
1250 }
1251 if (ops&BUS_DMASYNC_PREWRITE) {
1252 #ifdef DEBUG
1253 if (sbusdebug & SDB_DVMA)
1254 printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_PREWRITE\n",
1255 (long)va, (u_long)len);
1256 #endif
1257 /* Nothing to do */;
1258 }
1259 if (ops&BUS_DMASYNC_POSTWRITE) {
1260 #ifdef DEBUG
1261 if (sbusdebug & SDB_DVMA)
1262 printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_POSTWRITE\n",
1263 (long)va, (u_long)len);
1264 #endif
1265 /* Nothing to do */;
1266 }
1267 bus_dmamap_sync(t->_parent, map, offset, len, ops);
1268 }
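
/*
 * Editor's sketch (illustrative only): the dmamap life cycle a child driver
 * would run against sa_dmatag, which lands in the sbus_dmamap_* routines
 * above.  Sizes and flags are placeholders; the helper name is hypothetical.
 */
#if 0
static int
sbus_example_dma_cycle(struct sbus_attach_args *sa, void *buf, bus_size_t len)
{
	bus_dma_tag_t dt = sa->sa_dmatag;
	bus_dmamap_t map;
	int error;

	error = bus_dmamap_create(dt, len, 1 /* one segment */, len,
	    0 /* boundary */, BUS_DMA_NOWAIT, &map);
	if (error != 0)
		return (error);

	error = bus_dmamap_load(dt, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamap_destroy(dt, map);
		return (error);
	}

	bus_dmamap_sync(dt, map, 0, len, BUS_DMASYNC_PREREAD);
	/* ... start the device DMA and wait for it to complete ... */
	bus_dmamap_sync(dt, map, 0, len, BUS_DMASYNC_POSTREAD);

	bus_dmamap_unload(dt, map);
	bus_dmamap_destroy(dt, map);
	return (0);
}
#endif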
1269
1270
1271 /*
1272 * Take memory allocated by our parent bus and generate DVMA mappings for it.
1273 */
1274 int
1275 sbus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
1276 bus_dma_tag_t t;
1277 bus_size_t size, alignment, boundary;
1278 bus_dma_segment_t *segs;
1279 int nsegs;
1280 int *rsegs;
1281 int flags;
1282 {
1283 paddr_t curaddr;
1284 u_long dvmaddr;
1285 vm_page_t m;
1286 struct pglist *mlist;
1287 int error;
1288 int n, s;
1289 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1290
1291 if ((error = bus_dmamem_alloc(t->_parent, size, alignment,
1292 boundary, segs, nsegs, rsegs, flags)))
1293 return (error);
1294
1295 /*
1296 * Allocate a DVMA mapping for our new memory.
1297 */
1298 for (n=0; n<*rsegs; n++) {
1299 #if 1
1300 s = splhigh();
1301 if (extent_alloc(sc->sc_dvmamap, segs[0].ds_len, alignment,
1302 boundary, EX_NOWAIT, (u_long *)&dvmaddr)) {
1303 splx(s);
1304 /* Free what we got and exit */
1305 bus_dmamem_free(t->_parent, segs, nsegs);
1306 return (ENOMEM);
1307 }
1308 splx(s);
1309 #else
1310 dvmaddr = dvmamap_alloc(segs[0].ds_len, flags);
1311 if (dvmaddr == (bus_addr_t)-1) {
1312 /* Free what we got and exit */
1313 bus_dmamem_free(t->_parent, segs, nsegs);
1314 return (ENOMEM);
1315 }
1316 #endif
1317 segs[n].ds_addr = dvmaddr;
1318 size = segs[n].ds_len;
1319 mlist = segs[n]._ds_mlist;
1320
1321 /* Map memory into DVMA space */
1322 for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {
1323 curaddr = VM_PAGE_TO_PHYS(m);
1324 #ifdef DEBUG
1325 if (sbusdebug & SDB_DVMA)
1326 printf("sbus_dmamem_alloc: map %p loading va %lx at pa %lx\n",
1327 (long)m, (long)dvmaddr, (long)(curaddr & ~(NBPG-1)));
1328 #endif
1329 sbus_enter(sc, dvmaddr, curaddr, flags);
1330 dvmaddr += PAGE_SIZE;
1331 }
1332 }
1333 return (0);
1334 }
1335
1336 void
1337 sbus_dmamem_free(t, segs, nsegs)
1338 bus_dma_tag_t t;
1339 bus_dma_segment_t *segs;
1340 int nsegs;
1341 {
1342 vaddr_t addr;
1343 int len;
1344 int n, s, error;
1345 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1346
1347
1348 for (n=0; n<nsegs; n++) {
1349 addr = segs[n].ds_addr;
1350 len = segs[n].ds_len;
1351 sbus_remove(sc, addr, len);
1352 #if 1
1353 s = splhigh();
1354 error = extent_free(sc->sc_dvmamap, addr, len, EX_NOWAIT);
1355 splx(s);
1356 if (error != 0)
1357 printf("warning: %ld of DVMA space lost\n", (long)len);
1358 #else
1359 dvmamap_free(addr, len);
1360 #endif
1361 }
1362 bus_dmamem_free(t->_parent, segs, nsegs);
1363 }
1364
1365 /*
1366 * Map the DVMA mappings into the kernel pmap.
1367 * Check the flags to see whether we're streaming or coherent.
1368 */
1369 int
1370 sbus_dmamem_map(t, segs, nsegs, size, kvap, flags)
1371 bus_dma_tag_t t;
1372 bus_dma_segment_t *segs;
1373 int nsegs;
1374 size_t size;
1375 caddr_t *kvap;
1376 int flags;
1377 {
1378 vm_page_t m;
1379 vaddr_t va;
1380 bus_addr_t addr;
1381 struct pglist *mlist;
1382 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1383 int cbit;
1384
1385 /*
1386 * digest flags:
1387 */
1388 cbit = 0;
1389 if (flags & BUS_DMA_COHERENT) /* Disable vcache */
1390 cbit |= PMAP_NVC;
1391 if (flags & BUS_DMA_NOCACHE) /* sideffects */
1392 cbit |= PMAP_NC;
1393 /*
1394 * Now take this and map it into the CPU since it should already
1395 * be in the IOMMU.
1396 */
1397 *kvap = (caddr_t)va = segs[0].ds_addr;
1398 mlist = segs[0]._ds_mlist;
1399 for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {
1400
1401 if (size == 0)
1402 panic("_bus_dmamem_map: size botch");
1403
1404 addr = VM_PAGE_TO_PHYS(m);
1405 pmap_enter(pmap_kernel(), va, addr | cbit,
1406 VM_PROT_READ | VM_PROT_WRITE, TRUE,
1407 VM_PROT_READ | VM_PROT_WRITE);
1408 va += PAGE_SIZE;
1409 size -= PAGE_SIZE;
1410 }
1411
1412 return (0);
1413 }
1414
1415 /*
1416 * Unmap DVMA mappings from kernel
1417 */
1418 void
1419 sbus_dmamem_unmap(t, kva, size)
1420 bus_dma_tag_t t;
1421 caddr_t kva;
1422 size_t size;
1423 {
1424 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1425
1426 #ifdef DIAGNOSTIC
1427 if ((u_long)kva & PGOFSET)
1428 panic("_bus_dmamem_unmap");
1429 #endif
1430
1431 size = round_page(size);
1432 pmap_remove(pmap_kernel(), (vaddr_t)kva, size);
1433 }
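
/*
 * Editor's sketch (illustrative only): the dmamem life cycle against the
 * sbus_dmamem_* routines above -- allocate DVMA-backed memory, map it into
 * the kernel, use it, then tear everything down.  Error handling is kept
 * minimal and the helper name is hypothetical.
 */
#if 0
static int
sbus_example_dmamem_cycle(bus_dma_tag_t dt, bus_size_t size)
{
	bus_dma_segment_t seg;
	caddr_t kva;
	int rseg, error;

	error = bus_dmamem_alloc(dt, size, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);

	error = bus_dmamem_map(dt, &seg, rseg, size, &kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		bus_dmamem_free(dt, &seg, rseg);
		return (error);
	}

	/* `kva' is the CPU view; seg.ds_addr is the DVMA address for the device */

	bus_dmamem_unmap(dt, kva, size);
	bus_dmamem_free(dt, &seg, rseg);
	return (0);
}
#endif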
1434