sbus.c revision 1.16 1 /* $NetBSD: sbus.c,v 1.16 1999/05/31 00:14:00 eeh Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1992, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * This software was developed by the Computer Systems Engineering group
44 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
45 * contributed to Berkeley.
46 *
47 * All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by the University of
50 * California, Lawrence Berkeley Laboratory.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 * must display the following acknowledgement:
62 * This product includes software developed by the University of
63 * California, Berkeley and its contributors.
64 * 4. Neither the name of the University nor the names of its contributors
65 * may be used to endorse or promote products derived from this software
66 * without specific prior written permission.
67 *
68 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
69 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
70 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
71 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
72 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
73 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
74 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
75 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
76 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
77 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
78 * SUCH DAMAGE.
79 *
80 * @(#)sbus.c 8.1 (Berkeley) 6/11/93
81 */
82
83 /*
84 * Sbus stuff.
85 */
86 #include "opt_ddb.h"
87
88 #include <sys/param.h>
89 #include <sys/extent.h>
90 #include <sys/malloc.h>
91 #include <sys/systm.h>
92 #include <sys/device.h>
93 #include <vm/vm.h>
94
95 #include <machine/bus.h>
96 #include <sparc64/sparc64/vaddrs.h>
97 #include <sparc64/dev/iommureg.h>
98 #include <sparc64/dev/sbusreg.h>
99 #include <dev/sbus/sbusvar.h>
100
101 #include <machine/autoconf.h>
102 #include <machine/ctlreg.h>
103 #include <machine/cpu.h>
104 #include <machine/sparc64.h>
105
106 #ifdef DEBUG
107 #define SDB_DVMA 0x1
108 #define SDB_INTR 0x2
109 int sbusdebug = 0;
110 #endif
111
112 void sbusreset __P((int));
113 int sbus_flush __P((struct sbus_softc *));
114
115 static bus_space_tag_t sbus_alloc_bustag __P((struct sbus_softc *));
116 static bus_dma_tag_t sbus_alloc_dmatag __P((struct sbus_softc *));
117 static int sbus_get_intr __P((struct sbus_softc *, int,
118 struct sbus_intr **, int *));
119 static int sbus_bus_mmap __P((bus_space_tag_t, bus_type_t, bus_addr_t,
120 int, bus_space_handle_t *));
121 static int _sbus_bus_map __P((
122 bus_space_tag_t,
123 bus_type_t,
124 bus_addr_t, /*offset*/
125 bus_size_t, /*size*/
126 int, /*flags*/
127 vaddr_t, /*preferred virtual address */
128 bus_space_handle_t *));
129 static void *sbus_intr_establish __P((
130 bus_space_tag_t,
131 int, /*level*/
132 int, /*flags*/
133 int (*) __P((void *)), /*handler*/
134 void *)); /*handler arg*/
135
136
137 /* autoconfiguration driver */
138 int sbus_match __P((struct device *, struct cfdata *, void *));
139 void sbus_attach __P((struct device *, struct device *, void *));
140
141
/* Autoconfiguration glue: softc size plus the match/attach entry points. */
struct cfattach sbus_ca = {
	sizeof(struct sbus_softc), sbus_match, sbus_attach
};
145
146 extern struct cfdriver sbus_cd;
147
148 /*
149 * DVMA routines
150 */
151 void sbus_enter __P((struct sbus_softc *, vaddr_t, int64_t, int));
152 void sbus_remove __P((struct sbus_softc *, vaddr_t, size_t));
153 int sbus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
154 bus_size_t, struct proc *, int));
155 void sbus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
156 void sbus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
157 bus_size_t, int));
158 int sbus_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
159 bus_size_t alignment, bus_size_t boundary,
160 bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
161 void sbus_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
162 int nsegs));
163 int sbus_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
164 int nsegs, size_t size, caddr_t *kvap, int flags));
165 void sbus_dmamem_unmap __P((bus_dma_tag_t tag, caddr_t kva,
166 size_t size));
167
168
169 /*
170 * Child devices receive the Sbus interrupt level in their attach
171 * arguments. We translate these to CPU IPLs using the following
172 * tables. Note: obio bus interrupt levels are identical to the
173 * processor IPL.
174 *
175 * The second set of tables is used when the Sbus interrupt level
176 * cannot be had from the PROM as an `interrupt' property. We then
177 * fall back on the `intr' property which contains the CPU IPL.
178 */
179
180 /* Translate Sbus interrupt level to processor IPL */
static int intr_sbus2ipl_4c[] = {
	0, 1, 2, 3, 5, 7, 8, 9
};
/* Sun4m-style table; sbus_attach() selects one of the two via CPU_ISSUN4C. */
static int intr_sbus2ipl_4m[] = {
	0, 2, 3, 5, 7, 9, 11, 13
};
187
188 /*
189 * This value is or'ed into the attach args' interrupt level cookie
190 * if the interrupt level comes from an `intr' property, i.e. it is
191 * not an Sbus interrupt level.
192 */
193 #define SBUS_INTR_COMPAT 0x80000000
194
195
196 /*
197 * Print the location of some sbus-attached device (called just
198 * before attaching that device). If `sbus' is not NULL, the
199 * device was found but not configured; print the sbus as well.
200 * Return UNCONF (config_find ignores this if the device was configured).
201 */
202 int
203 sbus_print(args, busname)
204 void *args;
205 const char *busname;
206 {
207 struct sbus_attach_args *sa = args;
208 int i;
209
210 if (busname)
211 printf("%s at %s", sa->sa_name, busname);
212 printf(" slot %ld offset 0x%lx", (long)sa->sa_slot,
213 (u_long)sa->sa_offset);
214 for (i=0; i<sa->sa_nintr; i++) {
215 struct sbus_intr *sbi = &sa->sa_intr[i];
216
217 printf(" vector %lx ipl %ld",
218 (u_long)sbi->sbi_vec,
219 (long)INTLEV(sbi->sbi_pri));
220 }
221 return (UNCONF);
222 }
223
224 int
225 sbus_match(parent, cf, aux)
226 struct device *parent;
227 struct cfdata *cf;
228 void *aux;
229 {
230 struct mainbus_attach_args *ma = aux;
231
232 return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
233 }
234
235 /*
236 * Attach an Sbus.
237 */
void
sbus_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
	struct sbus_softc *sc = (struct sbus_softc *)self;
	struct mainbus_attach_args *ma = aux;
	int node = ma->ma_node;

	int node0, error;
	bus_space_tag_t sbt;
	struct sbus_attach_args sa;
	char *busname = "sbus";
	struct bootpath *bp = ma->ma_bp;


	sc->sc_bustag = ma->ma_bustag;
	sc->sc_dmatag = ma->ma_dmatag;
	sc->sc_sysio = (struct sysioreg*)(u_long)ma->ma_address[0];	/* Use prom mapping for sysio. */
	sc->sc_ign = ma->ma_interrupts[0] & INTMAP_IGN;	/* Find interrupt group no */

	/* Setup interrupt translation tables */
	sc->sc_intr2ipl = CPU_ISSUN4C
		? intr_sbus2ipl_4c
		: intr_sbus2ipl_4m;

	/*
	 * Record clock frequency for synchronous SCSI.
	 * IS THIS THE CORRECT DEFAULT??
	 */
	sc->sc_clockfreq = getpropint(node, "clock-frequency", 25*1000*1000);
	printf(": clock = %s MHz\n", clockfreq(sc->sc_clockfreq));

	/* Build the tags our children use for bus_space/bus_dma requests. */
	sbt = sbus_alloc_bustag(sc);
	sc->sc_dmatag = sbus_alloc_dmatag(sc);

	/*
	 * Get the SBus burst transfer size if burst transfers are supported
	 */
	sc->sc_burst = getpropint(node, "burst-sizes", 0);

	/* Propagate bootpath */
	if (bp != NULL && strcmp(bp->name, busname) == 0)
		bp++;
	else
		bp = NULL;

	/*
	 * Collect address translations from the OBP.
	 */
	error = getprop(node, "ranges", sizeof(struct sbus_range),
	    &sc->sc_nrange, (void **)&sc->sc_range);
	if (error)
		panic("%s: error getting ranges property", sc->sc_dev.dv_xname);

	/*
	 * Setup the iommu.
	 *
	 * The sun4u iommu is part of the SBUS controller so we will
	 * deal with it here.  We could try to fake a device node so
	 * we can eventually share it with the PCI bus run by psycho,
	 * but I don't want to get into that sort of cruft.
	 *
	 * First we need to allocate a IOTSB.  Problem is that the IOMMU
	 * can only access the IOTSB by physical address, so all the
	 * pages must be contiguous.  Luckily, the smallest IOTSB size
	 * is one 8K page.
	 */
	sc->sc_tsbsize = 0;
	sc->sc_tsb = malloc(NBPG, M_DMAMAP, M_WAITOK);
	/* The IOMMU needs the IOTSB's physical, not virtual, address. */
	sc->sc_ptsb = pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_tsb);

	/* Need to do 64-bit stores */
	bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_cr,
	    0, (IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
	bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_tsb,
	    0, sc->sc_ptsb);

#ifdef DEBUG
	if (sbusdebug & SDB_DVMA)
	{
		/* Probe the iommu */
		int64_t cr, tsb;

		printf("iommu regs at: cr=%lx tsb=%lx flush=%lx\n", &sc->sc_sysio->sys_iommu.iommu_cr,
		    &sc->sc_sysio->sys_iommu.iommu_tsb, &sc->sc_sysio->sys_iommu.iommu_flush);
		cr = sc->sc_sysio->sys_iommu.iommu_cr;
		tsb = sc->sc_sysio->sys_iommu.iommu_tsb;
		printf("iommu cr=%lx tsb=%lx\n", (long)cr, (long)tsb);
		printf("sysio base %p phys %p TSB base %p phys %p",
		    (long)sc->sc_sysio, (long)pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_sysio),
		    (long)sc->sc_tsb, (long)sc->sc_ptsb);
		delay(1000000); /* 1 s */
	}
#endif

	/*
	 * Initialize streaming buffer.
	 */
	/* sbus_flush() polls sc_flush through this physical address. */
	sc->sc_flushpa = pmap_extract(pmap_kernel(), (vaddr_t)&sc->sc_flush);
	bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_ctl,
	    0, STRBUF_EN);	/* Enable diagnostics mode? */

	/*
	 * Now all the hardware's working we need to allocate a dvma map.
	 *
	 * The IOMMU address space always ends at 0xffffe000, but the starting
	 * address depends on the size of the map.  The map size is 1024 * 2 ^
	 * sc->sc_tsbsize entries, where each entry is 8 bytes.  The start of
	 * the map can be calculated by (0xffffe000 << (8 + sc->sc_tsbsize)).
	 *
	 * Note: the stupid IOMMU ignores the high bits of an address, so a
	 * NULL DMA pointer will be translated by the first page of the IOTSB.
	 * To trap bugs we'll skip the first entry in the IOTSB.
	 */
	sc->sc_dvmamap = extent_create("SBus dvma",	/* XXXX should have instance number */
	    IOTSB_VSTART(sc->sc_tsbsize) + NBPG, IOTSB_VEND,
	    M_DEVBUF, 0, 0, EX_NOWAIT);

	/*
	 * Loop through ROM children, fixing any relative addresses
	 * and then configuring each device.
	 * `specials' is an array of device names that are treated
	 * specially:
	 */
	node0 = firstchild(node);
	for (node = node0; node; node = nextsibling(node)) {
		char *name = getpropstring(node, "name");

		if (sbus_setup_attach_args(sc, sbt, sc->sc_dmatag,
		    node, bp, &sa) != 0) {
			printf("sbus_attach: %s: incomplete\n", name);
			continue;
		}
		(void) config_found(&sc->sc_dev, (void *)&sa, sbus_print);
		sbus_destroy_attach_args(&sa);
	}
}
377
/*
 * Fill in an sbus_attach_args frame for the device at PROM `node'.
 * All property buffers are allocated by getprop(); the caller releases
 * them with sbus_destroy_attach_args().  Returns 0 on success or an
 * errno-style error from a failed property lookup.
 */
int
sbus_setup_attach_args(sc, bustag, dmatag, node, bp, sa)
	struct sbus_softc *sc;
	bus_space_tag_t bustag;
	bus_dma_tag_t dmatag;
	int node;
	struct bootpath *bp;
	struct sbus_attach_args *sa;
{
	/*struct sbus_reg sbusreg;*/
	/*int base;*/
	int error;
	int n;

	bzero(sa, sizeof(struct sbus_attach_args));
	/* `name' is mandatory; n receives the string length. */
	error = getprop(node, "name", 1, &n, (void **)&sa->sa_name);
	if (error != 0)
		return (error);
	/*
	 * NOTE(review): stores the terminator at sa_name[n], one byte past
	 * the property length -- assumes getprop()'s buffer has room for
	 * it; verify against getprop()'s allocation.
	 */
	sa->sa_name[n] = '\0';

	sa->sa_bustag = bustag;
	sa->sa_dmatag = dmatag;
	sa->sa_node = node;
	sa->sa_bp = bp;

	/* `reg' may legitimately be absent on hierarchical (bus) nodes. */
	error = getprop(node, "reg", sizeof(struct sbus_reg),
	    &sa->sa_nreg, (void **)&sa->sa_reg);
	if (error != 0) {
		char buf[32];
		if (error != ENOENT ||
		    !node_has_property(node, "device_type") ||
		    strcmp(getpropstringA(node, "device_type", buf),
			"hierarchical") != 0)
			return (error);
	}
	for (n = 0; n < sa->sa_nreg; n++) {
		/* Convert to relative addressing, if necessary */
		u_int32_t base = sa->sa_reg[n].sbr_offset;
		if (SBUS_ABS(base)) {
			sa->sa_reg[n].sbr_slot = SBUS_ABS_TO_SLOT(base);
			sa->sa_reg[n].sbr_offset = SBUS_ABS_TO_OFFSET(base);
		}
	}

	if ((error = sbus_get_intr(sc, node, &sa->sa_intr, &sa->sa_nintr)) != 0)
		return (error);

	/* Pre-existing PROM virtual mappings, if any; absence is fine. */
	error = getprop(node, "address", sizeof(u_int32_t),
	    &sa->sa_npromvaddrs, (void **)&sa->sa_promvaddrs);
	if (error != 0 && error != ENOENT)
		return (error);

	return (0);
}
432
433 void
434 sbus_destroy_attach_args(sa)
435 struct sbus_attach_args *sa;
436 {
437 if (sa->sa_name != NULL)
438 free(sa->sa_name, M_DEVBUF);
439
440 if (sa->sa_nreg != 0)
441 free(sa->sa_reg, M_DEVBUF);
442
443 if (sa->sa_intr)
444 free(sa->sa_intr, M_DEVBUF);
445
446 if (sa->sa_promvaddrs)
447 free((void *)sa->sa_promvaddrs, M_DEVBUF);
448
449 bzero(sa, sizeof(struct sbus_attach_args));/*DEBUG*/
450 }
451
452
/*
 * Map `size' bytes at `offset' within SBus address space `btype' (the
 * slot number).  The slot is translated to a parent-bus physical
 * address via the `ranges' property collected at attach time, then the
 * actual mapping is delegated to the parent's bus_space_map2().
 * Returns EINVAL if no range entry matches the slot.
 */
int
_sbus_bus_map(t, btype, offset, size, flags, vaddr, hp)
	bus_space_tag_t t;
	bus_type_t btype;
	bus_addr_t offset;
	bus_size_t size;
	int flags;
	vaddr_t vaddr;
	bus_space_handle_t *hp;
{
	struct sbus_softc *sc = t->cookie;
	int64_t slot = btype;
	int i;

	for (i = 0; i < sc->sc_nrange; i++) {
		bus_addr_t paddr;

		if (sc->sc_range[i].cspace != slot)
			continue;

		/* We've found the connection to the parent bus */
		/* Parent address = 32-bit space id in the high word + offset. */
		paddr = sc->sc_range[i].poffset + offset;
		paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
#ifdef DEBUG
		if (sbusdebug & SDB_DVMA)
			printf("\n_sbus_bus_map: mapping paddr slot %lx offset %lx poffset %lx paddr %lx\n",
			    (long)slot, (long)offset, (long)sc->sc_range[i].poffset, (long)paddr);
#endif
		return (bus_space_map2(sc->sc_bustag, 0, paddr,
		    size, flags, vaddr, hp));
	}

	return (EINVAL);
}
487
488 int
489 sbus_bus_mmap(t, btype, paddr, flags, hp)
490 bus_space_tag_t t;
491 bus_type_t btype;
492 bus_addr_t paddr;
493 int flags;
494 bus_space_handle_t *hp;
495 {
496 bus_addr_t offset = paddr;
497 int slot = (paddr>>32);
498 struct sbus_softc *sc = t->cookie;
499 int i;
500
501 for (i = 0; i < sc->sc_nrange; i++) {
502 bus_addr_t paddr;
503
504 if (sc->sc_range[i].cspace != slot)
505 continue;
506
507 paddr = sc->sc_range[i].poffset + offset;
508 paddr |= ((bus_addr_t)sc->sc_range[i].pspace<<32);
509 return (bus_space_mmap(sc->sc_bustag, 0, paddr,
510 flags, hp));
511 }
512
513 return (-1);
514 }
515
516
517 /*
518 * Each attached device calls sbus_establish after it initializes
519 * its sbusdev portion.
520 */
void
sbus_establish(sd, dev)
	register struct sbusdev *sd;
	register struct device *dev;
{
	register struct sbus_softc *sc;
	register struct device *curdev;

	/*
	 * We have to look for the sbus by name, since it is not necessarily
	 * our immediate parent (i.e. sun4m /iommu/sbus/espdma/esp)
	 * We don't just use the device structure of the above-attached
	 * sbus, since we might (in the future) support multiple sbus's.
	 */
	for (curdev = dev->dv_parent; ; curdev = curdev->dv_parent) {
		/*
		 * NOTE(review): the panic message reads sd->sd_dev, which
		 * this function only assigns below -- it relies on the
		 * caller having initialized it; verify at call sites.
		 */
		if (!curdev || !curdev->dv_xname)
			panic("sbus_establish: can't find sbus parent for %s",
			    sd->sd_dev->dv_xname
			    ? sd->sd_dev->dv_xname
			    : "<unknown>" );

		if (strncmp(curdev->dv_xname, "sbus", 4) == 0)
			break;
	}
	sc = (struct sbus_softc *) curdev;

	/* Prepend to the softc's child list; sbusreset() walks this chain. */
	sd->sd_dev = dev;
	sd->sd_bchain = sc->sc_sbdev;
	sc->sc_sbdev = sd;
}
551
552 /*
553 * Reset the given sbus. (???)
554 */
void
sbusreset(sbus)
	int sbus;
{
	register struct sbusdev *sd;
	struct sbus_softc *sc = sbus_cd.cd_devs[sbus];
	struct device *dev;

	printf("reset %s:", sc->sc_dev.dv_xname);
	/* Invoke every registered child's reset hook, if it has one. */
	for (sd = sc->sc_sbdev; sd != NULL; sd = sd->sd_bchain) {
		if (sd->sd_reset) {
			dev = sd->sd_dev;
			(*sd->sd_reset)(dev);
			printf(" %s", dev->dv_xname);
		}
	}
	/* Reload iommu regs */
	/* Same IOMMU/streaming-buffer setup as performed in sbus_attach(). */
	bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_cr,
	    0, (IOMMUCR_TSB1K|IOMMUCR_8KPG|IOMMUCR_EN));
	bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_tsb,
	    0, sc->sc_ptsb);
	bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_ctl,
	    0, STRBUF_EN);	/* Enable diagnostics mode? */
}
579
580 /*
581 * Here are the iommu control routines.
582 */
/*
 * Install an IOTSB entry mapping DVMA address `va' to physical address
 * `pa'.  The BUS_DMA_NOWRITE/NOCACHE/COHERENT bits in `flags' select
 * the write, cache, and stream attributes of the IOMMU TTE.
 */
void
sbus_enter(sc, va, pa, flags)
	struct sbus_softc *sc;
	vaddr_t va;
	int64_t pa;
	int flags;
{
	int64_t tte;

#ifdef DIAGNOSTIC
	if (va < sc->sc_dvmabase)
		panic("sbus_enter: va 0x%lx not in DVMA space",va);
#endif

	tte = MAKEIOTTE(pa, !(flags&BUS_DMA_NOWRITE), !(flags&BUS_DMA_NOCACHE),
	    !(flags&BUS_DMA_COHERENT));

	/* Is the streamcache flush really needed? */
	bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush,
	    0, va);
	sbus_flush(sc);
#ifdef DEBUG
	if (sbusdebug & SDB_DVMA)
		printf("Clearing TSB slot %d for va %p\n", (int)IOTSBSLOT(va,sc->sc_tsbsize), va);
#endif
	/* Write the TTE, then invalidate any stale IOMMU TLB entry for va. */
	sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)] = tte;
	bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_flush,
	    0, va);
#ifdef DEBUG
	if (sbusdebug & SDB_DVMA)
		printf("sbus_enter: va %lx pa %lx TSB[%lx]@%p=%lx\n",
		    va, (long)pa, IOTSBSLOT(va,sc->sc_tsbsize),
		    &sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
		    (long)tte);
#endif
}
619
620 /*
621 * sbus_clear: clears mappings created by sbus_enter
622 *
623 * Only demap from IOMMU if flag is set.
624 */
625 void
626 sbus_remove(sc, va, len)
627 struct sbus_softc *sc;
628 vaddr_t va;
629 size_t len;
630 {
631
632 #ifdef DIAGNOSTIC
633 if (va < sc->sc_dvmabase)
634 panic("sbus_remove: va 0x%lx not in DVMA space", (long)va);
635 if ((long)(va + len) < (long)va)
636 panic("sbus_remove: va 0x%lx + len 0x%lx wraps",
637 (long) va, (long) len);
638 if (len & ~0xfffffff)
639 panic("sbus_remove: rediculous len 0x%lx", (long)len);
640 #endif
641
642 va = trunc_page(va);
643 while (len > 0) {
644
645 /*
646 * Streaming buffer flushes:
647 *
648 * 1 Tell strbuf to flush by storing va to strbuf_pgflush
649 * If we're not on a cache line boundary (64-bits):
650 * 2 Store 0 in flag
651 * 3 Store pointer to flag in flushsync
652 * 4 wait till flushsync becomes 0x1
653 *
654 * If it takes more than .5 sec, something went wrong.
655 */
656 #ifdef DEBUG
657 if (sbusdebug & SDB_DVMA)
658 printf("sbus_remove: flushing va %p TSB[%lx]@%p=%lx, %lu bytes left\n",
659 (long)va, (long)IOTSBSLOT(va,sc->sc_tsbsize),
660 (long)&sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
661 (long)(sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)]),
662 (u_long)len);
663 #endif
664 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush, 0, va);
665 if (len <= NBPG) {
666 sbus_flush(sc);
667 len = 0;
668 } else len -= NBPG;
669 #ifdef DEBUG
670 if (sbusdebug & SDB_DVMA)
671 printf("sbus_remove: flushed va %p TSB[%lx]@%p=%lx, %lu bytes left\n",
672 (long)va, (long)IOTSBSLOT(va,sc->sc_tsbsize),
673 (long)&sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)],
674 (long)(sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)]),
675 (u_long)len);
676 #endif
677 sc->sc_tsb[IOTSBSLOT(va,sc->sc_tsbsize)] = 0;
678 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_iommu.iommu_flush, 0, va);
679 va += NBPG;
680 }
681 }
682
683 int
684 sbus_flush(sc)
685 struct sbus_softc *sc;
686 {
687 struct timeval cur, flushtimeout;
688 #define BUMPTIME(t, usec) { \
689 register volatile struct timeval *tp = (t); \
690 register long us; \
691 \
692 tp->tv_usec = us = tp->tv_usec + (usec); \
693 if (us >= 1000000) { \
694 tp->tv_usec = us - 1000000; \
695 tp->tv_sec++; \
696 } \
697 }
698
699 sc->sc_flush = 0;
700 membar_sync();
701 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_flushsync, 0, sc->sc_flushpa);
702 membar_sync();
703
704 microtime(&flushtimeout);
705 cur = flushtimeout;
706 BUMPTIME(&flushtimeout, 500000); /* 1/2 sec */
707
708 #ifdef DEBUG
709 if (sbusdebug & SDB_DVMA) {
710 printf("sbus_flush: flush = %lx at va = %lx pa = %lx now=%lx until = %lx:%lx\n",
711 (long)sc->sc_flush, (long)&sc->sc_flush,
712 (long)sc->sc_flushpa, cur.tv_sec, cur.tv_usec,
713 flushtimeout.tv_sec, flushtimeout.tv_usec);
714 }
715 #endif
716 /* Bypass non-coherent D$ */
717 while( !ldxa(sc->sc_flushpa, ASI_PHYS_CACHED) &&
718 ((cur.tv_sec <= flushtimeout.tv_sec) &&
719 (cur.tv_usec <= flushtimeout.tv_usec)))
720 microtime(&cur);
721
722 #ifdef DIAGNOSTIC
723 if( !sc->sc_flush ) {
724 printf("sbus_flush: flush timeout %p at %p\n", (long)sc->sc_flush,
725 (long)sc->sc_flushpa); /* panic? */
726 #ifdef DDB
727 Debugger();
728 #endif
729 }
730 #endif
731 #ifdef DEBUG
732 if (sbusdebug & SDB_DVMA)
733 printf("sbus_flush: flushed\n");
734 #endif
735 return (sc->sc_flush);
736 }
737
738 /*
739 * Get interrupt attributes for an Sbus device.
740 */
int
sbus_get_intr(sc, node, ipp, np)
	struct sbus_softc *sc;
	int node;
	struct sbus_intr **ipp;
	int *np;
{
	int *ipl;
	int i, n, error;
	char buf[32];

	/*
	 * The `interrupts' property contains the Sbus interrupt level.
	 */
	ipl = NULL;
	if (getprop(node, "interrupts", sizeof(int), np, (void **)&ipl) == 0) {
		/* Change format to an `struct sbus_intr' array */
		struct sbus_intr *ip;
		/* Default to interrupt level 2 -- otherwise unused */
		int pri = INTLEVENCODE(2);
		ip = malloc(*np * sizeof(struct sbus_intr), M_DEVBUF, M_NOWAIT);
		if (ip == NULL)
			return (ENOMEM);
		/* Now things get ugly.  We need to take this value which is
		 * the interrupt vector number and encode the IPL into it
		 * somehow. Luckily, the interrupt vector has lots of free
		 * space and we can easily stuff the IPL in there for a while.
		 */
		/* Look up the IPL by device class, falling back on the name. */
		getpropstringA(node, "device_type", buf);
		if (!buf[0]) {
			getpropstringA(node, "name", buf);
		}
		for (i=0; intrmap[i].in_class; i++) {
			if (strcmp(intrmap[i].in_class, buf) == 0) {
				pri = INTLEVENCODE(intrmap[i].in_lev);
				break;
			}
		}
		for (n = 0; n < *np; n++) {
			/*
			 * We encode vector and priority into sbi_pri so we
			 * can pass them as a unit.  This will go away if
			 * sbus_establish ever takes an sbus_intr instead
			 * of an integer level.
			 * Stuff the real vector in sbi_vec.
			 */
			ip[n].sbi_pri = pri|ipl[n];
			ip[n].sbi_vec = ipl[n];
		}
		free(ipl, M_DEVBUF);
		*ipp = ip;
		return (0);
	}

	/* We really don't support the following */
	/* printf("\nWARNING: sbus_get_intr() \"interrupts\" not found -- using \"intr\"\n"); */
	/* And some devices don't even have interrupts */
	/*
	 * Fall back on `intr' property.
	 */
	*ipp = NULL;
	error = getprop(node, "intr", sizeof(struct sbus_intr),
	    np, (void **)ipp);
	switch (error) {
	case 0:
		for (n = *np; n-- > 0;) {
			/*
			 * Move the interrupt vector into place.
			 * We could remap the level, but the SBUS priorities
			 * are probably good enough.
			 */
			(*ipp)[n].sbi_vec = (*ipp)[n].sbi_pri;
			(*ipp)[n].sbi_pri |= INTLEVENCODE((*ipp)[n].sbi_pri);
		}
		break;
	case ENOENT:
		/* No interrupts at all is not an error. */
		error = 0;
		break;
	}

	return (error);
}
823
824
825 /*
826 * Install an interrupt handler for an Sbus device.
827 */
void *
sbus_intr_establish(t, level, flags, handler, arg)
	bus_space_tag_t t;
	int level;
	int flags;
	int (*handler) __P((void *));
	void *arg;
{
	struct sbus_softc *sc = t->cookie;
	struct intrhand *ih;
	int ipl;
	long vec = level;

	ih = (struct intrhand *)
	    malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	/*
	 * Decode the `level' cookie: a raw soft-interrupt level, a CPU
	 * IPL marked with SBUS_INTR_COMPAT, or the IPL+vector encoding
	 * produced by sbus_get_intr().
	 */
	if ((flags & BUS_INTR_ESTABLISH_SOFTINTR) != 0)
		ipl = vec;
	else if ((vec & SBUS_INTR_COMPAT) != 0)
		ipl = vec & ~SBUS_INTR_COMPAT;
	else {
		/* Decode and remove IPL */
		ipl = INTLEV(vec);
		vec = INTVEC(vec);
#ifdef DEBUG
		if (sbusdebug & SDB_INTR) {
			printf("\nsbus: intr[%ld]%lx: %lx\n", (long)ipl, (long)vec,
			    intrlev[vec]);
			printf("Hunting for IRQ...\n");
		}
#endif
		if ((vec & INTMAP_OBIO) == 0) {
			/* We're in an SBUS slot */
			/* Register the map and clear intr registers */
#ifdef DEBUG
			if (sbusdebug & SDB_INTR) {
				int64_t *intrptr = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(vec)];
				int64_t intrmap = *intrptr;

				printf("Found SBUS %lx IRQ as %llx in slot %ld\n",
				    (long)vec, (long)intrmap,
				    (long)INTSLOT(vec));
			}
#endif
			/* Per-slot interrupt map register, indexed by slot. */
			ih->ih_map = &(&sc->sc_sysio->sbus_slot0_int)[INTSLOT(vec)];
			ih->ih_clr = &sc->sc_sysio->sbus0_clr_int[INTVEC(vec)];
			/* Enable the interrupt */
			vec |= INTMAP_V;
			/* Insert IGN */
			vec |= sc->sc_ign;
			bus_space_write_8(sc->sc_bustag, ih->ih_map, 0, vec);
		} else {
			/*
			 * On-board (obio) device: scan the per-device
			 * interrupt map registers for one whose vector
			 * matches ours.
			 */
			int64_t *intrptr = &sc->sc_sysio->scsi_int_map;
			int64_t intrmap = 0;
			int i;

			/* Insert IGN */
			vec |= sc->sc_ign;
			for (i=0;
			     &intrptr[i] <= (int64_t *)&sc->sc_sysio->reserved_int_map &&
			     INTVEC(intrmap=intrptr[i]) != INTVEC(vec);
			     i++);
			if (INTVEC(intrmap) == INTVEC(vec)) {
#ifdef DEBUG
				if (sbusdebug & SDB_INTR)
					printf("Found OBIO %lx IRQ as %lx in slot %d\n",
					    vec, (long)intrmap, i);
#endif
				/* Register the map and clear intr registers */
				ih->ih_map = &intrptr[i];
				/* The clear registers parallel the map registers. */
				intrptr = (int64_t *)&sc->sc_sysio->scsi_clr_int;
				ih->ih_clr = &intrptr[i];
				/* Enable the interrupt */
				intrmap |= INTMAP_V;
				bus_space_write_8(sc->sc_bustag, ih->ih_map, 0, (u_long)intrmap);
			} else panic("IRQ not found!");
		}
	}
#ifdef DEBUG
	/* Crude busy-wait so the debug output above can be read. */
	if (sbusdebug & SDB_INTR) { long i; for (i=0; i<1400000000; i++); }
#endif

	ih->ih_fun = handler;
	ih->ih_arg = arg;
	ih->ih_number = vec;
	ih->ih_pil = (1<<ipl);
	if ((flags & BUS_INTR_ESTABLISH_FASTTRAP) != 0)
		intr_fasttrap(ipl, (void (*)__P((void)))handler);
	else
		intr_establish(ipl, ih);
	return (ih);
}
922
923 static bus_space_tag_t
924 sbus_alloc_bustag(sc)
925 struct sbus_softc *sc;
926 {
927 bus_space_tag_t sbt;
928
929 sbt = (bus_space_tag_t)
930 malloc(sizeof(struct sparc_bus_space_tag), M_DEVBUF, M_NOWAIT);
931 if (sbt == NULL)
932 return (NULL);
933
934 bzero(sbt, sizeof *sbt);
935 sbt->cookie = sc;
936 sbt->parent = sc->sc_bustag;
937 sbt->type = SBUS_BUS_SPACE;
938 sbt->sparc_bus_map = _sbus_bus_map;
939 sbt->sparc_bus_mmap = sbus_bus_mmap;
940 sbt->sparc_intr_establish = sbus_intr_establish;
941 return (sbt);
942 }
943
944
/*
 * Build the bus_dma tag handed to our children: the map/unmap/sync and
 * dmamem entry points are overridden with the sbus (IOMMU-aware)
 * versions, everything else is inherited from the parent tag.
 * On allocation failure the parent tag is returned unchanged.
 */
static bus_dma_tag_t
sbus_alloc_dmatag(sc)
	struct sbus_softc *sc;
{
	bus_dma_tag_t sdt, psdt = sc->sc_dmatag;

	sdt = (bus_dma_tag_t)
	    malloc(sizeof(struct sparc_bus_dma_tag), M_DEVBUF, M_NOWAIT);
	if (sdt == NULL)
		/* Panic? */
		return (psdt);

	sdt->_cookie = sc;
	sdt->_parent = psdt;
#define PCOPY(x)	sdt->x = psdt->x
	PCOPY(_dmamap_create);
	PCOPY(_dmamap_destroy);
	sdt->_dmamap_load = sbus_dmamap_load;
	PCOPY(_dmamap_load_mbuf);
	PCOPY(_dmamap_load_uio);
	PCOPY(_dmamap_load_raw);
	sdt->_dmamap_unload = sbus_dmamap_unload;
	sdt->_dmamap_sync = sbus_dmamap_sync;
	sdt->_dmamem_alloc = sbus_dmamem_alloc;
	sdt->_dmamem_free = sbus_dmamem_free;
	sdt->_dmamem_map = sbus_dmamem_map;
	sdt->_dmamem_unmap = sbus_dmamem_unmap;
	PCOPY(_dmamem_mmap);
#undef PCOPY
	sc->sc_dmatag = sdt;
	return (sdt);
}
977
/*
 * Load a DMA map for `buflen' bytes at `buf': carve a DVMA range out
 * of the extent map, then enter an IOMMU translation for each page of
 * the buffer.  Always produces a single DMA segment.
 */
int
sbus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	int err, s;
	bus_size_t sgsize;
	paddr_t curaddr;
	u_long dvmaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	pmap_t pmap;
	struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("sbus_dmamap_load: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}
#if 1
	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	/* NOTE(review): %d with bus_size_t args is a format mismatch. */
	if (buflen > map->_dm_size)
#ifdef DEBUG
	{
		printf("sbus_dmamap_load(): error %d > %d -- map size exceeded!\n", buflen, map->_dm_size);
		Debugger();
		return (EINVAL);
	}
#else
		return (EINVAL);
#endif
	/* Whole pages needed to cover the (possibly unaligned) buffer. */
	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * XXX Need to implement "don't dma across this boundry".
	 */

	/* Allocate the DVMA range at splhigh to protect the extent map. */
	s = splhigh();
	err = extent_alloc(sc->sc_dvmamap, sgsize, NBPG,
	    map->_dm_boundary, EX_NOWAIT, (u_long *)&dvmaddr);
	splx(s);

	if (err != 0)
		return (err);

#ifdef DEBUG
	if (dvmaddr == (bus_addr_t)-1)
	{
		printf("sbus_dmamap_load(): dvmamap_alloc(%d, %x) failed!\n", sgsize, flags);
		Debugger();
	}
#endif
	if (dvmaddr == (bus_addr_t)-1)
		return (ENOMEM);

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	map->dm_segs[0].ds_addr = dvmaddr + (vaddr & PGOFSET);
	map->dm_segs[0].ds_len = sgsize;

#else
	if ((err = bus_dmamap_load(t->_parent, map, buf, buflen, p, flags)))
		return (err);
#endif
	/* Translate through the process's pmap, or the kernel's. */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	dvmaddr = trunc_page(map->dm_segs[0].ds_addr);
	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
	/* Walk the buffer a page at a time, entering IOMMU translations. */
	for (; buflen > 0; ) {
		/*
		 * Get the physical address for this page.
		 */
		if ((curaddr = (bus_addr_t)pmap_extract(pmap, (vaddr_t)vaddr)) == NULL) {
			bus_dmamap_unload(t, map);
			return (-1);
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

#ifdef DEBUG
		if (sbusdebug & SDB_DVMA)
			printf("sbus_dmamap_load: map %p loading va %lx at pa %lx\n",
			    map, (long)dvmaddr, (long)(curaddr & ~(NBPG-1)));
#endif
		sbus_enter(sc, trunc_page(dvmaddr), trunc_page(curaddr), flags);

		dvmaddr += PAGE_SIZE;
		vaddr += sgsize;
		buflen -= sgsize;
	}
	return (0);
}
1090
/*
 * Unload a DMA map: tear down the IOMMU translations installed by
 * sbus_dmamap_load(), return the DVMA range to the extent map, and
 * mark the map as holding no valid mappings.  Only single-segment
 * maps are supported (load always builds exactly one segment).
 */
void
sbus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	vaddr_t addr;
	int len, error, s;
	bus_addr_t dvmaddr;
	bus_size_t sgsize;
	struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;

	/* Loads always produce exactly one segment; anything else is a bug. */
	if (map->dm_nsegs != 1)
		panic("sbus_dmamap_unload: nsegs = %d", map->dm_nsegs);

	/* ds_addr may carry a page offset; the IOMMU works on whole pages. */
	addr = trunc_page(map->dm_segs[0].ds_addr);
	len = map->dm_segs[0].ds_len;

#ifdef DEBUG
	if (sbusdebug & SDB_DVMA)
		printf("sbus_dmamap_unload: map %p removing va %lx size %lx\n",
		    map, (long)addr, (long)len);
#endif
	/* Invalidate the IOMMU translations first, then free the DVMA. */
	sbus_remove(sc, addr, len);
#if 1
	dvmaddr = (map->dm_segs[0].ds_addr & ~PGOFSET);
	sgsize = map->dm_segs[0].ds_len;

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/* Unmapping is bus dependent */
	/* Block interrupts while mutating the shared DVMA extent map. */
	s = splhigh();
	error = extent_free(sc->sc_dvmamap, dvmaddr, sgsize, EX_NOWAIT);
	splx(s);
	if (error != 0)
		printf("warning: %ld of DVMA space lost\n", (long)sgsize);

	/*
	 * NOTE(review): dvmaddr is a DVMA/bus address, not a kernel VA;
	 * presumably cache_flush() is safe on it here — confirm.
	 */
	cache_flush((caddr_t)dvmaddr, (u_int) sgsize);
#else
	bus_dmamap_unload(t->_parent, map);
#endif
}
1134
1135
1136 void
1137 sbus_dmamap_sync(t, map, offset, len, ops)
1138 bus_dma_tag_t t;
1139 bus_dmamap_t map;
1140 bus_addr_t offset;
1141 bus_size_t len;
1142 int ops;
1143 {
1144 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1145 vaddr_t va = map->dm_segs[0].ds_addr + offset;
1146
1147 /*
1148 * We only support one DMA segment; supporting more makes this code
1149 * too unweildy.
1150 */
1151
1152 if (ops&BUS_DMASYNC_PREREAD) {
1153 #ifdef DEBUG
1154 if (sbusdebug & SDB_DVMA)
1155 printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_PREREAD\n",
1156 (long)va, (u_long)len);
1157 #endif
1158
1159 /* Nothing to do */;
1160 }
1161 if (ops&BUS_DMASYNC_POSTREAD) {
1162 /*
1163 * We should sync the IOMMU streaming caches here first.
1164 */
1165 #ifdef DEBUG
1166 if (sbusdebug & SDB_DVMA)
1167 printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_POSTREAD\n",
1168 (long)va, (u_long)len);
1169 #endif
1170 while (len > 0) {
1171
1172 /*
1173 * Streaming buffer flushes:
1174 *
1175 * 1 Tell strbuf to flush by storing va to strbuf_pgflush
1176 * If we're not on a cache line boundary (64-bits):
1177 * 2 Store 0 in flag
1178 * 3 Store pointer to flag in flushsync
1179 * 4 wait till flushsync becomes 0x1
1180 *
1181 * If it takes more than .5 sec, something went wrong.
1182 */
1183 #ifdef DEBUG
1184 if (sbusdebug & SDB_DVMA)
1185 printf("sbus_dmamap_sync: flushing va %p, %lu bytes left\n",
1186 (long)va, (u_long)len);
1187 #endif
1188 bus_space_write_8(sc->sc_bustag, &sc->sc_sysio->sys_strbuf.strbuf_pgflush, 0, va);
1189 if (len <= NBPG) {
1190 sbus_flush(sc);
1191 len = 0;
1192 } else
1193 len -= NBPG;
1194 va += NBPG;
1195 }
1196 }
1197 if (ops&BUS_DMASYNC_PREWRITE) {
1198 #ifdef DEBUG
1199 if (sbusdebug & SDB_DVMA)
1200 printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_PREWRITE\n",
1201 (long)va, (u_long)len);
1202 #endif
1203 /* Nothing to do */;
1204 }
1205 if (ops&BUS_DMASYNC_POSTWRITE) {
1206 #ifdef DEBUG
1207 if (sbusdebug & SDB_DVMA)
1208 printf("sbus_dmamap_sync: syncing va %p len %lu BUS_DMASYNC_POSTWRITE\n",
1209 (long)va, (u_long)len);
1210 #endif
1211 /* Nothing to do */;
1212 }
1213 bus_dmamap_sync(t->_parent, map, offset, len, ops);
1214 }
1215
1216
1217 /*
1218 * Take memory allocated by our parent bus and generate DVMA mappings for it.
1219 */
1220 int
1221 sbus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
1222 bus_dma_tag_t t;
1223 bus_size_t size, alignment, boundary;
1224 bus_dma_segment_t *segs;
1225 int nsegs;
1226 int *rsegs;
1227 int flags;
1228 {
1229 paddr_t curaddr;
1230 u_long dvmaddr;
1231 vm_page_t m;
1232 struct pglist *mlist;
1233 int error;
1234 int n, s;
1235 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1236
1237 if ((error = bus_dmamem_alloc(t->_parent, size, alignment,
1238 boundary, segs, nsegs, rsegs, flags)))
1239 return (error);
1240
1241 /*
1242 * Allocate a DVMA mapping for our new memory.
1243 */
1244 for (n=0; n<*rsegs; n++) {
1245 #if 1
1246 s = splhigh();
1247 if (extent_alloc(sc->sc_dvmamap, segs[0].ds_len, alignment,
1248 boundary, EX_NOWAIT, (u_long *)&dvmaddr)) {
1249 splx(s);
1250 /* Free what we got and exit */
1251 bus_dmamem_free(t->_parent, segs, nsegs);
1252 return (ENOMEM);
1253 }
1254 splx(s);
1255 #else
1256 dvmaddr = dvmamap_alloc(segs[0].ds_len, flags);
1257 if (dvmaddr == (bus_addr_t)-1) {
1258 /* Free what we got and exit */
1259 bus_dmamem_free(t->_parent, segs, nsegs);
1260 return (ENOMEM);
1261 }
1262 #endif
1263 segs[n].ds_addr = dvmaddr;
1264 size = segs[n].ds_len;
1265 mlist = segs[n]._ds_mlist;
1266
1267 /* Map memory into DVMA space */
1268 for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {
1269 curaddr = VM_PAGE_TO_PHYS(m);
1270 #ifdef DEBUG
1271 if (sbusdebug & SDB_DVMA)
1272 printf("sbus_dmamem_alloc: map %p loading va %lx at pa %lx\n",
1273 (long)m, (long)dvmaddr, (long)(curaddr & ~(NBPG-1)));
1274 #endif
1275 sbus_enter(sc, dvmaddr, curaddr, flags);
1276 dvmaddr += PAGE_SIZE;
1277 }
1278 }
1279 return (0);
1280 }
1281
1282 void
1283 sbus_dmamem_free(t, segs, nsegs)
1284 bus_dma_tag_t t;
1285 bus_dma_segment_t *segs;
1286 int nsegs;
1287 {
1288 vaddr_t addr;
1289 int len;
1290 int n, s, error;
1291 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1292
1293
1294 for (n=0; n<nsegs; n++) {
1295 addr = segs[n].ds_addr;
1296 len = segs[n].ds_len;
1297 sbus_remove(sc, addr, len);
1298 #if 1
1299 s = splhigh();
1300 error = extent_free(sc->sc_dvmamap, addr, len, EX_NOWAIT);
1301 splx(s);
1302 if (error != 0)
1303 printf("warning: %ld of DVMA space lost\n", (long)len);
1304 #else
1305 dvmamap_free(addr, len);
1306 #endif
1307 }
1308 bus_dmamem_free(t->_parent, segs, nsegs);
1309 }
1310
1311 /*
1312 * Map the DVMA mappings into the kernel pmap.
1313 * Check the flags to see whether we're streaming or coherent.
1314 */
1315 int
1316 sbus_dmamem_map(t, segs, nsegs, size, kvap, flags)
1317 bus_dma_tag_t t;
1318 bus_dma_segment_t *segs;
1319 int nsegs;
1320 size_t size;
1321 caddr_t *kvap;
1322 int flags;
1323 {
1324 vm_page_t m;
1325 vaddr_t va;
1326 bus_addr_t addr;
1327 struct pglist *mlist;
1328 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1329 int cbit;
1330
1331 /*
1332 * digest flags:
1333 */
1334 cbit = 0;
1335 if (flags & BUS_DMA_COHERENT) /* Disable vcache */
1336 cbit |= PMAP_NVC;
1337 if (flags & BUS_DMA_NOCACHE) /* sideffects */
1338 cbit |= PMAP_NC;
1339 /*
1340 * Now take this and map it into the CPU since it should already
1341 * be in the the IOMMU.
1342 */
1343 *kvap = (caddr_t)va = segs[0].ds_addr;
1344 mlist = segs[0]._ds_mlist;
1345 for (m = mlist->tqh_first; m != NULL; m = m->pageq.tqe_next) {
1346
1347 if (size == 0)
1348 panic("_bus_dmamem_map: size botch");
1349
1350 addr = VM_PAGE_TO_PHYS(m);
1351 pmap_enter(pmap_kernel(), va, addr | cbit,
1352 VM_PROT_READ | VM_PROT_WRITE, TRUE,
1353 VM_PROT_READ | VM_PROT_WRITE);
1354 va += PAGE_SIZE;
1355 size -= PAGE_SIZE;
1356 }
1357
1358 return (0);
1359 }
1360
1361 /*
1362 * Unmap DVMA mappings from kernel
1363 */
1364 void
1365 sbus_dmamem_unmap(t, kva, size)
1366 bus_dma_tag_t t;
1367 caddr_t kva;
1368 size_t size;
1369 {
1370 struct sbus_softc *sc = (struct sbus_softc *)t->_cookie;
1371
1372 #ifdef DIAGNOSTIC
1373 if ((u_long)kva & PGOFSET)
1374 panic("_bus_dmamem_unmap");
1375 #endif
1376
1377 size = round_page(size);
1378 pmap_remove(pmap_kernel(), (vaddr_t)kva, size);
1379 }
1380