/*	$NetBSD: mvmebus.c,v 1.2 2002/09/27 15:37:24 provos Exp $	*/

/*-
 * Copyright (c) 2000, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Steve C. Woodford.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/kcore.h>

#include <machine/cpu.h>
#include <machine/bus.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <dev/mvme/mvmebus.h>

#ifdef DIAGNOSTIC
int mvmebus_dummy_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *);
void mvmebus_dummy_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int mvmebus_dummy_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);
void mvmebus_dummy_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int);
#endif

#ifdef DEBUG
static const char *mvmebus_mod_string(vme_addr_t, vme_size_t,
	    vme_am_t, vme_datasize_t);
#endif

static void mvmebus_offboard_ram(struct mvmebus_softc *);
static int mvmebus_dmamap_load_common(struct mvmebus_softc *, bus_dmamap_t);

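/*
 * Capability bits for each of the eight VMEbus address modifier `mode'
 * codes.  The table is presumably indexed by the low three bits of the
 * address modifier (via MVMEBUS_AM2CAP() from mvmebus.h): entries 0-3 are
 * the non-privileged (user) D64-block, data, program and block modes,
 * entries 4-7 their supervisory counterparts.
 */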
vme_am_t _mvmebus_am_cap[] = {
	MVMEBUS_AM_CAP_BLKD64 | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_DATA | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_PROG | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_BLK | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_BLKD64 | MVMEBUS_AM_CAP_SUPER,
	MVMEBUS_AM_CAP_DATA | MVMEBUS_AM_CAP_SUPER,
	MVMEBUS_AM_CAP_PROG | MVMEBUS_AM_CAP_SUPER,
	MVMEBUS_AM_CAP_BLK | MVMEBUS_AM_CAP_SUPER
};

const char *mvmebus_irq_name[] = {
	"vmeirq0", "vmeirq1", "vmeirq2", "vmeirq3",
	"vmeirq4", "vmeirq5", "vmeirq6", "vmeirq7"
};

extern phys_ram_seg_t mem_clusters[0];
extern int mem_cluster_cnt;


static void
mvmebus_offboard_ram(sc)
	struct mvmebus_softc *sc;
{
	struct mvmebus_range *svr, *mvr;
	vme_addr_t start, end, size;
	int i;

	/*
	 * If we have any offboard RAM (i.e. a VMEbus RAM board) then
	 * we need to record its details since it's effectively another
	 * VMEbus slave image as far as we're concerned.
	 * The chip-specific backend will have reserved sc->sc_slaves[0]
	 * for exactly this purpose.
	 */
	svr = sc->sc_slaves;
	if (mem_cluster_cnt < 2) {
		svr->vr_am = MVMEBUS_AM_DISABLED;
		return;
	}

	start = mem_clusters[1].start;
	size = mem_clusters[1].size - 1;
	end = start + size;

	/*
	 * Figure out which VMEbus master image the RAM is
	 * visible through. This will tell us the address
	 * modifier and datasizes it uses, as well as allowing
	 * us to calculate its `real' VMEbus address.
	 *
	 * XXX FIXME: This is broken if the RAM is mapped through
	 * a translated address space. For example, on mvme167 it's
	 * perfectly legal to set up the following A32 mapping:
	 *
	 *  vr_locaddr == 0x80000000
	 *  vr_vmestart == 0x10000000
	 *  vr_vmeend == 0x10ffffff
	 *
	 * In this case, RAM at VMEbus address 0x10800000 will appear at local
	 * address 0x80800000, but we need to set the slave vr_vmestart to
	 * 0x10800000.
	 */
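	/*
	 * XXX One possible (untested) way to handle the translated case
	 * sketched above would be to derive the slave window's VMEbus start
	 * from the matching master image instead of masking the local
	 * address, i.e. replace the vr_vmestart assignment below with
	 * something like:
	 *
	 *	svr->vr_vmestart = start - mvr->vr_locstart;
	 *
	 * which should reduce to `start & mvr->vr_mask' for untranslated
	 * images.  This is only a sketch and has not been verified on
	 * real hardware.
	 */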
	for (i = 0, mvr = sc->sc_masters; i < sc->sc_nmasters; i++, mvr++) {
		vme_addr_t vstart = mvr->vr_locstart + mvr->vr_vmestart;

		if (start >= vstart &&
		    end <= vstart + (mvr->vr_vmeend - mvr->vr_vmestart))
			break;
	}
	if (i == sc->sc_nmasters) {
		svr->vr_am = MVMEBUS_AM_DISABLED;
#ifdef DEBUG
		printf("%s: No VMEbus master mapping for offboard RAM!\n",
		    sc->sc_dev.dv_xname);
#endif
		return;
	}

	svr->vr_locstart = start;
	svr->vr_vmestart = start & mvr->vr_mask;
	svr->vr_vmeend = svr->vr_vmestart + size;
	svr->vr_datasize = mvr->vr_datasize;
	svr->vr_mask = mvr->vr_mask;
	svr->vr_am = mvr->vr_am & VME_AM_ADRSIZEMASK;
	svr->vr_am |= MVMEBUS_AM_CAP_DATA | MVMEBUS_AM_CAP_PROG |
	    MVMEBUS_AM_CAP_SUPER | MVMEBUS_AM_CAP_USER;
}

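/*
 * Common attachment code for the MVME VMEbus bridges.  The chip-specific
 * front end is expected to have filled in the bus_space/bus_dma tags
 * (sc_bust, sc_dmat), its private handle (sc_chip), the master and slave
 * image tables (sc_masters/sc_nmasters, sc_slaves/sc_nslaves) and the
 * sc_intr_establish/sc_intr_disestablish hooks before calling this.
 * We build the MI vme chipset tag and the VMEbus DMA tag around those
 * and attach the vme(4) bus instance.
 */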
void
mvmebus_attach(sc)
	struct mvmebus_softc *sc;
{
	struct vmebus_attach_args vaa;
	int i;

	/* Zap the IRQ reference counts */
	for (i = 0; i < 8; i++)
		sc->sc_irqref[i] = 0;

	/* If there's offboard RAM, get its VMEbus slave attributes */
	mvmebus_offboard_ram(sc);

#ifdef DEBUG
	for (i = 0; i < sc->sc_nmasters; i++) {
		struct mvmebus_range *vr = &sc->sc_masters[i];
		if (vr->vr_am == MVMEBUS_AM_DISABLED) {
			printf("%s: Master#%d: disabled\n",
			    sc->sc_dev.dv_xname, i);
			continue;
		}
		printf("%s: Master#%d: 0x%08lx -> %s\n",
		    sc->sc_dev.dv_xname, i,
		    vr->vr_locstart + (vr->vr_vmestart & vr->vr_mask),
		    mvmebus_mod_string(vr->vr_vmestart,
			(vr->vr_vmeend - vr->vr_vmestart) + 1,
			vr->vr_am, vr->vr_datasize));
	}

	for (i = 0; i < sc->sc_nslaves; i++) {
		struct mvmebus_range *vr = &sc->sc_slaves[i];
		if (vr->vr_am == MVMEBUS_AM_DISABLED) {
			printf("%s: Slave#%d: disabled\n",
			    sc->sc_dev.dv_xname, i);
			continue;
		}
		printf("%s: Slave#%d: 0x%08lx -> %s\n",
		    sc->sc_dev.dv_xname, i, vr->vr_locstart,
		    mvmebus_mod_string(vr->vr_vmestart,
			(vr->vr_vmeend - vr->vr_vmestart) + 1,
			vr->vr_am, vr->vr_datasize));
	}
#endif

	sc->sc_vct.cookie = sc;
	sc->sc_vct.vct_probe = mvmebus_probe;
	sc->sc_vct.vct_map = mvmebus_map;
	sc->sc_vct.vct_unmap = mvmebus_unmap;
	sc->sc_vct.vct_int_map = mvmebus_intmap;
	sc->sc_vct.vct_int_evcnt = mvmebus_intr_evcnt;
	sc->sc_vct.vct_int_establish = mvmebus_intr_establish;
	sc->sc_vct.vct_int_disestablish = mvmebus_intr_disestablish;
	sc->sc_vct.vct_dmamap_create = mvmebus_dmamap_create;
	sc->sc_vct.vct_dmamap_destroy = mvmebus_dmamap_destroy;
	sc->sc_vct.vct_dmamem_alloc = mvmebus_dmamem_alloc;
	sc->sc_vct.vct_dmamem_free = mvmebus_dmamem_free;

	sc->sc_mvmedmat._cookie = sc;
	sc->sc_mvmedmat._dmamap_load = mvmebus_dmamap_load;
	sc->sc_mvmedmat._dmamap_load_mbuf = mvmebus_dmamap_load_mbuf;
	sc->sc_mvmedmat._dmamap_load_uio = mvmebus_dmamap_load_uio;
	sc->sc_mvmedmat._dmamap_load_raw = mvmebus_dmamap_load_raw;
	sc->sc_mvmedmat._dmamap_unload = mvmebus_dmamap_unload;
	sc->sc_mvmedmat._dmamap_sync = mvmebus_dmamap_sync;
	sc->sc_mvmedmat._dmamem_map = mvmebus_dmamem_map;
	sc->sc_mvmedmat._dmamem_unmap = mvmebus_dmamem_unmap;
	sc->sc_mvmedmat._dmamem_mmap = mvmebus_dmamem_mmap;

#ifdef DIAGNOSTIC
	sc->sc_mvmedmat._dmamap_create = mvmebus_dummy_dmamap_create;
	sc->sc_mvmedmat._dmamap_destroy = mvmebus_dummy_dmamap_destroy;
	sc->sc_mvmedmat._dmamem_alloc = mvmebus_dummy_dmamem_alloc;
	sc->sc_mvmedmat._dmamem_free = mvmebus_dummy_dmamem_free;
#else
	sc->sc_mvmedmat._dmamap_create = NULL;
	sc->sc_mvmedmat._dmamap_destroy = NULL;
	sc->sc_mvmedmat._dmamem_alloc = NULL;
	sc->sc_mvmedmat._dmamem_free = NULL;
#endif

	vaa.va_vct = &sc->sc_vct;
	vaa.va_bdt = &sc->sc_mvmedmat;
	vaa.va_slaveconfig = NULL;

	config_found(&sc->sc_dev, &vaa, 0);
}

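/*
 * Map a range of VMEbus address space into local bus space (the vct_map
 * method of the chipset tag built in mvmebus_attach() above).  The range
 * must lie wholly within one of the enabled master images; that image
 * supplies the local-bus physical address handed to bus_space_map().
 *
 * VME device drivers normally reach this through the MI vme_space_map()
 * wrapper (assumed here to come from <dev/vme/vmevar.h>).  A rough,
 * untested sketch, where `vct' is the chipset tag passed down at attach
 * time and vmeaddr/len/am are the driver's own values:
 *
 *	bus_space_tag_t bt;
 *	bus_space_handle_t bh;
 *	vme_mapresc_t resc;
 *
 *	error = vme_space_map(vct, vmeaddr, len, am, VME_D16, 0,
 *	    &bt, &bh, &resc);
 *	if (error == 0)
 *		val = bus_space_read_2(bt, bh, 0);
 */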
int
mvmebus_map(vsc, vmeaddr, len, am, datasize, swap, tag, handle, resc)
	void *vsc;
	vme_addr_t vmeaddr;
	vme_size_t len;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	bus_space_tag_t *tag;
	bus_space_handle_t *handle;
	vme_mapresc_t *resc;
{
	struct mvmebus_softc *sc;
	struct mvmebus_mapresc *mr;
	struct mvmebus_range *vr;
	vme_addr_t end;
	vme_am_t cap, as;
	paddr_t paddr;
	int rv, i;

	sc = vsc;
	end = (vmeaddr + len) - 1;
	paddr = 0;
	vr = sc->sc_masters;
	cap = MVMEBUS_AM2CAP(am);
	as = am & VME_AM_ADRSIZEMASK;

	for (i = 0; i < sc->sc_nmasters && paddr == 0; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		if (cap == (vr->vr_am & cap) &&
		    as == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    datasize <= vr->vr_datasize &&
		    vmeaddr >= vr->vr_vmestart && end < vr->vr_vmeend)
			paddr = vr->vr_locstart + (vmeaddr & vr->vr_mask);
	}
	if (paddr == 0)
		return (ENOMEM);

	rv = bus_space_map(sc->sc_bust, paddr, len, 0, handle);
	if (rv != 0)
		return (rv);

	/* Allocate space for the resource tag */
	if ((mr = malloc(sizeof(*mr), M_DEVBUF, M_NOWAIT)) == NULL) {
		bus_space_unmap(sc->sc_bust, *handle, len);
		return (ENOMEM);
	}

	/* Record the range's details */
	mr->mr_am = am;
	mr->mr_datasize = datasize;
	mr->mr_addr = vmeaddr;
	mr->mr_size = len;
	mr->mr_handle = *handle;
	mr->mr_range = i;

	*tag = sc->sc_bust;
	*resc = (vme_mapresc_t *) mr;

	return (0);
}

/* ARGSUSED */
void
mvmebus_unmap(vsc, resc)
	void *vsc;
	vme_mapresc_t resc;
{
	struct mvmebus_softc *sc = vsc;
	struct mvmebus_mapresc *mr = (struct mvmebus_mapresc *) resc;

	bus_space_unmap(sc->sc_bust, mr->mr_handle, mr->mr_size);

	free(mr, M_DEVBUF);
}

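/*
 * Probe for a device at the given VMEbus address (the vct_probe method).
 * The range is mapped temporarily; if the caller supplied a callback it
 * decides success, otherwise we simply bus_space_peek our way across the
 * range in units of the requested datasize and return zero only if every
 * access completed without a bus error.
 */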
int
mvmebus_probe(vsc, vmeaddr, len, am, datasize, callback, arg)
	void *vsc;
	vme_addr_t vmeaddr;
	vme_size_t len;
	vme_am_t am;
	vme_datasize_t datasize;
	int (*callback)(void *, bus_space_tag_t, bus_space_handle_t);
	void *arg;
{
	bus_space_tag_t tag;
	bus_space_handle_t handle;
	vme_mapresc_t resc;
	vme_size_t offs;
	int rv;

	/* Get a temporary mapping to the VMEbus range */
	rv = mvmebus_map(vsc, vmeaddr, len, am, datasize, 0,
	    &tag, &handle, &resc);
	if (rv)
		return (rv);

	if (callback)
		rv = (*callback) (arg, tag, handle);
	else
		for (offs = 0; offs < len && rv == 0;) {
			switch (datasize) {
			case VME_D8:
				rv = bus_space_peek_1(tag, handle, offs, NULL);
				offs += 1;
				break;

			case VME_D16:
				rv = bus_space_peek_2(tag, handle, offs, NULL);
				offs += 2;
				break;

			case VME_D32:
				rv = bus_space_peek_4(tag, handle, offs, NULL);
				offs += 4;
				break;
			}
		}

	mvmebus_unmap(vsc, resc);

	return (rv);
}

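/*
 * Translate a (level, vector) pair into an opaque interrupt handle (the
 * vct_int_map method).  The encoding is simply (level << 8) | vector, so
 * for example level 3, vector 0x90 yields the handle 0x0390; the establish
 * and disestablish routines below unpack it the same way.
 */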
/* ARGSUSED */
int
mvmebus_intmap(vsc, level, vector, handlep)
	void *vsc;
	int level, vector;
	vme_intr_handle_t *handlep;
{

	if (level < 1 || level > 7 || vector < 0x80 || vector > 0xff)
		return (EINVAL);

	/* This is rather gross */
	*handlep = (void *) (int) ((level << 8) | vector);
	return (0);
}

/* ARGSUSED */
const struct evcnt *
mvmebus_intr_evcnt(vsc, handle)
	void *vsc;
	vme_intr_handle_t handle;
{
	struct mvmebus_softc *sc = vsc;

	return (&sc->sc_evcnt[(((int) handle) >> 8) - 1]);
}

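/*
 * Hook up a handler for a VMEbus interrupt (the vct_int_establish method).
 * The first handler established at a given level tells the chip-specific
 * backend to enable that VMEbus interrupt level (the `first' argument
 * below); a per-level reference count lets the last disestablish disable
 * it again.
 *
 * A driver would normally get here via the MI wrappers; a rough, untested
 * sketch (vme_intr_map()/vme_intr_establish() are assumed to be the usual
 * <dev/vme/vmevar.h> wrappers, and foo_intr/foosc are made up):
 *
 *	vme_intr_handle_t ih;
 *
 *	if (vme_intr_map(vct, level, vector, &ih) == 0)
 *		(void) vme_intr_establish(vct, ih, IPL_BIO, foo_intr, foosc);
 */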
void *
mvmebus_intr_establish(vsc, handle, prior, func, arg)
	void *vsc;
	vme_intr_handle_t handle;
	int prior;
	int (*func)(void *);
	void *arg;
{
	struct mvmebus_softc *sc;
	int level, vector, first;

	sc = vsc;

	/* Extract the interrupt's level and vector */
	level = ((int) handle) >> 8;
	vector = ((int) handle) & 0xff;

#ifdef DIAGNOSTIC
	if (vector < 0 || vector > 0xff) {
		printf("%s: Illegal vector offset: 0x%x\n",
		    sc->sc_dev.dv_xname, vector);
		panic("mvmebus_intr_establish");
	}
	if (level < 1 || level > 7) {
		printf("%s: Illegal interrupt level: %d\n",
		    sc->sc_dev.dv_xname, level);
		panic("mvmebus_intr_establish");
	}
#endif

	first = (sc->sc_irqref[level]++ == 0);

	(*sc->sc_intr_establish)(sc->sc_chip, prior, level, vector, first,
	    func, arg, &sc->sc_evcnt[level - 1]);

	return ((void *) handle);
}

void
mvmebus_intr_disestablish(vsc, handle)
	void *vsc;
	vme_intr_handle_t handle;
{
	struct mvmebus_softc *sc;
	int level, vector, last;

	sc = vsc;

	/* Extract the interrupt's level and vector */
	level = ((int) handle) >> 8;
	vector = ((int) handle) & 0xff;

#ifdef DIAGNOSTIC
	if (vector < 0 || vector > 0xff) {
		printf("%s: Illegal vector offset: 0x%x\n",
		    sc->sc_dev.dv_xname, vector);
		panic("mvmebus_intr_disestablish");
	}
	if (level < 1 || level > 7) {
		printf("%s: Illegal interrupt level: %d\n",
		    sc->sc_dev.dv_xname, level);
		panic("mvmebus_intr_disestablish");
	}
	if (sc->sc_irqref[level] == 0) {
		printf("%s: VMEirq#%d: Reference count already zero!\n",
		    sc->sc_dev.dv_xname, level);
		panic("mvmebus_intr_disestablish");
	}
#endif

	last = (--(sc->sc_irqref[level]) == 0);

	(*sc->sc_intr_disestablish)(sc->sc_chip, level, vector, last,
	    &sc->sc_evcnt[level - 1]);
}

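/*
 * Under DIAGNOSTIC, the raw bus_dma map-create/mem-alloc entry points of
 * our DMA tag are wired to the dummies below.  Consumers must instead go
 * through the VMEbus-aware versions (vme_dmamap_create()/vme_dmamem_alloc(),
 * i.e. mvmebus_dmamap_create()/mvmebus_dmamem_alloc() here) so that the
 * address modifier, datasize and matching slave image are recorded for
 * use when the map is loaded.
 */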
#ifdef DIAGNOSTIC
/* ARGSUSED */
int
mvmebus_dummy_dmamap_create(t, size, nsegs, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegs;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{

	panic("Must use vme_dmamap_create() in place of bus_dmamap_create()");
	return (0);	/* Shut up the compiler */
}

/* ARGSUSED */
void
mvmebus_dummy_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	panic("Must use vme_dmamap_destroy() in place of bus_dmamap_destroy()");
}
#endif

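/*
 * Create a DMA map suitable for VMEbus slave access (the vct_dmamap_create
 * method).  On top of the usual bus_dmamap_create(), this checks that at
 * least one enabled slave image can satisfy the requested address space,
 * capabilities and datasize, and stashes that image plus the VME-specific
 * parameters in the map's _dm_cookie for mvmebus_dmamap_load_common().
 *
 * Rough, untested sketch of a caller using the MI vme_dmamap_create()
 * wrapper (parameter values made up for illustration):
 *
 *	bus_dmamap_t map;
 *
 *	error = vme_dmamap_create(vct, MAXPHYS, am, VME_D32, 0,
 *	    1, MAXPHYS, 0, BUS_DMA_NOWAIT, &map);
 */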
/* ARGSUSED */
int
mvmebus_dmamap_create(vsc, len, am, datasize, swap, nsegs,
    segsz, bound, flags, mapp)
	void *vsc;
	vme_size_t len;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	int nsegs;
	vme_size_t segsz;
	vme_addr_t bound;
	int flags;
	bus_dmamap_t *mapp;
{
	struct mvmebus_softc *sc = vsc;
	struct mvmebus_dmamap *vmap;
	struct mvmebus_range *vr;
	vme_am_t cap, as;
	int i, rv;

	cap = MVMEBUS_AM2CAP(am);
	as = am & VME_AM_ADRSIZEMASK;

	/*
	 * Verify that we even stand a chance of satisfying
	 * the VMEbus address space and datasize requested.
	 */
	for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		if (as == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) && datasize <= vr->vr_datasize &&
		    len <= (vr->vr_vmeend - vr->vr_vmestart))
			break;
	}

	if (i == sc->sc_nslaves)
		return (EINVAL);

	if ((vmap = malloc(sizeof(*vmap), M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);


	rv = bus_dmamap_create(sc->sc_dmat, len, nsegs, segsz,
	    bound, flags, mapp);
	if (rv != 0) {
		free(vmap, M_DMAMAP);
		return (rv);
	}

	vmap->vm_am = am;
	vmap->vm_datasize = datasize;
	vmap->vm_swap = swap;
	vmap->vm_slave = vr;

	(*mapp)->_dm_cookie = vmap;

	return (0);
}

void
mvmebus_dmamap_destroy(vsc, map)
	void *vsc;
	bus_dmamap_t map;
{
	struct mvmebus_softc *sc = vsc;

	free(map->_dm_cookie, M_DMAMAP);
	bus_dmamap_destroy(sc->sc_dmat, map);
}

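/*
 * Fix up a freshly-loaded DMA map so that its segment addresses are
 * VMEbus addresses rather than CPU physical addresses.  Each segment must
 * fall within some enabled slave image; the translation is then
 *
 *	ds_addr = (_ds_cpuaddr - vr_locstart) + vr_vmestart
 *
 * so, for example, a segment at CPU physical 0x00400000 seen through a
 * slave image with vr_locstart 0 and vr_vmestart 0x30000000 would be
 * presented to VMEbus masters at 0x30400000 (addresses made up for
 * illustration).  Segments not covered by any image would need bounce
 * buffers, which are not implemented yet (see the XXX below).
 */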
static int
mvmebus_dmamap_load_common(sc, map)
	struct mvmebus_softc *sc;
	bus_dmamap_t map;
{
	struct mvmebus_dmamap *vmap = map->_dm_cookie;
	struct mvmebus_range *vr = vmap->vm_slave;
	bus_dma_segment_t *ds;
	vme_am_t cap, am;
	int i;

	cap = MVMEBUS_AM2CAP(vmap->vm_am);
	am = vmap->vm_am & VME_AM_ADRSIZEMASK;

	/*
	 * Traverse the list of segments which make up this map, and
	 * convert the cpu-relative addresses therein to VMEbus addresses.
	 */
	for (ds = &map->dm_segs[0]; ds < &map->dm_segs[map->dm_nsegs]; ds++) {
		/*
		 * First, see if this map's slave image can access the
		 * segment; otherwise we have to waste time scanning all
		 * the slave images.
		 */
		vr = vmap->vm_slave;
		if (am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) &&
		    vmap->vm_datasize <= vr->vr_datasize &&
		    ds->_ds_cpuaddr >= vr->vr_locstart &&
		    ds->ds_len <= (vr->vr_vmeend - vr->vr_vmestart))
			goto found;

		for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
			if (vr->vr_am == MVMEBUS_AM_DISABLED)
				continue;

			/*
			 * Filter out any slave images which don't have the
			 * same VMEbus address modifier and datasize as
			 * this DMA map, and those which don't cover the
			 * physical address region containing the segment.
			 */
			if (vr != vmap->vm_slave &&
			    am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
			    cap == (vr->vr_am & cap) &&
			    vmap->vm_datasize <= vr->vr_datasize &&
			    ds->_ds_cpuaddr >= vr->vr_locstart &&
			    ds->ds_len <= (vr->vr_vmeend - vr->vr_vmestart))
				break;
		}

		/*
		 * Did we find an applicable slave image which covers this
		 * segment?
		 */
		if (i == sc->sc_nslaves) {
			/*
			 * XXX TODO:
			 *
			 * Bounce this segment via a bounce buffer allocated
			 * from this DMA map.
			 */
			printf("mvmebus_dmamap_load_common: bounce needed!\n");
			return (EINVAL);
		}

 found:
		/*
		 * Generate the VMEbus address of this segment
		 */
		ds->ds_addr = (ds->_ds_cpuaddr - vr->vr_locstart) +
		    vr->vr_vmestart;
	}

	return (0);
}

int
mvmebus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	rv = bus_dmamap_load(sc->sc_dmat, map, buf, buflen, p, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

int
mvmebus_dmamap_load_mbuf(t, map, chain, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *chain;
	int flags;
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	rv = bus_dmamap_load_mbuf(sc->sc_dmat, map, chain, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

int
mvmebus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	rv = bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

int
mvmebus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	/*
	 * mvmebus_dmamem_alloc() will ensure that the physical memory
	 * backing these segments is 100% accessible in at least one
	 * of the board's VMEbus slave images.
	 */
	rv = bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

void
mvmebus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct mvmebus_softc *sc = t->_cookie;

	/* XXX Deal with bounce buffers */

	bus_dmamap_unload(sc->sc_dmat, map);
}

void
mvmebus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct mvmebus_softc *sc = t->_cookie;

	/* XXX Bounce buffers */

	bus_dmamap_sync(sc->sc_dmat, map, offset, len, ops);
}

#ifdef DIAGNOSTIC
/* ARGSUSED */
int
mvmebus_dummy_dmamem_alloc(t, size, align, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size;
	bus_size_t align;
	bus_size_t boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	panic("Must use vme_dmamem_alloc() in place of bus_dmamem_alloc()");
}

/* ARGSUSED */
void
mvmebus_dummy_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{

	panic("Must use vme_dmamem_free() in place of bus_dmamem_free()");
}
#endif

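/*
 * Allocate physical memory which VMEbus masters can reach through one of
 * our slave images (the vct_dmamem_alloc method).  The chosen image
 * constrains the allocation's physical address range and boundary.  Note
 * the call sequence spelled out in the comment near the bottom of the
 * function: the segments returned here hold CPU-relative addresses and
 * must still be run through bus_dmamap_load_raw() on a map created by
 * vme_dmamap_create() for the same address space.  A rough, untested
 * sketch of that sequence (wrapper names assumed from <dev/vme/vmevar.h>,
 * `dmat' being the VMEbus DMA tag handed down at attach, other parameters
 * made up):
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *
 *	error = vme_dmamem_alloc(vct, len, am, VME_D32, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamap_load_raw(dmat, map, &seg, rseg, len,
 *		    BUS_DMA_NOWAIT);
 */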
/* ARGSUSED */
int
mvmebus_dmamem_alloc(vsc, len, am, datasize, swap, segs, nsegs, rsegs, flags)
	void *vsc;
	vme_size_t len;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	extern paddr_t avail_start;
	struct mvmebus_softc *sc = vsc;
	struct mvmebus_range *vr;
	bus_addr_t low, high;
	bus_size_t bound;
	vme_am_t cap;
	int i;

	cap = MVMEBUS_AM2CAP(am);
	am &= VME_AM_ADRSIZEMASK;

	/*
	 * Find a slave mapping in the requested VMEbus address space.
	 */
	for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		if (i == 0 && (flags & BUS_DMA_ONBOARD_RAM) != 0)
			continue;

		if (am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) && datasize <= vr->vr_datasize &&
		    len <= (vr->vr_vmeend - vr->vr_vmestart))
			break;
	}
	if (i == sc->sc_nslaves)
		return (EINVAL);

	/*
	 * Set up the constraints so we can allocate physical memory which
	 * is visible in the requested address space
	 */
	low = max(vr->vr_locstart, avail_start);
	high = vr->vr_locstart + (vr->vr_vmeend - vr->vr_vmestart) + 1;
	bound = (bus_size_t) vr->vr_mask + 1;

	/*
	 * Allocate physical memory.
	 *
	 * Note: This fills in the segments with cpu-relative physical
	 * addresses. A further call to bus_dmamap_load_raw() (with a
	 * dma map which specifies the same VMEbus address space and
	 * constraints as the call to here) must be made. The segments
	 * of the dma map will then contain VMEbus-relative physical
	 * addresses of the memory allocated here.
	 */
	return _bus_dmamem_alloc_common(sc->sc_dmat, low, high,
	    len, 0, bound, segs, nsegs, rsegs, flags);
}

void
mvmebus_dmamem_free(vsc, segs, nsegs)
	void *vsc;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct mvmebus_softc *sc = vsc;

	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
}

int
mvmebus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	struct mvmebus_softc *sc = t->_cookie;

	return bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags);
}

void
mvmebus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{
	struct mvmebus_softc *sc = t->_cookie;

	bus_dmamem_unmap(sc->sc_dmat, kva, size);
}

paddr_t
mvmebus_dmamem_mmap(t, segs, nsegs, offset, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t offset;
	int prot;
	int flags;
{
	struct mvmebus_softc *sc = t->_cookie;

	return bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, offset, prot, flags);
}

#ifdef DEBUG
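/*
 * Build a human-readable description of a master or slave image for the
 * DEBUG printfs in mvmebus_attach().  The result looks something like
 * "A24:000000-0fffff (D16-D8,DPUS)" (example made up): address space and
 * range, then datasize, then one letter per capability (Data, Program,
 * User, Supervisor, Block, `6' for D64 block transfers).
 */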
static const char *
mvmebus_mod_string(addr, len, am, ds)
	vme_addr_t addr;
	vme_size_t len;
	vme_am_t am;
	vme_datasize_t ds;
{
	static const char *mode[] = {"BLT64)", "DATA)", "PROG)", "BLT32)"};
	static const char *dsiz[] = {"(", "(D8,", "(D16,", "(D16-D8,",
	    "(D32,", "(D32,D8,", "(D32-D16,", "(D32-D8,"};
	static char mstring[40];
	char *fmt;

	switch (am & VME_AM_ADRSIZEMASK) {
	case VME_AM_A32:
		fmt = "A32:%08x-%08x ";
		break;

	case VME_AM_A24:
		fmt = "A24:%06x-%06x ";
		break;

	case VME_AM_A16:
		fmt = "A16:%04x-%04x ";
		break;

	case VME_AM_USERDEF:
		fmt = "USR:%08x-%08x ";
		break;
	}

	sprintf(mstring, fmt, addr, addr + len - 1);
	strcat(mstring, dsiz[ds & 0x7]);

	if (MVMEBUS_AM_HAS_CAP(am)) {
		if (am & MVMEBUS_AM_CAP_DATA)
			strcat(mstring, "D");
		if (am & MVMEBUS_AM_CAP_PROG)
			strcat(mstring, "P");
		if (am & MVMEBUS_AM_CAP_USER)
			strcat(mstring, "U");
		if (am & MVMEBUS_AM_CAP_SUPER)
			strcat(mstring, "S");
		if (am & MVMEBUS_AM_CAP_BLK)
			strcat(mstring, "B");
		if (am & MVMEBUS_AM_CAP_BLKD64)
			strcat(mstring, "6");
		strcat(mstring, ")");
	} else {
		strcat(mstring, ((am & VME_AM_PRIVMASK) == VME_AM_USER) ?
		    "USER," : "SUPER,");
		strcat(mstring, mode[am & VME_AM_MODEMASK]);
	}

	return (mstring);
}
#endif