/*	$NetBSD: maple.c,v 1.26 2003/07/15 01:31:39 lukem Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by ITOH Yasufumi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2001 Marcus Comstedt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Marcus Comstedt.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: maple.c,v 1.26 2003/07/15 01:31:39 lukem Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/conf.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/sysasicvar.h>
#include <sh3/pmap.h>

#include <dreamcast/dev/maple/maple.h>
#include <dreamcast/dev/maple/mapleconf.h>
#include <dreamcast/dev/maple/maplevar.h>
#include <dreamcast/dev/maple/maplereg.h>
#include <dreamcast/dev/maple/mapleio.h>

#include "locators.h"

/* Internal macros, functions, and variables. */

#define MAPLE_CALLOUT_TICKS 2

#define MAPLEBUSUNIT(dev)	(minor(dev)>>5)
#define MAPLEPORT(dev)		((minor(dev) & 0x18) >> 3)
#define MAPLESUBUNIT(dev)	(minor(dev) & 0x7)

/* interrupt priority level */
#define	IPL_MAPLE	IPL_BIO
#define splmaple()	splbio()
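
/*
 * Illustrative example of the minor number layout implied by the
 * macros above (bits 5 and up: bus; bits 4-3: port; bits 2-0: subunit):
 * minor 0x2b (binary 101011) decodes to bus 1, port 1, subunit 3.
 */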

/*
 * Function declarations.
 */
static int	maplematch(struct device *, struct cfdata *, void *);
static void	mapleattach(struct device *, struct device *, void *);
static void	maple_create_event_thread(void *);
static void	maple_scanbus(struct maple_softc *);
static char *	maple_unit_name(char *, int port, int subunit);
static void	maple_begin_txbuf(struct maple_softc *);
static int	maple_end_txbuf(struct maple_softc *);
static void	maple_queue_command(struct maple_softc *, struct maple_unit *,
		    int command, int datalen, const void *dataaddr);
static void	maple_write_command(struct maple_softc *, struct maple_unit *,
		    int, int, const void *);
static void	maple_start(struct maple_softc *sc);
static void	maple_start_poll(struct maple_softc *);
static void	maple_check_subunit_change(struct maple_softc *,
		    struct maple_unit *);
static void	maple_check_unit_change(struct maple_softc *,
		    struct maple_unit *);
static void	maple_print_unit(void *, const char *);
static int	maplesubmatch(struct device *, struct cfdata *, void *);
static int	mapleprint(void *, const char *);
static void	maple_attach_unit(struct maple_softc *, struct maple_unit *);
static void	maple_detach_unit_nofix(struct maple_softc *,
		    struct maple_unit *);
static void	maple_detach_unit(struct maple_softc *, struct maple_unit *);
static void	maple_queue_cmds(struct maple_softc *,
		    struct maple_cmdq_head *);
static void	maple_unit_probe(struct maple_softc *);
static void	maple_unit_ping(struct maple_softc *);
static int	maple_send_defered_periodic(struct maple_softc *);
static void	maple_send_periodic(struct maple_softc *);
static void	maple_remove_from_queues(struct maple_softc *,
		    struct maple_unit *);
static int	maple_retry(struct maple_softc *, struct maple_unit *,
		    enum maple_dma_stat);
static void	maple_queue_retry(struct maple_softc *);
static void	maple_check_responses(struct maple_softc *);
static void	maple_event_thread(void *);
static int	maple_intr(void *);
static void	maple_callout(void *);

int	maple_alloc_dma(size_t, vaddr_t *, paddr_t *);
#if 0
void	maple_free_dma(paddr_t, size_t);
#endif

/*
 * Global variables.
 */
int	maple_polling;		/* Are we polling?  (Debugger mode) */

CFATTACH_DECL(maple, sizeof(struct maple_softc),
    maplematch, mapleattach, NULL, NULL);

extern struct cfdriver maple_cd;

dev_type_open(mapleopen);
dev_type_close(mapleclose);
dev_type_ioctl(mapleioctl);

const struct cdevsw maple_cdevsw = {
	mapleopen, mapleclose, noread, nowrite, mapleioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

static int
maplematch(struct device *parent, struct cfdata *cf, void *aux)
{

	return (1);
}

static void
mapleattach(struct device *parent, struct device *self, void *aux)
{
	struct maple_softc *sc;
	struct maple_unit *u;
	vaddr_t dmabuffer;
	paddr_t dmabuffer_phys;
	u_int32_t *p;
	int port, subunit, f;

	sc = (struct maple_softc *)self;

	printf(": %s\n", sysasic_intr_string(IPL_MAPLE));

	if (maple_alloc_dma(MAPLE_DMABUF_SIZE, &dmabuffer, &dmabuffer_phys)) {
		printf("%s: unable to allocate DMA buffers.\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	p = (u_int32_t *)dmabuffer;

	for (port = 0; port < MAPLE_PORTS; port++)
		for (subunit = 0; subunit < MAPLE_SUBUNITS; subunit++) {
			u = &sc->sc_unit[port][subunit];
			u->port = port;
			u->subunit = subunit;
			u->u_dma_stat = MAPLE_DMA_IDLE;
			u->u_rxbuf = p;
			u->u_rxbuf_phys = SH3_P2SEG_TO_PHYS(p);
			p += 256;

			for (f = 0; f < MAPLE_NFUNC; f++) {
				u->u_func[f].f_funcno = f;
				u->u_func[f].f_unit = u;
			}
		}

	sc->sc_txbuf = p;
	sc->sc_txbuf_phys = SH3_P2SEG_TO_PHYS(p);

	SIMPLEQ_INIT(&sc->sc_retryq);
	TAILQ_INIT(&sc->sc_probeq);
	TAILQ_INIT(&sc->sc_pingq);
	TAILQ_INIT(&sc->sc_periodicq);
	TAILQ_INIT(&sc->sc_periodicdeferq);
	TAILQ_INIT(&sc->sc_acmdq);
	TAILQ_INIT(&sc->sc_pcmdq);

	MAPLE_RESET = RESET_MAGIC;
	MAPLE_RESET2 = 0;

	MAPLE_SPEED = SPEED_2MBPS | TIMEOUT(50000);

	MAPLE_ENABLE = 1;

	maple_polling = 1;
	maple_scanbus(sc);

	callout_init(&sc->maple_callout_ch);

	sc->sc_intrhand = sysasic_intr_establish(SYSASIC_EVENT_MAPLE_DMADONE,
	    IPL_MAPLE, maple_intr, sc);

	config_pending_incr();	/* create thread before mounting root */
	kthread_create(maple_create_event_thread, sc);
}

static void
maple_create_event_thread(void *arg)
{
	struct maple_softc *sc = arg;

	if (kthread_create1(maple_event_thread, sc, &sc->event_thread,
	    "%s", sc->sc_dev.dv_xname) == 0)
		return;

	panic("%s: unable to create event thread", sc->sc_dev.dv_xname);
}

/*
 * initial device attach
 */
static void
maple_scanbus(struct maple_softc *sc)
{
	struct maple_unit *u;
	int port;
	int last_port, last_subunit;
	int i;

	KASSERT(cold && maple_polling);

	/* probe all ports */
	for (port = 0; port < MAPLE_PORTS; port++) {
		u = &sc->sc_unit[port][0];
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
		{
			char buf[16];
			printf("%s: queued to probe 1\n",
			    maple_unit_name(buf, u->port, u->subunit));
		}
#endif
		TAILQ_INSERT_TAIL(&sc->sc_probeq, u, u_q);
		u->u_queuestat = MAPLE_QUEUE_PROBE;
	}

	last_port = last_subunit = -1;
	maple_begin_txbuf(sc);
	while ((u = TAILQ_FIRST(&sc->sc_probeq)) != NULL) {
		/*
		 * Check wrap condition
		 */
		if (u->port < last_port || u->subunit <= last_subunit)
			break;
		last_port = u->port;
		if (u->port == MAPLE_PORTS - 1)
			last_subunit = u->subunit;

		maple_unit_probe(sc);
		for (i = 10 /* just not forever */; maple_end_txbuf(sc); i--) {
			maple_start_poll(sc);
			maple_check_responses(sc);
			if (i == 0)
				break;
			/* attach may issue cmds */
			maple_queue_cmds(sc, &sc->sc_acmdq);
		}
	}
}

void
maple_run_polling(struct device *dev)
{
	struct maple_softc *sc;
	int port, subunit;
	int i;

	sc = (struct maple_softc *)dev;

	/*
	 * first, make sure polling works
	 */
	while (MAPLE_STATE != 0)	/* XXX may lose a DMA cycle */
		;

	/* XXX this will break internal state */
	for (port = 0; port < MAPLE_PORTS; port++)
		for (subunit = 0; subunit < MAPLE_SUBUNITS; subunit++)
			sc->sc_unit[port][subunit].u_dma_stat = MAPLE_DMA_IDLE;
	SIMPLEQ_INIT(&sc->sc_retryq);	/* XXX discard current retries */

	/*
	 * do polling (periodic status check only)
	 */
	maple_begin_txbuf(sc);
	maple_send_defered_periodic(sc);
	maple_send_periodic(sc);
	for (i = 10 /* just not forever */; maple_end_txbuf(sc); i--) {
		maple_start_poll(sc);
		maple_check_responses(sc);
		if (i == 0)
			break;

		/* maple_check_responses() has executed maple_begin_txbuf() */
		maple_queue_retry(sc);
		maple_send_defered_periodic(sc);
	}
}

static char *
maple_unit_name(char *buf, int port, int subunit)
{

	sprintf(buf, "maple%c", port + 'A');
	if (subunit)
		sprintf(buf+6, "%d", subunit);

	return buf;
}
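
/*
 * maple_unit_name() above produces names such as "mapleA" (port 0,
 * subunit 0) or "mapleC2" (port 2, subunit 2); the caller's buffer must
 * hold at least 8 bytes ("maple" + port letter + subunit digit + NUL).
 * Callers in this file use char buf[16].
 */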

int
maple_alloc_dma(size_t size, vaddr_t *vap, paddr_t *pap)
{
	extern paddr_t avail_start, avail_end;	/* from pmap.c */
	struct pglist mlist;
	struct vm_page *m;
	int error;

	size = round_page(size);

	error = uvm_pglistalloc(size, avail_start, avail_end - PAGE_SIZE,
	    0, 0, &mlist, 1, 0);
	if (error)
		return (error);

	m = TAILQ_FIRST(&mlist);
	*pap = VM_PAGE_TO_PHYS(m);
	*vap = SH3_PHYS_TO_P2SEG(VM_PAGE_TO_PHYS(m));

	return (0);
}
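
/*
 * Note: the buffer is returned as an SH-3 P2 segment (uncached) address,
 * so CPU accesses bypass the cache and stay coherent with Maple DMA
 * without explicit cache flushes.
 */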

#if 0	/* currently unused */
void
maple_free_dma(paddr_t paddr, size_t size)
{
	struct pglist mlist;
	struct vm_page *m;
	bus_addr_t addr;

	TAILQ_INIT(&mlist);
	for (addr = paddr; addr < paddr + size; addr += PAGE_SIZE) {
		m = PHYS_TO_VM_PAGE(addr);
		TAILQ_INSERT_TAIL(&mlist, m, pageq);
	}
	uvm_pglistfree(&mlist);
}
#endif

static void
maple_begin_txbuf(struct maple_softc *sc)
{

	sc->sc_txlink = sc->sc_txpos = sc->sc_txbuf;
	SIMPLEQ_INIT(&sc->sc_dmaq);
}

static int
maple_end_txbuf(struct maple_softc *sc)
{

	/* if no frames have been written, we can't mark the
	   list end, and so the DMA must not be activated */
	if (sc->sc_txpos == sc->sc_txbuf)
		return (0);

	*sc->sc_txlink |= 0x80000000;

	return (1);
}

static const int8_t subunit_code[] = { 0x20, 0x01, 0x02, 0x04, 0x08, 0x10 };

static void
maple_queue_command(struct maple_softc *sc, struct maple_unit *u,
	int command, int datalen, const void *dataaddr)
{
	int to, from;
	u_int32_t *p = sc->sc_txpos;

	/* Max data length = 255 longs = 1020 bytes */
	KASSERT(datalen >= 0 && datalen <= 255);

	/* Compute sender and recipient address */
	from = u->port << 6;
	to = from | subunit_code[u->subunit];

	sc->sc_txlink = p;

	/* Set length of packet and destination port (A-D) */
	*p++ = datalen | (u->port << 16);

	/* Write address to receive buffer where the response
	   frame should be put */
	*p++ = u->u_rxbuf_phys;

	/* Create the frame header.  The fields are assembled "backwards"
	   because of the Maple Bus big-endianness. */
	*p++ = (command & 0xff) | (to << 8) | (from << 16) | (datalen << 24);

	/* Copy parameter data, if any */
	if (datalen > 0) {
		const u_int32_t *param = dataaddr;
		int i;
		for (i = 0; i < datalen; i++)
			*p++ = *param++;
	}

	sc->sc_txpos = p;

	SIMPLEQ_INSERT_TAIL(&sc->sc_dmaq, u, u_dmaq);
}
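
/*
 * For reference, the transmit descriptor built above looks like this
 * (one per frame, derived directly from the stores in the function):
 *
 *	word 0:	datalen (in longwords) | port << 16
 *		(bit 31 is set on the last descriptor by maple_end_txbuf())
 *	word 1:	physical address of the unit's receive buffer
 *	word 2:	frame header: command | to << 8 | from << 16 | datalen << 24
 *	word 3+: parameter data, if any
 */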

static void
maple_write_command(struct maple_softc *sc, struct maple_unit *u, int command,
	int datalen, const void *dataaddr)
{
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	char buf[16];

	if (u->u_retrycnt)
		printf("%s: retrycnt %d\n",
		    maple_unit_name(buf, u->port, u->subunit), u->u_retrycnt);
#endif
	u->u_retrycnt = 0;
	u->u_command = command;
	u->u_datalen = datalen;
	u->u_dataaddr = dataaddr;

	maple_queue_command(sc, u, command, datalen, dataaddr);
}

/* start DMA */
static void
maple_start(struct maple_softc *sc)
{

	MAPLE_DMAADDR = sc->sc_txbuf_phys;
	MAPLE_STATE = 1;
}

/* start DMA -- wait until DMA done */
static void
maple_start_poll(struct maple_softc *sc)
{

	MAPLE_DMAADDR = sc->sc_txbuf_phys;
	MAPLE_STATE = 1;
	while (MAPLE_STATE != 0)
		;
}

static void
maple_check_subunit_change(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_unit *u1;
	int port;
	int8_t unit_map;
	int units, un;
	int i;

	KASSERT(u->subunit == 0);

	port = u->port;
	unit_map = ((int8_t *) u->u_rxbuf)[2];
	if (sc->sc_port_unit_map[port] == unit_map)
		return;

	units = ((unit_map & 0x1f) << 1) | 1;
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
	{
		char buf[16];
		printf("%s: unit_map 0x%x -> 0x%x (units 0x%x)\n",
		    maple_unit_name(buf, u->port, u->subunit),
		    sc->sc_port_unit_map[port], unit_map, units);
	}
#endif
#if 0	/* this detects unit removal rapidly but is not reliable */
	/* check for unit change */
	un = sc->sc_port_units[port] & ~units;

	/* detach removed devices */
	for (i = MAPLE_SUBUNITS - 1; i > 0; i--)
		if (un & (1 << i))
			maple_detach_unit_nofix(sc, &sc->sc_unit[port][i]);
#endif

	sc->sc_port_unit_map[port] = unit_map;

	/* schedule scanning child devices */
	un = units & ~sc->sc_port_units[port];
	for (i = MAPLE_SUBUNITS - 1; i > 0; i--)
		if (un & (1 << i)) {
			u1 = &sc->sc_unit[port][i];
			maple_remove_from_queues(sc, u1);
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
			{
				char buf[16];
				printf("%s: queued to probe 2\n",
				    maple_unit_name(buf, u1->port, u1->subunit));
			}
#endif
			TAILQ_INSERT_HEAD(&sc->sc_probeq, u1, u_q);
			u1->u_queuestat = MAPLE_QUEUE_PROBE;
			u1->u_proberetry = 0;
		}
}

static void
maple_check_unit_change(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_devinfo *newinfo = (void *) (u->u_rxbuf + 1);
	int port, subunit;

	port = u->port;
	subunit = u->subunit;
	if (memcmp(&u->devinfo, newinfo, sizeof(struct maple_devinfo)) == 0)
		goto out;	/* no change */

	/* unit inserted */

	/* attach this device */
	u->devinfo = *newinfo;
	maple_attach_unit(sc, u);

out:
	maple_remove_from_queues(sc, u);
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	{
		char buf[16];
		printf("%s: queued to ping\n",
		    maple_unit_name(buf, u->port, u->subunit));
	}
#endif
	TAILQ_INSERT_TAIL(&sc->sc_pingq, u, u_q);
	u->u_queuestat = MAPLE_QUEUE_PING;
}

static void
maple_print_unit(void *aux, const char *pnp)
{
	struct maple_attach_args *ma = aux;
	int port, subunit;
	char buf[16];
	char *prod, *p, oc;

	port = ma->ma_unit->port;
	subunit = ma->ma_unit->subunit;

	if (pnp != NULL)
		printf("%s at %s", maple_unit_name(buf, port, subunit), pnp);

	printf(" port %d", port);

	if (subunit != 0)
		printf(" subunit %d", subunit);

#ifdef MAPLE_DEBUG
	printf(": a %#x c %#x fn %#x d %#x,%#x,%#x",
	    ma->ma_devinfo->di_area_code,
	    ma->ma_devinfo->di_connector_direction,
	    ntohl(ma->ma_devinfo->di_func),
	    ntohl(ma->ma_devinfo->di_function_data[0]),
	    ntohl(ma->ma_devinfo->di_function_data[1]),
	    ntohl(ma->ma_devinfo->di_function_data[2]));
#endif

	/* nul termination */
	prod = ma->ma_devinfo->di_product_name;
	for (p = prod + sizeof ma->ma_devinfo->di_product_name; p >= prod; p--)
		if (p[-1] != '\0' && p[-1] != ' ')
			break;
	oc = *p;
	*p = '\0';

	printf(": %s", prod);

	*p = oc;	/* restore */
}

static int
maplesubmatch(struct device *parent, struct cfdata *match, void *aux)
{
	struct maple_attach_args *ma = aux;

	if (match->cf_loc[MAPLECF_PORT] != MAPLECF_PORT_DEFAULT &&
	    match->cf_loc[MAPLECF_PORT] != ma->ma_unit->port)
		return (0);

	if (match->cf_loc[MAPLECF_SUBUNIT] != MAPLECF_SUBUNIT_DEFAULT &&
	    match->cf_loc[MAPLECF_SUBUNIT] != ma->ma_unit->subunit)
		return (0);

	return (config_match(parent, match, aux));
}

static int
mapleprint(void *aux, const char *str)
{
	struct maple_attach_args *ma = aux;

#ifdef MAPLE_DEBUG
	if (str)
		aprint_normal("%s", str);
	aprint_normal(" function %d", ma->ma_function);

	return UNCONF;
#else	/* quiet */
	if (!str)
		aprint_normal(" function %d", ma->ma_function);

	return QUIET;
#endif
}

static void
maple_attach_unit(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_attach_args ma;
	u_int32_t func;
	int f;
	char oldxname[16];

	ma.ma_unit = u;
	ma.ma_devinfo = &u->devinfo;
	ma.ma_basedevinfo = &sc->sc_unit[u->port][0].devinfo;
	func = ntohl(ma.ma_devinfo->di_func);

	maple_print_unit(&ma, sc->sc_dev.dv_xname);
	printf("\n");
	strcpy(oldxname, sc->sc_dev.dv_xname);
	maple_unit_name(sc->sc_dev.dv_xname, u->port, u->subunit);

	for (f = 0; f < MAPLE_NFUNC; f++) {
		u->u_func[f].f_callback = NULL;
		u->u_func[f].f_arg = NULL;
		u->u_func[f].f_cmdstat = MAPLE_CMDSTAT_NONE;
		u->u_func[f].f_dev = NULL;
		if (func & MAPLE_FUNC(f)) {
			ma.ma_function = f;
			u->u_func[f].f_dev = config_found_sm(&sc->sc_dev, &ma,
			    mapleprint, maplesubmatch);
			u->u_ping_func = f;	/* XXX using largest func */
		}
	}
#ifdef MAPLE_MEMCARD_PING_HACK
	/*
	 * Some 3rd party memory cards pretend to be Visual Memory,
	 * but need special handling for ping.
	 */
	if (func == (MAPLE_FUNC(MAPLE_FN_MEMCARD) | MAPLE_FUNC(MAPLE_FN_LCD) |
	    MAPLE_FUNC(MAPLE_FN_CLOCK))) {
		u->u_ping_func = MAPLE_FN_MEMCARD;
		u->u_ping_stat = MAPLE_PING_MEMCARD;
	} else {
		u->u_ping_stat = MAPLE_PING_NORMAL;
	}
#endif
	strcpy(sc->sc_dev.dv_xname, oldxname);

	sc->sc_port_units[u->port] |= 1 << u->subunit;
}

static void
maple_detach_unit_nofix(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_func *fn;
	struct device *dev;
	struct maple_unit *u1;
	int port;
	int error;
	int i;
	char buf[16];

#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
	printf("%s: remove\n", maple_unit_name(buf, u->port, u->subunit));
#endif
	maple_remove_from_queues(sc, u);
	port = u->port;
	sc->sc_port_units[port] &= ~(1 << u->subunit);

	if (u->subunit == 0) {
		for (i = MAPLE_SUBUNITS - 1; i > 0; i--)
			maple_detach_unit_nofix(sc, &sc->sc_unit[port][i]);
	}

	for (fn = u->u_func; fn < &u->u_func[MAPLE_NFUNC]; fn++) {
		if ((dev = fn->f_dev) != NULL) {
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
			printf("%s: detaching func %d\n",
			    maple_unit_name(buf, port, u->subunit),
			    fn->f_funcno);
#endif

			/*
			 * Remove functions from command queue.
			 */
			switch (fn->f_cmdstat) {
			case MAPLE_CMDSTAT_ASYNC:
			case MAPLE_CMDSTAT_PERIODIC_DEFERED:
				TAILQ_REMOVE(&sc->sc_acmdq, fn, f_cmdq);
				break;
			case MAPLE_CMDSTAT_ASYNC_PERIODICQ:
			case MAPLE_CMDSTAT_PERIODIC:
				TAILQ_REMOVE(&sc->sc_pcmdq, fn, f_cmdq);
				break;
			default:
				break;
			}

			/*
			 * Detach devices.
			 */
			if ((error = config_detach(fn->f_dev, DETACH_FORCE))) {
				printf("%s: failed to detach %s (func %d), errno %d\n",
				    maple_unit_name(buf, port, u->subunit),
				    fn->f_dev->dv_xname, fn->f_funcno, error);
			}
		}

		maple_enable_periodic(&sc->sc_dev, u, fn->f_funcno, 0);

		fn->f_dev = NULL;
		fn->f_callback = NULL;
		fn->f_arg = NULL;
		fn->f_cmdstat = MAPLE_CMDSTAT_NONE;
	}
	if (u->u_dma_stat == MAPLE_DMA_RETRY) {
		/* XXX expensive? */
		SIMPLEQ_FOREACH(u1, &sc->sc_retryq, u_dmaq) {
			if (u1 == u) {
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
				printf("%s: abort retry\n",
				    maple_unit_name(buf, port, u->subunit));
#endif
				SIMPLEQ_REMOVE(&sc->sc_retryq, u, maple_unit,
				    u_dmaq);
				break;
			}
		}
	}
	u->u_dma_stat = MAPLE_DMA_IDLE;
	u->u_noping = 0;
	/* u->u_dma_func = uninitialized; */
	KASSERT(u->getcond_func_set == 0);
	memset(&u->devinfo, 0, sizeof(struct maple_devinfo));

	if (u->subunit == 0) {
		sc->sc_port_unit_map[port] = 0;
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
		{
			char buf[16];
			printf("%s: queued to probe 3\n",
			    maple_unit_name(buf, port, u->subunit));
		}
#endif
		TAILQ_INSERT_TAIL(&sc->sc_probeq, u, u_q);
		u->u_queuestat = MAPLE_QUEUE_PROBE;
	}
}

static void
maple_detach_unit(struct maple_softc *sc, struct maple_unit *u)
{

	maple_detach_unit_nofix(sc, u);
	if (u->subunit != 0)
		sc->sc_port_unit_map[u->port] &= ~(1 << (u->subunit - 1));
}

/*
 * Send a command (called by drivers)
 *
 * The "dataaddr" buffer must not point at temporary storage such as the
 * stack.  Only one command (per function) is valid at a time.
 */
void
maple_command(struct device *dev, struct maple_unit *u, int func,
	int command, int datalen, const void *dataaddr, int flags)
{
	struct maple_softc *sc = (void *) dev;
	struct maple_func *fn;
	int s;

	KASSERT(func >= 0 && func < 32);
	KASSERT(command);
	KASSERT((flags & ~MAPLE_FLAG_CMD_PERIODIC_TIMING) == 0);

	s = splsoftclock();

	fn = &u->u_func[func];
#if 1 /*def DIAGNOSTIC*/
	{char buf[16];
	if (fn->f_cmdstat != MAPLE_CMDSTAT_NONE)
		panic("maple_command: %s func %d: requesting more than one command",
		    maple_unit_name(buf, u->port, u->subunit), func);
	}
#endif
	fn->f_command = command;
	fn->f_datalen = datalen;
	fn->f_dataaddr = dataaddr;
	if (flags & MAPLE_FLAG_CMD_PERIODIC_TIMING) {
		fn->f_cmdstat = MAPLE_CMDSTAT_PERIODIC;
		TAILQ_INSERT_TAIL(&sc->sc_pcmdq, fn, f_cmdq);
	} else {
		fn->f_cmdstat = MAPLE_CMDSTAT_ASYNC;
		TAILQ_INSERT_TAIL(&sc->sc_acmdq, fn, f_cmdq);
		wakeup(&sc->sc_event);	/* wake for async event */
	}
	splx(s);
}
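
#if 0	/* example */
/*
 * Sketch only: how a hypothetical child driver might issue an
 * asynchronous command.  "mydev_softc" and its members (sc_parent,
 * sc_unit, sc_funcno, sc_work) are assumptions, not part of this API;
 * note that the data buffer lives in the softc, never on the stack,
 * because the command is transmitted after this function returns.
 */
static void
mydev_query(struct mydev_softc *msc)
{

	msc->sc_work = htonl(MAPLE_FUNC(msc->sc_funcno));
	maple_command(msc->sc_parent, msc->sc_unit, msc->sc_funcno,
	    MAPLE_COMMAND_GETCOND, 1, &msc->sc_work, 0);
}
#endif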

static void
maple_queue_cmds(struct maple_softc *sc,
	struct maple_cmdq_head *head)
{
	struct maple_func *fn, *nextfn;
	struct maple_unit *u;

	/*
	 * Note: since the queue element may be queued immediately,
	 * we can't use TAILQ_FOREACH.
	 */
	fn = TAILQ_FIRST(head);
	TAILQ_INIT(head);
	for ( ; fn; fn = nextfn) {
		nextfn = TAILQ_NEXT(fn, f_cmdq);

		KASSERT(fn->f_cmdstat != MAPLE_CMDSTAT_NONE);
		u = fn->f_unit;
		if (u->u_dma_stat == MAPLE_DMA_IDLE) {
			maple_write_command(sc, u,
			    fn->f_command, fn->f_datalen, fn->f_dataaddr);
			u->u_dma_stat = (fn->f_cmdstat == MAPLE_CMDSTAT_ASYNC ||
			    fn->f_cmdstat == MAPLE_CMDSTAT_ASYNC_PERIODICQ) ?
			    MAPLE_DMA_ACMD : MAPLE_DMA_PCMD;
			u->u_dma_func = fn->f_funcno;
			fn->f_cmdstat = MAPLE_CMDSTAT_NONE;
		} else if (u->u_dma_stat == MAPLE_DMA_RETRY) {
			/* unit is busy --- try again */
			/*
			 * always add to periodic command queue
			 * (wait until the next periodic timing),
			 * since the unit will never be freed until the
			 * next periodic timing.
			 */
			switch (fn->f_cmdstat) {
			case MAPLE_CMDSTAT_ASYNC:
				fn->f_cmdstat = MAPLE_CMDSTAT_ASYNC_PERIODICQ;
				break;
			case MAPLE_CMDSTAT_PERIODIC_DEFERED:
				fn->f_cmdstat = MAPLE_CMDSTAT_PERIODIC;
				break;
			default:
				break;
			}
			TAILQ_INSERT_TAIL(&sc->sc_pcmdq, fn, f_cmdq);
		} else {
			/* unit is busy --- try again */
			/*
			 * always add to async command queue
			 * (process immediately)
			 */
			switch (fn->f_cmdstat) {
			case MAPLE_CMDSTAT_ASYNC_PERIODICQ:
				fn->f_cmdstat = MAPLE_CMDSTAT_ASYNC;
				break;
			case MAPLE_CMDSTAT_PERIODIC:
				fn->f_cmdstat = MAPLE_CMDSTAT_PERIODIC_DEFERED;
				break;
			default:
				break;
			}
			TAILQ_INSERT_TAIL(&sc->sc_acmdq, fn, f_cmdq);
		}
	}
}

/* schedule probing a device */
static void
maple_unit_probe(struct maple_softc *sc)
{
	struct maple_unit *u;

	if ((u = TAILQ_FIRST(&sc->sc_probeq)) != NULL) {
		KASSERT(u->u_dma_stat == MAPLE_DMA_IDLE);
		KASSERT(u->u_queuestat == MAPLE_QUEUE_PROBE);
		maple_remove_from_queues(sc, u);
		maple_write_command(sc, u, MAPLE_COMMAND_DEVINFO, 0, NULL);
		u->u_dma_stat = MAPLE_DMA_PROBE;
		/* u->u_dma_func = ignored; */
	}
}

/*
 * Enable/disable unit pinging (called by drivers)
 */
/* ARGSUSED */
void
maple_enable_unit_ping(struct device *dev, struct maple_unit *u,
	int func, int enable)
{
#if 0	/* currently unused */
	struct maple_softc *sc = (void *) dev;
#endif

	if (enable)
		u->u_noping &= ~MAPLE_FUNC(func);
	else
		u->u_noping |= MAPLE_FUNC(func);
}
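
/*
 * A driver might, for instance, suppress pinging while it runs its own
 * command sequence on the unit (so ping GETCOND traffic cannot be
 * interleaved between its commands) and restore it afterwards:
 * maple_enable_unit_ping(dev, u, func, 0) ... (dev, u, func, 1).
 * This usage pattern is illustrative, not prescribed by this file.
 */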

/* schedule pinging a device */
static void
maple_unit_ping(struct maple_softc *sc)
{
	struct maple_unit *u;
	struct maple_func *fn;
#ifdef MAPLE_MEMCARD_PING_HACK
	static const u_int32_t memcard_ping_arg[2] = {
		0x02000000,	/* htonl(MAPLE_FUNC(MAPLE_FN_MEMCARD)) */
		0		/* pt (1 byte) and unused 3 bytes */
	};
#endif

	if ((u = TAILQ_FIRST(&sc->sc_pingq)) != NULL) {
		KASSERT(u->u_queuestat == MAPLE_QUEUE_PING);
		maple_remove_from_queues(sc, u);
		if (u->u_dma_stat == MAPLE_DMA_IDLE && u->u_noping == 0) {
#ifdef MAPLE_MEMCARD_PING_HACK
			if (u->u_ping_stat == MAPLE_PING_MINFO) {
				/* use MINFO for some memory cards */
				maple_write_command(sc, u,
				    MAPLE_COMMAND_GETMINFO,
				    2, memcard_ping_arg);
			} else
#endif
			{
				fn = &u->u_func[u->u_ping_func];
				fn->f_work = htonl(MAPLE_FUNC(u->u_ping_func));
				maple_write_command(sc, u,
				    MAPLE_COMMAND_GETCOND,
				    1, &fn->f_work);
			}
			u->u_dma_stat = MAPLE_DMA_PING;
			/* u->u_dma_func = XXX; */
		} else {
			/* no need if periodic */
			TAILQ_INSERT_TAIL(&sc->sc_pingq, u, u_q);
			u->u_queuestat = MAPLE_QUEUE_PING;
		}
	}
}

/*
 * Enable/disable periodic GETCOND (called by drivers)
 */
void
maple_enable_periodic(struct device *dev, struct maple_unit *u,
	int func, int on)
{
	struct maple_softc *sc = (void *) dev;
	struct maple_func *fn;

	KASSERT(func >= 0 && func < 32);

	fn = &u->u_func[func];

	if (on) {
		if (fn->f_periodic_stat == MAPLE_PERIODIC_NONE) {
			TAILQ_INSERT_TAIL(&sc->sc_periodicq, fn, f_periodicq);
			fn->f_periodic_stat = MAPLE_PERIODIC_INQ;
			u->getcond_func_set |= MAPLE_FUNC(func);
		}
	} else {
		if (fn->f_periodic_stat == MAPLE_PERIODIC_INQ)
			TAILQ_REMOVE(&sc->sc_periodicq, fn, f_periodicq);
		else if (fn->f_periodic_stat == MAPLE_PERIODIC_DEFERED)
			TAILQ_REMOVE(&sc->sc_periodicdeferq, fn, f_periodicq);
		fn->f_periodic_stat = MAPLE_PERIODIC_NONE;
		u->getcond_func_set &= ~MAPLE_FUNC(func);
	}
}
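
#if 0	/* example */
/*
 * Sketch only: a hypothetical child driver enabling periodic GETCOND
 * delivery for its function at attach time and disabling it at detach.
 * "parent", "msc->sc_unit", and "msc->sc_funcno" are assumed names; the
 * condition data then arrives through the callback installed with
 * maple_set_callback() (see below) with MAPLE_FLAG_PERIODIC set.
 */
	maple_enable_periodic(parent, msc->sc_unit, msc->sc_funcno, 1);
	/* ... later, at detach ... */
	maple_enable_periodic(parent, msc->sc_unit, msc->sc_funcno, 0);
#endif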

/*
 * queue periodic GETCOND
 */
static int
maple_send_defered_periodic(struct maple_softc *sc)
{
	struct maple_unit *u;
	struct maple_func *fn, *nextfn;
	int defer_remain = 0;

	for (fn = TAILQ_FIRST(&sc->sc_periodicdeferq); fn; fn = nextfn) {
		KASSERT(fn->f_periodic_stat == MAPLE_PERIODIC_DEFERED);

		nextfn = TAILQ_NEXT(fn, f_periodicq);

		u = fn->f_unit;
		if (u->u_dma_stat == MAPLE_DMA_IDLE ||
		    u->u_dma_stat == MAPLE_DMA_RETRY) {
			/*
			 * if IDLE  -> queue this request
			 * if RETRY -> the unit will never be freed until
			 *             the next periodic timing, so just
			 *             restore it to the normal periodic
			 *             queue.
			 */
			TAILQ_REMOVE(&sc->sc_periodicdeferq, fn, f_periodicq);
			TAILQ_INSERT_TAIL(&sc->sc_periodicq, fn, f_periodicq);
			fn->f_periodic_stat = MAPLE_PERIODIC_INQ;

			if (u->u_dma_stat == MAPLE_DMA_IDLE) {
				/*
				 * queue periodic command
				 */
				fn->f_work = htonl(MAPLE_FUNC(fn->f_funcno));
				maple_write_command(sc, u,
				    MAPLE_COMMAND_GETCOND, 1, &fn->f_work);
				u->u_dma_stat = MAPLE_DMA_PERIODIC;
				u->u_dma_func = fn->f_funcno;
			}
		} else {
			defer_remain = 1;
		}
	}

	return defer_remain;
}

static void
maple_send_periodic(struct maple_softc *sc)
{
	struct maple_unit *u;
	struct maple_func *fn, *nextfn;

	for (fn = TAILQ_FIRST(&sc->sc_periodicq); fn; fn = nextfn) {
		KASSERT(fn->f_periodic_stat == MAPLE_PERIODIC_INQ);

		nextfn = TAILQ_NEXT(fn, f_periodicq);

		u = fn->f_unit;
		if (u->u_dma_stat != MAPLE_DMA_IDLE) {
			if (u->u_dma_stat != MAPLE_DMA_RETRY) {
				/*
				 * can't be queued --- move to the
				 * deferred queue
				 */
				TAILQ_REMOVE(&sc->sc_periodicq, fn,
				    f_periodicq);
				TAILQ_INSERT_TAIL(&sc->sc_periodicdeferq, fn,
				    f_periodicq);
				fn->f_periodic_stat = MAPLE_PERIODIC_DEFERED;
			}
		} else {
			/*
			 * queue periodic command
			 */
			fn->f_work = htonl(MAPLE_FUNC(fn->f_funcno));
			maple_write_command(sc, u, MAPLE_COMMAND_GETCOND,
			    1, &fn->f_work);
			u->u_dma_stat = MAPLE_DMA_PERIODIC;
			u->u_dma_func = fn->f_funcno;
		}
	}
}

static void
maple_remove_from_queues(struct maple_softc *sc, struct maple_unit *u)
{

	/* remove from queues */
	if (u->u_queuestat == MAPLE_QUEUE_PROBE)
		TAILQ_REMOVE(&sc->sc_probeq, u, u_q);
	else if (u->u_queuestat == MAPLE_QUEUE_PING)
		TAILQ_REMOVE(&sc->sc_pingq, u, u_q);
#ifdef DIAGNOSTIC
	else if (u->u_queuestat != MAPLE_QUEUE_NONE)
		panic("maple_remove_from_queues: queuestat %d", u->u_queuestat);
#endif
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	if (u->u_queuestat != MAPLE_QUEUE_NONE) {
		char buf[16];
		printf("%s: dequeued\n",
		    maple_unit_name(buf, u->port, u->subunit));
	}
#endif

	u->u_queuestat = MAPLE_QUEUE_NONE;
}

/*
 * retry the current command at the next periodic timing
 */
static int
maple_retry(struct maple_softc *sc, struct maple_unit *u,
	enum maple_dma_stat st)
{

	KASSERT(st != MAPLE_DMA_IDLE && st != MAPLE_DMA_RETRY);

#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	if (u->u_retrycnt == 0) {
		char buf[16];
		printf("%s: retrying: %#x, %#x, %p\n",
		    maple_unit_name(buf, u->port, u->subunit),
		    u->u_command, u->u_datalen, u->u_dataaddr);
	}
#endif
	if (u->u_retrycnt >= MAPLE_RETRY_MAX)
		return 1;

	u->u_retrycnt++;

	u->u_saved_dma_stat = st;
	u->u_dma_stat = MAPLE_DMA_RETRY;	/* no new command before retry done */
	SIMPLEQ_INSERT_TAIL(&sc->sc_retryq, u, u_dmaq);

	return 0;
}

static void
maple_queue_retry(struct maple_softc *sc)
{
	struct maple_unit *u, *nextu;

	/*
	 * Note: since the queue element is queued immediately
	 * in maple_queue_command, we can't use SIMPLEQ_FOREACH.
	 */
	for (u = SIMPLEQ_FIRST(&sc->sc_retryq); u; u = nextu) {
		nextu = SIMPLEQ_NEXT(u, u_dmaq);

		/*
		 * Retrying has the highest priority, and the unit
		 * shall always be free.
		 */
		KASSERT(u->u_dma_stat == MAPLE_DMA_RETRY);
		maple_queue_command(sc, u, u->u_command, u->u_datalen,
		    u->u_dataaddr);
		u->u_dma_stat = u->u_saved_dma_stat;

#ifdef DIAGNOSTIC
		KASSERT(u->u_saved_dma_stat != MAPLE_DMA_IDLE);
		u->u_saved_dma_stat = MAPLE_DMA_IDLE;
#endif
	}
	SIMPLEQ_INIT(&sc->sc_retryq);
}

/*
 * Process DMA results.
 * Requires kernel context.
 */
static void
maple_check_responses(struct maple_softc *sc)
{
	struct maple_unit *u, *nextu;
	struct maple_func *fn;
	maple_response_t response;
	int func_code, len;
	int flags;
	char buf[16];

	/*
	 * Note: since the queue element may be queued immediately,
	 * we can't use SIMPLEQ_FOREACH.
	 */
	for (u = SIMPLEQ_FIRST(&sc->sc_dmaq), maple_begin_txbuf(sc);
	    u; u = nextu) {
		nextu = SIMPLEQ_NEXT(u, u_dmaq);

		if (u->u_dma_stat == MAPLE_DMA_IDLE)
			continue;	/* just detached or DDB was active */

		/*
		 * check for retransmission
		 */
		if ((response = u->u_rxbuf[0]) == MAPLE_RESPONSE_AGAIN) {
			if (maple_retry(sc, u, u->u_dma_stat) == 0)
				continue;
			/* else pass error to upper layer */
		}

		len = (u->u_rxbuf[0] >> 24);	/* length in longwords */
		len <<= 2;			/* length in bytes */

		/*
		 * call handler
		 */
		if (u->u_dma_stat == MAPLE_DMA_PERIODIC) {
			/*
			 * periodic GETCOND
			 */
			u->u_dma_stat = MAPLE_DMA_IDLE;
			func_code = u->u_dma_func;
			if (response == MAPLE_RESPONSE_DATATRF && len > 0 &&
			    ntohl(u->u_rxbuf[1]) == MAPLE_FUNC(func_code)) {
				fn = &u->u_func[func_code];
				if (fn->f_dev)
					(*fn->f_callback)(fn->f_arg,
					    (void *)u->u_rxbuf, len,
					    MAPLE_FLAG_PERIODIC);
			} else if (response == MAPLE_RESPONSE_NONE) {
				/* XXX OK? */
				/* detach */
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
				printf("%s: func: %d: periodic response %d\n",
				    maple_unit_name(buf, u->port, u->subunit),
				    u->u_dma_func,
				    response);
#endif
				/*
				 * Some 3rd party devices sometimes
				 * do not respond.
				 */
				if (maple_retry(sc, u, MAPLE_DMA_PERIODIC))
					maple_detach_unit(sc, u);
			}
			/* XXX check unexpected conditions? */

		} else if (u->u_dma_stat == MAPLE_DMA_PROBE) {
			KASSERT(u->u_queuestat == MAPLE_QUEUE_NONE);
			u->u_dma_stat = MAPLE_DMA_IDLE;
			switch (response) {
			default:
			case MAPLE_RESPONSE_NONE:
				/*
				 * Do not use maple_retry(), which conflicts
				 * with probe structure.
				 */
				if (u->subunit != 0 &&
				    ++u->u_proberetry > MAPLE_PROBERETRY_MAX) {
					printf("%s: no response\n",
					    maple_unit_name(buf,
						u->port, u->subunit));
				} else {
					/* probe again */
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
					printf("%s: queued to probe 4\n",
					    maple_unit_name(buf, u->port, u->subunit));
#endif
					TAILQ_INSERT_TAIL(&sc->sc_probeq, u,
					    u_q);
					u->u_queuestat = MAPLE_QUEUE_PROBE;
				}
				break;
			case MAPLE_RESPONSE_DEVINFO:
				/* check if the unit is changed */
				maple_check_unit_change(sc, u);
				break;
			}

		} else if (u->u_dma_stat == MAPLE_DMA_PING) {
			KASSERT(u->u_queuestat == MAPLE_QUEUE_NONE);
			u->u_dma_stat = MAPLE_DMA_IDLE;
			switch (response) {
			default:
			case MAPLE_RESPONSE_NONE:
				/*
				 * Some 3rd party devices sometimes
				 * do not respond.
				 */
				if (maple_retry(sc, u, MAPLE_DMA_PING)) {
					/* detach */
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
					printf("%s: ping response %d\n",
					    maple_unit_name(buf, u->port,
						u->subunit),
					    response);
#endif
#ifdef MAPLE_MEMCARD_PING_HACK
					if (u->u_ping_stat
					    == MAPLE_PING_MEMCARD) {
						/*
						 * The unit claims itself to be
						 * a Visual Memory, and has
						 * never responded to GETCOND.
						 * Try again using MINFO, in
						 * case it is a poorly
						 * implemented 3rd party card.
						 */
#ifdef MAPLE_DEBUG
						printf("%s: switching ping method\n",
						    maple_unit_name(buf,
							u->port, u->subunit));
#endif
						u->u_ping_stat
						    = MAPLE_PING_MINFO;
						TAILQ_INSERT_TAIL(&sc->sc_pingq,
						    u, u_q);
						u->u_queuestat
						    = MAPLE_QUEUE_PING;
					} else
#endif	/* MAPLE_MEMCARD_PING_HACK */
						maple_detach_unit(sc, u);
				}
				break;
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
			case MAPLE_RESPONSE_DATATRF:
				TAILQ_INSERT_TAIL(&sc->sc_pingq, u, u_q);
				u->u_queuestat = MAPLE_QUEUE_PING;
#ifdef MAPLE_MEMCARD_PING_HACK
				/*
				 * If the unit responds to GETCOND, it is a
				 * normal implementation.
				 */
				if (u->u_ping_stat == MAPLE_PING_MEMCARD)
					u->u_ping_stat = MAPLE_PING_NORMAL;
#endif
				break;
			}

		} else {
			/*
			 * Note: Do not rely on the consistency of responses.
			 */

			if (response == MAPLE_RESPONSE_NONE) {
				if (maple_retry(sc, u, u->u_dma_stat)) {
					/* detach */
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
					printf("%s: command response %d\n",
					    maple_unit_name(buf, u->port,
						u->subunit),
					    response);
#endif
					maple_detach_unit(sc, u);
				}
				continue;
			}

			flags = (u->u_dma_stat == MAPLE_DMA_PCMD) ?
			    MAPLE_FLAG_CMD_PERIODIC_TIMING : 0;
			u->u_dma_stat = MAPLE_DMA_IDLE;

			func_code = u->u_dma_func;
			fn = &u->u_func[func_code];
			if (fn->f_dev == NULL) {
				/* detached right now */
#ifdef MAPLE_DEBUG
				printf("%s: unknown function: function %d, response %d\n",
				    maple_unit_name(buf, u->port, u->subunit),
				    func_code, response);
#endif
				continue;
			}
			if (fn->f_callback != NULL) {
				(*fn->f_callback)(fn->f_arg,
				    (void *)u->u_rxbuf, len, flags);
			}
		}

		/*
		 * check for subunit change and schedule probing subunits
		 */
		if (u->subunit == 0 && response != MAPLE_RESPONSE_NONE &&
		    response != MAPLE_RESPONSE_AGAIN &&
		    ((int8_t *) u->u_rxbuf)[2] != sc->sc_port_unit_map[u->port])
			maple_check_subunit_change(sc, u);
	}
}

/*
 * Main Maple Bus thread
 */
static void
maple_event_thread(void *arg)
{
	struct maple_softc *sc = arg;
	unsigned cnt = 1;	/* timing counter */
	int s;
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
	int noreq = 0;
#endif

#ifdef MAPLE_DEBUG
	printf("%s: forked event thread, pid %d\n",
	    sc->sc_dev.dv_xname, sc->event_thread->p_pid);
#endif

	/* begin first DMA cycle */
	maple_begin_txbuf(sc);

	sc->sc_event = 1;

	/* OK, continue booting the system */
	maple_polling = 0;
	config_pending_decr();

	for (;;) {
		/*
		 * queue requests
		 */

		/* queue async commands */
		if (!TAILQ_EMPTY(&sc->sc_acmdq))
			maple_queue_cmds(sc, &sc->sc_acmdq);

		/* send deferred periodic commands */
		if (!TAILQ_EMPTY(&sc->sc_periodicdeferq))
			maple_send_defered_periodic(sc);

		/* queue periodic commands */
		if (sc->sc_event) {
			/* queue commands on periodic timing */
			if (!TAILQ_EMPTY(&sc->sc_pcmdq))
				maple_queue_cmds(sc, &sc->sc_pcmdq);

			/* retry */
			if (!SIMPLEQ_EMPTY(&sc->sc_retryq))
				maple_queue_retry(sc);

			if ((cnt & 31) == 0)	/* XXX */
				maple_unit_probe(sc);
			cnt++;

			maple_send_periodic(sc);
			if ((cnt & 7) == 0)	/* XXX */
				maple_unit_ping(sc);

			/*
			 * schedule periodic event
			 */
			sc->sc_event = 0;
			callout_reset(&sc->maple_callout_ch,
			    MAPLE_CALLOUT_TICKS, maple_callout, sc);
		}

		if (maple_end_txbuf(sc)) {

			/*
			 * start DMA
			 */
			s = splmaple();
			maple_start(sc);

			/*
			 * wait until DMA done
			 */
			if (tsleep(&sc->sc_dmadone, PWAIT, "mdma", hz)
			    == EWOULDBLOCK) {
				/* was DDB active? */
				printf("%s: timed out\n", sc->sc_dev.dv_xname);
			}
			splx(s);

			/*
			 * call handlers
			 */
			maple_check_responses(sc);
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
			noreq = 0;
#endif
		}
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
		else {
			/* weird if this occurs in succession */
#if MAPLE_DEBUG <= 2
			if (noreq)	/* ignore first time */
#endif
				printf("%s: no request %d\n",
				    sc->sc_dev.dv_xname, noreq);
			noreq++;
		}
#endif

		/*
		 * wait for an event
		 */
		s = splsoftclock();
		if (TAILQ_EMPTY(&sc->sc_acmdq) && sc->sc_event == 0 &&
		    TAILQ_EMPTY(&sc->sc_periodicdeferq)) {
			if (tsleep(&sc->sc_event, PWAIT, "mslp", hz)
			    == EWOULDBLOCK) {
				printf("%s: event timed out\n",
				    sc->sc_dev.dv_xname);
			}

		}
		splx(s);

	}

#if 0	/* maple root device can't be detached */
	kthread_exit(0);
	/* NOTREACHED */
#endif
}

static int
maple_intr(void *arg)
{
	struct maple_softc *sc = arg;

	wakeup(&sc->sc_dmadone);

	return 1;
}

static void
maple_callout(void *ctx)
{
	struct maple_softc *sc = ctx;

	sc->sc_event = 1;	/* mark as periodic event */
	wakeup(&sc->sc_event);
}

/*
 * Install callback handler (called by drivers)
 */
/* ARGSUSED */
void
maple_set_callback(struct device *dev, struct maple_unit *u, int func,
	void (*callback)(void *, struct maple_response *, int, int), void *arg)
{
#if 0	/* currently unused */
	struct maple_softc *sc = (void *) dev;
#endif
	struct maple_func *fn;

	KASSERT(func >= 0 && func < MAPLE_NFUNC);

	fn = &u->u_func[func];

	fn->f_callback = callback;
	fn->f_arg = arg;
}
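
#if 0	/* example */
/*
 * Sketch only: the shape of a driver callback installed with
 * maple_set_callback().  The response buffer holds the raw frame, "size"
 * is its length in bytes, and "flags" carries MAPLE_FLAG_PERIODIC for
 * periodic GETCOND results.  "mydev_intr" and "mydev_softc" are
 * hypothetical names, not part of this driver.
 */
static void
mydev_intr(void *arg, struct maple_response *response, int size, int flags)
{
	struct mydev_softc *msc = arg;

	if (flags & MAPLE_FLAG_PERIODIC) {
		/* interpret the condition data for this function ... */
	}
}
#endif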

/*
 * Return function definition data (called by drivers)
 */
u_int32_t
maple_get_function_data(struct maple_devinfo *devinfo, int function_code)
{
	int i, p = 0;
	u_int32_t func;

	func = ntohl(devinfo->di_func);
	for (i = 31; i >= 0; --i)
		if (func & MAPLE_FUNC(i)) {
			if (function_code == i)
				return ntohl(devinfo->di_function_data[p]);
			else
				if (++p >= 3)
					break;
		}

	return (0);
}
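
#if 0	/* example */
/*
 * Sketch only: fetching the function definition word for one function
 * of an attached unit, e.g. from an attach routine that received a
 * struct maple_attach_args *ma.  MAPLE_FN_MEMCARD is used elsewhere in
 * this file; the surrounding context is an assumption.
 */
	u_int32_t fndata;

	fndata = maple_get_function_data(ma->ma_devinfo, MAPLE_FN_MEMCARD);
#endif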

/* Generic maple device interface */

int
mapleopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct maple_softc *sc;

	sc = device_lookup(&maple_cd, MAPLEBUSUNIT(dev));
	if (sc == NULL)			/* make sure it was attached */
		return (ENXIO);

	if (MAPLEPORT(dev) >= MAPLE_PORTS)
		return (ENXIO);

	if (MAPLESUBUNIT(dev) >= MAPLE_SUBUNITS)
		return (ENXIO);

	if (!(sc->sc_port_units[MAPLEPORT(dev)] & (1 << MAPLESUBUNIT(dev))))
		return (ENXIO);

	sc->sc_port_units_open[MAPLEPORT(dev)] |= 1 << MAPLESUBUNIT(dev);

	return (0);
}

int
mapleclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct maple_softc *sc;

	sc = device_lookup(&maple_cd, MAPLEBUSUNIT(dev));

	sc->sc_port_units_open[MAPLEPORT(dev)] &= ~(1 << MAPLESUBUNIT(dev));

	return (0);
}

int
maple_unit_ioctl(struct device *dev, struct maple_unit *u, u_long cmd,
	caddr_t data, int flag, struct proc *p)
{
	struct maple_softc *sc = (struct maple_softc *)dev;

	if (!(sc->sc_port_units[u->port] & (1 << u->subunit)))
		return (ENXIO);

	switch (cmd) {
	case MAPLEIO_GDEVINFO:
		memcpy(data, &u->devinfo, sizeof(struct maple_devinfo));
		break;
	default:
		return (EPASSTHROUGH);
	}

	return (0);
}
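
/*
 * Sketch only: from userland, the device information of a unit could be
 * read through this interface roughly as follows (error handling
 * omitted; the device node name is an assumption and depends on how
 * MAKEDEV names the maple nodes on a given system):
 *
 *	struct maple_devinfo di;
 *	int fd = open("/dev/mapleA", O_RDONLY);
 *	ioctl(fd, MAPLEIO_GDEVINFO, &di);
 */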

int
mapleioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct maple_softc *sc;
	struct maple_unit *u;

	sc = device_lookup(&maple_cd, MAPLEBUSUNIT(dev));
	u = &sc->sc_unit[MAPLEPORT(dev)][MAPLESUBUNIT(dev)];

	return (maple_unit_ioctl(&sc->sc_dev, u, cmd, data, flag, p));
}