/*	$NetBSD: if_dmc.c,v 1.1 2001/05/06 17:36:04 ragge Exp $	*/
2 /*
3 * Copyright (c) 1982, 1986 Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by the University of
17 * California, Berkeley and its contributors.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)if_dmc.c 7.10 (Berkeley) 12/16/90
35 */
36
37 /*
38 * DMC11 device driver, internet version
39 *
40 * Bill Nesheim
41 * Cornell University
42 *
43 * Lou Salkind
44 * New York University
45 */
46
47 #undef DMCDEBUG /* for base table dump on fatal error */
48
49 #include "opt_inet.h"
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/mbuf.h>
54 #include <sys/ioctl.h>
55 #include <sys/socket.h>
56 #include <sys/syslog.h>
57 #include <sys/device.h>
58
59 #include <net/if.h>
60 #include <net/netisr.h>
61
62 #ifdef INET
63 #include <netinet/in.h>
64 #include <netinet/in_var.h>
65 #endif
66
67 #include <machine/bus.h>
68
69 #include <dev/qbus/ubareg.h>
70 #include <dev/qbus/ubavar.h>
71 #include <dev/qbus/if_uba.h>
72
73 #include <dev/qbus/if_dmcreg.h>
74
75
76 /*
77 * output timeout value, sec.; should depend on line speed.
78 */
79 static int dmc_timeout = 20;
80
81 #define NRCV 7
82 #define NXMT 3
83 #define NCMDS (NRCV+NXMT+4) /* size of command queue */
84
85 #define DMC_WBYTE(csr, val) \
86 bus_space_write_1(sc->sc_iot, sc->sc_ioh, csr, val)
87 #define DMC_WWORD(csr, val) \
88 bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
89 #define DMC_RBYTE(csr) \
90 bus_space_read_1(sc->sc_iot, sc->sc_ioh, csr)
91 #define DMC_RWORD(csr) \
92 bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)
93
94
95 #ifdef DMCDEBUG
96 #define printd if(dmcdebug)printf
97 int dmcdebug = 0;
98 #endif
99
100 /* error reporting intervals */
101 #define DMC_RPNBFS 50
102 #define DMC_RPDSC 1
103 #define DMC_RPTMO 10
104 #define DMC_RPDCK 10
105
/*
 * One entry of the software command queue fed to the device through
 * dmcload()/dmcrint().  Entries live in sc_cmdbuf[] and are threaded
 * onto either the free list or the pending queue via qp_next.
 */
struct dmc_command {
	char	qp_cmd;		/* command */
	short	qp_ubaddr;	/* buffer address */
	short	qp_cc;		/* character count || XMEM */
	struct	dmc_command *qp_next;	/* next command on queue */
};

/*
 * Per-buffer bookkeeping for the fixed sets of receive (NRCV) and
 * transmit (NXMT) UNIBUS buffers.
 */
struct dmcbufs {
	int	ubinfo;		/* from uballoc */
	short	cc;		/* buffer size */
	short	flags;		/* access control */
};
#define DBUF_OURS	0	/* buffer is available */
#define DBUF_DMCS	1	/* buffer claimed by somebody */
#define DBUF_XMIT	4	/* transmit buffer */
#define DBUF_RCV	8	/* receive buffer */
122
123
124 /*
125 * DMC software status per interface.
126 *
127 * Each interface is referenced by a network interface structure,
128 * sc_if, which the routing code uses to locate the interface.
129 * This structure contains the output queue for the interface, its address, ...
130 * We also have, for each interface, a set of 7 UBA interface structures
131 * for each, which
132 * contain information about the UNIBUS resources held by the interface:
133 * map registers, buffered data paths, etc. Information is cached in this
134 * structure for use by the if_uba.c routines in running the interface
135 * efficiently.
136 */
137 struct dmc_softc {
138 struct device sc_dev; /* Configuration common part */
139 struct ifnet sc_if; /* network-visible interface */
140 short sc_oused; /* output buffers currently in use */
141 short sc_iused; /* input buffers given to DMC */
142 short sc_flag; /* flags */
143 struct ubinfo sc_ui; /* UBA mapping info for base table */
144 int sc_errors[4]; /* non-fatal error counters */
145 bus_space_tag_t sc_iot;
146 bus_addr_t sc_ioh;
147 bus_dma_tag_t sc_dmat;
148 struct evcnt sc_rintrcnt; /* Interrupt counting */
149 struct evcnt sc_tintrcnt; /* Interrupt counting */
150 #define sc_datck sc_errors[0]
151 #define sc_timeo sc_errors[1]
152 #define sc_nobuf sc_errors[2]
153 #define sc_disc sc_errors[3]
154 struct dmcbufs sc_rbufs[NRCV]; /* receive buffer info */
155 struct dmcbufs sc_xbufs[NXMT]; /* transmit buffer info */
156 struct ifubinfo sc_ifuba; /* UNIBUS resources */
157 struct ifrw sc_ifr[NRCV]; /* UNIBUS receive buffer maps */
158 struct ifxmt sc_ifw[NXMT]; /* UNIBUS receive buffer maps */
159 /* command queue stuff */
160 struct dmc_command sc_cmdbuf[NCMDS];
161 struct dmc_command *sc_qhead; /* head of command queue */
162 struct dmc_command *sc_qtail; /* tail of command queue */
163 struct dmc_command *sc_qactive; /* command in progress */
164 struct dmc_command *sc_qfreeh; /* head of list of free cmd buffers */
165 struct dmc_command *sc_qfreet; /* tail of list of free cmd buffers */
166 /* end command queue stuff */
167 struct dmc_base {
168 short d_base[128]; /* DMC base table */
169 } dmc_base;
170 };
171
/* autoconf glue and driver entry points */
static int dmcmatch(struct device *, struct cfdata *, void *);
static void dmcattach(struct device *, struct device *, void *);
static int dmcinit(struct ifnet *);
static void dmcrint(void *);		/* "receiver" (command port) intr */
static void dmcxint(void *);		/* "transmitter" (completion) intr */
static void dmcdown(struct dmc_softc *sc);
static void dmcrestart(struct dmc_softc *);
static void dmcload(struct dmc_softc *, int, u_short, u_short);
static void dmcstart(struct ifnet *);
static void dmctimeout(struct ifnet *);
static int dmcioctl(struct ifnet *, u_long, caddr_t);
static int dmcoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
	struct rtentry *);
static void dmcreset(struct device *);

struct	cfattach dmc_ca = {
	sizeof(struct dmc_softc), dmcmatch, dmcattach
};

/* flags (sc_flag bits) */
#define DMC_RUNNING	0x01		/* device initialized */
#define DMC_BMAPPED	0x02		/* base table mapped */
#define DMC_RESTART	0x04		/* software restart in progress */
#define DMC_ONLINE	0x08		/* device running (had a RDYO) */
196
197
/*
 * Queue manipulation macros for the singly-linked command lists.
 * All three are wrapped in do { } while (0) so they expand to exactly
 * one statement and remain safe in unbraced if/else bodies; the
 * originals were bare statement sequences, where only the first
 * statement would be controlled by a surrounding conditional.
 */

/* Push (qp) on the front of the list; fix up tail if list was empty. */
#define	QUEUE_AT_HEAD(qp, head, tail) \
	do { \
		(qp)->qp_next = (head); \
		(head) = (qp); \
		if ((tail) == (struct dmc_command *) 0) \
			(tail) = (head); \
	} while (0)

/* Append (qp) at the end of the list; fix up head if list was empty. */
#define QUEUE_AT_TAIL(qp, head, tail) \
	do { \
		if ((tail)) \
			(tail)->qp_next = (qp); \
		else \
			(head) = (qp); \
		(qp)->qp_next = (struct dmc_command *) 0; \
		(tail) = (qp); \
	} while (0)

/* Pop the front entry; clear tail when the list drains. */
#define DEQUEUE(head, tail) \
	do { \
		(head) = (head)->qp_next; \
		if ((head) == (struct dmc_command *) 0) \
			(tail) = (head); \
	} while (0)
217
/*
 * Probe for a DMC11/DMR11.  Issue a master clear and wait for the RUN
 * bit to come back; a device that never sets RUN is absent or broken.
 * Note: uses a throwaway softc on the stack purely so the DMC_*
 * register macros (which reference `sc') work before attach.
 */
int
dmcmatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct dmc_softc ssc;
	struct dmc_softc *sc = &ssc;
	int i;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;

	/* master clear, then spin until the microcode signals RUN */
	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
		printf("dmcprobe: can't start device\n" );
		return (0);
	}
	/* request an input-ready interrupt so autoconf can find our vector */
	DMC_WBYTE(DMC_BSEL0, DMC_RQI|DMC_IEI);
	/* let's be paranoid */
	DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | DMC_RQI|DMC_IEI);
	DELAY(1000000);
	/* clear again so the device is quiescent until dmcinit() */
	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	return (1);
}
245
246 /*
247 * Interface exists: make available by filling in network interface
248 * record. System will initialize the interface when it is ready
249 * to accept packets.
250 */
251 void
252 dmcattach(struct device *parent, struct device *self, void *aux)
253 {
254 struct uba_attach_args *ua = aux;
255 struct dmc_softc *sc = (struct dmc_softc *)self;
256
257 sc->sc_iot = ua->ua_iot;
258 sc->sc_ioh = ua->ua_ioh;
259 sc->sc_dmat = ua->ua_dmat;
260
261 strcpy(sc->sc_if.if_xname, sc->sc_dev.dv_xname);
262 sc->sc_if.if_mtu = DMCMTU;
263 sc->sc_if.if_init = dmcinit;
264 sc->sc_if.if_output = dmcoutput;
265 sc->sc_if.if_ioctl = dmcioctl;
266 sc->sc_if.if_watchdog = dmctimeout;
267 sc->sc_if.if_flags = IFF_POINTOPOINT;
268 sc->sc_if.if_softc = sc;
269
270 uba_intr_establish(ua->ua_icookie, ua->ua_cvec, dmcrint, sc,
271 &sc->sc_rintrcnt);
272 uba_intr_establish(ua->ua_icookie, ua->ua_cvec+4, dmcxint, sc,
273 &sc->sc_tintrcnt);
274 uba_reset_establish(dmcreset, &sc->sc_dev);
275 evcnt_attach_dynamic(&sc->sc_rintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
276 sc->sc_dev.dv_xname, "intr");
277 evcnt_attach_dynamic(&sc->sc_tintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
278 sc->sc_dev.dv_xname, "intr");
279
280 if_attach(&sc->sc_if);
281 }
282
283 /*
284 * Reset of interface after UNIBUS reset.
285 * If interface is on specified UBA, reset its state.
286 */
287 void
288 dmcreset(struct device *dev)
289 {
290 struct dmc_softc *sc = (struct dmc_softc *)dev;
291
292 sc->sc_flag = 0;
293 sc->sc_if.if_flags &= ~IFF_RUNNING;
294 dmcinit(&sc->sc_if);
295 }
296
297 /*
298 * Initialization of interface; reinitialize UNIBUS usage.
299 */
300 int
301 dmcinit(struct ifnet *ifp)
302 {
303 struct dmc_softc *sc = ifp->if_softc;
304 struct ifrw *ifrw;
305 struct ifxmt *ifxp;
306 struct dmcbufs *rp;
307 struct dmc_command *qp;
308 struct ifaddr *ifa;
309 struct cfdata *ui = sc->sc_dev.dv_cfdata;
310 int base;
311 int s;
312
313 /*
314 * Check to see that an address has been set
315 * (both local and destination for an address family).
316 */
317 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)
318 if (ifa->ifa_addr->sa_family && ifa->ifa_dstaddr->sa_family)
319 break;
320 if (ifa == (struct ifaddr *) 0)
321 return 0;
322
323 if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
324 printf("dmcinit: DMC not running\n");
325 ifp->if_flags &= ~IFF_UP;
326 return 0;
327 }
328 /* map base table */
329 if ((sc->sc_flag & DMC_BMAPPED) == 0) {
330 sc->sc_ui.ui_size = sizeof(struct dmc_base);
331 sc->sc_ui.ui_vaddr = (caddr_t)&sc->dmc_base;
332 uballoc((void *)sc->sc_dev.dv_parent, &sc->sc_ui, 0);
333 sc->sc_flag |= DMC_BMAPPED;
334 }
335 /* initialize UNIBUS resources */
336 sc->sc_iused = sc->sc_oused = 0;
337 if ((ifp->if_flags & IFF_RUNNING) == 0) {
338 if (if_ubaminit(&sc->sc_ifuba, (void *)sc->sc_dev.dv_parent,
339 sizeof(struct dmc_header) + DMCMTU,
340 sc->sc_ifr, NRCV, sc->sc_ifw, NXMT) == 0) {
341 printf("%s: can't allocate uba resources\n",
342 sc->sc_dev.dv_xname);
343 ifp->if_flags &= ~IFF_UP;
344 return 0;
345 }
346 ifp->if_flags |= IFF_RUNNING;
347 }
348 sc->sc_flag &= ~DMC_ONLINE;
349 sc->sc_flag |= DMC_RUNNING;
350 /*
351 * Limit packets enqueued until we see if we're on the air.
352 */
353 ifp->if_snd.ifq_maxlen = 3;
354
355 /* initialize buffer pool */
356 /* receives */
357 ifrw = &sc->sc_ifr[0];
358 for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
359 rp->ubinfo = ifrw->ifrw_info;
360 rp->cc = DMCMTU + sizeof (struct dmc_header);
361 rp->flags = DBUF_OURS|DBUF_RCV;
362 ifrw++;
363 }
364 /* transmits */
365 ifxp = &sc->sc_ifw[0];
366 for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
367 rp->ubinfo = ifxp->ifw_info;
368 rp->cc = 0;
369 rp->flags = DBUF_OURS|DBUF_XMIT;
370 ifxp++;
371 }
372
373 /* set up command queues */
374 sc->sc_qfreeh = sc->sc_qfreet
375 = sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
376 (struct dmc_command *)0;
377 /* set up free command buffer list */
378 for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
379 QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
380 }
381
382 /* base in */
383 base = sc->sc_ui.ui_baddr;
384 dmcload(sc, DMC_BASEI, (u_short)base, (base>>2) & DMC_XMEM);
385 /* specify half duplex operation, flags tell if primary */
386 /* or secondary station */
387 if (ui->cf_flags == 0)
388 /* use DDCMP mode in full duplex */
389 dmcload(sc, DMC_CNTLI, 0, 0);
390 else if (ui->cf_flags == 1)
391 /* use MAINTENENCE mode */
392 dmcload(sc, DMC_CNTLI, 0, DMC_MAINT );
393 else if (ui->cf_flags == 2)
394 /* use DDCMP half duplex as primary station */
395 dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
396 else if (ui->cf_flags == 3)
397 /* use DDCMP half duplex as secondary station */
398 dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);
399
400 /* enable operation done interrupts */
401 while ((DMC_RBYTE(DMC_BSEL2) & DMC_IEO) == 0)
402 DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) | DMC_IEO);
403 s = splnet();
404 /* queue first NRCV buffers for DMC to fill */
405 for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
406 rp->flags |= DBUF_DMCS;
407 dmcload(sc, DMC_READ, rp->ubinfo,
408 (((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
409 sc->sc_iused++;
410 }
411 splx(s);
412 return 0;
413 }
414
415 /*
416 * Start output on interface. Get another datagram
417 * to send from the interface queue and map it to
418 * the interface before starting output.
419 *
420 * Must be called at spl 5
421 */
422 void
423 dmcstart(struct ifnet *ifp)
424 {
425 struct dmc_softc *sc = ifp->if_softc;
426 struct mbuf *m;
427 struct dmcbufs *rp;
428 int n;
429
430 /*
431 * Dequeue up to NXMT requests and map them to the UNIBUS.
432 * If no more requests, or no dmc buffers available, just return.
433 */
434 n = 0;
435 for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++ ) {
436 /* find an available buffer */
437 if ((rp->flags & DBUF_DMCS) == 0) {
438 IF_DEQUEUE(&sc->sc_if.if_snd, m);
439 if (m == 0)
440 return;
441 /* mark it dmcs */
442 rp->flags |= (DBUF_DMCS);
443 /*
444 * Have request mapped to UNIBUS for transmission
445 * and start the output.
446 */
447 rp->cc = if_ubaput(&sc->sc_ifuba, &sc->sc_ifw[n], m);
448 rp->cc &= DMC_CCOUNT;
449 if (++sc->sc_oused == 1)
450 sc->sc_if.if_timer = dmc_timeout;
451 dmcload(sc, DMC_WRITE, rp->ubinfo,
452 rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
453 }
454 n++;
455 }
456 }
457
458 /*
459 * Utility routine to load the DMC device registers.
460 */
461 void
462 dmcload(struct dmc_softc *sc, int type, u_short w0, u_short w1)
463 {
464 struct dmc_command *qp;
465 int sps;
466
467 sps = splnet();
468
469 /* grab a command buffer from the free list */
470 if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
471 panic("dmc command queue overflow");
472 DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);
473
474 /* fill in requested info */
475 qp->qp_cmd = (type | DMC_RQI);
476 qp->qp_ubaddr = w0;
477 qp->qp_cc = w1;
478
479 if (sc->sc_qactive) { /* command in progress */
480 if (type == DMC_READ) {
481 QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
482 } else {
483 QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
484 }
485 } else { /* command port free */
486 sc->sc_qactive = qp;
487 DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
488 dmcrint(sc);
489 }
490 splx(sps);
491 }
492
493 /*
494 * DMC interface receiver interrupt.
495 * Ready to accept another command,
496 * pull one off the command queue.
497 */
498 void
499 dmcrint(void *arg)
500 {
501 struct dmc_softc *sc = arg;
502 struct dmc_command *qp;
503 int n;
504
505 if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
506 printf("%s: dmcrint no command\n", sc->sc_dev.dv_xname);
507 return;
508 }
509 while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
510 DMC_WWORD(DMC_SEL4, qp->qp_ubaddr);
511 DMC_WWORD(DMC_SEL6, qp->qp_cc);
512 DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & ~(DMC_IEI|DMC_RQI));
513 /* free command buffer */
514 QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
515 while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
516 /*
517 * Can't check for RDYO here 'cause
518 * this routine isn't reentrant!
519 */
520 DELAY(5);
521 }
522 /* move on to next command */
523 if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
524 break; /* all done */
525 /* more commands to do, start the next one */
526 qp = sc->sc_qactive;
527 DEQUEUE(sc->sc_qhead, sc->sc_qtail);
528 DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
529 n = RDYSCAN;
530 while (n-- > 0)
531 if ((DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) ||
532 (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO))
533 break;
534 }
535 if (sc->sc_qactive) {
536 DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & (DMC_IEI|DMC_RQI));
537 /* VMS does it twice !*$%@# */
538 DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & (DMC_IEI|DMC_RQI));
539 }
540
541 }
542
543 /*
544 * DMC interface transmitter interrupt.
545 * A transfer may have completed, check for errors.
546 * If it was a read, notify appropriate protocol.
547 * If it was a write, pull the next one off the queue.
548 */
549 void
550 dmcxint(void *a)
551 {
552 struct dmc_softc *sc = a;
553
554 struct ifnet *ifp;
555 struct mbuf *m;
556 struct ifqueue *inq;
557 int arg, pkaddr, cmd, len, s;
558 struct ifrw *ifrw;
559 struct dmcbufs *rp;
560 struct ifxmt *ifxp;
561 struct dmc_header *dh;
562 char buf[64];
563
564 ifp = &sc->sc_if;
565
566 while (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO) {
567
568 cmd = DMC_RBYTE(DMC_BSEL2) & 0xff;
569 arg = DMC_RWORD(DMC_SEL6) & 0xffff;
570 /* reconstruct UNIBUS address of buffer returned to us */
571 pkaddr = ((arg&DMC_XMEM)<<2) | (DMC_RWORD(DMC_SEL4) & 0xffff);
572 /* release port */
573 DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) & ~DMC_RDYO);
574 switch (cmd & 07) {
575
576 case DMC_OUR:
577 /*
578 * A read has completed.
579 * Pass packet to type specific
580 * higher-level input routine.
581 */
582 ifp->if_ipackets++;
583 /* find location in dmcuba struct */
584 ifrw= &sc->sc_ifr[0];
585 for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
586 if(rp->ubinfo == pkaddr)
587 break;
588 ifrw++;
589 }
590 if (rp >= &sc->sc_rbufs[NRCV])
591 panic("dmc rcv");
592 if ((rp->flags & DBUF_DMCS) == 0)
593 printf("%s: done unalloc rbuf\n",
594 sc->sc_dev.dv_xname);
595
596 len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
597 if (len < 0 || len > DMCMTU) {
598 ifp->if_ierrors++;
599 #ifdef DMCDEBUG
600 printd("%s: bad rcv pkt addr 0x%x len 0x%x\n",
601 sc->sc_dev.dv_xname, pkaddr, len);
602 #endif
603 goto setup;
604 }
605 /*
606 * Deal with trailer protocol: if type is trailer
607 * get true type from first 16-bit word past data.
608 * Remember that type was trailer by setting off.
609 */
610 dh = (struct dmc_header *)ifrw->ifrw_addr;
611 dh->dmc_type = ntohs((u_short)dh->dmc_type);
612 if (len == 0)
613 goto setup;
614
615 /*
616 * Pull packet off interface. Off is nonzero if
617 * packet has trailing header; dmc_get will then
618 * force this header information to be at the front,
619 * but we still have to drop the type and length
620 * which are at the front of any trailer data.
621 */
622 m = if_ubaget(&sc->sc_ifuba, ifrw, ifp, len);
623 if (m == 0)
624 goto setup;
625 /* Shave off dmc_header */
626 m_adj(m, sizeof(struct dmc_header));
627 switch (dh->dmc_type) {
628
629 #ifdef INET
630 case DMC_IPTYPE:
631 schednetisr(NETISR_IP);
632 inq = &ipintrq;
633 break;
634 #endif
635 default:
636 m_freem(m);
637 goto setup;
638 }
639
640 s = splnet();
641 if (IF_QFULL(inq)) {
642 IF_DROP(inq);
643 m_freem(m);
644 } else
645 IF_ENQUEUE(inq, m);
646 splx(s);
647
648 setup:
649 /* is this needed? */
650 rp->ubinfo = ifrw->ifrw_info;
651
652 dmcload(sc, DMC_READ, rp->ubinfo,
653 ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
654 break;
655
656 case DMC_OUX:
657 /*
658 * A write has completed, start another
659 * transfer if there is more data to send.
660 */
661 ifp->if_opackets++;
662 /* find associated dmcbuf structure */
663 ifxp = &sc->sc_ifw[0];
664 for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
665 if(rp->ubinfo == pkaddr)
666 break;
667 ifxp++;
668 }
669 if (rp >= &sc->sc_xbufs[NXMT]) {
670 printf("%s: bad packet address 0x%x\n",
671 sc->sc_dev.dv_xname, pkaddr);
672 break;
673 }
674 if ((rp->flags & DBUF_DMCS) == 0)
675 printf("%s: unallocated packet 0x%x\n",
676 sc->sc_dev.dv_xname, pkaddr);
677 /* mark buffer free */
678 if_ubaend(&sc->sc_ifuba, ifxp);
679 rp->flags &= ~DBUF_DMCS;
680 if (--sc->sc_oused == 0)
681 sc->sc_if.if_timer = 0;
682 else
683 sc->sc_if.if_timer = dmc_timeout;
684 if ((sc->sc_flag & DMC_ONLINE) == 0) {
685 extern int ifqmaxlen;
686
687 /*
688 * We're on the air.
689 * Open the queue to the usual value.
690 */
691 sc->sc_flag |= DMC_ONLINE;
692 ifp->if_snd.ifq_maxlen = ifqmaxlen;
693 }
694 break;
695
696 case DMC_CNTLO:
697 arg &= DMC_CNTMASK;
698 if (arg & DMC_FATAL) {
699 if (arg != DMC_START) {
700 bitmask_snprintf(arg, CNTLO_BITS,
701 buf, sizeof(buf));
702 log(LOG_ERR,
703 "%s: fatal error, flags=%s\n",
704 sc->sc_dev.dv_xname, buf);
705 }
706 dmcrestart(sc);
707 break;
708 }
709 /* ACCUMULATE STATISTICS */
710 switch(arg) {
711 case DMC_NOBUFS:
712 ifp->if_ierrors++;
713 if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
714 goto report;
715 break;
716 case DMC_DISCONN:
717 if ((sc->sc_disc++ % DMC_RPDSC) == 0)
718 goto report;
719 break;
720 case DMC_TIMEOUT:
721 if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
722 goto report;
723 break;
724 case DMC_DATACK:
725 ifp->if_oerrors++;
726 if ((sc->sc_datck++ % DMC_RPDCK) == 0)
727 goto report;
728 break;
729 default:
730 goto report;
731 }
732 break;
733 report:
734 #ifdef DMCDEBUG
735 bitmask_snprintf(arg, CNTLO_BITS, buf, sizeof(buf));
736 printd("%s: soft error, flags=%s\n",
737 sc->sc_dev.dv_xname, buf);
738 #endif
739 if ((sc->sc_flag & DMC_RESTART) == 0) {
740 /*
741 * kill off the dmc to get things
742 * going again by generating a
743 * procedure error
744 */
745 sc->sc_flag |= DMC_RESTART;
746 arg = sc->sc_ui.ui_baddr;
747 dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
748 }
749 break;
750
751 default:
752 printf("%s: bad control %o\n",
753 sc->sc_dev.dv_xname, cmd);
754 break;
755 }
756 }
757 dmcstart(ifp);
758 }
759
760 /*
761 * DMC output routine.
762 * Encapsulate a packet of type family for the dmc.
763 * Use trailer local net encapsulation if enough data in first
764 * packet leaves a multiple of 512 bytes of data in remainder.
765 */
766 int
767 dmcoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
768 struct rtentry *rt)
769 {
770 int type, error, s;
771 struct mbuf *m = m0;
772 struct dmc_header *dh;
773
774 if ((ifp->if_flags & IFF_UP) == 0) {
775 error = ENETDOWN;
776 goto bad;
777 }
778
779 switch (dst->sa_family) {
780 #ifdef INET
781 case AF_INET:
782 type = DMC_IPTYPE;
783 break;
784 #endif
785
786 case AF_UNSPEC:
787 dh = (struct dmc_header *)dst->sa_data;
788 type = dh->dmc_type;
789 break;
790
791 default:
792 printf("%s: can't handle af%d\n", ifp->if_xname,
793 dst->sa_family);
794 error = EAFNOSUPPORT;
795 goto bad;
796 }
797
798 /*
799 * Add local network header
800 * (there is space for a uba on a vax to step on)
801 */
802 M_PREPEND(m, sizeof(struct dmc_header), M_DONTWAIT);
803 if (m == 0) {
804 error = ENOBUFS;
805 goto bad;
806 }
807 dh = mtod(m, struct dmc_header *);
808 dh->dmc_type = htons((u_short)type);
809
810 /*
811 * Queue message on interface, and start output if interface
812 * not yet active.
813 */
814 s = splnet();
815 if (IF_QFULL(&ifp->if_snd)) {
816 IF_DROP(&ifp->if_snd);
817 m_freem(m);
818 splx(s);
819 return (ENOBUFS);
820 }
821 IF_ENQUEUE(&ifp->if_snd, m);
822 dmcstart(ifp);
823 splx(s);
824 return (0);
825
826 bad:
827 m_freem(m0);
828 return (error);
829 }
830
831
832 /*
833 * Process an ioctl request.
834 */
835 /* ARGSUSED */
836 int
837 dmcioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
838 {
839 int s = splnet(), error = 0;
840 register struct dmc_softc *sc = ifp->if_softc;
841
842 switch (cmd) {
843
844 case SIOCSIFADDR:
845 ifp->if_flags |= IFF_UP;
846 if ((ifp->if_flags & IFF_RUNNING) == 0)
847 dmcinit(ifp);
848 break;
849
850 case SIOCSIFDSTADDR:
851 if ((ifp->if_flags & IFF_RUNNING) == 0)
852 dmcinit(ifp);
853 break;
854
855 case SIOCSIFFLAGS:
856 if ((ifp->if_flags & IFF_UP) == 0 &&
857 sc->sc_flag & DMC_RUNNING)
858 dmcdown(sc);
859 else if (ifp->if_flags & IFF_UP &&
860 (sc->sc_flag & DMC_RUNNING) == 0)
861 dmcrestart(sc);
862 break;
863
864 default:
865 error = EINVAL;
866 }
867 splx(s);
868 return (error);
869 }
870
871 /*
872 * Restart after a fatal error.
873 * Clear device and reinitialize.
874 */
875 void
876 dmcrestart(struct dmc_softc *sc)
877 {
878 int s, i;
879
880 #ifdef DMCDEBUG
881 /* dump base table */
882 printf("%s base table:\n", sc->sc_dev.dv_xname);
883 for (i = 0; i < sizeof (struct dmc_base); i++)
884 printf("%o\n" ,dmc_base[unit].d_base[i]);
885 #endif
886
887 dmcdown(sc);
888
889 /*
890 * Let the DMR finish the MCLR. At 1 Mbit, it should do so
891 * in about a max of 6.4 milliseconds with diagnostics enabled.
892 */
893 for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
894 ;
895 /* Did the timer expire or did the DMR finish? */
896 if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
897 log(LOG_ERR, "%s: M820 Test Failed\n", sc->sc_dev.dv_xname);
898 return;
899 }
900
901 /* restart DMC */
902 dmcinit(&sc->sc_if);
903 sc->sc_flag &= ~DMC_RESTART;
904 s = splnet();
905 dmcstart(&sc->sc_if);
906 splx(s);
907 sc->sc_if.if_collisions++; /* why not? */
908 }
909
910 /*
911 * Reset a device and mark down.
912 * Flush output queue and drop queue limit.
913 */
914 void
915 dmcdown(struct dmc_softc *sc)
916 {
917 struct ifxmt *ifxp;
918
919 DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
920 sc->sc_flag &= ~(DMC_RUNNING | DMC_ONLINE);
921
922 for (ifxp = sc->sc_ifw; ifxp < &sc->sc_ifw[NXMT]; ifxp++) {
923 #ifdef notyet
924 if (ifxp->ifw_xtofree) {
925 (void) m_freem(ifxp->ifw_xtofree);
926 ifxp->ifw_xtofree = 0;
927 }
928 #endif
929 }
930 IF_PURGE(&sc->sc_if.if_snd);
931 }
932
933 /*
934 * Watchdog timeout to see that transmitted packets don't
935 * lose interrupts. The device has to be online (the first
936 * transmission may block until the other side comes up).
937 */
938 void
939 dmctimeout(struct ifnet *ifp)
940 {
941 struct dmc_softc *sc = ifp->if_softc;
942 char buf1[64], buf2[64];
943
944 if (sc->sc_flag & DMC_ONLINE) {
945 bitmask_snprintf(DMC_RBYTE(DMC_BSEL0) & 0xff, DMC0BITS,
946 buf1, sizeof(buf1));
947 bitmask_snprintf(DMC_RBYTE(DMC_BSEL2) & 0xff, DMC2BITS,
948 buf2, sizeof(buf2));
949 log(LOG_ERR, "%s: output timeout, bsel0=%s bsel2=%s\n",
950 sc->sc_dev.dv_xname, buf1, buf2);
951 dmcrestart(sc);
952 }
953 }
954