/*	$Id: hd64570.c,v 1.2 1998/08/08 23:51:40 mycroft Exp $	*/

/*
 * Copyright (c) 1998 Vixie Enterprises
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer (at) flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */

/*
 * TODO:
 *
 *	o  teach the receive logic about errors, and about long frames that
 *	   span more than one input buffer.  (Right now, receive/transmit is
 *	   limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *	   This is currently 1504, which is large enough to hold the HDLC
 *	   header and the packet itself.  Packets which are too long are
 *	   silently dropped on both transmit and receive.)
 *	o  write code to handle the msci interrupts, needed only for CD
 *	   and CTS changes.
 *	o  consider switching back to a "queue tx with DMA active" model which
 *	   should help sustain outgoing traffic
 *	o  through clever use of bus_dma*() functions, it should be possible
 *	   to map the mbuf's data area directly into a descriptor transmit
 *	   buffer, removing the need to allocate extra memory.  If, however,
 *	   we run out of descriptors for this, we will need to then allocate
 *	   one large mbuf, copy the fragmented chain into it, and put it onto
 *	   a single descriptor.
 *	o  use bus_dmamap_sync() with the right offset and lengths, rather
 *	   than cheating and always sync'ing the whole region.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>

#define SCA_DEBUG_RX	0x0001
#define SCA_DEBUG_TX	0x0002
#define SCA_DEBUG_CISCO	0x0004
#define SCA_DEBUG_DMA	0x0008
#define SCA_DEBUG_RXPKT	0x0010
#define SCA_DEBUG_TXPKT	0x0020
#define SCA_DEBUG_INTR	0x0040

#if 0
#define SCA_DEBUG_LEVEL	( SCA_DEBUG_TX )
#else
#define SCA_DEBUG_LEVEL	0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
	if ((l) & sca_debug) \
		printf x;\
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif
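
/*
 * Note that SCA_DPRINTF takes the printf() arguments as a single
 * parenthesized list, since the macro body expands to "printf x".
 * A call site therefore looks like:
 *
 *	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: inuse %d\n", scp->txinuse));
 */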

#define SCA_MTU		1500	/* hard coded */

/*
 * buffers per tx and rx channels, per port, and the size of each.
 * Don't use these constants directly, as they are really only hints.
 * Use the calculated values stored in struct sca_softc instead.
 *
 * Each must be at least 2; receive would be better at around 20 or so.
 *
 * XXX Due to a damned near impossible to track down bug, transmit buffers
 * MUST be 2, no more, no less.
 */
#ifndef SCA_NtxBUFS
#define SCA_NtxBUFS	2
#endif
#ifndef SCA_NrxBUFS
#define SCA_NrxBUFS	20
#endif
#ifndef SCA_BSIZE
#define SCA_BSIZE	(SCA_MTU + 4)	/* room for HDLC as well */
#endif
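
/*
 * With the default SCA_MTU of 1500, SCA_BSIZE comes out to 1504 bytes,
 * which is the "MTU + 4" per-descriptor buffer limit the TODO notes at
 * the top of the file describe.
 */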

#if 0
#define SCA_USE_FASTQ		/* use a split queue, one for fast traffic */
#endif

static inline void sca_write_1(struct sca_softc *, u_int, u_int8_t);
static inline void sca_write_2(struct sca_softc *, u_int, u_int16_t);
static inline u_int8_t sca_read_1(struct sca_softc *, u_int);
static inline u_int16_t sca_read_2(struct sca_softc *, u_int);

static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static int sca_alloc_dma(struct sca_softc *);
static void sca_setup_dma_memory(struct sca_softc *);
static void sca_msci_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static int sca_dmac_intr(sca_port_t *, u_int8_t);
static int sca_msci_intr(struct sca_softc *, u_int8_t);

static void sca_get_packets(sca_port_t *);
static void sca_frame_process(sca_port_t *, sca_desc_t *, u_int8_t *);
static int sca_frame_avail(sca_port_t *, int *);
static void sca_frame_skip(sca_port_t *, int);

static void sca_port_starttx(sca_port_t *);

static void sca_port_up(sca_port_t *);
static void sca_port_down(sca_port_t *);

static int sca_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
    struct rtentry *));
static int sca_ioctl __P((struct ifnet *, u_long, caddr_t));
static void sca_start __P((struct ifnet *));
static void sca_watchdog __P((struct ifnet *));

static struct mbuf *sca_mbuf_alloc(caddr_t, u_int);

#if SCA_DEBUG_LEVEL > 0
static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif

static inline void
sca_write_1(struct sca_softc *sc, u_int reg, u_int8_t val)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, SCADDR(reg), val);
}

static inline void
sca_write_2(struct sca_softc *sc, u_int reg, u_int16_t val)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, SCADDR(reg), val);
}

static inline u_int8_t
sca_read_1(struct sca_softc *sc, u_int reg)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh, SCADDR(reg));
}

static inline u_int16_t
sca_read_2(struct sca_softc *sc, u_int reg)
{
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh, SCADDR(reg));
}

static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}
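
/*
 * To summarize the access layering above: the sca_* accessors take a
 * raw register number and map it through SCADDR() onto the chip's bus
 * handle, while the msci_* and dmac_* flavors first add the per-port
 * register window offset (msci_off or dmac_off) that sca_port_attach()
 * selects for each port.
 */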

int
sca_init(struct sca_softc *sc, u_int nports)
{
	/*
	 * Do a little sanity check:  check number of ports.
	 */
	if (nports < 1 || nports > 2)
		return 1;

	/*
	 * remember the details
	 */
	sc->sc_numports = nports;

	/*
	 * allocate the memory and chop it into bits.
	 */
	if (sca_alloc_dma(sc) != 0)
		return 1;
	sca_setup_dma_memory(sc);

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR, 0);	/* use ivr, no int ack */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
	return 0;
}
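
/*
 * A bus front-end (such as the PCI attachment that produces the
 * "ntwo" interfaces named below) is expected to fill in the bus tags,
 * parent pointer, and dtr_callback in struct sca_softc, call
 * sca_init() once, and then call sca_port_attach() once per port.
 */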

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;  /* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	sprintf(ifp->if_xname, "ntwo%d", ntwo_unit++);	/* one unit per port */
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_OTHER;  /* Should be HDLC, but... */
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5;  /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	if_attach(ifp);

#if NBPFILTER > 0
	bpfattach(&scp->sp_bpf, ifp, DLT_HDLC, HDLC_HDRLEN);
#endif

	if (sc->parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		    ifp->if_xname, sc->parent->dv_xname, port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);
	msci_write_1(scp, SCA_MD00,
	    ( SCA_MD0_CRC_1
	    | SCA_MD0_CRC_CCITT
	    | SCA_MD0_CRC_ENABLE
	    | SCA_MD0_MODE_HDLC));
	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
	    (SCA_MD2_DUPLEX | SCA_MD2_NRZ));

	/*
	 * reset the port (and lower RTS)
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
	msci_write_1(scp, SCA_CTL0,
	    (SCA_CTL_IDLPAT | SCA_CTL_UDRNC | SCA_CTL_RTS));
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * select the RX clock as the TX clock, and set for external
	 * clock source.
	 */
	msci_write_1(scp, SCA_RXS0, 0);
	msci_write_1(scp, SCA_TXS0, 0);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0);	/* 0x0c == CD and CTS changes only */
	msci_write_1(scp, SCA_IE20, 0);
	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	msci_write_1(scp, SCA_TRC00, 0x10);
	msci_write_1(scp, SCA_TRC10, 0x1f);
}
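
/*
 * The RRC/TRC writes above set (what are presumably) the FIFO trigger
 * levels for the receiver and the transmitter's DMA request/stop
 * points; the values are magic numbers carried over from the
 * datasheet, not derived from anything computed in this driver.
 */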

/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam,
	    0, sc->sc_allocsize, BUS_DMASYNC_PREWRITE);

	desc = scp->txdesc;
	desc_p = scp->txdesc_p;
	buf_p = scp->txbuf_p;
	scp->txcur = 0;
	scp->txinuse = 0;

	for (i = 0 ; i < SCA_NtxBUFS ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		desc->cp = desc_p & 0x0000ffff;
		desc->bp = buf_p & 0x0000ffff;
		desc->bpb = (buf_p & 0x00ff0000) >> 16;
		desc->len = SCA_BSIZE;
		desc->stat = 0;

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	desc--;
	desc->cp = scp->txdesc_p & 0x0000ffff;

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_1(scp, SCA_DIR1,
	    (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
	    (u_int8_t)((scp->txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 */
	desc = scp->rxdesc;
	desc_p = scp->rxdesc_p;
	buf_p = scp->rxbuf_p;
	scp->rxstart = 0;
	scp->rxend = SCA_NrxBUFS - 1;

	for (i = 0 ; i < SCA_NrxBUFS ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		desc->cp = desc_p & 0x0000ffff;
		desc->bp = buf_p & 0x0000ffff;
		desc->bpb = (buf_p & 0x00ff0000) >> 16;
		desc->len = SCA_BSIZE;
		desc->stat = 0x00;

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	desc--;
	desc->cp = scp->rxdesc_p & 0x0000ffff;

	sca_dmac_rxinit(scp);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam,
	    0, sc->sc_allocsize, BUS_DMASYNC_POSTWRITE);
}
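
/*
 * A note on the descriptor rings built above: each descriptor's "cp"
 * (chain pointer) holds only the low 16 bits of the physical address
 * of the next descriptor, with the shared high byte programmed into
 * the CPB register, so an entire ring must sit inside one 64KB
 * region -- the same constraint sca_alloc_dma() checks for.
 */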

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 */
	dmac_write_1(scp, SCA_CPB0,
	    (u_int8_t)((scp->rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0,
	    (u_int16_t)(scp->rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0,
	    (u_int16_t)(scp->rxdesc_p
	    + sizeof(sca_desc_t) * SCA_NrxBUFS));

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
	    (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}
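
/*
 * Note that EDAL0 is set one descriptor beyond the end of the ring:
 * the DMAC will not advance into the descriptor EDA points at, so
 * moving EDA forward (as sca_frame_skip() does) is what hands
 * processed descriptors back to the chip.
 */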

static int
sca_alloc_dma(struct sca_softc *sc)
{
	u_int allocsize;
	int err;
	int rsegs;
	u_int bpp;

	SCA_DPRINTF(SCA_DEBUG_DMA,
	    ("sizeof sca_desc_t: %d bytes\n", sizeof (sca_desc_t)));

	bpp = sc->sc_numports * (SCA_NtxBUFS + SCA_NrxBUFS);

	allocsize = bpp * (SCA_BSIZE + sizeof (sca_desc_t));

	/*
	 * sanity checks:
	 *
	 * Check the total size of the data buffers, and so on.  The total
	 * DMAable space needs to fit within a single 16M region, and the
	 * descriptors need to fit within a 64K region.
	 */
	if (allocsize > 16 * 1024 * 1024)
		return 1;
	if (bpp * sizeof (sca_desc_t) > 64 * 1024)
		return 1;

	sc->sc_allocsize = allocsize;

	/*
	 * Allocate one huge chunk of memory.
	 */
	if (bus_dmamem_alloc(sc->sc_dmat,
	    allocsize,
	    SCA_DMA_ALIGNMENT,
	    SCA_DMA_BOUNDRY,
	    &sc->sc_seg, 1, &rsegs, BUS_DMA_NOWAIT) != 0) {
		printf("Could not allocate DMA memory\n");
		return 1;
	}
	SCA_DPRINTF(SCA_DEBUG_DMA,
	    ("DMA memory allocated: %d bytes\n", allocsize));

	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_seg, 1, allocsize,
	    &sc->sc_dma_addr, BUS_DMA_NOWAIT) != 0) {
		printf("Could not map DMA memory into kernel space\n");
		return 1;
	}
	SCA_DPRINTF(SCA_DEBUG_DMA, ("DMA memory mapped\n"));

	if (bus_dmamap_create(sc->sc_dmat, allocsize, 2,
	    allocsize, SCA_DMA_BOUNDRY,
	    BUS_DMA_NOWAIT, &sc->sc_dmam) != 0) {
		printf("Could not create DMA map\n");
		return 1;
	}
	SCA_DPRINTF(SCA_DEBUG_DMA, ("DMA map created\n"));

	err = bus_dmamap_load(sc->sc_dmat, sc->sc_dmam, sc->sc_dma_addr,
	    allocsize, NULL, BUS_DMA_NOWAIT);
	if (err != 0) {
		printf("Could not load DMA segment: %d\n", err);
		return 1;
	}
	SCA_DPRINTF(SCA_DEBUG_DMA, ("DMA map loaded\n"));

	return 0;
}
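
/*
 * The above is the standard four-step bus_dma sequence: allocate
 * DMA-safe memory (bus_dmamem_alloc), map it into kernel virtual
 * address space (bus_dmamem_map), create a map object
 * (bus_dmamap_create), and load the mapping (bus_dmamap_load) so the
 * device-visible physical address shows up in sc_dmam->dm_segs[].
 */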

/*
 * Take the memory allocated with sca_alloc_dma() and divide it among the
 * two ports.
 */
static void
sca_setup_dma_memory(struct sca_softc *sc)
{
	sca_port_t *scp0, *scp1;
	u_int8_t *vaddr0;
	u_int32_t paddr0;
	u_long addroff;

	/*
	 * keep only the low 24 bits of the physical address, since the
	 * upper 8 bits are programmed into the device at a different layer.
	 */
	paddr0 = (sc->sc_dmam->dm_segs[0].ds_addr & 0x00ffffff);
	vaddr0 = sc->sc_dma_addr;

	/*
	 * if we have only one port it gets the full range.  If we have
	 * two we need to do a little magic to divide things up.
	 *
	 * The descriptors will all end up in the front of the area, while
	 * the remainder of the buffer is used for transmit and receive
	 * data.
	 *
	 * -------------------- start of memory
	 *    tx desc port 0
	 *    rx desc port 0
	 *    tx desc port 1
	 *    rx desc port 1
	 *    tx buffer port 0
	 *    rx buffer port 0
	 *    tx buffer port 1
	 *    rx buffer port 1
	 * -------------------- end of memory
	 */
	scp0 = &sc->sc_ports[0];
	scp1 = &sc->sc_ports[1];

	scp0->txdesc_p = paddr0;
	scp0->txdesc = (sca_desc_t *)vaddr0;
	addroff = sizeof(sca_desc_t) * SCA_NtxBUFS;

	/*
	 * point to the range following the tx descriptors, and
	 * set the rx descriptors there.
	 */
	scp0->rxdesc_p = paddr0 + addroff;
	scp0->rxdesc = (sca_desc_t *)(vaddr0 + addroff);
	addroff += sizeof(sca_desc_t) * SCA_NrxBUFS;

	if (sc->sc_numports == 2) {
		scp1->txdesc_p = paddr0 + addroff;
		scp1->txdesc = (sca_desc_t *)(vaddr0 + addroff);
		addroff += sizeof(sca_desc_t) * SCA_NtxBUFS;

		scp1->rxdesc_p = paddr0 + addroff;
		scp1->rxdesc = (sca_desc_t *)(vaddr0 + addroff);
		addroff += sizeof(sca_desc_t) * SCA_NrxBUFS;
	}

	/*
	 * point to the memory following the descriptors, and set the
	 * transmit buffer there.
	 */
	scp0->txbuf_p = paddr0 + addroff;
	scp0->txbuf = vaddr0 + addroff;
	addroff += SCA_BSIZE * SCA_NtxBUFS;

	/*
	 * lastly, skip over the transmit buffer and set up pointers into
	 * the receive buffer.
	 */
	scp0->rxbuf_p = paddr0 + addroff;
	scp0->rxbuf = vaddr0 + addroff;
	addroff += SCA_BSIZE * SCA_NrxBUFS;

	if (sc->sc_numports == 2) {
		scp1->txbuf_p = paddr0 + addroff;
		scp1->txbuf = vaddr0 + addroff;
		addroff += SCA_BSIZE * SCA_NtxBUFS;

		scp1->rxbuf_p = paddr0 + addroff;
		scp1->rxbuf = vaddr0 + addroff;
		addroff += SCA_BSIZE * SCA_NrxBUFS;
	}

	/*
	 * as a consistency check, addroff should be equal to the allocation
	 * size.
	 */
	if (sc->sc_allocsize != addroff)
		printf("ERROR: sc_allocsize != addroff: %lu != %lu\n",
		    (u_long)sc->sc_allocsize, addroff);
}
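
/*
 * For scale: with the default 2 transmit and 20 receive buffers, each
 * port consumes 22 descriptors at the front of the region plus
 * 22 * 1504 = 33088 bytes of buffer space after them.
 */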

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(ifp, m, dst, rt0)
	struct ifnet *ifp;
	struct mbuf *m;
	struct sockaddr *dst;
	struct rtentry *rt0;
{
	int error;
	int s;
	u_int16_t protocol;
	hdlc_header_t *hdlc;
	struct ifqueue *ifq;
#ifdef SCA_USE_FASTQ
	struct ip *ip;
	sca_port_t *scp = ifp->if_softc;
	int highpri;
#endif

	error = 0;
	ifp->if_lastchange = time;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

	if (dst->sa_family != AF_INET) {
		error = EAFNOSUPPORT;
		goto bad;
	}

#ifdef SCA_USE_FASTQ
	highpri = 0;
#endif

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
	case AF_INET:
		protocol = HDLC_PROTOCOL_IP;

#ifdef SCA_USE_FASTQ
		ip = mtod(m, struct ip *);
		if ((ip->ip_tos & IPTOS_LOWDELAY) == IPTOS_LOWDELAY)
			highpri = 1;
#endif
		break;

	default:
		printf("%s: address family %d unsupported\n",
		    ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	if (M_LEADINGSPACE(m) < HDLC_HDRLEN) {
		m = m_prepend(m, HDLC_HDRLEN, M_DONTWAIT);
		if (m == NULL) {
			error = ENOBUFS;
			goto bad;
		}
		m->m_len = 0;
	} else {
		m->m_data -= HDLC_HDRLEN;
	}

	hdlc = mtod(m, hdlc_header_t *);
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->addr = CISCO_MULTICAST;
	else
		hdlc->addr = CISCO_UNICAST;
	hdlc->control = 0;
	hdlc->protocol = htons(protocol);
	m->m_len += HDLC_HDRLEN;
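
	/*
	 * The header built just above is Cisco-style HDLC framing: a
	 * one-byte station address (unicast or multicast), a one-byte
	 * control field that is always zero, and a two-byte protocol
	 * field carrying an ethertype-like value in network byte order.
	 */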

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	s = splnet();
#ifdef SCA_USE_FASTQ
	ifq = (highpri == 1 ? &scp->fastq : &ifp->if_snd);
#else
	ifq = &ifp->if_snd;
#endif
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		ifp->if_oerrors++;
		ifp->if_collisions++;
		error = ENOBUFS;
		splx(s);
		goto bad;
	}
	ifp->if_obytes += m->m_pkthdr.len;
	IF_ENQUEUE(ifq, m);

	ifp->if_lastchange = time;

	if (m->m_flags & M_MCAST)
		ifp->if_omcasts++;

	sca_start(ifp);
	splx(s);

	return (error);

bad:
	if (m)
		m_freem(m);
	return (error);
}

static int
sca_ioctl(ifp, cmd, addr)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t addr;
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)addr;
	ifa = (struct ifaddr *)addr;
	error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		if (ifa->ifa_addr->sa_family == AF_INET)
			sca_port_up(ifp->if_softc);
		else
			error = EAFNOSUPPORT;
		break;

	case SIOCSIFDSTADDR:
		if (ifa->ifa_addr->sa_family != AF_INET)
			error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifr == 0) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {

#ifdef INET
		case AF_INET:
			break;
#endif

		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if (ifr->ifr_flags & IFF_UP)
			sca_port_up(ifp->if_softc);
		else
			sca_port_down(ifp->if_softc);

		break;

	default:
		error = EINVAL;
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(ifp)
	struct ifnet *ifp;
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, *obuf;
	u_int32_t buf_p;
	int trigger_xmit;

	/*
	 * can't queue when we are full or transmitter is busy
	 */
	if ((scp->txinuse >= (SCA_NtxBUFS - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam,
	    0, sc->sc_allocsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	trigger_xmit = 0;

txloop:
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IF_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	desc = &scp->txdesc[scp->txcur];
	if (scp->txinuse != 0) {
		/*
		 * clear end-of-transfer on the previous descriptor and
		 * advance to the next free descriptor.
		 */
		desc->stat &= ~SCA_DESC_EOT;
		scp->txcur++;
		if (scp->txcur == SCA_NtxBUFS)
			scp->txcur = 0;
		desc = &scp->txdesc[scp->txcur];
	}
	buf = scp->txbuf + SCA_BSIZE * scp->txcur;
	obuf = buf;
	buf_p = scp->txbuf_p + SCA_BSIZE * scp->txcur;

	desc->bp = (u_int16_t)(buf_p & 0x0000ffff);
	desc->bpb = (u_int8_t)((buf_p & 0x00ff0000) >> 16);
	desc->stat = SCA_DESC_EOT | SCA_DESC_EOM;  /* end of frame and xfer */
	desc->len = 0;

	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			desc->len += m->m_len;
			if (desc->len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			bcopy(mtod(m, u_int8_t *), buf, m->m_len);
			buf += m->m_len;
		}
	}

	ifp->if_opackets++;

#if NBPFILTER > 0
	/*
	 * Pass packet to bpf if there is a listener.
	 */
	if (scp->sp_bpf)
		bpf_mtap(scp->sp_bpf, mb_head);
#endif

	m_freem(mb_head);

	/* txcur was already advanced above, before the copy */
	scp->txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->txinuse, scp->txcur));

	if (scp->txinuse < (SCA_NtxBUFS - 1))
		goto txloop;

start_xmit:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam,
	    0, sc->sc_allocsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);
}

static void
sca_watchdog(ifp)
	struct ifnet *ifp;
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int ret;

	ret = 0;  /* non-zero means we processed at least one interrupt */

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
		    isr0, isr1, isr2));

		/*
		 * check DMA interrupt
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
			    isr1 & 0x0f);
		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
			    (isr1 & 0xf0) >> 4);

		if (isr0)
			ret += sca_msci_intr(sc, isr0);

#if 0	/* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}
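
/*
 * The masks above encode the interrupt status layout: the low nibble
 * of ISR1 holds the four DMAC interrupt bits for port 0 and the high
 * nibble the same four bits for port 1.  Within each nibble the low
 * two bits belong to the receive channel and the high two bits to the
 * transmit channel, which is how sca_dmac_intr() splits its work.
 */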

static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & 0x0c) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
			    dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				    scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->txcur = 0;
				scp->txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				    scp->sp_if.if_xname,
				    dmac_read_2(scp, SCA_CDAL1),
				    dmac_read_2(scp, SCA_EDAL1),
				    dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->txcur = 0;
				scp->txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error.  It means that all data queued
			 * was transmitted, and we mark ourself as
			 * not in use and stop the watchdog timer.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("Transmit completed.\n"));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->txcur = 0;
				scp->txinuse = 0;

				/*
				 * check for more packets
				 */
				sca_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & 0x03) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("RX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
			    | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				    scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				    scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}

static int
sca_msci_intr(struct sca_softc *sc, u_int8_t isr)
{
	printf("Got msci interrupt XXX\n");

	return 0;
}

static void
sca_get_packets(sca_port_t *scp)
{
	int descidx;
	sca_desc_t *desc;
	u_int8_t *buf;

	bus_dmamap_sync(scp->sca->sc_dmat, scp->sca->sc_dmam,
	    0, scp->sca->sc_allocsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Loop while there are packets to receive.  After each is processed,
	 * call sca_frame_skip() to update the DMA registers to the new
	 * state.
	 */
	while (sca_frame_avail(scp, &descidx)) {
		desc = &scp->rxdesc[descidx];
		buf = scp->rxbuf + SCA_BSIZE * descidx;

		sca_frame_process(scp, desc, buf);
#if SCA_DEBUG_LEVEL > 0
		if (sca_debug & SCA_DEBUG_RXPKT)
			sca_frame_print(scp, desc, buf);
#endif
		sca_frame_skip(scp, descidx);
	}

	bus_dmamap_sync(scp->sca->sc_dmat, scp->sca->sc_dmam,
	    0, scp->sca->sc_allocsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 */
static int
sca_frame_avail(sca_port_t *scp, int *descindx)
{
	u_int16_t cda;
	int cdaidx;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;

	/*
	 * Read the current descriptor from the SCA.
	 */
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
	desc_p = cda - (u_int16_t)(scp->rxdesc_p & 0x0000ffff);
	cdaidx = desc_p / sizeof(sca_desc_t);

	if (cdaidx >= SCA_NrxBUFS)
		return 0;

	for (;;) {
		/*
		 * if the SCA is reading into the first descriptor, we somehow
		 * got this interrupt incorrectly.  Just return that there are
		 * no packets ready.
		 */
		if (cdaidx == scp->rxstart)
			return 0;

		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->rxdesc[scp->rxstart];

		rxstat = desc->stat;

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS)
			goto nextpkt;

		/*
		 * full packet?  Good.
		 */
		if (rxstat & SCA_DESC_EOM) {
			*descindx = scp->rxstart;
			return 1;
		}

		/*
		 * increment the rxstart address, since this frame is
		 * somehow damaged.  Skip over it in later calls.
		 * XXX This breaks multidescriptor receives, so each
		 * frame HAS to fit within one descriptor's buffer
		 * space now...
		 */
nextpkt:
		scp->rxstart++;
		if (scp->rxstart == SCA_NrxBUFS)
			scp->rxstart = 0;
	}

	return 0;
}

/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	hdlc_header_t *hdlc;
	cisco_pkt_t *cisco, *ncisco;
	u_int16_t len;
	struct mbuf *m;
	u_int8_t *nbuf;
	u_int32_t t = (time.tv_sec - boottime.tv_sec) * 1000;
	struct ifqueue *ifq;

	len = desc->len;

	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(hdlc_header_t))
		return;

#if NBPFILTER > 0
	if (scp->sp_bpf)
		bpf_tap(scp->sp_bpf, p, len);
#endif

	/*
	 * read and then strip off the HDLC information
	 */
	hdlc = (hdlc_header_t *)p;

	scp->sp_if.if_ipackets++;
	scp->sp_if.if_lastchange = time;

	switch (ntohs(hdlc->protocol)) {
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));

		m = sca_mbuf_alloc(p, len);
		if (m == NULL) {
			scp->sp_if.if_iqdrops++;
			return;
		}
		m->m_pkthdr.rcvif = &scp->sp_if;

		if (IF_QFULL(&ipintrq)) {
			IF_DROP(&ipintrq);
			scp->sp_if.if_ierrors++;
			scp->sp_if.if_iqdrops++;
			m_freem(m);
		} else {
			/*
			 * strip off the HDLC header and hand off to IP stack
			 */
			m->m_pkthdr.len -= HDLC_HDRLEN;
			m->m_data += HDLC_HDRLEN;
			m->m_len -= HDLC_HDRLEN;
			IF_ENQUEUE(&ipintrq, m);
			schednetisr(NETISR_IP);
		}

		break;

	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
		    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("short CISCO packet %d, wanted %d\n",
			    len, CISCO_PKT_LEN));
			return;
		}

		/*
		 * allocate an mbuf and copy the important bits of data
		 * into it.
		 */
		m = sca_mbuf_alloc(p, HDLC_HDRLEN + CISCO_PKT_LEN);
		if (m == NULL)
			return;

		nbuf = mtod(m, u_int8_t *);
		ncisco = (cisco_pkt_t *)(nbuf + HDLC_HDRLEN);
		m->m_pkthdr.rcvif = &scp->sp_if;

		cisco = (cisco_pkt_t *)(p + HDLC_HDRLEN);

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			m_freem(m);
			break;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			m_freem(m);
			break;

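		/*
		 * Keepalive exchange: the peer's par1 carries its own
		 * sequence number and par2 echoes the last sequence it
		 * heard from us.  Below we record the peer's sequence
		 * (cka_lastrx), bump ours (cka_lasttx), build the reply
		 * in the freshly copied mbuf, and push it out through
		 * the high-priority linkq.
		 */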
		case CISCO_KEEPALIVE_REQ:
			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received KA, mseq %d,"
			    " yseq %d, rel 0x%04x, t0"
			    " %04x, t1 %04x\n",
			    ntohl(cisco->par1), ntohl(cisco->par2),
			    ntohs(cisco->rel), ntohs(cisco->time0),
			    ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.
			 */
			ncisco->par2 = cisco->par1;
			ncisco->par1 = htonl(scp->cka_lasttx);
			ncisco->time0 = htons((u_int16_t)(t >> 16));
			ncisco->time1 = htons((u_int16_t)(t & 0x0000ffff));

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				m_freem(m);
				return;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			break;

		default:
			m_freem(m);
			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Unknown CISCO keepalive protocol 0x%04x\n",
			    ntohl(cisco->type)));
			return;
		}

		break;

	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
		    ("Unknown/unexpected ethertype 0x%04x\n",
		    ntohs(hdlc->protocol)));
	}
}

#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;

	printf("descriptor va %p: cp 0x%x bpb 0x%0x bp 0x%0x stat 0x%0x len %d\n",
	    desc, desc->cp, desc->bpb, desc->bp, desc->stat, desc->len);

	for (i = 0 ; i < desc->len ; i++) {
		if (nothing_yet == 1 && *p == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ", *p++);
	}

	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * skip all frames before the descriptor index "indx" -- we do this by
 * moving the rxstart pointer to the index following this one, and
 * setting the end descriptor to this index.
 */
static void
sca_frame_skip(sca_port_t *scp, int indx)
{
	u_int32_t desc_p;

	scp->rxstart++;
	if (scp->rxstart == SCA_NrxBUFS)
		scp->rxstart = 0;

	desc_p = scp->rxdesc_p + sizeof(sca_desc_t) * indx;
	dmac_write_2(scp, SCA_EDAL0,
	    (u_int16_t)(desc_p & 0x0000ffff));
}

/*
 * set a port to the "up" state
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;

	/*
	 * raise DTR
	 */
	sc->dtr_callback(sc->dtr_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS);

	/*
	 * enable interrupts
	 */
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->txinuse = 0;
	scp->txcur = 0;
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * set a port to the "down" state
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;

	/*
	 * lower DTR
	 */
	sc->dtr_callback(sc->dtr_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    msci_read_1(scp, SCA_CTL0) | SCA_CTL_RTS);

	/*
	 * disable interrupts
	 */
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	struct sca_softc *sc;
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	sc = scp->sca;

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->txinuse == 0)
		return;
	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = scp->txcur;
	enddesc++;
	if (enddesc == SCA_NtxBUFS)
		enddesc = 0;

	startdesc_p = scp->txdesc_p;
	enddesc_p = scp->txdesc_p + sizeof(sca_desc_t) * enddesc;

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
	    (u_int16_t)(startdesc_p & 0x0000ffff));

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}
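
/*
 * Summary of the start sequence above: CDAL1 is pointed at the first
 * transmit descriptor and EDAL1 one descriptor past the last one in
 * use, then the DE bit in DSR1 starts the DMAC, which runs until the
 * current descriptor address catches up with EDA and raises the EOT
 * interrupt handled in sca_dmac_intr().
 */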

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 */
static struct mbuf *
sca_mbuf_alloc(caddr_t p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL)
		bcopy(p, mtod(m, caddr_t), len);
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}