hd64570.c revision 1.6 1 /* $NetBSD: hd64570.c,v 1.6 1999/03/19 22:43:11 erh Exp $ */
2
3 /*
4 * Copyright (c) 1998 Vixie Enterprises
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of Vixie Enterprises nor the names
17 * of its contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
21 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
22 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 * DISCLAIMED. IN NO EVENT SHALL VIXIE ENTERPRISES OR
25 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
29 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * This software has been written for Vixie Enterprises by Michael Graff
35 * <explorer (at) flame.org>. To learn more about Vixie Enterprises, see
36 * ``http://www.vix.com''.
37 */
38
39 /*
40 * TODO:
41 *
42  *	o teach the receive logic about errors, and about long frames that
43  *        span more than one input buffer.  (Right now, receive/transmit is
44  *	  limited to one descriptor's buffer space, which is MTU + 4 bytes.
45  *	  This is currently 1504, which is large enough to hold the HDLC
46  *	  header and the packet itself.  Packets which are too long are
47  *	  silently dropped on transmit and silently dropped on receive.)
48 * o write code to handle the msci interrupts, needed only for CD
49 * and CTS changes.
50 * o consider switching back to a "queue tx with DMA active" model which
51 * should help sustain outgoing traffic
52 * o through clever use of bus_dma*() functions, it should be possible
53 * to map the mbuf's data area directly into a descriptor transmit
54 * buffer, removing the need to allocate extra memory. If, however,
55 * we run out of descriptors for this, we will need to then allocate
56 * one large mbuf, copy the fragmented chain into it, and put it onto
57 * a single descriptor.
58 * o use bus_dmamap_sync() with the right offset and lengths, rather
59 * than cheating and always sync'ing the whole region.
60 */
61
62 #include "bpfilter.h"
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/device.h>
67 #include <sys/mbuf.h>
68 #include <sys/socket.h>
69 #include <sys/sockio.h>
70 #include <sys/kernel.h>
71
72 #include <net/if.h>
73 #include <net/if_types.h>
74 #include <net/netisr.h>
75
76 #include <netinet/in.h>
77 #include <netinet/in_systm.h>
78 #include <netinet/in_var.h>
79 #include <netinet/ip.h>
80
81 #if NBPFILTER > 0
82 #include <net/bpf.h>
83 #endif
84
85 #include <machine/cpu.h>
86 #include <machine/bus.h>
87 #include <machine/intr.h>
88
89 #include <dev/pci/pcivar.h>
90 #include <dev/pci/pcireg.h>
91 #include <dev/pci/pcidevs.h>
92
93 #include <dev/ic/hd64570reg.h>
94 #include <dev/ic/hd64570var.h>
95
96 #define SCA_DEBUG_RX 0x0001
97 #define SCA_DEBUG_TX 0x0002
98 #define SCA_DEBUG_CISCO 0x0004
99 #define SCA_DEBUG_DMA 0x0008
100 #define SCA_DEBUG_RXPKT 0x0010
101 #define SCA_DEBUG_TXPKT 0x0020
102 #define SCA_DEBUG_INTR 0x0040
103
104 #if 0
105 #define SCA_DEBUG_LEVEL ( SCA_DEBUG_TX )
106 #else
107 #define SCA_DEBUG_LEVEL 0
108 #endif
109
110 u_int32_t sca_debug = SCA_DEBUG_LEVEL;
111
112 #if SCA_DEBUG_LEVEL > 0
113 #define SCA_DPRINTF(l, x) do { \
114 if ((l) & sca_debug) \
115 printf x;\
116 } while (0)
117 #else
118 #define SCA_DPRINTF(l, x)
119 #endif
120
121 #define SCA_MTU 1500 /* hard coded */
122
123 /*
124 * buffers per tx and rx channels, per port, and the size of each.
125 * Don't use these constants directly, as they are really only hints.
126 * Use the calculated values stored in struct sca_softc instead.
127 *
128 * Each must be at least 2, receive would be better at around 20 or so.
129 *
130 * XXX Due to a damned near impossible to track down bug, transmit buffers
131 * MUST be 2, no more, no less.
132 */
133 #ifndef SCA_NtxBUFS
134 #define SCA_NtxBUFS 2
135 #endif
136 #ifndef SCA_NrxBUFS
137 #define SCA_NrxBUFS 20
138 #endif
139 #ifndef SCA_BSIZE
140 #define SCA_BSIZE (SCA_MTU + 4) /* room for HDLC as well */
141 #endif
142
143 #if 0
144 #define SCA_USE_FASTQ /* use a split queue, one for fast traffic */
145 #endif
146
147 static inline void sca_write_1(struct sca_softc *, u_int, u_int8_t);
148 static inline void sca_write_2(struct sca_softc *, u_int, u_int16_t);
149 static inline u_int8_t sca_read_1(struct sca_softc *, u_int);
150 static inline u_int16_t sca_read_2(struct sca_softc *, u_int);
151
152 static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
153 static inline u_int8_t msci_read_1(sca_port_t *, u_int);
154
155 static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
156 static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
157 static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
158 static inline u_int16_t dmac_read_2(sca_port_t *, u_int);
159
160 static int sca_alloc_dma(struct sca_softc *);
161 static void sca_setup_dma_memory(struct sca_softc *);
162 static void sca_msci_init(struct sca_softc *, sca_port_t *);
163 static void sca_dmac_init(struct sca_softc *, sca_port_t *);
164 static void sca_dmac_rxinit(sca_port_t *);
165
166 static int sca_dmac_intr(sca_port_t *, u_int8_t);
167 static int sca_msci_intr(struct sca_softc *, u_int8_t);
168
169 static void sca_get_packets(sca_port_t *);
170 static void sca_frame_process(sca_port_t *, sca_desc_t *, u_int8_t *);
171 static int sca_frame_avail(sca_port_t *, int *);
172 static void sca_frame_skip(sca_port_t *, int);
173
174 static void sca_port_starttx(sca_port_t *);
175
176 static void sca_port_up(sca_port_t *);
177 static void sca_port_down(sca_port_t *);
178
179 static int sca_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
180 struct rtentry *));
181 static int sca_ioctl __P((struct ifnet *, u_long, caddr_t));
182 static void sca_start __P((struct ifnet *));
183 static void sca_watchdog __P((struct ifnet *));
184
185 static struct mbuf *sca_mbuf_alloc(caddr_t, u_int);
186
187 #if SCA_DEBUG_LEVEL > 0
188 static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
189 #endif
190
191 static inline void
192 sca_write_1(struct sca_softc *sc, u_int reg, u_int8_t val)
193 {
194 bus_space_write_1(sc->sc_iot, sc->sc_ioh, SCADDR(reg), val);
195 }
196
197 static inline void
198 sca_write_2(struct sca_softc *sc, u_int reg, u_int16_t val)
199 {
200 bus_space_write_2(sc->sc_iot, sc->sc_ioh, SCADDR(reg), val);
201 }
202
203 static inline u_int8_t
204 sca_read_1(struct sca_softc *sc, u_int reg)
205 {
206 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, SCADDR(reg));
207 }
208
209 static inline u_int16_t
210 sca_read_2(struct sca_softc *sc, u_int reg)
211 {
212 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, SCADDR(reg));
213 }
214
215 static inline void
216 msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
217 {
218 sca_write_1(scp->sca, scp->msci_off + reg, val);
219 }
220
221 static inline u_int8_t
222 msci_read_1(sca_port_t *scp, u_int reg)
223 {
224 return sca_read_1(scp->sca, scp->msci_off + reg);
225 }
226
227 static inline void
228 dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
229 {
230 sca_write_1(scp->sca, scp->dmac_off + reg, val);
231 }
232
233 static inline void
234 dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
235 {
236 sca_write_2(scp->sca, scp->dmac_off + reg, val);
237 }
238
239 static inline u_int8_t
240 dmac_read_1(sca_port_t *scp, u_int reg)
241 {
242 return sca_read_1(scp->sca, scp->dmac_off + reg);
243 }
244
245 static inline u_int16_t
246 dmac_read_2(sca_port_t *scp, u_int reg)
247 {
248 return sca_read_2(scp->sca, scp->dmac_off + reg);
249 }
250
251 int
252 sca_init(struct sca_softc *sc, u_int nports)
253 {
254 /*
255 * Do a little sanity check: check number of ports.
256 */
257 if (nports < 1 || nports > 2)
258 return 1;
259
260 /*
261 * remember the details
262 */
263 sc->sc_numports = nports;
264
265 /*
266 * allocate the memory and chop it into bits.
267 */
268 if (sca_alloc_dma(sc) != 0)
269 return 1;
270 sca_setup_dma_memory(sc);
271
272 /*
273 * disable DMA and MSCI interrupts
274 */
275 sca_write_1(sc, SCA_DMER, 0);
276 sca_write_1(sc, SCA_IER0, 0);
277 sca_write_1(sc, SCA_IER1, 0);
278 sca_write_1(sc, SCA_IER2, 0);
279
280 /*
281 * configure interrupt system
282 */
283 sca_write_1(sc, SCA_ITCR, 0); /* use ivr, no int ack */
284 sca_write_1(sc, SCA_IVR, 0x40);
285 sca_write_1(sc, SCA_IMVR, 0x40);
286
287 /*
288 * set wait control register to zero wait states
289 */
290 sca_write_1(sc, SCA_PABR0, 0);
291 sca_write_1(sc, SCA_PABR1, 0);
292 sca_write_1(sc, SCA_WCRL, 0);
293 sca_write_1(sc, SCA_WCRM, 0);
294 sca_write_1(sc, SCA_WCRH, 0);
295
296 /*
297 * disable DMA and reset status
298 */
299 sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);
300
301 /*
302 * disable transmit DMA for all channels
303 */
304 sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
305 sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
306 sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
307 sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
308 sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
309 sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
310 sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
311 sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
312
313 /*
314 * enable DMA based on channel enable flags for each channel
315 */
316 sca_write_1(sc, SCA_DMER, SCA_DMER_EN);
317
318 /*
319 * Should check to see if the chip is responding, but for now
320 * assume it is.
321 */
322 return 0;
323 }
324
325 /*
326 * initialize the port and attach it to the networking layer
327 */
328 void
329 sca_port_attach(struct sca_softc *sc, u_int port)
330 {
331 sca_port_t *scp = &sc->sc_ports[port];
332 struct ifnet *ifp;
333 static u_int ntwo_unit = 0;
334
335 scp->sca = sc; /* point back to the parent */
336
337 scp->sp_port = port;
338
339 if (port == 0) {
340 scp->msci_off = SCA_MSCI_OFF_0;
341 scp->dmac_off = SCA_DMAC_OFF_0;
342 if(sc->parent != NULL)
343 ntwo_unit=sc->parent->dv_unit * 2 + 0;
344 else
345 ntwo_unit = 0; /* XXX */
346 } else {
347 scp->msci_off = SCA_MSCI_OFF_1;
348 scp->dmac_off = SCA_DMAC_OFF_1;
349 if(sc->parent != NULL)
350 ntwo_unit=sc->parent->dv_unit * 2 + 1;
351 else
352 ntwo_unit = 1; /* XXX */
353 }
354
355 sca_msci_init(sc, scp);
356 sca_dmac_init(sc, scp);
357
358 /*
359 * attach to the network layer
360 */
361 ifp = &scp->sp_if;
362 sprintf(ifp->if_xname, "ntwo%d", ntwo_unit);
363 ifp->if_softc = scp;
364 ifp->if_mtu = SCA_MTU;
365 ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
366 ifp->if_type = IFT_OTHER; /* Should be HDLC, but... */
367 ifp->if_hdrlen = HDLC_HDRLEN;
368 ifp->if_ioctl = sca_ioctl;
369 ifp->if_output = sca_output;
370 ifp->if_watchdog = sca_watchdog;
371 ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
372 scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
373 #ifdef SCA_USE_FASTQ
374 scp->fastq.ifq_maxlen = IFQ_MAXLEN;
375 #endif
376 if_attach(ifp);
377
378 #if NBPFILTER > 0
379 bpfattach(&scp->sp_bpf, ifp, DLT_HDLC, HDLC_HDRLEN);
380 #endif
381
382 if (sc->parent == NULL)
383 printf("%s: port %d\n", ifp->if_xname, port);
384 else
385 printf("%s at %s port %d\n",
386 ifp->if_xname, sc->parent->dv_xname, port);
387
388 /*
389 * reset the last seen times on the cisco keepalive protocol
390 */
391 scp->cka_lasttx = time.tv_usec;
392 scp->cka_lastrx = 0;
393 }
394
395 /*
396 * initialize the port's MSCI
397 */
398 static void
399 sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
400 {
401 msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);
402 msci_write_1(scp, SCA_MD00,
403 ( SCA_MD0_CRC_1
404 | SCA_MD0_CRC_CCITT
405 | SCA_MD0_CRC_ENABLE
406 | SCA_MD0_MODE_HDLC));
407 msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
408 msci_write_1(scp, SCA_MD20,
409 (SCA_MD2_DUPLEX | SCA_MD2_NRZ));
410
411 /*
412 * reset the port (and lower RTS)
413 */
414 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
415 msci_write_1(scp, SCA_CTL0,
416 (SCA_CTL_IDLPAT | SCA_CTL_UDRNC | SCA_CTL_RTS));
417 msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
418
419 /*
420 * select the RX clock as the TX clock, and set for external
421 * clock source.
422 */
423 msci_write_1(scp, SCA_RXS0, 0);
424 msci_write_1(scp, SCA_TXS0, 0);
425
426 /*
427 * XXX don't pay attention to CTS or CD changes right now. I can't
428 * simulate one, and the transmitter will try to transmit even if
429 * CD isn't there anyway, so nothing bad SHOULD happen.
430 */
431 msci_write_1(scp, SCA_IE00, 0);
432 msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
433 msci_write_1(scp, SCA_IE20, 0);
434 msci_write_1(scp, SCA_FIE0, 0);
435
436 msci_write_1(scp, SCA_SA00, 0);
437 msci_write_1(scp, SCA_SA10, 0);
438
439 msci_write_1(scp, SCA_IDL0, 0x7e);
440
441 msci_write_1(scp, SCA_RRC0, 0x0e);
442 msci_write_1(scp, SCA_TRC00, 0x10);
443 msci_write_1(scp, SCA_TRC10, 0x1f);
444 }
445
446 /*
447 * Take the memory for the port and construct two circular linked lists of
448 * descriptors (one tx, one rx) and set the pointers in these descriptors
449 * to point to the buffer space for this port.
450 */
451 static void
452 sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
453 {
454 sca_desc_t *desc;
455 u_int32_t desc_p;
456 u_int32_t buf_p;
457 int i;
458
459 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam,
460 0, sc->sc_allocsize, BUS_DMASYNC_PREWRITE);
461
462 desc = scp->txdesc;
463 desc_p = scp->txdesc_p;
464 buf_p = scp->txbuf_p;
465 scp->txcur = 0;
466 scp->txinuse = 0;
467
468 for (i = 0 ; i < SCA_NtxBUFS ; i++) {
469 /*
470 * desc_p points to the physcial address of the NEXT desc
471 */
472 desc_p += sizeof(sca_desc_t);
473
474 desc->cp = desc_p & 0x0000ffff;
475 desc->bp = buf_p & 0x0000ffff;
476 desc->bpb = (buf_p & 0x00ff0000) >> 16;
477 desc->len = SCA_BSIZE;
478 desc->stat = 0;
479
480 desc++; /* point to the next descriptor */
481 buf_p += SCA_BSIZE;
482 }
483
484 /*
485 * "heal" the circular list by making the last entry point to the
486 * first.
487 */
488 desc--;
489 desc->cp = scp->txdesc_p & 0x0000ffff;
490
491 /*
492 * Now, initialize the transmit DMA logic
493 *
494 * CPB == chain pointer base address
495 */
496 dmac_write_1(scp, SCA_DSR1, 0);
497 dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
498 dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
499 dmac_write_1(scp, SCA_DIR1,
500 (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
501 dmac_write_1(scp, SCA_CPB1,
502 (u_int8_t)((scp->txdesc_p & 0x00ff0000) >> 16));
503
504 /*
505 * now, do the same thing for receive descriptors
506 */
507 desc = scp->rxdesc;
508 desc_p = scp->rxdesc_p;
509 buf_p = scp->rxbuf_p;
510 scp->rxstart = 0;
511 scp->rxend = SCA_NrxBUFS - 1;
512
513 for (i = 0 ; i < SCA_NrxBUFS ; i++) {
514 /*
515 * desc_p points to the physcial address of the NEXT desc
516 */
517 desc_p += sizeof(sca_desc_t);
518
519 desc->cp = desc_p & 0x0000ffff;
520 desc->bp = buf_p & 0x0000ffff;
521 desc->bpb = (buf_p & 0x00ff0000) >> 16;
522 desc->len = SCA_BSIZE;
523 desc->stat = 0x00;
524
525 desc++; /* point to the next descriptor */
526 buf_p += SCA_BSIZE;
527 }
528
529 /*
530 * "heal" the circular list by making the last entry point to the
531 * first.
532 */
533 desc--;
534 desc->cp = scp->rxdesc_p & 0x0000ffff;
535
536 sca_dmac_rxinit(scp);
537
538 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam,
539 0, sc->sc_allocsize, BUS_DMASYNC_POSTWRITE);
540 }
541
542 /*
543 * reset and reinitialize the receive DMA logic
544 */
545 static void
546 sca_dmac_rxinit(sca_port_t *scp)
547 {
548 /*
549 * ... and the receive DMA logic ...
550 */
551 dmac_write_1(scp, SCA_DSR0, 0); /* disable DMA */
552 dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);
553
554 dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
555 dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);
556
557 /*
558 * CPB == chain pointer base
559 * CDA == current descriptor address
560 * EDA == error descriptor address (overwrite position)
561 */
562 dmac_write_1(scp, SCA_CPB0,
563 (u_int8_t)((scp->rxdesc_p & 0x00ff0000) >> 16));
564 dmac_write_2(scp, SCA_CDAL0,
565 (u_int16_t)(scp->rxdesc_p & 0xffff));
566 dmac_write_2(scp, SCA_EDAL0,
567 (u_int16_t)(scp->rxdesc_p
568 + sizeof(sca_desc_t) * SCA_NrxBUFS));
569
570 /*
571 * enable receiver DMA
572 */
573 dmac_write_1(scp, SCA_DIR0,
574 (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
575 dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
576 }
577
578 static int
579 sca_alloc_dma(struct sca_softc *sc)
580 {
581 u_int allocsize;
582 int err;
583 int rsegs;
584 u_int bpp;
585
586 SCA_DPRINTF(SCA_DEBUG_DMA,
587 ("sizeof sca_desc_t: %d bytes\n", sizeof (sca_desc_t)));
588
589 bpp = sc->sc_numports * (SCA_NtxBUFS + SCA_NrxBUFS);
590
591 allocsize = bpp * (SCA_BSIZE + sizeof (sca_desc_t));
592
593 /*
594 * sanity checks:
595 *
596 * Check the total size of the data buffers, and so on. The total
597 * DMAable space needs to fit within a single 16M region, and the
598 * descriptors need to fit within a 64K region.
599 */
600 if (allocsize > 16 * 1024 * 1024)
601 return 1;
602 if (bpp * sizeof (sca_desc_t) > 64 * 1024)
603 return 1;
604
605 sc->sc_allocsize = allocsize;
606
607 /*
608 * Allocate one huge chunk of memory.
609 */
610 if (bus_dmamem_alloc(sc->sc_dmat,
611 allocsize,
612 SCA_DMA_ALIGNMENT,
613 SCA_DMA_BOUNDRY,
614 &sc->sc_seg, 1, &rsegs, BUS_DMA_NOWAIT) != 0) {
615 printf("Could not allocate DMA memory\n");
616 return 1;
617 }
618 SCA_DPRINTF(SCA_DEBUG_DMA,
619 ("DMA memory allocated: %d bytes\n", allocsize));
620
621 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_seg, 1, allocsize,
622 &sc->sc_dma_addr, BUS_DMA_NOWAIT) != 0) {
623 printf("Could not map DMA memory into kernel space\n");
624 return 1;
625 }
626 SCA_DPRINTF(SCA_DEBUG_DMA, ("DMA memory mapped\n"));
627
628 if (bus_dmamap_create(sc->sc_dmat, allocsize, 2,
629 allocsize, SCA_DMA_BOUNDRY,
630 BUS_DMA_NOWAIT, &sc->sc_dmam) != 0) {
631 printf("Could not create DMA map\n");
632 return 1;
633 }
634 SCA_DPRINTF(SCA_DEBUG_DMA, ("DMA map created\n"));
635
636 err = bus_dmamap_load(sc->sc_dmat, sc->sc_dmam, sc->sc_dma_addr,
637 allocsize, NULL, BUS_DMA_NOWAIT);
638 if (err != 0) {
639 printf("Could not load DMA segment: %d\n", err);
640 return 1;
641 }
642 SCA_DPRINTF(SCA_DEBUG_DMA, ("DMA map loaded\n"));
643
644 return 0;
645 }
646
647 /*
648 * Take the memory allocated with sca_alloc_dma() and divide it among the
649 * two ports.
650 */
651 static void
652 sca_setup_dma_memory(struct sca_softc *sc)
653 {
654 sca_port_t *scp0, *scp1;
655 u_int8_t *vaddr0;
656 u_int32_t paddr0;
657 u_long addroff;
658
659 /*
660 * remember the physical address to 24 bits only, since the upper
661 * 8 bits is programed into the device at a different layer.
662 */
663 paddr0 = (sc->sc_dmam->dm_segs[0].ds_addr & 0x00ffffff);
664 vaddr0 = sc->sc_dma_addr;
665
666 /*
667 * if we have only one port it gets the full range. If we have
668 * two we need to do a little magic to divide things up.
669 *
670 * The descriptors will all end up in the front of the area, while
671 * the remainder of the buffer is used for transmit and receive
672 * data.
673 *
674 * -------------------- start of memory
675 * tx desc port 0
676 * rx desc port 0
677 * tx desc port 1
678 * rx desc port 1
679 * tx buffer port 0
680 * rx buffer port 0
681 * tx buffer port 1
682 * rx buffer port 1
683 * -------------------- end of memory
684 */
685 scp0 = &sc->sc_ports[0];
686 scp1 = &sc->sc_ports[1];
687
688 scp0->txdesc_p = paddr0;
689 scp0->txdesc = (sca_desc_t *)vaddr0;
690 addroff = sizeof(sca_desc_t) * SCA_NtxBUFS;
691
692 /*
693 * point to the range following the tx descriptors, and
694 * set the rx descriptors there.
695 */
696 scp0->rxdesc_p = paddr0 + addroff;
697 scp0->rxdesc = (sca_desc_t *)(vaddr0 + addroff);
698 addroff += sizeof(sca_desc_t) * SCA_NrxBUFS;
699
700 if (sc->sc_numports == 2) {
701 scp1->txdesc_p = paddr0 + addroff;
702 scp1->txdesc = (sca_desc_t *)(vaddr0 + addroff);
703 addroff += sizeof(sca_desc_t) * SCA_NtxBUFS;
704
705 scp1->rxdesc_p = paddr0 + addroff;
706 scp1->rxdesc = (sca_desc_t *)(vaddr0 + addroff);
707 addroff += sizeof(sca_desc_t) * SCA_NrxBUFS;
708 }
709
710 /*
711 * point to the memory following the descriptors, and set the
712 * transmit buffer there.
713 */
714 scp0->txbuf_p = paddr0 + addroff;
715 scp0->txbuf = vaddr0 + addroff;
716 addroff += SCA_BSIZE * SCA_NtxBUFS;
717
718 /*
719 * lastly, skip over the transmit buffer and set up pointers into
720 * the receive buffer.
721 */
722 scp0->rxbuf_p = paddr0 + addroff;
723 scp0->rxbuf = vaddr0 + addroff;
724 addroff += SCA_BSIZE * SCA_NrxBUFS;
725
726 if (sc->sc_numports == 2) {
727 scp1->txbuf_p = paddr0 + addroff;
728 scp1->txbuf = vaddr0 + addroff;
729 addroff += SCA_BSIZE * SCA_NtxBUFS;
730
731 scp1->rxbuf_p = paddr0 + addroff;
732 scp1->rxbuf = vaddr0 + addroff;
733 addroff += SCA_BSIZE * SCA_NrxBUFS;
734 }
735
736 /*
737 * as a consistancy check, addroff should be equal to the allocation
738 * size.
739 */
740 if (sc->sc_allocsize != addroff)
741 printf("ERROR: sc_allocsize != addroff: %lu != %lu\n",
742 sc->sc_allocsize, addroff);
743 }
744
745 /*
746 * Queue the packet for our start routine to transmit
747 */
748 static int
749 sca_output(ifp, m, dst, rt0)
750 struct ifnet *ifp;
751 struct mbuf *m;
752 struct sockaddr *dst;
753 struct rtentry *rt0;
754 {
755 int error;
756 int s;
757 u_int16_t protocol;
758 hdlc_header_t *hdlc;
759 struct ifqueue *ifq;
760 #ifdef SCA_USE_FASTQ
761 struct ip *ip;
762 sca_port_t *scp = ifp->if_softc;
763 int highpri;
764 #endif
765
766 error = 0;
767 ifp->if_lastchange = time;
768
769 if ((ifp->if_flags & IFF_UP) != IFF_UP) {
770 error = ENETDOWN;
771 goto bad;
772 }
773
774 #ifdef SCA_USE_FASTQ
775 highpri = 0;
776 #endif
777
778 /*
779 * determine address family, and priority for this packet
780 */
781 switch (dst->sa_family) {
782 case AF_INET:
783 protocol = HDLC_PROTOCOL_IP;
784
785 #ifdef SCA_USE_FASTQ
786 ip = mtod(m, struct ip *);
787 if ((ip->ip_tos & IPTOS_LOWDELAY) == IPTOS_LOWDELAY)
788 highpri = 1;
789 #endif
790 break;
791
792 default:
793 printf("%s: address family %d unsupported\n",
794 ifp->if_xname, dst->sa_family);
795 error = EAFNOSUPPORT;
796 goto bad;
797 }
798
799 if (M_LEADINGSPACE(m) < HDLC_HDRLEN) {
800 m = m_prepend(m, HDLC_HDRLEN, M_DONTWAIT);
801 if (m == NULL) {
802 error = ENOBUFS;
803 goto bad;
804 }
805 m->m_len = 0;
806 } else {
807 m->m_data -= HDLC_HDRLEN;
808 }
809
810 hdlc = mtod(m, hdlc_header_t *);
811 if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
812 hdlc->addr = CISCO_MULTICAST;
813 else
814 hdlc->addr = CISCO_UNICAST;
815 hdlc->control = 0;
816 hdlc->protocol = htons(protocol);
817 m->m_len += HDLC_HDRLEN;
818
819 /*
820 * queue the packet. If interactive, use the fast queue.
821 */
822 s = splnet();
823 #ifdef SCA_USE_FASTQ
824 ifq = (highpri == 1 ? &scp->fastq : &ifp->if_snd);
825 #else
826 ifq = &ifp->if_snd;
827 #endif
828 if (IF_QFULL(ifq)) {
829 IF_DROP(ifq);
830 ifp->if_oerrors++;
831 ifp->if_collisions++;
832 error = ENOBUFS;
833 splx(s);
834 goto bad;
835 }
836 ifp->if_obytes += m->m_pkthdr.len;
837 IF_ENQUEUE(ifq, m);
838
839 ifp->if_lastchange = time;
840
841 if (m->m_flags & M_MCAST)
842 ifp->if_omcasts++;
843
844 sca_start(ifp);
845 splx(s);
846
847 return (error);
848
849 bad:
850 if (m)
851 m_freem(m);
852 return (error);
853 }
854
855 static int
856 sca_ioctl(ifp, cmd, addr)
857 struct ifnet *ifp;
858 u_long cmd;
859 caddr_t addr;
860 {
861 struct ifreq *ifr;
862 struct ifaddr *ifa;
863 int error;
864 int s;
865
866 s = splnet();
867
868 ifr = (struct ifreq *)addr;
869 ifa = (struct ifaddr *)addr;
870 error = 0;
871
872 switch (cmd) {
873 case SIOCSIFADDR:
874 if (ifa->ifa_addr->sa_family == AF_INET)
875 sca_port_up(ifp->if_softc);
876 else
877 error = EAFNOSUPPORT;
878 break;
879
880 case SIOCSIFDSTADDR:
881 if (ifa->ifa_addr->sa_family != AF_INET)
882 error = EAFNOSUPPORT;
883 break;
884
885 case SIOCADDMULTI:
886 case SIOCDELMULTI:
887 if (ifr == 0) {
888 error = EAFNOSUPPORT; /* XXX */
889 break;
890 }
891 switch (ifr->ifr_addr.sa_family) {
892
893 #ifdef INET
894 case AF_INET:
895 break;
896 #endif
897
898 default:
899 error = EAFNOSUPPORT;
900 break;
901 }
902 break;
903
904 case SIOCSIFFLAGS:
905 if (ifr->ifr_flags & IFF_UP)
906 sca_port_up(ifp->if_softc);
907 else
908 sca_port_down(ifp->if_softc);
909
910 break;
911
912 default:
913 error = EINVAL;
914 }
915
916 splx(s);
917 return error;
918 }
919
920 /*
921 * start packet transmission on the interface
922 *
923 * MUST BE CALLED AT splnet()
924 */
925 static void
926 sca_start(ifp)
927 struct ifnet *ifp;
928 {
929 sca_port_t *scp = ifp->if_softc;
930 struct sca_softc *sc = scp->sca;
931 struct mbuf *m, *mb_head;
932 sca_desc_t *desc;
933 u_int8_t *buf;
934 u_int32_t buf_p;
935 int nexttx;
936 int trigger_xmit;
937
938 /*
939 * can't queue when we are full or transmitter is busy
940 */
941 if ((scp->txinuse >= (SCA_NtxBUFS - 1))
942 || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
943 return;
944
945 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam,
946 0, sc->sc_allocsize,
947 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
948
949 trigger_xmit = 0;
950
951 txloop:
952 IF_DEQUEUE(&scp->linkq, mb_head);
953 if (mb_head == NULL)
954 #ifdef SCA_USE_FASTQ
955 IF_DEQUEUE(&scp->fastq, mb_head);
956 if (mb_head == NULL)
957 #endif
958 IF_DEQUEUE(&ifp->if_snd, mb_head);
959 if (mb_head == NULL)
960 goto start_xmit;
961
962 if (scp->txinuse != 0) {
963 /* Kill EOT interrupts on the previous descriptor. */
964 desc = &scp->txdesc[scp->txcur];
965 desc->stat &= ~SCA_DESC_EOT;
966
967 /* Figure out what the next free descriptor is. */
968 if ((scp->txcur + 1) == SCA_NtxBUFS)
969 nexttx = 0;
970 else
971 nexttx = scp->txcur + 1;
972 } else
973 nexttx = 0;
974
975 desc = &scp->txdesc[nexttx];
976 buf = scp->txbuf + SCA_BSIZE * nexttx;
977 buf_p = scp->txbuf_p + SCA_BSIZE * nexttx;
978
979 desc->bp = (u_int16_t)(buf_p & 0x0000ffff);
980 desc->bpb = (u_int8_t)((buf_p & 0x00ff0000) >> 16);
981 desc->stat = SCA_DESC_EOT | SCA_DESC_EOM; /* end of frame and xfer */
982 desc->len = 0;
983
984 /*
985 * Run through the chain, copying data into the descriptor as we
986 * go. If it won't fit in one transmission block, drop the packet.
987 * No, this isn't nice, but most of the time it _will_ fit.
988 */
989 for (m = mb_head ; m != NULL ; m = m->m_next) {
990 if (m->m_len != 0) {
991 desc->len += m->m_len;
992 if (desc->len > SCA_BSIZE) {
993 m_freem(mb_head);
994 goto txloop;
995 }
996 bcopy(mtod(m, u_int8_t *), buf, m->m_len);
997 buf += m->m_len;
998 }
999 }
1000
1001 ifp->if_opackets++;
1002
1003 #if NBPFILTER > 0
1004 /*
1005 * Pass packet to bpf if there is a listener.
1006 */
1007 if (scp->sp_bpf)
1008 bpf_mtap(scp->sp_bpf, mb_head);
1009 #endif
1010
1011 m_freem(mb_head);
1012
1013 if (scp->txinuse != 0) {
1014 scp->txcur++;
1015 if (scp->txcur == SCA_NtxBUFS)
1016 scp->txcur = 0;
1017 }
1018 scp->txinuse++;
1019 trigger_xmit = 1;
1020
1021 SCA_DPRINTF(SCA_DEBUG_TX,
1022 ("TX: inuse %d index %d\n", scp->txinuse, scp->txcur));
1023
1024 if (scp->txinuse < (SCA_NtxBUFS - 1))
1025 goto txloop;
1026
1027 start_xmit:
1028 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam,
1029 0, sc->sc_allocsize,
1030 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1031
1032 if (trigger_xmit != 0)
1033 sca_port_starttx(scp);
1034 }
1035
1036 static void
1037 sca_watchdog(ifp)
1038 struct ifnet *ifp;
1039 {
1040 }
1041
1042 int
1043 sca_hardintr(struct sca_softc *sc)
1044 {
1045 u_int8_t isr0, isr1, isr2;
1046 int ret;
1047
1048 ret = 0; /* non-zero means we processed at least one interrupt */
1049
1050 while (1) {
1051 /*
1052 * read SCA interrupts
1053 */
1054 isr0 = sca_read_1(sc, SCA_ISR0);
1055 isr1 = sca_read_1(sc, SCA_ISR1);
1056 isr2 = sca_read_1(sc, SCA_ISR2);
1057
1058 if (isr0 == 0 && isr1 == 0 && isr2 == 0)
1059 break;
1060
1061 SCA_DPRINTF(SCA_DEBUG_INTR,
1062 ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
1063 isr0, isr1, isr2));
1064
1065 /*
1066 * check DMA interrupt
1067 */
1068 if (isr1 & 0x0f)
1069 ret += sca_dmac_intr(&sc->sc_ports[0],
1070 isr1 & 0x0f);
1071 if (isr1 & 0xf0)
1072 ret += sca_dmac_intr(&sc->sc_ports[1],
1073 (isr1 & 0xf0) >> 4);
1074
1075 if (isr0)
1076 ret += sca_msci_intr(sc, isr0);
1077
1078 #if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
1079 if (isr2)
1080 ret += sca_timer_intr(sc, isr2);
1081 #endif
1082 }
1083
1084 return (ret);
1085 }
1086
1087 static int
1088 sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
1089 {
1090 u_int8_t dsr;
1091 int ret;
1092
1093 ret = 0;
1094
1095 /*
1096 * Check transmit channel
1097 */
1098 if (isr & 0x0c) {
1099 SCA_DPRINTF(SCA_DEBUG_INTR,
1100 ("TX INTERRUPT port %d\n", scp->sp_port));
1101
1102 dsr = 1;
1103 while (dsr != 0) {
1104 ret++;
1105 /*
1106 * reset interrupt
1107 */
1108 dsr = dmac_read_1(scp, SCA_DSR1);
1109 dmac_write_1(scp, SCA_DSR1,
1110 dsr | SCA_DSR_DEWD);
1111
1112 /*
1113 * filter out the bits we don't care about
1114 */
1115 dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
1116 if (dsr == 0)
1117 break;
1118
1119 /*
1120 * check for counter overflow
1121 */
1122 if (dsr & SCA_DSR_COF) {
1123 printf("%s: TXDMA counter overflow\n",
1124 scp->sp_if.if_xname);
1125
1126 scp->sp_if.if_flags &= ~IFF_OACTIVE;
1127 scp->txcur = 0;
1128 scp->txinuse = 0;
1129 }
1130
1131 /*
1132 * check for buffer overflow
1133 */
1134 if (dsr & SCA_DSR_BOF) {
1135 printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
1136 scp->sp_if.if_xname,
1137 dmac_read_2(scp, SCA_CDAL1),
1138 dmac_read_2(scp, SCA_EDAL1),
1139 dmac_read_1(scp, SCA_CPB1));
1140
1141 /*
1142 * Yikes. Arrange for a full
1143 * transmitter restart.
1144 */
1145 scp->sp_if.if_flags &= ~IFF_OACTIVE;
1146 scp->txcur = 0;
1147 scp->txinuse = 0;
1148 }
1149
1150 /*
1151 * check for end of transfer, which is not
1152 * an error. It means that all data queued
1153 * was transmitted, and we mark ourself as
1154 * not in use and stop the watchdog timer.
1155 */
1156 if (dsr & SCA_DSR_EOT) {
1157 SCA_DPRINTF(SCA_DEBUG_TX,
1158 ("Transmit completed.\n"));
1159
1160 scp->sp_if.if_flags &= ~IFF_OACTIVE;
1161 scp->txcur = 0;
1162 scp->txinuse = 0;
1163
1164 /*
1165 * check for more packets
1166 */
1167 sca_start(&scp->sp_if);
1168 }
1169 }
1170 }
1171 /*
1172 * receive channel check
1173 */
1174 if (isr & 0x03) {
1175 SCA_DPRINTF(SCA_DEBUG_INTR,
1176 ("RX INTERRUPT port %d\n", mch));
1177
1178 dsr = 1;
1179 while (dsr != 0) {
1180 ret++;
1181
1182 dsr = dmac_read_1(scp, SCA_DSR0);
1183 dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);
1184
1185 /*
1186 * filter out the bits we don't care about
1187 */
1188 dsr &= (SCA_DSR_EOM | SCA_DSR_COF
1189 | SCA_DSR_BOF | SCA_DSR_EOT);
1190 if (dsr == 0)
1191 break;
1192
1193 /*
1194 * End of frame
1195 */
1196 if (dsr & SCA_DSR_EOM) {
1197 SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));
1198
1199 sca_get_packets(scp);
1200 }
1201
1202 /*
1203 * check for counter overflow
1204 */
1205 if (dsr & SCA_DSR_COF) {
1206 printf("%s: RXDMA counter overflow\n",
1207 scp->sp_if.if_xname);
1208
1209 sca_dmac_rxinit(scp);
1210 }
1211
1212 /*
1213 * check for end of transfer, which means we
1214 * ran out of descriptors to receive into.
1215 * This means the line is much faster than
1216 * we can handle.
1217 */
1218 if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
1219 printf("%s: RXDMA buffer overflow\n",
1220 scp->sp_if.if_xname);
1221
1222 sca_dmac_rxinit(scp);
1223 }
1224 }
1225 }
1226
1227 return ret;
1228 }
1229
1230 static int
1231 sca_msci_intr(struct sca_softc *sc, u_int8_t isr)
1232 {
1233 printf("Got msci interrupt XXX\n");
1234
1235 return 0;
1236 }
1237
1238 static void
1239 sca_get_packets(sca_port_t *scp)
1240 {
1241 int descidx;
1242 sca_desc_t *desc;
1243 u_int8_t *buf;
1244
1245 bus_dmamap_sync(scp->sca->sc_dmat, scp->sca->sc_dmam,
1246 0, scp->sca->sc_allocsize,
1247 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1248
1249 /*
1250 * Loop while there are packets to receive. After each is processed,
1251 * call sca_frame_skip() to update the DMA registers to the new
1252 * state.
1253 */
1254 while (sca_frame_avail(scp, &descidx)) {
1255 desc = &scp->rxdesc[descidx];
1256 buf = scp->rxbuf + SCA_BSIZE * descidx;
1257
1258 sca_frame_process(scp, desc, buf);
1259 #if SCA_DEBUG_LEVEL > 0
1260 if (sca_debug & SCA_DEBUG_RXPKT)
1261 sca_frame_print(scp, desc, buf);
1262 #endif
1263 sca_frame_skip(scp, descidx);
1264 }
1265
1266 bus_dmamap_sync(scp->sca->sc_dmat, scp->sca->sc_dmam,
1267 0, scp->sca->sc_allocsize,
1268 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1269 }
1270
1271 /*
1272 * Starting with the first descriptor we wanted to read into, up to but
1273 * not including the current SCA read descriptor, look for a packet.
1274 */
1275 static int
1276 sca_frame_avail(sca_port_t *scp, int *descindx)
1277 {
1278 u_int16_t cda;
1279 int cdaidx;
1280 u_int32_t desc_p; /* physical address (lower 16 bits) */
1281 sca_desc_t *desc;
1282 u_int8_t rxstat;
1283
1284 /*
1285 * Read the current descriptor from the SCA.
1286 */
1287 cda = dmac_read_2(scp, SCA_CDAL0);
1288
1289 /*
1290 * calculate the index of the current descriptor
1291 */
1292 desc_p = cda - (u_int16_t)(scp->rxdesc_p & 0x0000ffff);
1293 cdaidx = desc_p / sizeof(sca_desc_t);
1294
1295 if (cdaidx >= SCA_NrxBUFS)
1296 return 0;
1297
1298 for (;;) {
1299 /*
1300 * if the SCA is reading into the first descriptor, we somehow
1301 * got this interrupt incorrectly. Just return that there are
1302 * no packets ready.
1303 */
1304 if (cdaidx == scp->rxstart)
1305 return 0;
1306
1307 /*
1308 * We might have a valid descriptor. Set up a pointer
1309 * to the kva address for it so we can more easily examine
1310 * the contents.
1311 */
1312 desc = &scp->rxdesc[scp->rxstart];
1313
1314 rxstat = desc->stat;
1315
1316 /*
1317 * check for errors
1318 */
1319 if (rxstat & SCA_DESC_ERRORS)
1320 goto nextpkt;
1321
1322 /*
1323 * full packet? Good.
1324 */
1325 if (rxstat & SCA_DESC_EOM) {
1326 *descindx = scp->rxstart;
1327 return 1;
1328 }
1329
1330 /*
1331 * increment the rxstart address, since this frame is
1332 * somehow damaged. Skip over it in later calls.
1333 * XXX This breaks multidescriptor receives, so each
1334 * frame HAS to fit within one descriptor's buffer
1335 * space now...
1336 */
1337 nextpkt:
1338 scp->rxstart++;
1339 if (scp->rxstart == SCA_NrxBUFS)
1340 scp->rxstart = 0;
1341 }
1342
1343 return 0;
1344 }
1345
1346 /*
1347 * Pass the packet up to the kernel if it is a packet we want to pay
1348 * attention to.
1349 *
1350 * MUST BE CALLED AT splnet()
1351 */
1352 static void
1353 sca_frame_process(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
1354 {
1355 hdlc_header_t *hdlc;
1356 cisco_pkt_t *cisco, *ncisco;
1357 u_int16_t len;
1358 struct mbuf *m;
1359 u_int8_t *nbuf;
1360 u_int32_t t = (time.tv_sec - boottime.tv_sec) * 1000;
1361 struct ifqueue *ifq;
1362
1363 len = desc->len;
1364
1365 /*
1366 * skip packets that are too short
1367 */
1368 if (len < sizeof(hdlc_header_t))
1369 return;
1370
1371 #if NBPFILTER > 0
1372 if (scp->sp_bpf)
1373 bpf_tap(scp->sp_bpf, p, len);
1374 #endif
1375
1376 /*
1377 * read and then strip off the HDLC information
1378 */
1379 hdlc = (hdlc_header_t *)p;
1380
1381 scp->sp_if.if_ipackets++;
1382 scp->sp_if.if_lastchange = time;
1383
1384 switch (ntohs(hdlc->protocol)) {
1385 case HDLC_PROTOCOL_IP:
1386 SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
1387
1388 m = sca_mbuf_alloc(p, len);
1389 if (m == NULL) {
1390 scp->sp_if.if_iqdrops++;
1391 return;
1392 }
1393 m->m_pkthdr.rcvif = &scp->sp_if;
1394
1395 if (IF_QFULL(&ipintrq)) {
1396 IF_DROP(&ipintrq);
1397 scp->sp_if.if_ierrors++;
1398 scp->sp_if.if_iqdrops++;
1399 m_freem(m);
1400 } else {
1401 /*
1402 * strip off the HDLC header and hand off to IP stack
1403 */
1404 m->m_pkthdr.len -= HDLC_HDRLEN;
1405 m->m_data += HDLC_HDRLEN;
1406 m->m_len -= HDLC_HDRLEN;
1407 IF_ENQUEUE(&ipintrq, m);
1408 schednetisr(NETISR_IP);
1409 }
1410
1411 break;
1412
1413 case CISCO_KEEPALIVE:
1414 SCA_DPRINTF(SCA_DEBUG_CISCO,
1415 ("Received CISCO keepalive packet\n"));
1416
1417 if (len < CISCO_PKT_LEN) {
1418 SCA_DPRINTF(SCA_DEBUG_CISCO,
1419 ("short CISCO packet %d, wanted %d\n",
1420 len, CISCO_PKT_LEN));
1421 return;
1422 }
1423
1424 /*
1425 * allocate an mbuf and copy the important bits of data
1426 * into it.
1427 */
1428 m = sca_mbuf_alloc(p, HDLC_HDRLEN + CISCO_PKT_LEN);
1429 if (m == NULL)
1430 return;
1431
1432 nbuf = mtod(m, u_int8_t *);
1433 ncisco = (cisco_pkt_t *)(nbuf + HDLC_HDRLEN);
1434 m->m_pkthdr.rcvif = &scp->sp_if;
1435
1436 cisco = (cisco_pkt_t *)(p + HDLC_HDRLEN);
1437
1438 switch (ntohl(cisco->type)) {
1439 case CISCO_ADDR_REQ:
1440 printf("Got CISCO addr_req, ignoring\n");
1441 m_freem(m);
1442 break;
1443
1444 case CISCO_ADDR_REPLY:
1445 printf("Got CISCO addr_reply, ignoring\n");
1446 m_freem(m);
1447 break;
1448
1449 case CISCO_KEEPALIVE_REQ:
1450 SCA_DPRINTF(SCA_DEBUG_CISCO,
1451 ("Received KA, mseq %d,"
1452 " yseq %d, rel 0x%04x, t0"
1453 " %04x, t1 %04x\n",
1454 ntohl(cisco->par1), ntohl(cisco->par2),
1455 ntohs(cisco->rel), ntohs(cisco->time0),
1456 ntohs(cisco->time1)));
1457
1458 scp->cka_lastrx = ntohl(cisco->par1);
1459 scp->cka_lasttx++;
1460
1461 /*
1462 * schedule the transmit right here.
1463 */
1464 ncisco->par2 = cisco->par1;
1465 ncisco->par1 = htonl(scp->cka_lasttx);
1466 ncisco->time0 = htons((u_int16_t)(t >> 16));
1467 ncisco->time1 = htons((u_int16_t)(t & 0x0000ffff));
1468
1469 ifq = &scp->linkq;
1470 if (IF_QFULL(ifq)) {
1471 IF_DROP(ifq);
1472 m_freem(m);
1473 return;
1474 }
1475 IF_ENQUEUE(ifq, m);
1476
1477 sca_start(&scp->sp_if);
1478
1479 break;
1480
1481 default:
1482 m_freem(m);
1483 SCA_DPRINTF(SCA_DEBUG_CISCO,
1484 ("Unknown CISCO keepalive protocol 0x%04x\n",
1485 ntohl(cisco->type)));
1486 return;
1487 }
1488
1489 break;
1490
1491 default:
1492 SCA_DPRINTF(SCA_DEBUG_RX,
1493 ("Unknown/unexpected ethertype 0x%04x\n",
1494 ntohs(hdlc->protocol)));
1495 }
1496 }
1497
1498 #if SCA_DEBUG_LEVEL > 0
1499 /*
1500 * do a hex dump of the packet received into descriptor "desc" with
1501 * data buffer "p"
1502 */
1503 static void
1504 sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
1505 {
1506 int i;
1507 int nothing_yet = 1;
1508
1509 printf("descriptor va %p: cp 0x%x bpb 0x%0x bp 0x%0x stat 0x%0x len %d\n",
1510 desc, desc->cp, desc->bpb, desc->bp, desc->stat, desc->len);
1511
1512 for (i = 0 ; i < desc->len ; i++) {
1513 if (nothing_yet == 1 && *p == 0) {
1514 p++;
1515 continue;
1516 }
1517 nothing_yet = 0;
1518 if (i % 16 == 0)
1519 printf("\n");
1520 printf("%02x ", *p++);
1521 }
1522
1523 if (i % 16 != 1)
1524 printf("\n");
1525 }
1526 #endif
1527
1528 /*
1529 * skip all frames before the descriptor index "indx" -- we do this by
1530 * moving the rxstart pointer to the index following this one, and
1531 * setting the end descriptor to this index.
1532 */
1533 static void
1534 sca_frame_skip(sca_port_t *scp, int indx)
1535 {
1536 u_int32_t desc_p;
1537
1538 scp->rxstart++;
1539 if (scp->rxstart == SCA_NrxBUFS)
1540 scp->rxstart = 0;
1541
1542 desc_p = scp->rxdesc_p * sizeof(sca_desc_t) * indx;
1543 dmac_write_2(scp, SCA_EDAL0,
1544 (u_int16_t)(desc_p & 0x0000ffff));
1545 }
1546
1547 /*
1548 * set a port to the "up" state
1549 */
1550 static void
1551 sca_port_up(sca_port_t *scp)
1552 {
1553 struct sca_softc *sc = scp->sca;
1554
1555 /*
1556 * reset things
1557 */
1558 #if 0
1559 msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
1560 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
1561 #endif
1562 /*
1563 * clear in-use flag
1564 */
1565 scp->sp_if.if_flags &= ~IFF_OACTIVE;
1566
1567 /*
1568 * raise DTR
1569 */
1570 sc->dtr_callback(sc->dtr_aux, scp->sp_port, 1);
1571
1572 /*
1573 * raise RTS
1574 */
1575 msci_write_1(scp, SCA_CTL0,
1576 msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS);
1577
1578 /*
1579 * enable interrupts
1580 */
1581 if (scp->sp_port == 0) {
1582 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
1583 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
1584 } else {
1585 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
1586 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
1587 }
1588
1589 /*
1590 * enable transmit and receive
1591 */
1592 msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
1593 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);
1594
1595 /*
1596 * reset internal state
1597 */
1598 scp->txinuse = 0;
1599 scp->txcur = 0;
1600 scp->cka_lasttx = time.tv_usec;
1601 scp->cka_lastrx = 0;
1602 }
1603
1604 /*
1605 * set a port to the "down" state
1606 */
1607 static void
1608 sca_port_down(sca_port_t *scp)
1609 {
1610 struct sca_softc *sc = scp->sca;
1611
1612 /*
1613 * lower DTR
1614 */
1615 sc->dtr_callback(sc->dtr_aux, scp->sp_port, 0);
1616
1617 /*
1618 * lower RTS
1619 */
1620 msci_write_1(scp, SCA_CTL0,
1621 msci_read_1(scp, SCA_CTL0) | SCA_CTL_RTS);
1622
1623 /*
1624 * disable interrupts
1625 */
1626 if (scp->sp_port == 0) {
1627 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
1628 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
1629 } else {
1630 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
1631 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
1632 }
1633
1634 /*
1635 * disable transmit and receive
1636 */
1637 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
1638 msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);
1639
1640 /*
1641 * no, we're not in use anymore
1642 */
1643 scp->sp_if.if_flags &= ~IFF_OACTIVE;
1644 }
1645
1646 /*
1647 * disable all DMA and interrupts for all ports at once.
1648 */
1649 void
1650 sca_shutdown(struct sca_softc *sca)
1651 {
1652 /*
1653 * disable DMA and interrupts
1654 */
1655 sca_write_1(sca, SCA_DMER, 0);
1656 sca_write_1(sca, SCA_IER0, 0);
1657 sca_write_1(sca, SCA_IER1, 0);
1658 }
1659
1660 /*
1661 * If there are packets to transmit, start the transmit DMA logic.
1662 */
1663 static void
1664 sca_port_starttx(sca_port_t *scp)
1665 {
1666 struct sca_softc *sc;
1667 u_int32_t startdesc_p, enddesc_p;
1668 int enddesc;
1669
1670 sc = scp->sca;
1671
1672 if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
1673 || scp->txinuse == 0)
1674 return;
1675 scp->sp_if.if_flags |= IFF_OACTIVE;
1676
1677 /*
1678 * We have something to do, since we have at least one packet
1679 * waiting, and we are not already marked as active.
1680 */
1681 enddesc = scp->txcur;
1682 enddesc++;
1683 if (enddesc == SCA_NtxBUFS)
1684 enddesc = 0;
1685
1686 startdesc_p = scp->txdesc_p;
1687 enddesc_p = scp->txdesc_p + sizeof(sca_desc_t) * enddesc;
1688
1689 dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
1690 dmac_write_2(scp, SCA_CDAL1,
1691 (u_int16_t)(startdesc_p & 0x0000ffff));
1692
1693 /*
1694 * enable the DMA
1695 */
1696 dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
1697 }
1698
1699 /*
1700 * allocate an mbuf at least long enough to hold "len" bytes.
1701 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
1702 * otherwise let the caller handle copying the data in.
1703 */
1704 static struct mbuf *
1705 sca_mbuf_alloc(caddr_t p, u_int len)
1706 {
1707 struct mbuf *m;
1708
1709 /*
1710 * allocate an mbuf and copy the important bits of data
1711 * into it. If the packet won't fit in the header,
1712 * allocate a cluster for it and store it there.
1713 */
1714 MGETHDR(m, M_DONTWAIT, MT_DATA);
1715 if (m == NULL)
1716 return NULL;
1717 if (len > MHLEN) {
1718 if (len > MCLBYTES) {
1719 m_freem(m);
1720 return NULL;
1721 }
1722 MCLGET(m, M_DONTWAIT);
1723 if ((m->m_flags & M_EXT) == 0) {
1724 m_freem(m);
1725 return NULL;
1726 }
1727 }
1728 if (p != NULL)
1729 bcopy(p, mtod(m, caddr_t), len);
1730 m->m_len = len;
1731 m->m_pkthdr.len = len;
1732
1733 return (m);
1734 }
1735