/*	$NetBSD: hd64570.c,v 1.7 1999/10/23 22:20:11 erh Exp $	*/

/*
 * Copyright (c) 1998 Vixie Enterprises
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer (at) flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */

/*
 * hd64570:
 * From the hitachi docs:
 * The HD64570 serial communications adaptor (SCA) peripheral chip enables
 * a host microprocessor to perform asynchronous, byte-synchronous, or
 * bit-synchronous serial communication.  Its two full-duplex,
 * multiprotocol serial channels support a wide variety of protocols,
 * including frame relay, LAPB, LAPD, bisync and DDCMP.  Its built-in
 * direct memory access controller (DMAC) is equipped with a 32-stage
 * FIFO and can execute chained-block transfers.  Due to its DMAC and
 * 16-bit bus interface, the SCA supports serial data transfer rates up
 * to 12 Mbits/s without monopolizing the bus, even in full-duplex
 * communication.  Other on-chip features of the SCA, including four
 * types of MPU interfaces, a bus arbiter, timers, and an interrupt
 * controller, provide added functionality in a wide range of
 * applications, such as frame relay exchanges/system multiplexes, private
 * branch exchanges, computer networks, workstations, ISDN terminals,
 * and facsimile.
 *
 * For more info: http://semiconductor.hitachi.com
 * ----
 *
 * This driver not only talks to the HD64570 chip, but also implements
 * a version of the HDLC protocol that includes the CISCO keepalive
 * protocol.  It publishes itself as a network interface that can
 * handle IP traffic only.
 */
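
/*
 * For orientation, a rough sketch of the framing involved.  The layout
 * below is Cisco HDLC as commonly documented; the field values are not
 * taken from this file (the real definitions live in hd64570var.h as
 * hdlc_header_t and cisco_pkt_t), so treat it as an illustrative
 * assumption rather than gospel:
 *
 *	+------+------+----------+------------------+-----+
 *	| addr | ctrl | protocol |     payload      | FCS |
 *	+------+------+----------+------------------+-----+
 *	  0x0f   0x00   0x0800      an IP datagram    CRC
 *
 * (0x8f is used for multicast; protocol 0x8035 selects the SLARP
 * keepalive packets handled by sca_frame_process().  The FCS is
 * generated and checked by the chip, not by this driver.)
 */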

/*
 * TODO:
 *
 *	o teach the receive logic about errors, and about long frames that
 *	  span more than one input buffer.  (Right now, receive/transmit is
 *	  limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *	  This is currently 1504, which is large enough to hold the HDLC
 *	  header and the packet itself.  Packets which are too long are
 *	  silently dropped on both transmit and receive.)
 *	o write code to handle the msci interrupts, needed only for CD
 *	  and CTS changes.
 *	o consider switching back to a "queue tx with DMA active" model which
 *	  should help sustain outgoing traffic
 *	o through clever use of bus_dma*() functions, it should be possible
 *	  to map the mbuf's data area directly into a descriptor transmit
 *	  buffer, removing the need to allocate extra memory.  If, however,
 *	  we run out of descriptors for this, we will need to then allocate
 *	  one large mbuf, copy the fragmented chain into it, and put it onto
 *	  a single descriptor.
 *	o use bus_dmamap_sync() with the right offset and lengths, rather
 *	  than cheating and always sync'ing the whole region.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>

#define SCA_DEBUG_RX	0x0001
#define SCA_DEBUG_TX	0x0002
#define SCA_DEBUG_CISCO	0x0004
#define SCA_DEBUG_DMA	0x0008
#define SCA_DEBUG_RXPKT	0x0010
#define SCA_DEBUG_TXPKT	0x0020
#define SCA_DEBUG_INTR	0x0040

#if 0
#define SCA_DEBUG_LEVEL	( SCA_DEBUG_TX )
#else
#define SCA_DEBUG_LEVEL	0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
	if ((l) & sca_debug) \
		printf x;\
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif

#define SCA_MTU		1500	/* hard coded */

/*
 * buffers per tx and rx channels, per port, and the size of each.
 * Don't use these constants directly, as they are really only hints.
 * Use the calculated values stored in struct sca_softc instead.
 *
 * Each must be at least 2, receive would be better at around 20 or so.
 *
 * XXX Due to a damned near impossible to track down bug, transmit buffers
 * MUST be 2, no more, no less.
 */
#ifndef SCA_NtxBUFS
#define SCA_NtxBUFS	2
#endif
#ifndef SCA_NrxBUFS
#define SCA_NrxBUFS	20
#endif
#ifndef SCA_BSIZE
#define SCA_BSIZE	(SCA_MTU + 4)	/* room for HDLC as well */
#endif

#if 0
#define SCA_USE_FASTQ		/* use a split queue, one for fast traffic */
#endif

static inline void sca_write_1(struct sca_softc *, u_int, u_int8_t);
static inline void sca_write_2(struct sca_softc *, u_int, u_int16_t);
static inline u_int8_t sca_read_1(struct sca_softc *, u_int);
static inline u_int16_t sca_read_2(struct sca_softc *, u_int);

static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static int sca_alloc_dma(struct sca_softc *);
static void sca_setup_dma_memory(struct sca_softc *);
static void sca_msci_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static int sca_dmac_intr(sca_port_t *, u_int8_t);
static int sca_msci_intr(struct sca_softc *, u_int8_t);

static void sca_get_packets(sca_port_t *);
static void sca_frame_process(sca_port_t *, sca_desc_t *, u_int8_t *);
static int sca_frame_avail(sca_port_t *, int *);
static void sca_frame_skip(sca_port_t *, int);

static void sca_port_starttx(sca_port_t *);

static void sca_port_up(sca_port_t *);
static void sca_port_down(sca_port_t *);

static int sca_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
			   struct rtentry *));
static int sca_ioctl __P((struct ifnet *, u_long, caddr_t));
static void sca_start __P((struct ifnet *));
static void sca_watchdog __P((struct ifnet *));

static struct mbuf *sca_mbuf_alloc(caddr_t, u_int);

#if SCA_DEBUG_LEVEL > 0
static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif

static inline void
sca_write_1(struct sca_softc *sc, u_int reg, u_int8_t val)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, SCADDR(reg), val);
}

static inline void
sca_write_2(struct sca_softc *sc, u_int reg, u_int16_t val)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, SCADDR(reg), val);
}

static inline u_int8_t
sca_read_1(struct sca_softc *sc, u_int reg)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh, SCADDR(reg));
}

static inline u_int16_t
sca_read_2(struct sca_softc *sc, u_int reg)
{
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh, SCADDR(reg));
}

static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}
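
/*
 * A note on the accessors above: the two MSCI and two DMAC register
 * blocks are laid out identically, so the per-port offsets msci_off
 * and dmac_off let the rest of the driver name registers with the
 * channel-0 defines regardless of port.  For example (assuming port 1,
 * whose msci_off is SCA_MSCI_OFF_1):
 *
 *	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
 *
 * ends up as
 *
 *	sca_write_1(sc, SCA_MSCI_OFF_1 + SCA_CMD0, SCA_CMD_TXENABLE);
 */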

int
sca_init(struct sca_softc *sc, u_int nports)
{
	/*
	 * Do a little sanity check: check number of ports.
	 */
	if (nports < 1 || nports > 2)
		return 1;

	/*
	 * remember the details
	 */
	sc->sc_numports = nports;

	/*
	 * allocate the memory and chop it into bits.
	 */
	if (sca_alloc_dma(sc) != 0)
		return 1;
	sca_setup_dma_memory(sc);

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR, 0);	/* use ivr, no int ack */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
	return 0;
}
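
/*
 * A minimal sketch of the expected calling sequence from a bus
 * front-end.  This is not compiled (hence the #if 0) and the function
 * name is hypothetical; the front-end is assumed to have filled in the
 * bus tags, parent device, and DTR callback in the softc first.
 */
#if 0
static void
example_frontend_attach(struct sca_softc *sc, u_int nports)
{
	u_int port;

	/* sc_iot, sc_ioh, sc_dmat, parent, dtr_callback already set */
	if (sca_init(sc, nports) != 0)
		return;		/* bad port count or DMA setup failure */

	for (port = 0; port < nports; port++)
		sca_port_attach(sc, port);
}
#endif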

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;	/* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if (sc->parent != NULL)
			ntwo_unit = sc->parent->dv_unit * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if (sc->parent != NULL)
			ntwo_unit = sc->parent->dv_unit * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	sprintf(ifp->if_xname, "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_OTHER;	/* Should be HDLC, but... */
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5;  /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	if_attach(ifp);

#if NBPFILTER > 0
	bpfattach(&scp->sp_bpf, ifp, DLT_HDLC, HDLC_HDRLEN);
#endif

	if (sc->parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		       ifp->if_xname, sc->parent->dv_xname, port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);
	msci_write_1(scp, SCA_MD00,
		     (  SCA_MD0_CRC_1
		      | SCA_MD0_CRC_CCITT
		      | SCA_MD0_CRC_ENABLE
		      | SCA_MD0_MODE_HDLC));
	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
		     (SCA_MD2_DUPLEX | SCA_MD2_NRZ));

	/*
	 * reset the port (and lower RTS)
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
	msci_write_1(scp, SCA_CTL0,
		     (SCA_CTL_IDLPAT | SCA_CTL_UDRNC | SCA_CTL_RTS));
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * select the RX clock as the TX clock, and set for external
	 * clock source.
	 */
	msci_write_1(scp, SCA_RXS0, 0);
	msci_write_1(scp, SCA_TXS0, 0);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0);	/* 0x0c == CD and CTS changes only */
	msci_write_1(scp, SCA_IE20, 0);
	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	msci_write_1(scp, SCA_TRC00, 0x10);
	msci_write_1(scp, SCA_TRC10, 0x1f);
}

/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam,
			0, sc->sc_allocsize, BUS_DMASYNC_PREWRITE);

	desc = scp->txdesc;
	desc_p = scp->txdesc_p;
	buf_p = scp->txbuf_p;
	scp->txcur = 0;
	scp->txinuse = 0;

	for (i = 0 ; i < SCA_NtxBUFS ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		desc->cp = desc_p & 0x0000ffff;
		desc->bp = buf_p & 0x0000ffff;
		desc->bpb = (buf_p & 0x00ff0000) >> 16;
		desc->len = SCA_BSIZE;
		desc->stat = 0;

		desc++;		/* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	desc--;
	desc->cp = scp->txdesc_p & 0x0000ffff;

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
		     (u_int8_t)((scp->txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 */
	desc = scp->rxdesc;
	desc_p = scp->rxdesc_p;
	buf_p = scp->rxbuf_p;
	scp->rxstart = 0;
	scp->rxend = SCA_NrxBUFS - 1;

	for (i = 0 ; i < SCA_NrxBUFS ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		desc->cp = desc_p & 0x0000ffff;
		desc->bp = buf_p & 0x0000ffff;
		desc->bpb = (buf_p & 0x00ff0000) >> 16;
		desc->len = SCA_BSIZE;
		desc->stat = 0x00;

		desc++;		/* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	desc--;
	desc->cp = scp->rxdesc_p & 0x0000ffff;

	sca_dmac_rxinit(scp);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam,
			0, sc->sc_allocsize, BUS_DMASYNC_POSTWRITE);
}

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);		/* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 */
	dmac_write_1(scp, SCA_CPB0,
		     (u_int8_t)((scp->rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0,
		     (u_int16_t)(scp->rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0,
		     (u_int16_t)(scp->rxdesc_p
			 + sizeof(sca_desc_t) * SCA_NrxBUFS));

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
		     (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}
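
/*
 * Conceptually, the receive ring now looks like this (an illustrative
 * sketch, not from the Hitachi manual): the chip advances CDA as it
 * fills buffers and will not advance past EDA, the "overwrite fence"
 * which sca_frame_skip() pushes forward as frames are consumed.
 *
 *	rxdesc[0] -> rxdesc[1] -> ... -> rxdesc[N-1] -+
 *	    ^                                         |
 *	    +-----------------------------------------+
 *
 *	CDA: next descriptor the chip will fill
 *	EDA: first descriptor the chip may NOT touch
 */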

static int
sca_alloc_dma(struct sca_softc *sc)
{
	u_int	allocsize;
	int	err;
	int	rsegs;
	u_int	bpp;

	SCA_DPRINTF(SCA_DEBUG_DMA,
		    ("sizeof sca_desc_t: %d bytes\n", sizeof (sca_desc_t)));

	bpp = sc->sc_numports * (SCA_NtxBUFS + SCA_NrxBUFS);

	allocsize = bpp * (SCA_BSIZE + sizeof (sca_desc_t));

	/*
	 * sanity checks:
	 *
	 * Check the total size of the data buffers, and so on.  The total
	 * DMAable space needs to fit within a single 16M region, and the
	 * descriptors need to fit within a 64K region.
	 */
	if (allocsize > 16 * 1024 * 1024)
		return 1;
	if (bpp * sizeof (sca_desc_t) > 64 * 1024)
		return 1;

	sc->sc_allocsize = allocsize;

	/*
	 * Allocate one huge chunk of memory.
	 */
	if (bus_dmamem_alloc(sc->sc_dmat,
			     allocsize,
			     SCA_DMA_ALIGNMENT,
			     SCA_DMA_BOUNDRY,
			     &sc->sc_seg, 1, &rsegs, BUS_DMA_NOWAIT) != 0) {
		printf("Could not allocate DMA memory\n");
		return 1;
	}
	SCA_DPRINTF(SCA_DEBUG_DMA,
		    ("DMA memory allocated: %d bytes\n", allocsize));

	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_seg, 1, allocsize,
			   &sc->sc_dma_addr, BUS_DMA_NOWAIT) != 0) {
		printf("Could not map DMA memory into kernel space\n");
		return 1;
	}
	SCA_DPRINTF(SCA_DEBUG_DMA, ("DMA memory mapped\n"));

	if (bus_dmamap_create(sc->sc_dmat, allocsize, 2,
			      allocsize, SCA_DMA_BOUNDRY,
			      BUS_DMA_NOWAIT, &sc->sc_dmam) != 0) {
		printf("Could not create DMA map\n");
		return 1;
	}
	SCA_DPRINTF(SCA_DEBUG_DMA, ("DMA map created\n"));

	err = bus_dmamap_load(sc->sc_dmat, sc->sc_dmam, sc->sc_dma_addr,
			      allocsize, NULL, BUS_DMA_NOWAIT);
	if (err != 0) {
		printf("Could not load DMA segment: %d\n", err);
		return 1;
	}
	SCA_DPRINTF(SCA_DEBUG_DMA, ("DMA map loaded\n"));

	return 0;
}

/*
 * Take the memory allocated with sca_alloc_dma() and divide it among the
 * two ports.
 */
static void
sca_setup_dma_memory(struct sca_softc *sc)
{
	sca_port_t *scp0, *scp1;
	u_int8_t *vaddr0;
	u_int32_t paddr0;
	u_long addroff;

	/*
	 * remember the physical address to 24 bits only, since the upper
	 * 8 bits are programmed into the device at a different layer.
	 */
	paddr0 = (sc->sc_dmam->dm_segs[0].ds_addr & 0x00ffffff);
	vaddr0 = sc->sc_dma_addr;

	/*
	 * if we have only one port it gets the full range.  If we have
	 * two we need to do a little magic to divide things up.
	 *
	 * The descriptors will all end up in the front of the area, while
	 * the remainder of the buffer is used for transmit and receive
	 * data.
	 *
	 * -------------------- start of memory
	 *    tx desc port 0
	 *    rx desc port 0
	 *    tx desc port 1
	 *    rx desc port 1
	 *    tx buffer port 0
	 *    rx buffer port 0
	 *    tx buffer port 1
	 *    rx buffer port 1
	 * -------------------- end of memory
	 */
	scp0 = &sc->sc_ports[0];
	scp1 = &sc->sc_ports[1];

	scp0->txdesc_p = paddr0;
	scp0->txdesc = (sca_desc_t *)vaddr0;
	addroff = sizeof(sca_desc_t) * SCA_NtxBUFS;

	/*
	 * point to the range following the tx descriptors, and
	 * set the rx descriptors there.
	 */
	scp0->rxdesc_p = paddr0 + addroff;
	scp0->rxdesc = (sca_desc_t *)(vaddr0 + addroff);
	addroff += sizeof(sca_desc_t) * SCA_NrxBUFS;

	if (sc->sc_numports == 2) {
		scp1->txdesc_p = paddr0 + addroff;
		scp1->txdesc = (sca_desc_t *)(vaddr0 + addroff);
		addroff += sizeof(sca_desc_t) * SCA_NtxBUFS;

		scp1->rxdesc_p = paddr0 + addroff;
		scp1->rxdesc = (sca_desc_t *)(vaddr0 + addroff);
		addroff += sizeof(sca_desc_t) * SCA_NrxBUFS;
	}

	/*
	 * point to the memory following the descriptors, and set the
	 * transmit buffer there.
	 */
	scp0->txbuf_p = paddr0 + addroff;
	scp0->txbuf = vaddr0 + addroff;
	addroff += SCA_BSIZE * SCA_NtxBUFS;

	/*
	 * lastly, skip over the transmit buffer and set up pointers into
	 * the receive buffer.
	 */
	scp0->rxbuf_p = paddr0 + addroff;
	scp0->rxbuf = vaddr0 + addroff;
	addroff += SCA_BSIZE * SCA_NrxBUFS;

	if (sc->sc_numports == 2) {
		scp1->txbuf_p = paddr0 + addroff;
		scp1->txbuf = vaddr0 + addroff;
		addroff += SCA_BSIZE * SCA_NtxBUFS;

		scp1->rxbuf_p = paddr0 + addroff;
		scp1->rxbuf = vaddr0 + addroff;
		addroff += SCA_BSIZE * SCA_NrxBUFS;
	}

	/*
	 * as a consistency check, addroff should be equal to the allocation
	 * size.
	 */
	if (sc->sc_allocsize != addroff)
		printf("ERROR: sc_allocsize != addroff: %lu != %lu\n",
		       sc->sc_allocsize, addroff);
}

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(ifp, m, dst, rt0)
	struct ifnet *ifp;
	struct mbuf *m;
	struct sockaddr *dst;
	struct rtentry *rt0;
{
	int error;
	int s;
	u_int16_t protocol;
	hdlc_header_t *hdlc;
	struct ifqueue *ifq;
#ifdef SCA_USE_FASTQ
	struct ip *ip;
	sca_port_t *scp = ifp->if_softc;
	int highpri;
#endif

	error = 0;
	ifp->if_lastchange = time;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

#ifdef SCA_USE_FASTQ
	highpri = 0;
#endif

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
	case AF_INET:
		protocol = HDLC_PROTOCOL_IP;

#ifdef SCA_USE_FASTQ
		ip = mtod(m, struct ip *);
		if ((ip->ip_tos & IPTOS_LOWDELAY) == IPTOS_LOWDELAY)
			highpri = 1;
#endif
		break;

	default:
		printf("%s: address family %d unsupported\n",
		       ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	if (M_LEADINGSPACE(m) < HDLC_HDRLEN) {
		m = m_prepend(m, HDLC_HDRLEN, M_DONTWAIT);
		if (m == NULL) {
			error = ENOBUFS;
			goto bad;
		}
		m->m_len = 0;
	} else {
		m->m_data -= HDLC_HDRLEN;
	}

	hdlc = mtod(m, hdlc_header_t *);
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->addr = CISCO_MULTICAST;
	else
		hdlc->addr = CISCO_UNICAST;
	hdlc->control = 0;
	hdlc->protocol = htons(protocol);
	m->m_len += HDLC_HDRLEN;

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	s = splnet();
#ifdef SCA_USE_FASTQ
	ifq = (highpri == 1 ? &scp->fastq : &ifp->if_snd);
#else
	ifq = &ifp->if_snd;
#endif
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		ifp->if_oerrors++;
		ifp->if_collisions++;
		error = ENOBUFS;
		splx(s);
		goto bad;
	}
	ifp->if_obytes += m->m_pkthdr.len;
	IF_ENQUEUE(ifq, m);

	ifp->if_lastchange = time;

	if (m->m_flags & M_MCAST)
		ifp->if_omcasts++;

	sca_start(ifp);
	splx(s);

	return (error);

bad:
	if (m)
		m_freem(m);
	return (error);
}

static int
sca_ioctl(ifp, cmd, addr)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t addr;
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)addr;
	ifa = (struct ifaddr *)addr;
	error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		if (ifa->ifa_addr->sa_family == AF_INET)
			sca_port_up(ifp->if_softc);
		else
			error = EAFNOSUPPORT;
		break;

	case SIOCSIFDSTADDR:
		if (ifa->ifa_addr->sa_family != AF_INET)
			error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifr == 0) {
			error = EAFNOSUPPORT;		/* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {

#ifdef INET
		case AF_INET:
			break;
#endif

		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if (ifr->ifr_flags & IFF_UP)
			sca_port_up(ifp->if_softc);
		else
			sca_port_down(ifp->if_softc);

		break;

	default:
		error = EINVAL;
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(ifp)
	struct ifnet *ifp;
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;

	/*
	 * can't queue when we are full or transmitter is busy
	 */
	if ((scp->txinuse >= (SCA_NtxBUFS - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam,
			0, sc->sc_allocsize,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	trigger_xmit = 0;

 txloop:
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IF_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	if (scp->txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->txdesc[scp->txcur];
		desc->stat &= ~SCA_DESC_EOT;

		/* Figure out what the next free descriptor is. */
		if ((scp->txcur + 1) == SCA_NtxBUFS)
			nexttx = 0;
		else
			nexttx = scp->txcur + 1;
	} else
		nexttx = 0;

	desc = &scp->txdesc[nexttx];
	buf = scp->txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->txbuf_p + SCA_BSIZE * nexttx;

	desc->bp = (u_int16_t)(buf_p & 0x0000ffff);
	desc->bpb = (u_int8_t)((buf_p & 0x00ff0000) >> 16);
	desc->stat = SCA_DESC_EOT | SCA_DESC_EOM;  /* end of frame and xfer */
	desc->len = 0;

	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			desc->len += m->m_len;
			if (desc->len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			bcopy(mtod(m, u_int8_t *), buf, m->m_len);
			buf += m->m_len;
		}
	}

	ifp->if_opackets++;

#if NBPFILTER > 0
	/*
	 * Pass packet to bpf if there is a listener.
	 */
	if (scp->sp_bpf)
		bpf_mtap(scp->sp_bpf, mb_head);
#endif

	m_freem(mb_head);

	if (scp->txinuse != 0) {
		scp->txcur++;
		if (scp->txcur == SCA_NtxBUFS)
			scp->txcur = 0;
	}
	scp->txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
		    ("TX: inuse %d index %d\n", scp->txinuse, scp->txcur));

	if (scp->txinuse < (SCA_NtxBUFS - 1))
		goto txloop;

 start_xmit:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam,
			0, sc->sc_allocsize,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);
}

static void
sca_watchdog(ifp)
	struct ifnet *ifp;
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int	ret;

	ret = 0;	/* non-zero means we processed at least one interrupt */

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
			     isr0, isr1, isr2));

		/*
		 * check DMA interrupt
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
					     isr1 & 0x0f);
		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
					     (isr1 & 0xf0) >> 4);

		if (isr0)
			ret += sca_msci_intr(sc, isr0);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}
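
/*
 * A minimal sketch (not compiled, handler name hypothetical) of how a
 * bus front-end is expected to dispatch interrupts here: register a
 * handler whose argument is the softc and simply call sca_hardintr(),
 * whose nonzero return means at least one interrupt was serviced.
 */
#if 0
static int
example_frontend_intr(void *arg)
{
	struct sca_softc *sc = arg;

	return (sca_hardintr(sc));
}
#endif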

static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & 0x0c) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
				     dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->txcur = 0;
				scp->txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				       scp->sp_if.if_xname,
				       dmac_read_2(scp, SCA_CDAL1),
				       dmac_read_2(scp, SCA_EDAL1),
				       dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->txcur = 0;
				scp->txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error.  It means that all data queued
			 * was transmitted, and we mark ourself as
			 * not in use and stop the watchdog timer.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
					    ("Transmit completed.\n"));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->txcur = 0;
				scp->txinuse = 0;

				/*
				 * check for more packets
				 */
				sca_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & 0x03) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("RX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
				| SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}

static int
sca_msci_intr(struct sca_softc *sc, u_int8_t isr)
{
	printf("Got msci interrupt XXX\n");

	return 0;
}

static void
sca_get_packets(sca_port_t *scp)
{
	int descidx;
	sca_desc_t *desc;
	u_int8_t *buf;

	bus_dmamap_sync(scp->sca->sc_dmat, scp->sca->sc_dmam,
			0, scp->sca->sc_allocsize,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Loop while there are packets to receive.  After each is processed,
	 * call sca_frame_skip() to update the DMA registers to the new
	 * state.
	 */
	while (sca_frame_avail(scp, &descidx)) {
		desc = &scp->rxdesc[descidx];
		buf = scp->rxbuf + SCA_BSIZE * descidx;

		sca_frame_process(scp, desc, buf);
#if SCA_DEBUG_LEVEL > 0
		if (sca_debug & SCA_DEBUG_RXPKT)
			sca_frame_print(scp, desc, buf);
#endif
		sca_frame_skip(scp, descidx);
	}

	bus_dmamap_sync(scp->sca->sc_dmat, scp->sca->sc_dmam,
			0, scp->sca->sc_allocsize,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 */
static int
sca_frame_avail(sca_port_t *scp, int *descindx)
{
	u_int16_t cda;
	int cdaidx;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;

	/*
	 * Read the current descriptor from the SCA.
	 */
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
	desc_p = cda - (u_int16_t)(scp->rxdesc_p & 0x0000ffff);
	cdaidx = desc_p / sizeof(sca_desc_t);

	if (cdaidx >= SCA_NrxBUFS)
		return 0;

	for (;;) {
		/*
		 * if the SCA is reading into the first descriptor, we somehow
		 * got this interrupt incorrectly.  Just return that there are
		 * no packets ready.
		 */
		if (cdaidx == scp->rxstart)
			return 0;

		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->rxdesc[scp->rxstart];

		rxstat = desc->stat;

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS)
			goto nextpkt;

		/*
		 * full packet?  Good.
		 */
		if (rxstat & SCA_DESC_EOM) {
			*descindx = scp->rxstart;
			return 1;
		}

		/*
		 * increment the rxstart address, since this frame is
		 * somehow damaged.  Skip over it in later calls.
		 * XXX This breaks multidescriptor receives, so each
		 * frame HAS to fit within one descriptor's buffer
		 * space now...
		 */
	nextpkt:
		scp->rxstart++;
		if (scp->rxstart == SCA_NrxBUFS)
			scp->rxstart = 0;
	}

	return 0;
}

/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	hdlc_header_t *hdlc;
	cisco_pkt_t *cisco, *ncisco;
	u_int16_t len;
	struct mbuf *m;
	u_int8_t *nbuf;
	u_int32_t t = (time.tv_sec - boottime.tv_sec) * 1000;
	struct ifqueue *ifq;

	len = desc->len;

	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(hdlc_header_t))
		return;

#if NBPFILTER > 0
	if (scp->sp_bpf)
		bpf_tap(scp->sp_bpf, p, len);
#endif

	/*
	 * read and then strip off the HDLC information
	 */
	hdlc = (hdlc_header_t *)p;

	scp->sp_if.if_ipackets++;
	scp->sp_if.if_lastchange = time;

	switch (ntohs(hdlc->protocol)) {
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));

		m = sca_mbuf_alloc(p, len);
		if (m == NULL) {
			scp->sp_if.if_iqdrops++;
			return;
		}
		m->m_pkthdr.rcvif = &scp->sp_if;

		if (IF_QFULL(&ipintrq)) {
			IF_DROP(&ipintrq);
			scp->sp_if.if_ierrors++;
			scp->sp_if.if_iqdrops++;
			m_freem(m);
		} else {
			/*
			 * strip off the HDLC header and hand off to IP stack
			 */
			m->m_pkthdr.len -= HDLC_HDRLEN;
			m->m_data += HDLC_HDRLEN;
			m->m_len -= HDLC_HDRLEN;
			IF_ENQUEUE(&ipintrq, m);
			schednetisr(NETISR_IP);
		}

		break;

	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("short CISCO packet %d, wanted %d\n",
				     len, CISCO_PKT_LEN));
			return;
		}

		/*
		 * allocate an mbuf and copy the important bits of data
		 * into it.
		 */
		m = sca_mbuf_alloc(p, HDLC_HDRLEN + CISCO_PKT_LEN);
		if (m == NULL)
			return;

		nbuf = mtod(m, u_int8_t *);
		ncisco = (cisco_pkt_t *)(nbuf + HDLC_HDRLEN);
		m->m_pkthdr.rcvif = &scp->sp_if;

		cisco = (cisco_pkt_t *)(p + HDLC_HDRLEN);

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			m_freem(m);
			break;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			m_freem(m);
			break;

		case CISCO_KEEPALIVE_REQ:
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Received KA, mseq %d,"
				     " yseq %d, rel 0x%04x, t0"
				     " %04x, t1 %04x\n",
				     ntohl(cisco->par1), ntohl(cisco->par2),
				     ntohs(cisco->rel), ntohs(cisco->time0),
				     ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.
			 */
			ncisco->par2 = cisco->par1;
			ncisco->par1 = htonl(scp->cka_lasttx);
			ncisco->time0 = htons((u_int16_t)(t >> 16));
			ncisco->time1 = htons((u_int16_t)(t & 0x0000ffff));

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				m_freem(m);
				return;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			break;

		default:
			m_freem(m);
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Unknown CISCO keepalive protocol 0x%04x\n",
				     ntohl(cisco->type)));
			return;
		}

		break;

	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
			    ("Unknown/unexpected ethertype 0x%04x\n",
			     ntohs(hdlc->protocol)));
	}
}

#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;

	printf("descriptor va %p: cp 0x%x bpb 0x%0x bp 0x%0x stat 0x%0x len %d\n",
	       desc, desc->cp, desc->bpb, desc->bp, desc->stat, desc->len);

	for (i = 0 ; i < desc->len ; i++) {
		if (nothing_yet == 1 && *p == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ", *p++);
	}

	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * skip all frames before the descriptor index "indx" -- we do this by
 * moving the rxstart pointer to the index following this one, and
 * setting the end descriptor to this index.
 */
static void
sca_frame_skip(sca_port_t *scp, int indx)
{
	u_int32_t desc_p;

	scp->rxstart++;
	if (scp->rxstart == SCA_NrxBUFS)
		scp->rxstart = 0;
	desc_p = scp->rxdesc_p + sizeof(sca_desc_t) * indx;
	dmac_write_2(scp, SCA_EDAL0,
		     (u_int16_t)(desc_p & 0x0000ffff));
}

/*
 * set a port to the "up" state
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;

	/*
	 * raise DTR
	 */
	sc->dtr_callback(sc->dtr_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
		     msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS);

	/*
	 * enable interrupts
	 */
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->txinuse = 0;
	scp->txcur = 0;
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * set a port to the "down" state
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;

	/*
	 * lower DTR
	 */
	sc->dtr_callback(sc->dtr_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
		     msci_read_1(scp, SCA_CTL0) | SCA_CTL_RTS);

	/*
	 * disable interrupts
	 */
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	struct sca_softc *sc;
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	sc = scp->sca;

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->txinuse == 0)
		return;
	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = scp->txcur;
	enddesc++;
	if (enddesc == SCA_NtxBUFS)
		enddesc = 0;

	startdesc_p = scp->txdesc_p;
	enddesc_p = scp->txdesc_p + sizeof(sca_desc_t) * enddesc;

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
		     (u_int16_t)(startdesc_p & 0x0000ffff));

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 */
static struct mbuf *
sca_mbuf_alloc(caddr_t p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL)
		bcopy(p, mtod(m, caddr_t), len);
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}