/*	$NetBSD: hd64570.c,v 1.20 2001/11/13 13:14:37 lukem Exp $	*/

/*
 * Copyright (c) 1999 Christian E. Hopps
 * Copyright (c) 1998 Vixie Enterprises
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer (at) flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */

/*
 * TODO:
 *
 * o teach the receive logic about errors, and about long frames that
 *   span more than one input buffer.  (Right now, receive/transmit is
 *   limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *   This is currently 1504, which is large enough to hold the HDLC
 *   header and the packet itself.  Packets which are too long are
 *   silently dropped on both transmit and receive.)
 * o write code to handle the msci interrupts, needed only for CD
 *   and CTS changes.
 * o consider switching back to a "queue tx with DMA active" model which
 *   should help sustain outgoing traffic
 * o through clever use of bus_dma*() functions, it should be possible
 *   to map the mbuf's data area directly into a descriptor transmit
 *   buffer, removing the need to allocate extra memory.  If, however,
 *   we run out of descriptors for this, we will need to then allocate
 *   one large mbuf, copy the fragmented chain into it, and put it onto
 *   a single descriptor.
 * o use bus_dmamap_sync() with the right offset and lengths, rather
 *   than cheating and always sync'ing the whole region.
 *
 * o perhaps allow rx and tx to be in more than one page
 *   if not using dma.  currently the assumption is that
 *   rx uses a page and tx uses a page.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.20 2001/11/13 13:14:37 lukem Exp $");

#include "bpfilter.h"
#include "opt_inet.h"
#include "opt_iso.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#endif
#endif

#ifdef ISO
#include <net/if_llc.h>
#include <netiso/iso.h>
#include <netiso/iso_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>

#define SCA_DEBUG_RX		0x0001
#define SCA_DEBUG_TX		0x0002
#define SCA_DEBUG_CISCO		0x0004
#define SCA_DEBUG_DMA		0x0008
#define SCA_DEBUG_RXPKT		0x0010
#define SCA_DEBUG_TXPKT		0x0020
#define SCA_DEBUG_INTR		0x0040
#define SCA_DEBUG_CLOCK		0x0080

#if 0
#define SCA_DEBUG_LEVEL	( 0xFFFF )
#else
#define SCA_DEBUG_LEVEL	0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
	if ((l) & sca_debug) \
		printf x; \
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif

#if 0
#define SCA_USE_FASTQ		/* use a split queue, one for fast traffic */
#endif

static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static void sca_msci_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static int sca_dmac_intr(sca_port_t *, u_int8_t);
static int sca_msci_intr(sca_port_t *, u_int8_t);

static void sca_get_packets(sca_port_t *);
static int sca_frame_avail(sca_port_t *);
static void sca_frame_process(sca_port_t *);
static void sca_frame_read_done(sca_port_t *);

static void sca_port_starttx(sca_port_t *);

static void sca_port_up(sca_port_t *);
static void sca_port_down(sca_port_t *);

static int sca_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
			   struct rtentry *));
static int sca_ioctl __P((struct ifnet *, u_long, caddr_t));
static void sca_start __P((struct ifnet *));
static void sca_watchdog __P((struct ifnet *));

static struct mbuf *sca_mbuf_alloc(struct sca_softc *, caddr_t, u_int);

#if SCA_DEBUG_LEVEL > 0
static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif


#define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
#define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
#define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
#define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)

#define	sca_page_addr(sc, addr)	((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)
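
/*
 * Example of the windowed (non-DMA) addressing (illustrative only;
 * assumes a hypothetical 16KB window, i.e. scu_pagemask == 0x3fff):
 * sca_page_addr() keeps just the offset within the current window, so
 * the caller must first select the right page with scu_set_page().
 */
#if 0
	off = sca_page_addr(sc, 0x12345);   /* == 0x2345 with the mask above */
#endif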

static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}

/*
 * read the chain pointer
 */
static inline u_int16_t
sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_chainp);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
}

/*
 * write the chain pointer
 */
static inline void
sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
{
	if (sc->sc_usedma)
		(dp)->sd_chainp = cp;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_chainp), cp);
}

/*
 * read the buffer pointer
 */
static inline u_int32_t
sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
{
	u_int32_t address;

	if (sc->sc_usedma)
		address = dp->sd_bufp | dp->sd_hbufp << 16;
	else {
		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
	}
	return (address);
}

/*
 * write the buffer pointer
 */
static inline void
sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
{
	if (sc->sc_usedma) {
		dp->sd_bufp = bufp & 0xFFFF;
		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
	} else {
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
		    bufp & 0xFFFF);
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
		    (bufp & 0x00FF0000) >> 16);
	}
}
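
/*
 * The buffer pointer is a 24-bit bus address split across two fields:
 * the low 16 bits live in sd_bufp and bits 16-23 in sd_hbufp.  A
 * sketch of the round trip (illustrative only, not driver code):
 */
#if 0
	bufp = 0x00654321;
	lo = bufp & 0xFFFF;			/* 0x4321 -> sd_bufp */
	hi = (bufp & 0x00FF0000) >> 16;		/* 0x65   -> sd_hbufp */
	back = lo | (hi << 16);			/* 0x00654321 again */
#endif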

/*
 * read the buffer length
 */
static inline u_int16_t
sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_buflen);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
}

/*
 * write the buffer length
 */
static inline void
sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
{
	if (sc->sc_usedma)
		(dp)->sd_buflen = len;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_buflen), len);
}

/*
 * read the descriptor status
 */
static inline u_int8_t
sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_stat);
	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
}

/*
 * write the descriptor status
 */
static inline void
sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
{
	if (sc->sc_usedma)
		(dp)->sd_stat = stat;
	else
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
		    stat);
}

void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check:  check number of ports.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can't handle more than 2 or fewer than 1 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
#if 0
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);
#endif

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;  /* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if (sc->sc_parent != NULL)
			ntwo_unit = sc->sc_parent->dv_unit * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if (sc->sc_parent != NULL)
			ntwo_unit = sc->sc_parent->dv_unit * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	sprintf(ifp->if_xname, "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PTPSERIAL;
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(ifp, DLT_HDLC, HDLC_HDRLEN);
#endif

	if (sc->sc_parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		       ifp->if_xname, sc->sc_parent->dv_xname, port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

#if 0
/*
 * returns log2(div), sets 'tmc' for the required freq 'hz'
 */
static u_int8_t
sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
{
	u_int32_t tmc, div;
	u_int32_t clock;

	/* clock hz = (chipclock / tmc) / 2^(div); */
	/*
	 * TD == tmc * 2^(n)
	 *
	 * note:
	 *	1 <= TD <= 256		TD is inc of 1
	 *	2 <= TD <= 512		TD is inc of 2
	 *	4 <= TD <= 1024		TD is inc of 4
	 *	...
	 *	512 <= TD <= 256*512	TD is inc of 512
	 *
	 * so note there are overlaps.  We lose precision
	 * as div increases, so we wish to minimize div.
	 *
	 * basically we want to do
	 *
	 * tmc = chip / hz, but have tmc <= 256
	 */

	/* assume system clock is 9.8304MHz or 9830400Hz */
	clock = 9830400 >> 1;

	/* round down */
	div = 0;
	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
		clock >>= 1;
		div++;
	}
	if (clock / tmc > hz)
		tmc++;
	if (!tmc)
		tmc = 1;

	if (div > SCA_RXS_DIV_512) {
		/* set to maximums */
		div = SCA_RXS_DIV_512;
		tmc = 0;
	}

	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
	return (div & 0xFF);
}
#endif
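
/*
 * A worked example of the search above, using its assumed 9.8304MHz
 * system clock (so clock starts at 4915200): for hz = 128000,
 * tmc = 4915200 / 128000 = 38 with div = 0; since 4915200 / 38 =
 * 129347 > 128000 the code rounds tmc up to 39, for an actual rate of
 * 4915200 / 39 = 126030 Hz.  The generator can only hit rates of the
 * form clock / (tmc * 2^div), and the search picks the closest rate
 * not above the request.
 */
#if 0
	div = sca_msci_get_baud_rate_values(128000, &tmc);
	/* here div == 0 and tmc == 39, i.e. ~126030 Hz */
#endif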

/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	/* reset the channel */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);

	msci_write_1(scp, SCA_MD00,
		     ( SCA_MD0_CRC_1
		     | SCA_MD0_CRC_CCITT
		     | SCA_MD0_CRC_ENABLE
		     | SCA_MD0_MODE_HDLC));
#if 0
	/* immediately send receive reset so the above takes */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif

	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
		     (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));

	/* be safe and do it again */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/* setup underrun and idle control, and initial RTS state */
	msci_write_1(scp, SCA_CTL0,
		     (SCA_CTL_IDLC_PATTERN
		     | SCA_CTL_UDRNC_AFTER_FCS
		     | SCA_CTL_RTS_LOW));

	/* reset the transmitter */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * set the clock sources
	 */
	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);

	/* set external clock generate as requested */
	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
#if 0
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
#else
	/* this would deliver transmitter underrun to ST1/ISR1 */
	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
#endif
	msci_write_1(scp, SCA_IE20, 0);

	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	/* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * The correct values here are important for avoiding underruns.
	 * For any FIFO level less than or equal to TRC0, txrdy is
	 * activated, which will start the dmac transfer to the fifo.
	 * For a buffer size >= TRC1 + 1, txrdy is cleared, which will
	 * stop DMA.
	 *
	 * Thus if we are using a very fast clock that empties the fifo
	 * quickly, delays in the dmac starting to fill the fifo can
	 * lead to underruns, so we want a fairly full fifo to still
	 * cause the dmac to start.  For cards with on-board RAM this
	 * has no effect on system performance.  For cards that DMA
	 * to/from system memory it will cause more, shorter,
	 * bus accesses rather than fewer longer ones.
	 */
	msci_write_1(scp, SCA_TRC00, 0x00);
	msci_write_1(scp, SCA_TRC10, 0x1f);
}
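
/*
 * An illustration of the TRC settings above (a reading of the comment,
 * not of the chip documentation): with TRC0 = 0x00 txrdy asserts only
 * once the FIFO has drained to empty, and with TRC1 = 0x1f it clears
 * once the FIFO holds 0x20 bytes, so each dmac transfer tops the FIFO
 * all the way up.  Raising TRC0, as sca_msci_intr() does after an
 * underrun, makes txrdy assert while the FIFO is still partly full,
 * buying the dmac more time to start refilling.
 */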

/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
		    BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX assumes that all tx desc and bufs in same page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	desc = scp->sp_txdesc;
	desc_p = scp->sp_txdesc_p;
	buf_p = scp->sp_txbuf_p;
	scp->sp_txcur = 0;
	scp->sp_txinuse = 0;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	/* XXX1
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	 */
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
		     (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 *
	 * XXX assumes that all rx desc and bufs in same page
	 */
	if (!sc->sc_usedma)
		sc->scu_set_page(sc, scp->sp_rxdesc_p);

	desc = scp->sp_rxdesc;
	desc_p = scp->sp_rxdesc_p;
	buf_p = scp->sp_rxbuf_p;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
		sca_desc_write_buflen(sc, desc, 0);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);

	sca_dmac_rxinit(scp);

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}
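
/*
 * A minimal sketch of the ring arithmetic used above (illustrative
 * only): descriptor i chains to the low 16 bits of the physical
 * address of descriptor (i + 1) % n, so the final entry points back
 * at the base and the list is circular.
 */
#if 0
	chainp = (base_p + ((i + 1) % n) * sizeof(sca_desc_t)) & 0x0000ffff;
#endif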

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/* reset descriptors to initial state */
	scp->sp_rxstart = 0;
	scp->sp_rxend = scp->sp_nrxdesc - 1;

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 * Because CDA can't equal EDA when starting, we always
	 * keep a single buffer gap between CDA and EDA.
	 */
	dmac_write_1(scp, SCA_CPB0,
		     (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
		     (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}
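
/*
 * Example of the CDA/EDA spacing above: with n receive descriptors at
 * base_p, CDA starts at base_p and EDA at base_p + (n - 1) *
 * sizeof(sca_desc_t), so the chip may fill every descriptor except
 * the last one before stalling, preserving the one-buffer gap.
 */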

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(ifp, m, dst, rt0)
	struct ifnet *ifp;
	struct mbuf *m;
	struct sockaddr *dst;
	struct rtentry *rt0;
{
#ifdef ISO
	struct hdlc_llc_header *llc;
#endif
	struct hdlc_header *hdlc;
	struct ifqueue *ifq = NULL;
	int s, error, len;
	short mflags;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	error = 0;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

	/*
	 * If the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef SCA_USE_FASTQ
		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
		    == IPTOS_LOWDELAY)
			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
#endif
		/*
		 * Add cisco serial line header.  If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/*
		 * Add cisco serial line header.  If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
		break;
#endif
#ifdef ISO
	case AF_ISO:
		/*
		 * Add cisco llc serial line header.  If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_llc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		llc = mtod(m, struct hdlc_llc_header *);
		llc->hl_dsap = llc->hl_ssap = LLC_ISO_LSAP;
		llc->hl_ffb = 0;
		break;
#endif
	default:
		printf("%s: address family %d unsupported\n",
		       ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/* finish */
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->h_addr = CISCO_MULTICAST;
	else
		hdlc->h_addr = CISCO_UNICAST;
	hdlc->h_resv = 0;

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	mflags = m->m_flags;
	len = m->m_pkthdr.len;
	s = splnet();
	if (ifq != NULL) {
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			m_freem(m);
			error = ENOBUFS;
		} else
			IF_ENQUEUE(ifq, m);
	} else
		IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
	if (error != 0) {
		splx(s);
		ifp->if_oerrors++;
		ifp->if_collisions++;
		return (error);
	}
	ifp->if_obytes += len;
	if (mflags & M_MCAST)
		ifp->if_omcasts++;

	sca_start(ifp);
	splx(s);

	return (error);

bad:
	if (m)
		m_freem(m);
	return (error);
}

static int
sca_ioctl(ifp, cmd, addr)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t addr;
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)addr;
	ifa = (struct ifaddr *)addr;
	error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
#endif
#ifdef INET6
		case AF_INET6:
#endif
#if defined(INET) || defined(INET6)
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFDSTADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			break;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			break;
#endif
		error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX need multicast group management code */
		if (ifr == 0) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if (ifr->ifr_flags & IFF_UP) {
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
		} else {
			ifp->if_flags &= ~IFF_UP;
			sca_port_down(ifp->if_softc);
		}

		break;

	default:
		error = EINVAL;
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(ifp)
	struct ifnet *ifp;
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, stat;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;
	u_int len;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));

	/*
	 * can't queue when we are full or transmitter is busy
	 */
#ifdef oldcode
	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#else
	if (scp->sp_txinuse
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#endif
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));

	/*
	 * XXX assume that all tx desc and bufs in same page
	 */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	trigger_xmit = 0;

txloop:
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IF_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
#ifdef oldcode
	if (scp->txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->sp_txdesc[scp->txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
X
#endif
	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to mbuf len %d\n", m->m_len));

			if (sc->sc_usedma)
				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	ifp->if_opackets++;

#if NBPFILTER > 0
	/*
	 * Pass packet to bpf if there is a listener.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, mb_head);
#endif

	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX didn't this used to limit us to 1?  Multi-descriptor use may
	 * be untested; sp_ntxdesc used to be hard-coded to 2 with a claim
	 * of a too-hard-to-find bug.
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}

static void
sca_watchdog(ifp)
	struct ifnet *ifp;
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int ret;

	ret = 0;  /* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
			     isr0, isr1, isr2));

		/*
		 * check DMAC interrupt
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
					     isr1 & 0x0f);

		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
					     (isr1 & 0xf0) >> 4);

		/*
		 * msci interrupts
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);

		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
			    (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}

static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
				     dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				       scp->sp_if.if_xname,
				       dmac_read_2(scp, SCA_CDAL1),
				       dmac_read_2(scp, SCA_EDAL1),
				       dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error.  It means that all data queued
			 * was transmitted, and we mark ourself as
			 * not in use and stop the watchdog timer.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("Transmit completed. cda %x eda %x dsr %x\n",
				     dmac_read_2(scp, SCA_CDAL1),
				     dmac_read_2(scp, SCA_EDAL1),
				     dsr));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;

				/*
				 * check for more packets
				 */
				sca_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));

		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
				| SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}

static int
sca_msci_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t st1, trc0;

	/* get and clear the specific interrupt -- should act on it :)*/
	if ((st1 = msci_read_1(scp, SCA_ST10))) {
		/* clear the interrupt */
		msci_write_1(scp, SCA_ST10, st1);

		if (st1 & SCA_ST1_UDRN) {
			/* underrun -- try to increase ready control */
			trc0 = msci_read_1(scp, SCA_TRC00);
			if (trc0 == 0x1f)
				printf("TX: underrun - fifo depth maxed\n");
			else {
				if ((trc0 += 2) > 0x1f)
					trc0 = 0x1f;
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("TX: udrn - incr fifo to %d\n", trc0));
				msci_write_1(scp, SCA_TRC00, trc0);
			}
		}
	}
	return (0);
}
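
/*
 * The underrun handler above amounts to a simple adaptive threshold:
 * TRC0 starts at 0x00 (set in sca_msci_init()) and is bumped by 2 on
 * each underrun, capping at 0x1f, so a line fast enough to drain the
 * FIFO before the dmac restarts it gradually earns an earlier txrdy.
 */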

static void
sca_get_packets(sca_port_t *scp)
{
	struct sca_softc *sc;

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));

	sc = scp->sca;
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX this code is unable to deal with rx stuff
		 * in more than 1 page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_rxdesc_p);
	}

	/* process as many frames as are available */
	while (sca_frame_avail(scp)) {
		sca_frame_process(scp);
		sca_frame_read_done(scp);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 *
 * must be called at splnet()
 */
static int
sca_frame_avail(sca_port_t *scp)
{
	struct sca_softc *sc;
	u_int16_t cda;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;
	int cdaidx, toolong;

	/*
	 * Read the current descriptor from the SCA.
	 */
	sc = scp->sca;
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
	desc_p = cda - desc_p;
	cdaidx = desc_p / sizeof(sca_desc_t);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));

	/* note confusion */
	if (cdaidx >= scp->sp_nrxdesc)
		panic("current descriptor index out of range");

	/* see if we have a valid frame available */
	toolong = 0;
	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->sp_rxdesc[scp->sp_rxstart];
		rxstat = sca_desc_read_stat(scp->sca, desc);

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
		    scp->sp_port, scp->sp_rxstart, rxstat));

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS) {
			/*
			 * consider an error condition the end
			 * of a frame
			 */
			scp->sp_if.if_ierrors++;
			toolong = 0;
			continue;
		}

		/*
		 * if we aren't skipping overlong frames
		 * we are done, otherwise reset and look for
		 * another good frame
		 */
		if (rxstat & SCA_DESC_EOM) {
			if (!toolong)
				return (1);
			toolong = 0;
		} else if (!toolong) {
			/*
			 * we currently don't deal with frames
			 * larger than a single buffer (fixed MTU)
			 */
			scp->sp_if.if_ierrors++;
			toolong = 1;
		}
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
		    scp->sp_rxstart));
	}

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
	return 0;
}
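
/*
 * Example of the index recovery above (illustrative numbers): if
 * sp_rxdesc_p is 0x4000 and the chip reports CDA = 0x4000 + 5 *
 * sizeof(sca_desc_t), then cdaidx = 5, and descriptors sp_rxstart
 * through 4 are candidates for complete frames.
 */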

/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp)
{
	struct ifqueue *ifq;
	struct hdlc_header *hdlc;
	struct cisco_pkt *cisco;
	sca_desc_t *desc;
	struct mbuf *m;
	u_int8_t *bufp;
	u_int16_t len;
	u_int32_t t;

	t = (time.tv_sec - boottime.tv_sec) * 1000;
	desc = &scp->sp_rxdesc[scp->sp_rxstart];
	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
	len = sca_desc_read_buflen(scp->sca, desc);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
	    (bus_addr_t)bufp, len));

#if SCA_DEBUG_LEVEL > 0
	if (sca_debug & SCA_DEBUG_RXPKT)
		sca_frame_print(scp, desc, bufp);
#endif
	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(struct hdlc_header)) {
		scp->sp_if.if_ierrors++;
		return;
	}

	m = sca_mbuf_alloc(scp->sca, bufp, len);
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
		return;
	}

	/*
	 * read and then strip off the HDLC information
	 */
	m = m_pullup(m, sizeof(struct hdlc_header));
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
		return;
	}

#if NBPFILTER > 0
	if (scp->sp_if.if_bpf)
		bpf_mtap(scp->sp_if.if_bpf, m);
#endif

	scp->sp_if.if_ipackets++;

	hdlc = mtod(m, struct hdlc_header *);
	switch (ntohs(hdlc->h_proto)) {
#ifdef INET
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ipintrq;
		schednetisr(NETISR_IP);
		break;
#endif	/* INET */
#ifdef INET6
	case HDLC_PROTOCOL_IPV6:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ip6intrq;
		schednetisr(NETISR_IPV6);
		break;
#endif	/* INET6 */
#ifdef ISO
	case HDLC_PROTOCOL_ISO:
		if (m->m_pkthdr.len < sizeof(struct hdlc_llc_header))
			goto dropit;
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_llc_header);
		m->m_data += sizeof(struct hdlc_llc_header);
		m->m_len -= sizeof(struct hdlc_llc_header);
		ifq = &clnlintrq;
		schednetisr(NETISR_ISO);
		break;
#endif	/* ISO */
	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("short CISCO packet %d, wanted %d\n",
				     len, CISCO_PKT_LEN));
			scp->sp_if.if_ierrors++;
			goto dropit;
		}

		m = m_pullup(m, sizeof(struct cisco_pkt));
		if (m == NULL) {
			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
			return;
		}

		cisco = (struct cisco_pkt *)
		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
		m->m_pkthdr.rcvif = &scp->sp_if;

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_KEEPALIVE_REQ:

			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Received KA, mseq %d,"
				     " yseq %d, rel 0x%04x, t0"
				     " %04x, t1 %04x\n",
				     ntohl(cisco->par1), ntohl(cisco->par2),
				     ntohs(cisco->rel), ntohs(cisco->time0),
				     ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.
			 */
			cisco->par2 = cisco->par1;
			cisco->par1 = htonl(scp->cka_lasttx);
			cisco->time0 = htons((u_int16_t)(t >> 16));
			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				goto dropit;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			/* since start may have reset this fix */
			if (!scp->sca->sc_usedma) {
				scp->sca->scu_set_page(scp->sca,
				    scp->sp_rxdesc_p);
				scp->sca->scu_page_on(scp->sca);
			}
			return;
		default:
			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Unknown CISCO keepalive protocol 0x%04x\n",
			     ntohl(cisco->type)));

			scp->sp_if.if_noproto++;
			goto dropit;
		}
		return;
	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
		    ("Unknown/unexpected ethertype 0x%04x\n",
		     ntohs(hdlc->h_proto)));
		scp->sp_if.if_noproto++;
		goto dropit;
	}

	/* queue the packet */
	if (!IF_QFULL(ifq)) {
		IF_ENQUEUE(ifq, m);
	} else {
		IF_DROP(ifq);
		scp->sp_if.if_iqdrops++;
		goto dropit;
	}
	return;
dropit:
	if (m)
		m_freem(m);
	return;
}

#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;
	struct sca_softc *sc;
	u_int len;

	sc = scp->sca;
	printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n",
	       desc,
	       sca_desc_read_chainp(sc, desc),
	       sca_desc_read_bufp(sc, desc),
	       sca_desc_read_stat(sc, desc),
	       (len = sca_desc_read_buflen(sc, desc)));

	for (i = 0 ; i < len && i < 256; i++) {
		if (nothing_yet == 1 &&
		    (sc->sc_usedma ? *p
		    : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, p))) == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ",
		       (sc->sc_usedma ? *p
		       : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		       sca_page_addr(sc, p))));
		p++;
	}

	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * adjust things because we have just read the current starting
 * frame
 *
 * must be called at splnet()
 */
static void
sca_frame_read_done(sca_port_t *scp)
{
	u_int16_t edesc_p;

	/* update where our indices are */
	scp->sp_rxend = scp->sp_rxstart;
	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;

	/* update the error [end] descriptor */
	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
	    (sizeof(sca_desc_t) * scp->sp_rxend);
	dmac_write_2(scp, SCA_EDAL0, edesc_p);
}

/*
 * set a port to the "up" state
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
	scp->sp_if.if_flags |= IFF_RUNNING;

	/*
	 * raise DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_HIGH);

#if 0
	/*
	 * enable interrupts (no timer IER2)
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}
#endif

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->sp_txinuse = 0;
	scp->sp_txcur = 0;
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * set a port to the "down" state
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * lower DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_LOW);

	/*
	 * disable interrupts
	 */
#if 0
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}
#endif

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	struct sca_softc *sc;
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	sc = scp->sca;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->sp_txinuse == 0)
		return;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));

	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	startdesc_p = scp->sp_txdesc_p;
	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
	    startdesc_p, enddesc_p));

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
		     (u_int16_t)(startdesc_p & 0x0000ffff));

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 */
static struct mbuf *
sca_mbuf_alloc(struct sca_softc *sc, caddr_t p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL) {
		/* XXX do we need to sync here? */
		if (sc->sc_usedma)
			memcpy(mtod(m, caddr_t), p, len);
		else
			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
	}
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}
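
/*
 * Typical use of the helper above (a sketch): copy a received frame
 * out of the descriptor buffer into an mbuf, falling back to a
 * cluster when it exceeds MHLEN.  Pass p == NULL to get an empty mbuf
 * of the right size and copy the data in by hand.
 */
#if 0
	m = sca_mbuf_alloc(sc, bufp, len);
	if (m == NULL)
		return;		/* no mbuf, or frame larger than MCLBYTES */
#endif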

/*
 * get the base clock
 */
void
sca_get_base_clock(struct sca_softc *sc)
{
	struct timeval btv, ctv, dtv;
	u_int64_t bcnt;
	u_int32_t cnt;
	u_int16_t subcnt;

	/* disable the timer, set prescale to 0 */
	sca_write_1(sc, SCA_TCSR0, 0);
	sca_write_1(sc, SCA_TEPR0, 0);

	/* reset the counter */
	(void)sca_read_1(sc, SCA_TCSR0);
	subcnt = sca_read_2(sc, SCA_TCNTL0);

	/* count to max */
	sca_write_2(sc, SCA_TCONRL0, 0xffff);

	cnt = 0;
	microtime(&btv);
	/* start the timer -- no interrupt enable */
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
	for (;;) {
		microtime(&ctv);

		/* end around 3/4 of a second */
		timersub(&ctv, &btv, &dtv);
		if (dtv.tv_usec >= 750000)
			break;

		/* spin */
		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
			;
		/* reset the timer */
		(void)sca_read_2(sc, SCA_TCNTL0);
		cnt++;
	}

	/* stop the timer */
	sca_write_1(sc, SCA_TCSR0, 0);

	subcnt = sca_read_2(sc, SCA_TCNTL0);
	/* add the slop in and get the total timer ticks */
	cnt = (cnt << 16) | subcnt;

	/* cnt is 1/8 the actual time */
	bcnt = cnt * 8;
	/* make it proportional to 3/4 of a second */
	bcnt *= (u_int64_t)750000;
	bcnt /= (u_int64_t)dtv.tv_usec;
	cnt = bcnt;

	/* make it Hz */
	cnt *= 4;
	cnt /= 3;

	SCA_DPRINTF(SCA_DEBUG_CLOCK,
	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));

	/*
	 * round to the nearest 200 -- this allows for +-3 ticks error
	 */
	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
}
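
/*
 * A worked example of the arithmetic above (hypothetical numbers): if
 * the timer accumulated cnt = 921600 ticks in exactly 750000us, then
 * bcnt = 921600 * 8 = 7372800 chip clocks in 3/4 of a second, and
 * cnt = 7372800 * 4 / 3 = 9830400 Hz, which rounds (to the nearest
 * 200) to a 9.8304MHz base clock.
 */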

/*
 * print the information about the clock on the ports
 */
void
sca_print_clock_info(struct sca_softc *sc)
{
	struct sca_port *scp;
	u_int32_t mhz, div;
	int i;

	printf("%s: base clock %d Hz\n", sc->sc_parent->dv_xname,
	    sc->sc_baseclock);

	/* print the information about the port clock selection */
	for (i = 0; i < sc->sc_numports; i++) {
		scp = &sc->sc_ports[i];
		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
		div = scp->sp_rxs & SCA_RXS_DIV_MASK;

		printf("%s: rx clock: ", scp->sp_if.if_xname);
		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
		case SCA_RXS_CLK_LINE:
			printf("line");
			break;
		case SCA_RXS_CLK_LINE_SN:
			printf("line with noise suppression");
			break;
		case SCA_RXS_CLK_INTERNAL:
			printf("internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_OUT:
			printf("adpll using internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_IN:
			printf("adpll using line clock");
			break;
		}
		printf(" tx clock: ");
		div = scp->sp_txs & SCA_TXS_DIV_MASK;
		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
		case SCA_TXS_CLK_LINE:
			printf("line\n");
			break;
		case SCA_TXS_CLK_INTERNAL:
			printf("internal %d Hz\n", (mhz >> div));
			break;
		case SCA_TXS_CLK_RXCLK:
			printf("rxclock\n");
			break;
		}
		if (scp->sp_eclock)
			printf("%s: outputting line clock\n",
			       scp->sp_if.if_xname);
	}
}