/*	$NetBSD: hd64570.c,v 1.40 2008/11/07 00:20:02 dyoung Exp $	*/

/*
 * Copyright (c) 1999 Christian E. Hopps
 * Copyright (c) 1998 Vixie Enterprises
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer@flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */

/*
 * TODO:
 *
 *	o  teach the receive logic about errors, and about long frames that
 *	   span more than one input buffer.  (Right now, receive/transmit is
 *	   limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *	   This is currently 1504, which is large enough to hold the HDLC
 *	   header and the packet itself.  Packets which are too long are
 *	   silently dropped on transmit and silently dropped on receive.)
 *	o  write code to handle the msci interrupts, needed only for CD
 *	   and CTS changes.
 *	o  consider switching back to a "queue tx with DMA active" model which
 *	   should help sustain outgoing traffic
 *	o  through clever use of bus_dma*() functions, it should be possible
 *	   to map the mbuf's data area directly into a descriptor transmit
 *	   buffer, removing the need to allocate extra memory.  If, however,
 *	   we run out of descriptors for this, we will need to then allocate
 *	   one large mbuf, copy the fragmented chain into it, and put it onto
 *	   a single descriptor.
 *	o  use bus_dmamap_sync() with the right offset and lengths, rather
 *	   than cheating and always sync'ing the whole region.
 *
 *	o  perhaps allow rx and tx to be in more than one page
 *	   if not using DMA.  currently the assumption is that
 *	   rx uses a page and tx uses a page.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.40 2008/11/07 00:20:02 dyoung Exp $");

#include "bpfilter.h"
#include "opt_inet.h"
#include "opt_iso.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#endif
#endif

#ifdef ISO
#include <net/if_llc.h>
#include <netiso/iso.h>
#include <netiso/iso_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>

#define SCA_DEBUG_RX		0x0001
#define SCA_DEBUG_TX		0x0002
#define SCA_DEBUG_CISCO		0x0004
#define SCA_DEBUG_DMA		0x0008
#define SCA_DEBUG_RXPKT		0x0010
#define SCA_DEBUG_TXPKT		0x0020
#define SCA_DEBUG_INTR		0x0040
#define SCA_DEBUG_CLOCK		0x0080

#if 0
#define SCA_DEBUG_LEVEL	( 0xFFFF )
#else
#define SCA_DEBUG_LEVEL	0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
	if ((l) & sca_debug) \
		printf x;\
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif

#if 0
#define SCA_USE_FASTQ		/* use a split queue, one for fast traffic */
#endif

static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static void sca_msci_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static int sca_dmac_intr(sca_port_t *, u_int8_t);
static int sca_msci_intr(sca_port_t *, u_int8_t);

static void sca_get_packets(sca_port_t *);
static int sca_frame_avail(sca_port_t *);
static void sca_frame_process(sca_port_t *);
static void sca_frame_read_done(sca_port_t *);

static void sca_port_starttx(sca_port_t *);

static void sca_port_up(sca_port_t *);
static void sca_port_down(sca_port_t *);

static int sca_output(struct ifnet *, struct mbuf *, const struct sockaddr *,
    struct rtentry *);
static int sca_ioctl(struct ifnet *, u_long, void *);
static void sca_start(struct ifnet *);
static void sca_watchdog(struct ifnet *);

static struct mbuf *sca_mbuf_alloc(struct sca_softc *, void *, u_int);

#if SCA_DEBUG_LEVEL > 0
static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif


#define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
#define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
#define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
#define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)

#define	sca_page_addr(sc, addr)	((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)
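
/*
 * Illustrative note (numbers assumed, not from this driver): if the
 * card's shared-memory window were 16KB, scu_pagemask would be 0x3fff
 * and sca_page_addr() would reduce a descriptor or buffer address to
 * its offset within the currently mapped window.
 */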

static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}

/*
 * read the chain pointer
 */
static inline u_int16_t
sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_chainp);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
}
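
/*
 * All of the sca_desc_* accessors follow the same pattern: with
 * bus-master DMA the descriptor lives in host memory and is read or
 * written directly, otherwise it sits in the card's shared-memory
 * window and is reached through bus_space at the offset of the member
 * within the current page.
 */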

/*
 * write the chain pointer
 */
static inline void
sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
{
	if (sc->sc_usedma)
		(dp)->sd_chainp = cp;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_chainp), cp);
}

/*
 * read the buffer pointer
 */
static inline u_int32_t
sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
{
	u_int32_t address;

	if (sc->sc_usedma)
		address = dp->sd_bufp | dp->sd_hbufp << 16;
	else {
		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
	}
	return (address);
}

/*
 * write the buffer pointer
 */
static inline void
sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
{
	if (sc->sc_usedma) {
		dp->sd_bufp = bufp & 0xFFFF;
		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
	} else {
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
		    bufp & 0xFFFF);
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
		    (bufp & 0x00FF0000) >> 16);
	}
}

/*
 * read the buffer length
 */
static inline u_int16_t
sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_buflen);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
}

/*
 * write the buffer length
 */
static inline void
sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
{
	if (sc->sc_usedma)
		(dp)->sd_buflen = len;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_buflen), len);
}

/*
 * read the descriptor status
 */
static inline u_int8_t
sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_stat);
	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
}

/*
 * write the descriptor status
 */
static inline void
sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
{
	if (sc->sc_usedma)
		(dp)->sd_stat = stat;
	else
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
		    stat);
}

void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check:  check number of ports.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can't handle more than 2 or less than 1 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
#if 0
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);
#endif

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	struct timeval now;
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;  /* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if (sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if (sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PTPSERIAL;
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(ifp, DLT_HDLC, HDLC_HDRLEN);
#endif

	if (sc->sc_parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		       ifp->if_xname, device_xname(sc->sc_parent), port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

#if 0
/*
 * returns log2(div), sets 'tmc' for the required freq 'hz'
 */
static u_int8_t
sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
{
	u_int32_t tmc, div;
	u_int32_t clock;

	/* clock hz = (chipclock / tmc) / 2^(div); */
	/*
	 * TD == tmc * 2^(n)
	 *
	 * note:
	 *	1 <= TD <= 256		TD is inc of 1
	 *	2 <= TD <= 512		TD is inc of 2
	 *	4 <= TD <= 1024		TD is inc of 4
	 *	...
	 *	512 <= TD <= 256*512	TD is inc of 512
	 *
	 * so note there are overlaps.  We lose precision
	 * as div increases so we wish to minimize div.
	 *
	 * basically we want to do
	 *
	 *	tmc = chip / hz, but have tmc <= 256
	 */
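
	/*
	 * Worked example (editor's sketch using the 4915200Hz value
	 * assumed below): for hz = 9600, tmc = 4915200 / 9600 = 512 > 256,
	 * so the clock is halved once (div = 1) and tmc becomes
	 * 2457600 / 9600 = 256, which is encoded as 0 in TMC.
	 * Check: (4915200 / 256) / 2^1 = 9600Hz exactly.
	 */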

	/* assume system clock is 9.8304MHz or 9830400Hz */
	clock = 9830400 >> 1;

	/* round down */
	div = 0;
	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
		clock >>= 1;
		div++;
	}
	if (clock / tmc > hz)
		tmc++;
	if (!tmc)
		tmc = 1;

	if (div > SCA_RXS_DIV_512) {
		/* set to maximums */
		div = SCA_RXS_DIV_512;
		tmc = 0;
	}

	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
	return (div & 0xFF);
}
#endif

/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	/* reset the channel */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);

	msci_write_1(scp, SCA_MD00,
		     (  SCA_MD0_CRC_1
		      | SCA_MD0_CRC_CCITT
		      | SCA_MD0_CRC_ENABLE
		      | SCA_MD0_MODE_HDLC));
#if 0
	/* immediately send receive reset so the above takes */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif

	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
		     (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));

	/* be safe and do it again */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/* setup underrun and idle control, and initial RTS state */
	msci_write_1(scp, SCA_CTL0,
		     (SCA_CTL_IDLC_PATTERN
		     | SCA_CTL_UDRNC_AFTER_FCS
		     | SCA_CTL_RTS_LOW));

	/* reset the transmitter */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * set the clock sources
	 */
	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);

	/* set external clock generate as requested */
	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
#if 0
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
#else
	/* this would deliver transmitter underrun to ST1/ISR1 */
	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
#endif
	msci_write_1(scp, SCA_IE20, 0);

	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	/* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * the correct values here are important for avoiding underruns
	 * for any value less than or equal to TRC0 txrdy is activated
	 * which will start the dmac transfer to the fifo.
	 * for buffer size >= TRC1 + 1 txrdy is cleared which will stop DMA.
	 *
	 * thus if we are using a very fast clock that empties the fifo
	 * quickly, delays in the dmac starting to fill the fifo can
	 * lead to underruns so we want a fairly full fifo to still
	 * cause the dmac to start.  for cards with on board ram this
	 * has no effect on system performance.  For cards that DMA
	 * to/from system memory it will cause more, shorter,
	 * bus accesses rather than fewer longer ones.
	 */
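	/*
	 * Editor's note on the values below: TRC0 = 0x00 asks for txrdy
	 * only once the FIFO has fully drained.  If that proves too late
	 * and the port underruns, sca_msci_intr() below raises TRC0 by 2
	 * (up to its 0x1f maximum) so that DMA is triggered while more
	 * data remains in the FIFO.
	 */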
	msci_write_1(scp, SCA_TRC00, 0x00);
	msci_write_1(scp, SCA_TRC10, 0x1f);
}

/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
		    BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX assumes that all tx desc and bufs in same page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	desc = scp->sp_txdesc;
	desc_p = scp->sp_txdesc_p;
	buf_p = scp->sp_txbuf_p;
	scp->sp_txcur = 0;
	scp->sp_txinuse = 0;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	/* XXX1
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	 */
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
		     (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 *
	 * XXX assumes that all rx desc and bufs in same page
	 */
	if (!sc->sc_usedma)
		sc->scu_set_page(sc, scp->sp_rxdesc_p);

	desc = scp->sp_rxdesc;
	desc_p = scp->sp_rxdesc_p;
	buf_p = scp->sp_rxbuf_p;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
		sca_desc_write_buflen(sc, desc, 0);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);

	sca_dmac_rxinit(scp);

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/* reset descriptors to initial state */
	scp->sp_rxstart = 0;
	scp->sp_rxend = scp->sp_nrxdesc - 1;

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 * because cda can't be eda when starting we always
	 * have a single buffer gap between cda and eda
	 */
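	/*
	 * Concretely: CDA starts at descriptor 0 and EDA at descriptor
	 * sp_nrxdesc - 1, so the chip may fill descriptors 0 through
	 * sp_nrxdesc - 2 before software must advance EDA in
	 * sca_frame_read_done().
	 */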
	dmac_write_1(scp, SCA_CPB0,
	    (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
	    (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(
	struct ifnet *ifp,
	struct mbuf *m,
	const struct sockaddr *dst,
	struct rtentry *rt0)
{
#ifdef ISO
	struct hdlc_llc_header *llc;
#endif
	struct hdlc_header *hdlc;
	struct ifqueue *ifq = NULL;
	int s, error, len;
	short mflags;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	error = 0;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

	/*
	 * If the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef SCA_USE_FASTQ
		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
		    == IPTOS_LOWDELAY)
			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
#endif
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
		break;
#endif
#ifdef ISO
	case AF_ISO:
		/*
		 * Add cisco llc serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_llc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		llc = mtod(m, struct hdlc_llc_header *);
		llc->hl_dsap = llc->hl_ssap = LLC_ISO_LSAP;
		llc->hl_ffb = 0;
		break;
#endif
	default:
		printf("%s: address family %d unsupported\n",
		       ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/* finish */
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->h_addr = CISCO_MULTICAST;
	else
		hdlc->h_addr = CISCO_UNICAST;
	hdlc->h_resv = 0;

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	mflags = m->m_flags;
	len = m->m_pkthdr.len;
	s = splnet();
	if (ifq != NULL) {
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			m_freem(m);
			error = ENOBUFS;
		} else
			IF_ENQUEUE(ifq, m);
	} else
		IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
	if (error != 0) {
		splx(s);
		ifp->if_oerrors++;
		ifp->if_collisions++;
		return (error);
	}
	ifp->if_obytes += len;
	if (mflags & M_MCAST)
		ifp->if_omcasts++;

	sca_start(ifp);
	splx(s);

	return (error);

bad:
	if (m)
		m_freem(m);
	return (error);
}

static int
sca_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *)data;
	error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
#endif
#ifdef INET6
		case AF_INET6:
#endif
#if defined(INET) || defined(INET6)
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFDSTADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			break;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			break;
#endif
		error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX need multicast group management code */
		if (ifr == 0) {
			error = EAFNOSUPPORT;		/* XXX */
			break;
		}
		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifr->ifr_flags & IFF_UP) {
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
		} else {
			ifp->if_flags &= ~IFF_UP;
			sca_port_down(ifp->if_softc);
		}

		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(struct ifnet *ifp)
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, stat;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;
	u_int len;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));

	/*
	 * can't queue when we are full or transmitter is busy
	 */
#ifdef oldcode
	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#else
	if (scp->sp_txinuse
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#endif
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));

	/*
	 * XXX assume that all tx desc and bufs in same page
	 */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	trigger_xmit = 0;

 txloop:
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
#ifdef oldcode
	if (scp->sp_txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
X
#endif
	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to mbuf len %d\n", m->m_len));

			if (sc->sc_usedma)
				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	ifp->if_opackets++;

#if NBPFILTER > 0
	/*
	 * Pass packet to bpf if there is a listener.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, mb_head);
#endif

	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX didn't this use to limit us to 1?! - multi may be untested
	 * sp_ntxdesc used to be hard coded to 2 with claim of a too hard
	 * to find bug
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

 start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}

static void
sca_watchdog(struct ifnet *ifp)
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int ret;

	ret = 0;  /* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
		     isr0, isr1, isr2));

		/*
		 * check DMAC interrupt
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
			    isr1 & 0x0f);

		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
			    (isr1 & 0xf0) >> 4);

		/*
		 * msci interrupts
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);

		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
			    (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}

static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
			    dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				       scp->sp_if.if_xname,
				       dmac_read_2(scp, SCA_CDAL1),
				       dmac_read_2(scp, SCA_EDAL1),
				       dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error.  It means that all data queued
			 * was transmitted, and we mark ourself as
			 * not in use and stop the watchdog timer.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("Transmit completed. cda %x eda %x dsr %x\n",
				     dmac_read_2(scp, SCA_CDAL1),
				     dmac_read_2(scp, SCA_EDAL1),
				     dsr));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;

				/*
				 * check for more packets
				 */
				sca_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));

		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
			    | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}

static int
sca_msci_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t st1, trc0;

	/* get and clear the specific interrupt -- should act on it :)*/
	if ((st1 = msci_read_1(scp, SCA_ST10))) {
		/* clear the interrupt */
		msci_write_1(scp, SCA_ST10, st1);

		if (st1 & SCA_ST1_UDRN) {
			/* underrun -- try to increase ready control */
			trc0 = msci_read_1(scp, SCA_TRC00);
			if (trc0 == 0x1f)
				printf("TX: underrun - fifo depth maxed\n");
			else {
				if ((trc0 += 2) > 0x1f)
					trc0 = 0x1f;
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("TX: udrn - incr fifo to %d\n", trc0));
				msci_write_1(scp, SCA_TRC00, trc0);
			}
		}
	}
	return (0);
}

static void
sca_get_packets(sca_port_t *scp)
{
	struct sca_softc *sc;

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));

	sc = scp->sca;
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX this code is unable to deal with rx stuff
		 * in more than 1 page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_rxdesc_p);
	}

	/* process as many frames as are available */
	while (sca_frame_avail(scp)) {
		sca_frame_process(scp);
		sca_frame_read_done(scp);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 *
 * must be called at splnet()
 */
static int
sca_frame_avail(sca_port_t *scp)
{
	u_int16_t cda;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;
	int cdaidx, toolong;

	/*
	 * Read the current descriptor from the SCA.
	 */
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
	desc_p = cda - desc_p;
	cdaidx = desc_p / sizeof(sca_desc_t);
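
	/*
	 * cdaidx is the index of the descriptor the chip will fill next;
	 * only descriptors strictly before it can hold completed frames.
	 */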

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));

	/* note confusion */
	if (cdaidx >= scp->sp_nrxdesc)
		panic("current descriptor index out of range");

	/* see if we have a valid frame available */
	toolong = 0;
	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->sp_rxdesc[scp->sp_rxstart];
		rxstat = sca_desc_read_stat(scp->sca, desc);

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
		    scp->sp_port, scp->sp_rxstart, rxstat));

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS) {
			/*
			 * consider an error condition the end
			 * of a frame
			 */
			scp->sp_if.if_ierrors++;
			toolong = 0;
			continue;
		}

		/*
		 * if we aren't skipping overlong frames
		 * we are done, otherwise reset and look for
		 * another good frame
		 */
		if (rxstat & SCA_DESC_EOM) {
			if (!toolong)
				return (1);
			toolong = 0;
		} else if (!toolong) {
			/*
			 * we currently don't deal with frames
			 * larger than a single buffer (fixed MTU)
			 */
			scp->sp_if.if_ierrors++;
			toolong = 1;
		}
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
		    scp->sp_rxstart));
	}

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
	return 0;
}

/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp)
{
	struct ifqueue *ifq;
	struct hdlc_header *hdlc;
	struct cisco_pkt *cisco;
	sca_desc_t *desc;
	struct mbuf *m;
	u_int8_t *bufp;
	u_int16_t len;
	u_int32_t t;

	t = time_uptime * 1000;
	desc = &scp->sp_rxdesc[scp->sp_rxstart];
	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
	len = sca_desc_read_buflen(scp->sca, desc);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
	    (bus_addr_t)bufp, len));

#if SCA_DEBUG_LEVEL > 0
	if (sca_debug & SCA_DEBUG_RXPKT)
		sca_frame_print(scp, desc, bufp);
#endif
	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(struct hdlc_header)) {
		scp->sp_if.if_ierrors++;
		return;
	}

	m = sca_mbuf_alloc(scp->sca, bufp, len);
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
		return;
	}

	/*
	 * read and then strip off the HDLC information
	 */
	m = m_pullup(m, sizeof(struct hdlc_header));
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
		return;
	}

#if NBPFILTER > 0
	if (scp->sp_if.if_bpf)
		bpf_mtap(scp->sp_if.if_bpf, m);
#endif

	scp->sp_if.if_ipackets++;

	hdlc = mtod(m, struct hdlc_header *);
	switch (ntohs(hdlc->h_proto)) {
#ifdef INET
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ipintrq;
		schednetisr(NETISR_IP);
		break;
#endif	/* INET */
#ifdef INET6
	case HDLC_PROTOCOL_IPV6:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IPv6 packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ip6intrq;
		schednetisr(NETISR_IPV6);
		break;
#endif	/* INET6 */
#ifdef ISO
	case HDLC_PROTOCOL_ISO:
		if (m->m_pkthdr.len < sizeof(struct hdlc_llc_header))
			goto dropit;
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_llc_header);
		m->m_data += sizeof(struct hdlc_llc_header);
		m->m_len -= sizeof(struct hdlc_llc_header);
		ifq = &clnlintrq;
		schednetisr(NETISR_ISO);
		break;
#endif	/* ISO */
	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
		    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("short CISCO packet %d, wanted %d\n",
			     len, CISCO_PKT_LEN));
			scp->sp_if.if_ierrors++;
			goto dropit;
		}

		m = m_pullup(m, sizeof(struct cisco_pkt));
		if (m == NULL) {
			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
			return;
		}

		cisco = (struct cisco_pkt *)
		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
		m->m_pkthdr.rcvif = &scp->sp_if;

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_KEEPALIVE_REQ:

			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received KA, mseq %d,"
			     " yseq %d, rel 0x%04x, t0"
			     " %04x, t1 %04x\n",
			     ntohl(cisco->par1), ntohl(cisco->par2),
			     ntohs(cisco->rel), ntohs(cisco->time0),
			     ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.
			 */
			cisco->par2 = cisco->par1;
			cisco->par1 = htonl(scp->cka_lasttx);
			cisco->time0 = htons((u_int16_t)(t >> 16));
			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				goto dropit;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			/* since start may have reset this fix */
			if (!scp->sca->sc_usedma) {
				scp->sca->scu_set_page(scp->sca,
				    scp->sp_rxdesc_p);
				scp->sca->scu_page_on(scp->sca);
			}
			return;
		default:
			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Unknown CISCO keepalive protocol 0x%04x\n",
			     ntohl(cisco->type)));

			scp->sp_if.if_noproto++;
			goto dropit;
		}
		return;
	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
		    ("Unknown/unexpected ethertype 0x%04x\n",
		     ntohs(hdlc->h_proto)));
		scp->sp_if.if_noproto++;
		goto dropit;
	}

	/* queue the packet */
	if (!IF_QFULL(ifq)) {
		IF_ENQUEUE(ifq, m);
	} else {
		IF_DROP(ifq);
		scp->sp_if.if_iqdrops++;
		goto dropit;
	}
	return;
dropit:
	if (m)
		m_freem(m);
	return;
}

#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;
	struct sca_softc *sc;
	u_int len;

	sc = scp->sca;
	printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n",
	       desc,
	       sca_desc_read_chainp(sc, desc),
	       sca_desc_read_bufp(sc, desc),
	       sca_desc_read_stat(sc, desc),
	       (len = sca_desc_read_buflen(sc, desc)));

	for (i = 0 ; i < len && i < 256; i++) {
		if (nothing_yet == 1 &&
		    (sc->sc_usedma ? *p
			: bus_space_read_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p))) == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ",
		    (sc->sc_usedma ? *p
			: bus_space_read_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p))));
		p++;
	}

	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * adjust things because we have just read the current starting
 * frame
 *
 * must be called at splnet()
 */
static void
sca_frame_read_done(sca_port_t *scp)
{
	u_int16_t edesc_p;

	/* update where our indices are */
	scp->sp_rxend = scp->sp_rxstart;
	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;
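
	/*
	 * e.g. with 8 descriptors, after consuming descriptor 7 rxstart
	 * wraps to 0 and rxend becomes 7; EDAL0 below then points at
	 * descriptor 7, preserving the one-descriptor gap established in
	 * sca_dmac_rxinit().
	 */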

	/* update the error [end] descriptor */
	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
	    (sizeof(sca_desc_t) * scp->sp_rxend);
	dmac_write_2(scp, SCA_EDAL0, edesc_p);
}

/*
 * set a port to the "up" state
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
	struct timeval now;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
	scp->sp_if.if_flags |= IFF_RUNNING;

	/*
	 * raise DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_HIGH);

#if 0
	/*
	 * enable interrupts (no timer IER2)
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}
#endif

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->sp_txinuse = 0;
	scp->sp_txcur = 0;
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * set a port to the "down" state
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * lower DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_LOW);

	/*
	 * disable interrupts
	 */
#if 0
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}
#endif

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->sp_txinuse == 0)
		return;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));

	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	startdesc_p = scp->sp_txdesc_p;
	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
	    startdesc_p, enddesc_p));

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
	    (u_int16_t)(startdesc_p & 0x0000ffff));
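
	/*
	 * The DMAC walks the chain from CDA and stops when CDA reaches
	 * EDA, so pointing EDA one descriptor past sp_txcur transmits
	 * every descriptor queued so far.
	 */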

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 */
static struct mbuf *
sca_mbuf_alloc(struct sca_softc *sc, void *p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL) {
		/* XXX do we need to sync here? */
		if (sc->sc_usedma)
			memcpy(mtod(m, void *), p, len);
		else
			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
	}
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}

/*
 * get the base clock
 */
void
sca_get_base_clock(struct sca_softc *sc)
{
	struct timeval btv, ctv, dtv;
	u_int64_t bcnt;
	u_int32_t cnt;
	u_int16_t subcnt;

	/* disable the timer, set prescale to 0 */
	sca_write_1(sc, SCA_TCSR0, 0);
	sca_write_1(sc, SCA_TEPR0, 0);

	/* reset the counter */
	(void)sca_read_1(sc, SCA_TCSR0);
	subcnt = sca_read_2(sc, SCA_TCNTL0);

	/* count to max */
	sca_write_2(sc, SCA_TCONRL0, 0xffff);

	cnt = 0;
	microtime(&btv);
	/* start the timer -- no interrupt enable */
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
	for (;;) {
		microtime(&ctv);

		/* end around 3/4 of a second */
		timersub(&ctv, &btv, &dtv);
		if (dtv.tv_usec >= 750000)
			break;

		/* spin */
		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
			;
		/* reset the timer */
		(void)sca_read_2(sc, SCA_TCNTL0);
		cnt++;
	}

	/* stop the timer */
	sca_write_1(sc, SCA_TCSR0, 0);

	subcnt = sca_read_2(sc, SCA_TCNTL0);
	/* add the slop in and get the total timer ticks */
	cnt = (cnt << 16) | subcnt;

	/* cnt is 1/8 the actual time */
	bcnt = cnt * 8;
	/* make it proportional to 3/4 of a second */
	bcnt *= (u_int64_t)750000;
	bcnt /= (u_int64_t)dtv.tv_usec;
	cnt = bcnt;

	/* make it Hz */
	cnt *= 4;
	cnt /= 3;
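
	/*
	 * Worked example: a 9830400Hz base clock ticking the timer every
	 * 8 system clocks yields 921600 counts in exactly 3/4 of a second;
	 * scaling by 8 and then by 4/3 recovers 9830400Hz.
	 */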

	SCA_DPRINTF(SCA_DEBUG_CLOCK,
	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));

	/*
	 * round to the nearest 200 -- this allows for +-3 ticks error
	 */
	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
}

/*
 * print the information about the clock on the ports
 */
void
sca_print_clock_info(struct sca_softc *sc)
{
	struct sca_port *scp;
	u_int32_t mhz, div;
	int i;

	printf("%s: base clock %d Hz\n", device_xname(sc->sc_parent),
	    sc->sc_baseclock);

	/* print the information about the port clock selection */
	for (i = 0; i < sc->sc_numports; i++) {
		scp = &sc->sc_ports[i];
		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
		div = scp->sp_rxs & SCA_RXS_DIV_MASK;

		printf("%s: rx clock: ", scp->sp_if.if_xname);
		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
		case SCA_RXS_CLK_LINE:
			printf("line");
			break;
		case SCA_RXS_CLK_LINE_SN:
			printf("line with noise suppression");
			break;
		case SCA_RXS_CLK_INTERNAL:
			printf("internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_OUT:
			printf("adpll using internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_IN:
			printf("adpll using line clock");
			break;
		}
		printf(" tx clock: ");
		div = scp->sp_txs & SCA_TXS_DIV_MASK;
		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
		case SCA_TXS_CLK_LINE:
			printf("line\n");
			break;
		case SCA_TXS_CLK_INTERNAL:
			printf("internal %d Hz\n", (mhz >> div));
			break;
		case SCA_TXS_CLK_RXCLK:
			printf("rxclock\n");
			break;
		}
		if (scp->sp_eclock)
			printf("%s: outputting line clock\n",
			    scp->sp_if.if_xname);
	}
}