/*	$NetBSD: hd64570.c,v 1.8 2000/01/04 06:36:29 chopps Exp $	*/

/*
 * Copyright (c) 1999 Christian E. Hopps
 * Copyright (c) 1998 Vixie Enterprises
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer (at) flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */

/*
 * TODO:
 *
 *	o teach the receive logic about errors, and about long frames that
 *	  span more than one input buffer.  (Right now, receive/transmit is
 *	  limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *	  This is currently 1504, which is large enough to hold the HDLC
 *	  header and the packet itself.  Packets which are too long are
 *	  silently dropped on both transmit and receive.)
 *	o write code to handle the MSCI interrupts, needed only for CD
 *	  and CTS changes.
 *	o consider switching back to a "queue tx with DMA active" model which
 *	  should help sustain outgoing traffic
 *	o through clever use of bus_dma*() functions, it should be possible
 *	  to map the mbuf's data area directly into a descriptor transmit
 *	  buffer, removing the need to allocate extra memory.  If, however,
 *	  we run out of descriptors for this, we will need to then allocate
 *	  one large mbuf, copy the fragmented chain into it, and put it onto
 *	  a single descriptor.
 *	o use bus_dmamap_sync() with the right offset and lengths, rather
 *	  than cheating and always sync'ing the whole region.
 *	o perhaps allow rx and tx to be in more than one page
 *	  if not using DMA.  Currently the assumption is that
 *	  rx uses a page and tx uses a page.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>

#define SCA_DEBUG_RX		0x0001
#define SCA_DEBUG_TX		0x0002
#define SCA_DEBUG_CISCO		0x0004
#define SCA_DEBUG_DMA		0x0008
#define SCA_DEBUG_RXPKT		0x0010
#define SCA_DEBUG_TXPKT		0x0020
#define SCA_DEBUG_INTR		0x0040
#define SCA_DEBUG_CLOCK		0x0080

#if 0
#define SCA_DEBUG_LEVEL		( 0xFFFF )
#else
#define SCA_DEBUG_LEVEL		0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
	if ((l) & sca_debug) \
		printf x; \
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif

#if 0
#define SCA_USE_FASTQ	/* use a split queue, one for fast traffic */
#endif
static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static void sca_msci_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static int sca_dmac_intr(sca_port_t *, u_int8_t);
static int sca_msci_intr(sca_port_t *, u_int8_t);

static void sca_get_packets(sca_port_t *);
static int sca_frame_avail(sca_port_t *);
static void sca_frame_process(sca_port_t *);
static void sca_frame_read_done(sca_port_t *);

static void sca_port_starttx(sca_port_t *);

static void sca_port_up(sca_port_t *);
static void sca_port_down(sca_port_t *);

static int sca_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
	struct rtentry *));
static int sca_ioctl __P((struct ifnet *, u_long, caddr_t));
static void sca_start __P((struct ifnet *));
static void sca_watchdog __P((struct ifnet *));

static struct mbuf *sca_mbuf_alloc(struct sca_softc *, caddr_t, u_int);

#if SCA_DEBUG_LEVEL > 0
static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif

#define sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
#define sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
#define sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
#define sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)

#define sca_page_addr(sc, addr)	((bus_addr_t)(addr) & (sc)->scu_pagemask)
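
/*
 * A note on paging (an inference from the usage below, not from chip
 * documentation): when the card is not DMA-mapped (sc_usedma == 0),
 * descriptor and buffer memory is reached through a windowed
 * shared-memory region.  scu_set_page() selects which page of card
 * memory the window exposes, and sca_page_addr() reduces an address
 * to its offset within that page.
 */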

static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}

/*
 * read the chain pointer
 */
static inline u_int16_t
sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_chainp);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
}

/*
 * write the chain pointer
 */
static inline void
sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
{
	if (sc->sc_usedma)
		(dp)->sd_chainp = cp;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_chainp), cp);
}

/*
 * read the buffer pointer
 */
static inline u_int32_t
sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
{
	u_int32_t address;

	if (sc->sc_usedma)
		address = dp->sd_bufp | dp->sd_hbufp << 16;
	else {
		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
	}
	return (address);
}

/*
 * write the buffer pointer
 */
static inline void
sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
{
	if (sc->sc_usedma) {
		dp->sd_bufp = bufp & 0xFFFF;
		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
	} else {
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
		    bufp & 0xFFFF);
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
		    (bufp & 0x00FF0000) >> 16);
	}
}

/*
 * read the buffer length
 */
static inline u_int16_t
sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_buflen);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
}

/*
 * write the buffer length
 */
static inline void
sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
{
	if (sc->sc_usedma)
		(dp)->sd_buflen = len;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_buflen), len);
}

/*
 * read the descriptor status
 */
static inline u_int8_t
sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_stat);
	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
}

/*
 * write the descriptor status
 */
static inline void
sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
{
	if (sc->sc_usedma)
		(dp)->sd_stat = stat;
	else
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
		    stat);
}
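
/*
 * The accessors above hide where a descriptor actually lives: in host
 * memory when the card is DMA-mapped (plain structure access), or in
 * windowed card memory otherwise (bus_space access at the descriptor's
 * page offset).  All descriptor reads and writes below go through them.
 */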

void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check: check number of ports.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can't handle more than 2 or less than 1 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
#if 0
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);
#endif

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;		/* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if (sc->sc_parent != NULL)
			ntwo_unit = sc->sc_parent->dv_unit * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if (sc->sc_parent != NULL)
			ntwo_unit = sc->sc_parent->dv_unit * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	sprintf(ifp->if_xname, "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_OTHER;	/* Should be HDLC, but... */
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5;	/* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	if_attach(ifp);

#if NBPFILTER > 0
	bpfattach(&scp->sp_bpf, ifp, DLT_HDLC, HDLC_HDRLEN);
#endif

	if (sc->sc_parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		    ifp->if_xname, sc->sc_parent->dv_xname, port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

#if 0
/*
 * returns log2(div), sets 'tmc' for the required freq 'hz'
 */
static u_int8_t
sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
{
	u_int32_t tmc, div;
	u_int32_t clock;

	/* clock hz = (chipclock / tmc) / 2^(div); */
	/*
	 * TD == tmc * 2^(n)
	 *
	 * note:
	 *	1 <= TD <= 256		TD is in increments of 1
	 *	2 <= TD <= 512		TD is in increments of 2
	 *	4 <= TD <= 1024		TD is in increments of 4
	 *	...
	 *	512 <= TD <= 256*512	TD is in increments of 512
	 *
	 * so note there are overlaps.  We lose precision as div
	 * increases, so we wish to minimize div.
	 *
	 * basically we want to do
	 *
	 * tmc = chip / hz, but have tmc <= 256
	 */
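
	/*
	 * Worked example (assuming the 9.8304 MHz chip clock used below,
	 * so clock = 4915200): for hz = 9600, tmc = 4915200 / 9600 = 512,
	 * which is > 256, so the clock is halved once: div = 1 and
	 * tmc = 2457600 / 9600 = 256 (stored as 0, since 0 == 256).
	 * The generated rate is 4915200 / (256 * 2^1) = 9600 Hz exactly.
	 */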

	/* assume system clock is 9.8304 MHz or 9830400 Hz */
	clock = 9830400 >> 1;

	/* round down */
	div = 0;
	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
		clock >>= 1;
		div++;
	}
	if (clock / tmc > hz)
		tmc++;
	if (!tmc)
		tmc = 1;

	if (div > SCA_RXS_DIV_512) {
		/* set to maximums */
		div = SCA_RXS_DIV_512;
		tmc = 0;
	}

	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
	return (div & 0xFF);
}
#endif

/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	/* reset the channel */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);

	msci_write_1(scp, SCA_MD00,
	    (  SCA_MD0_CRC_1
	     | SCA_MD0_CRC_CCITT
	     | SCA_MD0_CRC_ENABLE
	     | SCA_MD0_MODE_HDLC));
#if 0
	/* immediately send receive reset so the above takes effect */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif

	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
	    (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));

	/* be safe and do it again */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/* set up underrun and idle control, and initial RTS state */
	msci_write_1(scp, SCA_CTL0,
	    (SCA_CTL_IDLC_PATTERN
	     | SCA_CTL_UDRNC_AFTER_FCS
	     | SCA_CTL_RTS_LOW));

	/* reset the transmitter */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * set the clock sources
	 */
	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);

	/* set external clock generation as requested */
	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
#if 0
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0);	/* 0x0c == CD and CTS changes only */
#else
	/* this would deliver transmitter underrun to ST1/ISR1 */
	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
#endif
	msci_write_1(scp, SCA_IE20, 0);

	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	/* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * The correct values here are important for avoiding underruns:
	 * for any FIFO fill level less than or equal to TRC0, txrdy is
	 * asserted, which starts the DMAC transfer into the FIFO; once
	 * the FIFO holds TRC1 + 1 bytes or more, txrdy is cleared, which
	 * stops the DMA.
	 *
	 * Thus if we are using a very fast clock that empties the FIFO
	 * quickly, delays in the DMAC starting to fill the FIFO can
	 * lead to underruns, so we want a fairly full FIFO to still
	 * cause the DMAC to start.  For cards with on-board RAM this
	 * has no effect on system performance.  For cards that DMA
	 * to/from system memory it will cause more, shorter,
	 * bus accesses rather than fewer longer ones.
	 */
	msci_write_1(scp, SCA_TRC00, 0x00);
	msci_write_1(scp, SCA_TRC10, 0x1f);
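
	/*
	 * A sketch of the effect, assuming the chip's 32-byte transmit
	 * FIFO: with TRC0 = 0x00 txrdy asserts only once the FIFO has
	 * drained completely, and with TRC1 = 0x1f the DMAC keeps filling
	 * until all 32 bytes are in use.  sca_msci_intr() below raises
	 * TRC0 adaptively if an underrun is actually seen.
	 */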
}

/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
		    BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX assumes that all tx desc and bufs in same page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	desc = scp->sp_txdesc;
	desc_p = scp->sp_txdesc_p;
	buf_p = scp->sp_txbuf_p;
	scp->sp_txcur = 0;
	scp->sp_txinuse = 0;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
		sca_desc_write_stat(sc, desc, 0);

		desc++;		/* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	/* XXX1
	dmac_write_1(scp, SCA_DIR1,
	    (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	 */
	dmac_write_1(scp, SCA_DIR1,
	    (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
	    (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));
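
	/*
	 * Note: the SCA forms 24-bit addresses, with CPB supplying bits
	 * 16-23 and each descriptor's 16-bit chain pointer supplying bits
	 * 0-15 -- hence the DEBUG check above that the descriptor ring
	 * never crosses a 64 KB boundary.
	 */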

	/*
	 * now, do the same thing for receive descriptors
	 *
	 * XXX assumes that all rx desc and bufs in same page
	 */
	if (!sc->sc_usedma)
		sc->scu_set_page(sc, scp->sp_rxdesc_p);

	desc = scp->sp_rxdesc;
	desc_p = scp->sp_rxdesc_p;
	buf_p = scp->sp_rxbuf_p;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
		sca_desc_write_buflen(sc, desc, 0);
		sca_desc_write_stat(sc, desc, 0);

		desc++;		/* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);

	sca_dmac_rxinit(scp);

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);		/* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/* reset descriptors to initial state */
	scp->sp_rxstart = 0;
	scp->sp_rxend = scp->sp_nrxdesc - 1;

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 * Because CDA can't equal EDA when starting, we always keep
	 * a single-buffer gap between CDA and EDA.
	 */
	dmac_write_1(scp, SCA_CPB0,
	    (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));
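
	/*
	 * For example, with sp_nrxdesc descriptors, CDA starts at
	 * descriptor 0 and EDA at descriptor sp_nrxdesc - 1, so the DMAC
	 * may fill descriptors 0 through sp_nrxdesc - 2 before
	 * sca_frame_read_done() advances EDA around the ring.
	 */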

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
	    (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(ifp, m, dst, rt0)
	struct ifnet *ifp;
	struct mbuf *m;
	struct sockaddr *dst;
	struct rtentry *rt0;
{
	int error;
	int s;
	u_int16_t protocol;
	hdlc_header_t *hdlc;
	struct ifqueue *ifq;
#ifdef SCA_USE_FASTQ
	struct ip *ip;
	sca_port_t *scp = ifp->if_softc;
	int highpri;
#endif

	error = 0;
	ifp->if_lastchange = time;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

#ifdef SCA_USE_FASTQ
	highpri = 0;
#endif

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
	case AF_INET:
		protocol = HDLC_PROTOCOL_IP;

#ifdef SCA_USE_FASTQ
		ip = mtod(m, struct ip *);
		if ((ip->ip_tos & IPTOS_LOWDELAY) == IPTOS_LOWDELAY)
			highpri = 1;
#endif
		break;

	default:
		printf("%s: address family %d unsupported\n",
		    ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	if (M_LEADINGSPACE(m) < HDLC_HDRLEN) {
		m = m_prepend(m, HDLC_HDRLEN, M_DONTWAIT);
		if (m == NULL) {
			error = ENOBUFS;
			goto bad;
		}
		m->m_len = 0;
	} else {
		m->m_data -= HDLC_HDRLEN;
	}

	hdlc = mtod(m, hdlc_header_t *);
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->addr = CISCO_MULTICAST;
	else
		hdlc->addr = CISCO_UNICAST;
	hdlc->control = 0;
	hdlc->protocol = htons(protocol);
	m->m_len += HDLC_HDRLEN;
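
	/*
	 * The frame now leads with the 4-byte HDLC header (address,
	 * control, and 16-bit protocol) -- the "MTU + 4 bytes" of
	 * per-descriptor buffer space noted in the TODO above.
	 */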

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	s = splnet();
#ifdef SCA_USE_FASTQ
	ifq = (highpri == 1 ? &scp->fastq : &ifp->if_snd);
#else
	ifq = &ifp->if_snd;
#endif
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		ifp->if_oerrors++;
		ifp->if_collisions++;
		error = ENOBUFS;
		splx(s);
		goto bad;
	}
	ifp->if_obytes += m->m_pkthdr.len;
	IF_ENQUEUE(ifq, m);

	ifp->if_lastchange = time;

	if (m->m_flags & M_MCAST)
		ifp->if_omcasts++;

	sca_start(ifp);
	splx(s);

	return (error);

 bad:
	if (m)
		m_freem(m);
	return (error);
}

static int
sca_ioctl(ifp, cmd, addr)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t addr;
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)addr;
	ifa = (struct ifaddr *)addr;
	error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		if (ifa->ifa_addr->sa_family == AF_INET)
			sca_port_up(ifp->if_softc);
		else
			error = EAFNOSUPPORT;
		break;

	case SIOCSIFDSTADDR:
		if (ifa->ifa_addr->sa_family != AF_INET)
			error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifr == NULL) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {

#ifdef INET
		case AF_INET:
			break;
#endif

		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if (ifr->ifr_flags & IFF_UP)
			sca_port_up(ifp->if_softc);
		else
			sca_port_down(ifp->if_softc);

		break;

	default:
		error = EINVAL;
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(ifp)
	struct ifnet *ifp;
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, stat;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;
	u_int len;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));

	/*
	 * can't queue when we are full or transmitter is busy
	 */
#ifdef oldcode
	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#else
	if (scp->sp_txinuse
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#endif
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));

	/*
	 * XXX assume that all tx desc and bufs in same page
	 */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	trigger_xmit = 0;

 txloop:
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IF_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
#ifdef oldcode
	if (scp->txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->sp_txdesc[scp->txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
X
#endif
	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to mbuf len %d\n", m->m_len));

			if (sc->sc_usedma)
				bcopy(mtod(m, u_int8_t *), buf, m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	ifp->if_opackets++;

#if NBPFILTER > 0
	/*
	 * Pass packet to bpf if there is a listener.
	 */
	if (scp->sp_bpf)
		bpf_mtap(scp->sp_bpf, mb_head);
#endif

	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX didn't this used to limit us to 1?!  Multiple descriptors
	 * may be untested: sp_ntxdesc used to be hard-coded to 2 with a
	 * claim of a too-hard-to-find bug.
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

 start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}

static void
sca_watchdog(ifp)
	struct ifnet *ifp;
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int ret;

	ret = 0;	/* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
		     isr0, isr1, isr2));

		/*
		 * check DMAC interrupts
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
			    isr1 & 0x0f);

		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
			    (isr1 & 0xf0) >> 4);

		/*
		 * MSCI interrupts
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);

		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
			    (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}

static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
			    dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				    scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				    scp->sp_if.if_xname,
				    dmac_read_2(scp, SCA_CDAL1),
				    dmac_read_2(scp, SCA_EDAL1),
				    dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error.  It means that all data queued
			 * was transmitted, and we mark ourselves
			 * as not in use.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("Transmit completed. cda %x eda %x dsr %x\n",
				     dmac_read_2(scp, SCA_CDAL1),
				     dmac_read_2(scp, SCA_EDAL1),
				     dsr));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;

				/*
				 * check for more packets
				 */
				sca_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));

		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
			    | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				    scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				    scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}

static int
sca_msci_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t st1, trc0;

	/* get and clear the specific interrupt -- should act on it :) */
	if ((st1 = msci_read_1(scp, SCA_ST10))) {
		/* clear the interrupt */
		msci_write_1(scp, SCA_ST10, st1);

		if (st1 & SCA_ST1_UDRN) {
			/* underrun -- try to increase ready control */
			trc0 = msci_read_1(scp, SCA_TRC00);
			if (trc0 == 0x1f)
				printf("TX: underrun - fifo depth maxed\n");
			else {
				if ((trc0 += 2) > 0x1f)
					trc0 = 0x1f;
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("TX: udrn - incr fifo to %d\n", trc0));
				msci_write_1(scp, SCA_TRC00, trc0);
			}
		}
	}
	return (0);
}

static void
sca_get_packets(sca_port_t *scp)
{
	struct sca_softc *sc;

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));

	sc = scp->sca;
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX this code is unable to deal with rx stuff
		 * in more than 1 page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_rxdesc_p);
	}

	/* process as many frames as are available */
	while (sca_frame_avail(scp)) {
		sca_frame_process(scp);
		sca_frame_read_done(scp);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 *
 * must be called at splnet()
 */
static int
sca_frame_avail(sca_port_t *scp)
{
	struct sca_softc *sc;
	u_int16_t cda;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;
	int cdaidx, toolong;

	/*
	 * Read the current descriptor from the SCA.
	 */
	sc = scp->sca;
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
	desc_p = cda - desc_p;
	cdaidx = desc_p / sizeof(sca_desc_t);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
	     cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));

	/* note confusion */
	if (cdaidx >= scp->sp_nrxdesc)
		panic("current descriptor index out of range");

	/* see if we have a valid frame available */
	toolong = 0;
	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->sp_rxdesc[scp->sp_rxstart];
		rxstat = sca_desc_read_stat(scp->sca, desc);

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
		    scp->sp_port, scp->sp_rxstart, rxstat));

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS) {
			/*
			 * consider an error condition the end
			 * of a frame
			 */
			scp->sp_if.if_ierrors++;
			toolong = 0;
			continue;
		}

		/*
		 * if we aren't skipping overlong frames
		 * we are done, otherwise reset and look for
		 * another good frame
		 */
		if (rxstat & SCA_DESC_EOM) {
			if (!toolong)
				return (1);
			toolong = 0;
		} else if (!toolong) {
			/*
			 * we currently don't deal with frames
			 * larger than a single buffer (fixed MTU)
			 */
			scp->sp_if.if_ierrors++;
			toolong = 1;
		}
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
		    scp->sp_rxstart));
	}

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
	return 0;
}

/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp)
{
	struct ifqueue *ifq;
	hdlc_header_t *hdlc;
	cisco_pkt_t *cisco;
	sca_desc_t *desc;
	struct mbuf *m;
	u_int8_t *bufp;
	u_int16_t len;
	u_int32_t t;

	/* milliseconds of uptime, for the cisco keepalive timestamps */
	t = (time.tv_sec - boottime.tv_sec) * 1000;
	desc = &scp->sp_rxdesc[scp->sp_rxstart];
	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
	len = sca_desc_read_buflen(scp->sca, desc);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
	     (bus_addr_t)bufp, len));

#if SCA_DEBUG_LEVEL > 0
	if (sca_debug & SCA_DEBUG_RXPKT)
		sca_frame_print(scp, desc, bufp);
#endif
	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(hdlc_header_t))
		return;

	m = sca_mbuf_alloc(scp->sca, bufp, len);
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
		scp->sp_if.if_iqdrops++;
		return;
	}

	/*
	 * read and then strip off the HDLC information
	 */
	m = m_pullup(m, sizeof(hdlc_header_t));
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
		scp->sp_if.if_ierrors++;
		scp->sp_if.if_iqdrops++;
		return;
	}

#if NBPFILTER > 0
	if (scp->sp_bpf)
		bpf_mtap(scp->sp_bpf, m);
#endif

	scp->sp_if.if_ipackets++;
	scp->sp_if.if_lastchange = time;

	hdlc = mtod(m, hdlc_header_t *);
	switch (ntohs(hdlc->protocol)) {
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));

		m->m_pkthdr.rcvif = &scp->sp_if;

		if (IF_QFULL(&ipintrq)) {
			IF_DROP(&ipintrq);
			scp->sp_if.if_ierrors++;
			scp->sp_if.if_iqdrops++;
			m_freem(m);
		} else {
			/*
			 * strip off the HDLC header and hand off to IP stack
			 */
			m->m_pkthdr.len -= HDLC_HDRLEN;
			m->m_data += HDLC_HDRLEN;
			m->m_len -= HDLC_HDRLEN;
			IF_ENQUEUE(&ipintrq, m);
			schednetisr(NETISR_IP);
		}

		break;

	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
		    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("short CISCO packet %d, wanted %d\n",
			     len, CISCO_PKT_LEN));
			m_freem(m);
			return;
		}

		m = m_pullup(m, sizeof(cisco_pkt_t));
		if (m == NULL) {
			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
			scp->sp_if.if_ierrors++;
			scp->sp_if.if_iqdrops++;
			break;
		}

		cisco = (cisco_pkt_t *)(mtod(m, u_int8_t *) + HDLC_HDRLEN);
		m->m_pkthdr.rcvif = &scp->sp_if;

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			m_freem(m);
			break;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			m_freem(m);
			break;

		case CISCO_KEEPALIVE_REQ:

			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received KA, mseq %d,"
			     " yseq %d, rel 0x%04x, t0"
			     " %04x, t1 %04x\n",
			     ntohl(cisco->par1), ntohl(cisco->par2),
			     ntohs(cisco->rel), ntohs(cisco->time0),
			     ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.
			 */
			cisco->par2 = cisco->par1;
			cisco->par1 = htonl(scp->cka_lasttx);
			cisco->time0 = htons((u_int16_t)(t >> 16));
			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				m_freem(m);
				return;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			/* since start may have reset this, fix it */
			if (!scp->sca->sc_usedma) {
				scp->sca->scu_set_page(scp->sca,
				    scp->sp_rxdesc_p);
				scp->sca->scu_page_on(scp->sca);
			}

			break;

		default:
			m_freem(m);
			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Unknown CISCO keepalive protocol 0x%04x\n",
			     ntohl(cisco->type)));
			return;
		}

		break;

	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
		    ("Unknown/unexpected ethertype 0x%04x\n",
		     ntohs(hdlc->protocol)));
		m_freem(m);
	}
}

#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;
	struct sca_softc *sc;
	u_int len;

	sc = scp->sca;
	printf("desc va %p: chainp 0x%x bufp 0x%x stat 0x%02x len %d\n",
	    desc,
	    sca_desc_read_chainp(sc, desc),
	    sca_desc_read_bufp(sc, desc),
	    sca_desc_read_stat(sc, desc),
	    (len = sca_desc_read_buflen(sc, desc)));

	for (i = 0 ; i < len && i < 256; i++) {
		if (nothing_yet == 1 &&
		    (sc->sc_usedma ? *p
		     : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		       sca_page_addr(sc, p))) == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ",
		    (sc->sc_usedma ? *p
		     : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		       sca_page_addr(sc, p))));
		p++;
	}

	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * adjust things because we have just read the current starting
 * frame
 *
 * must be called at splnet()
 */
static void
sca_frame_read_done(sca_port_t *scp)
{
	u_int16_t edesc_p;

	/* update where our indices are */
	scp->sp_rxend = scp->sp_rxstart;
	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;

	/* update the error [end] descriptor */
	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
	    (sizeof(sca_desc_t) * scp->sp_rxend);
	dmac_write_2(scp, SCA_EDAL0, edesc_p);
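
	/*
	 * Keeping EDA one descriptor behind the new read position
	 * re-opens the buffer just consumed while preserving the
	 * CDA != EDA gap established in sca_dmac_rxinit().
	 */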
}

/*
 * set a port to the "up" state
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
	scp->sp_if.if_flags |= IFF_RUNNING;

	/*
	 * raise DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_HIGH);

#if 0
	/*
	 * enable interrupts (no timer IER2)
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}
#endif

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->sp_txinuse = 0;
	scp->sp_txcur = 0;
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * set a port to the "down" state
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * lower DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_LOW);

	/*
	 * disable interrupts
	 */
#if 0
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}
#endif

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	struct sca_softc *sc;
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	sc = scp->sca;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->sp_txinuse == 0)
		return;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));

	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	startdesc_p = scp->sp_txdesc_p;
	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
	    startdesc_p, enddesc_p));

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
	    (u_int16_t)(startdesc_p & 0x0000ffff));
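
	/*
	 * Note that CDA is (re)set to the first descriptor each time:
	 * sca_dmac_intr() resets sp_txcur and sp_txinuse to 0 after EOT,
	 * so every transmit burst begins at the head of the ring.
	 */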

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 */
static struct mbuf *
sca_mbuf_alloc(struct sca_softc *sc, caddr_t p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header mbuf,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL) {
		/* XXX do we need to sync here? */
		if (sc->sc_usedma)
			bcopy(p, mtod(m, caddr_t), len);
		else
			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
	}
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}

/*
 * get the base clock
 */
void
sca_get_base_clock(struct sca_softc *sc)
{
	struct timeval btv, ctv, dtv;
	u_int64_t bcnt;
	u_int32_t cnt;
	u_int16_t subcnt;

	/* disable the timer, set prescale to 0 */
	sca_write_1(sc, SCA_TCSR0, 0);
	sca_write_1(sc, SCA_TEPR0, 0);

	/* reset the counter */
	(void)sca_read_1(sc, SCA_TCSR0);
	subcnt = sca_read_2(sc, SCA_TCNTL0);

	/* count to max */
	sca_write_2(sc, SCA_TCONRL0, 0xffff);

	cnt = 0;
	microtime(&btv);
	/* start the timer -- no interrupt enable */
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
	for (;;) {
		microtime(&ctv);

		/* end around 3/4 of a second */
		timersub(&ctv, &btv, &dtv);
		if (dtv.tv_usec >= 750000)
			break;

		/* spin */
		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
			;
		/* reset the timer */
		(void)sca_read_2(sc, SCA_TCNTL0);
		cnt++;
	}

	/* stop the timer */
	sca_write_1(sc, SCA_TCSR0, 0);

	subcnt = sca_read_2(sc, SCA_TCNTL0);
	/* add the slop in and get the total timer ticks */
	cnt = (cnt << 16) | subcnt;

	/* cnt is 1/8 the actual time */
	bcnt = cnt * 8;
	/* make it proportional to 3/4 of a second */
	bcnt *= (u_int64_t)750000;
	bcnt /= (u_int64_t)dtv.tv_usec;
	cnt = bcnt;

	/* make it Hz */
	cnt *= 4;
	cnt /= 3;
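
	/*
	 * Worked example with hypothetical numbers: if the counter
	 * accumulated cnt = 921600 ticks in exactly dtv.tv_usec = 750000,
	 * then bcnt = 921600 * 8 = 7372800 chip clocks per 3/4 second,
	 * and 7372800 * 4 / 3 = 9830400 Hz -- the 9.8304 MHz crystal
	 * assumed in sca_msci_get_baud_rate_values() above.
	 */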

	SCA_DPRINTF(SCA_DEBUG_CLOCK,
	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));

	/*
	 * round to the nearest 200 -- this allows for +-3 ticks error
	 */
	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
}

/*
 * print the information about the clock on the ports
 */
void
sca_print_clock_info(struct sca_softc *sc)
{
	struct sca_port *scp;
	u_int32_t mhz, div;
	int i;

	printf("%s: base clock %d Hz\n", sc->sc_parent->dv_xname,
	    sc->sc_baseclock);

	/* print the information about the port clock selection */
	for (i = 0; i < sc->sc_numports; i++) {
		scp = &sc->sc_ports[i];
		/* "mhz" is actually in Hz: the base clock scaled by TMC */
		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
		div = scp->sp_rxs & SCA_RXS_DIV_MASK;

		printf("%s: rx clock: ", scp->sp_if.if_xname);
		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
		case SCA_RXS_CLK_LINE:
			printf("line");
			break;
		case SCA_RXS_CLK_LINE_SN:
			printf("line with noise suppression");
			break;
		case SCA_RXS_CLK_INTERNAL:
			printf("internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_OUT:
			printf("adpll using internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_IN:
			printf("adpll using line clock");
			break;
		}
		printf(" tx clock: ");
		div = scp->sp_txs & SCA_TXS_DIV_MASK;
		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
		case SCA_TXS_CLK_LINE:
			printf("line\n");
			break;
		case SCA_TXS_CLK_INTERNAL:
			printf("internal %d Hz\n", (mhz >> div));
			break;
		case SCA_TXS_CLK_RXCLK:
			printf("rxclock\n");
			break;
		}
		if (scp->sp_eclock)
			printf("%s: outputting line clock\n",
			    scp->sp_if.if_xname);
	}
}