/*	$NetBSD: hd64570.c,v 1.43 2010/04/05 07:19:34 joerg Exp $	*/
2
3 /*
4 * Copyright (c) 1999 Christian E. Hopps
5 * Copyright (c) 1998 Vixie Enterprises
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of Vixie Enterprises nor the names
18 * of its contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
22 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
23 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL VIXIE ENTERPRISES OR
26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * This software has been written for Vixie Enterprises by Michael Graff
36 * <explorer (at) flame.org>. To learn more about Vixie Enterprises, see
37 * ``http://www.vix.com''.
38 */
39
40 /*
41 * TODO:
42 *
 * o teach the receive logic about errors, and about long frames that
 *   span more than one input buffer.  (Right now, receive/transmit is
 *   limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *   This is currently 1504, which is large enough to hold the HDLC
 *   header and the packet itself.  Packets that are too long are
 *   silently dropped on both transmit and receive.)
49 * o write code to handle the msci interrupts, needed only for CD
50 * and CTS changes.
51 * o consider switching back to a "queue tx with DMA active" model which
52 * should help sustain outgoing traffic
53 * o through clever use of bus_dma*() functions, it should be possible
54 * to map the mbuf's data area directly into a descriptor transmit
55 * buffer, removing the need to allocate extra memory. If, however,
56 * we run out of descriptors for this, we will need to then allocate
57 * one large mbuf, copy the fragmented chain into it, and put it onto
58 * a single descriptor.
59 * o use bus_dmamap_sync() with the right offset and lengths, rather
60 * than cheating and always sync'ing the whole region.
61 *
 * o perhaps allow rx and tx to be in more than one page
 *   if not using DMA.  Currently the assumption is that
 *   rx and tx each use one page.
65 */
66
67 #include <sys/cdefs.h>
68 __KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.43 2010/04/05 07:19:34 joerg Exp $");
69
70 #include "opt_inet.h"
71 #include "opt_iso.h"
72
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/device.h>
76 #include <sys/mbuf.h>
77 #include <sys/socket.h>
78 #include <sys/sockio.h>
79 #include <sys/kernel.h>
80
81 #include <net/if.h>
82 #include <net/if_types.h>
83 #include <net/netisr.h>
84
85 #if defined(INET) || defined(INET6)
86 #include <netinet/in.h>
87 #include <netinet/in_systm.h>
88 #include <netinet/in_var.h>
89 #include <netinet/ip.h>
90 #ifdef INET6
91 #include <netinet6/in6_var.h>
92 #endif
93 #endif
94
95 #ifdef ISO
96 #include <net/if_llc.h>
97 #include <netiso/iso.h>
98 #include <netiso/iso_var.h>
99 #endif
100
101 #include <net/bpf.h>
102
103 #include <sys/cpu.h>
104 #include <sys/bus.h>
105 #include <sys/intr.h>
106
107 #include <dev/pci/pcivar.h>
108 #include <dev/pci/pcireg.h>
109 #include <dev/pci/pcidevs.h>
110
111 #include <dev/ic/hd64570reg.h>
112 #include <dev/ic/hd64570var.h>
113
114 #define SCA_DEBUG_RX 0x0001
115 #define SCA_DEBUG_TX 0x0002
116 #define SCA_DEBUG_CISCO 0x0004
117 #define SCA_DEBUG_DMA 0x0008
118 #define SCA_DEBUG_RXPKT 0x0010
119 #define SCA_DEBUG_TXPKT 0x0020
120 #define SCA_DEBUG_INTR 0x0040
121 #define SCA_DEBUG_CLOCK 0x0080
122
123 #if 0
124 #define SCA_DEBUG_LEVEL ( 0xFFFF )
125 #else
126 #define SCA_DEBUG_LEVEL 0
127 #endif
128
129 u_int32_t sca_debug = SCA_DEBUG_LEVEL;
130
131 #if SCA_DEBUG_LEVEL > 0
132 #define SCA_DPRINTF(l, x) do { \
133 if ((l) & sca_debug) \
134 printf x;\
135 } while (0)
136 #else
137 #define SCA_DPRINTF(l, x)
138 #endif
139
140 #if 0
141 #define SCA_USE_FASTQ /* use a split queue, one for fast traffic */
142 #endif
143
144 static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
145 static inline u_int8_t msci_read_1(sca_port_t *, u_int);
146
147 static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
148 static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
149 static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
150 static inline u_int16_t dmac_read_2(sca_port_t *, u_int);
151
152 static void sca_msci_init(struct sca_softc *, sca_port_t *);
153 static void sca_dmac_init(struct sca_softc *, sca_port_t *);
154 static void sca_dmac_rxinit(sca_port_t *);
155
156 static int sca_dmac_intr(sca_port_t *, u_int8_t);
157 static int sca_msci_intr(sca_port_t *, u_int8_t);
158
159 static void sca_get_packets(sca_port_t *);
160 static int sca_frame_avail(sca_port_t *);
161 static void sca_frame_process(sca_port_t *);
162 static void sca_frame_read_done(sca_port_t *);
163
164 static void sca_port_starttx(sca_port_t *);
165
166 static void sca_port_up(sca_port_t *);
167 static void sca_port_down(sca_port_t *);
168
169 static int sca_output(struct ifnet *, struct mbuf *, const struct sockaddr *,
170 struct rtentry *);
171 static int sca_ioctl(struct ifnet *, u_long, void *);
172 static void sca_start(struct ifnet *);
173 static void sca_watchdog(struct ifnet *);
174
175 static struct mbuf *sca_mbuf_alloc(struct sca_softc *, void *, u_int);
176
177 #if SCA_DEBUG_LEVEL > 0
178 static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
179 #endif
180
181
182 #define sca_read_1(sc, reg) (sc)->sc_read_1(sc, reg)
183 #define sca_read_2(sc, reg) (sc)->sc_read_2(sc, reg)
184 #define sca_write_1(sc, reg, val) (sc)->sc_write_1(sc, reg, val)
185 #define sca_write_2(sc, reg, val) (sc)->sc_write_2(sc, reg, val)
186
187 #define sca_page_addr(sc, addr) ((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)
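/*
 * sca_page_addr() reduces an address to its offset within the currently
 * selected memory window; e.g. with a (hypothetical) 16KB window,
 * scu_pagemask would be 0x3fff and only the low 14 bits survive.  The
 * page itself is selected separately via scu_set_page().
 */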
188
189 static inline void
190 msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
191 {
192 sca_write_1(scp->sca, scp->msci_off + reg, val);
193 }
194
195 static inline u_int8_t
196 msci_read_1(sca_port_t *scp, u_int reg)
197 {
198 return sca_read_1(scp->sca, scp->msci_off + reg);
199 }
200
201 static inline void
202 dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
203 {
204 sca_write_1(scp->sca, scp->dmac_off + reg, val);
205 }
206
207 static inline void
208 dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
209 {
210 sca_write_2(scp->sca, scp->dmac_off + reg, val);
211 }
212
213 static inline u_int8_t
214 dmac_read_1(sca_port_t *scp, u_int reg)
215 {
216 return sca_read_1(scp->sca, scp->dmac_off + reg);
217 }
218
219 static inline u_int16_t
220 dmac_read_2(sca_port_t *scp, u_int reg)
221 {
222 return sca_read_2(scp->sca, scp->dmac_off + reg);
223 }
224
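/*
 * The sca_desc_* accessors below hide where the descriptor ring lives:
 * with sc_usedma the ring is in host memory and can be dereferenced
 * directly, otherwise it sits in on-board memory and must be reached
 * through the bus_space window at the offset sca_page_addr() computes.
 */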
225 /*
226 * read the chain pointer
227 */
228 static inline u_int16_t
229 sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
230 {
231 if (sc->sc_usedma)
232 return ((dp)->sd_chainp);
233 return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
234 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
235 }
236
237 /*
238 * write the chain pointer
239 */
240 static inline void
241 sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
242 {
243 if (sc->sc_usedma)
244 (dp)->sd_chainp = cp;
245 else
246 bus_space_write_2(sc->scu_memt, sc->scu_memh,
247 sca_page_addr(sc, dp)
248 + offsetof(struct sca_desc, sd_chainp), cp);
249 }
250
251 /*
252 * read the buffer pointer
253 */
254 static inline u_int32_t
255 sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
256 {
257 u_int32_t address;
258
259 if (sc->sc_usedma)
260 address = dp->sd_bufp | dp->sd_hbufp << 16;
261 else {
262 address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
263 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
264 address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
265 sca_page_addr(sc, dp)
266 + offsetof(struct sca_desc, sd_hbufp)) << 16;
267 }
268 return (address);
269 }
270
271 /*
272 * write the buffer pointer
273 */
274 static inline void
275 sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
276 {
277 if (sc->sc_usedma) {
278 dp->sd_bufp = bufp & 0xFFFF;
279 dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
280 } else {
281 bus_space_write_2(sc->scu_memt, sc->scu_memh,
282 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
283 bufp & 0xFFFF);
284 bus_space_write_1(sc->scu_memt, sc->scu_memh,
285 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
286 (bufp & 0x00FF0000) >> 16);
287 }
288 }
289
290 /*
291 * read the buffer length
292 */
293 static inline u_int16_t
294 sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
295 {
296 if (sc->sc_usedma)
297 return ((dp)->sd_buflen);
298 return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
299 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
300 }
301
302 /*
303 * write the buffer length
304 */
305 static inline void
306 sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
307 {
308 if (sc->sc_usedma)
309 (dp)->sd_buflen = len;
310 else
311 bus_space_write_2(sc->scu_memt, sc->scu_memh,
312 sca_page_addr(sc, dp)
313 + offsetof(struct sca_desc, sd_buflen), len);
314 }
315
316 /*
317 * read the descriptor status
318 */
319 static inline u_int8_t
320 sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
321 {
322 if (sc->sc_usedma)
323 return ((dp)->sd_stat);
324 return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
325 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
326 }
327
328 /*
329 * write the descriptor status
330 */
331 static inline void
332 sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
333 {
334 if (sc->sc_usedma)
335 (dp)->sd_stat = stat;
336 else
337 bus_space_write_1(sc->scu_memt, sc->scu_memh,
338 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
339 stat);
340 }
341
342 void
343 sca_init(struct sca_softc *sc)
344 {
345 /*
346 * Do a little sanity check: check number of ports.
347 */
348 if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can't handle fewer than 1 or more than 2 ports");
350
351 /*
352 * disable DMA and MSCI interrupts
353 */
354 sca_write_1(sc, SCA_DMER, 0);
355 sca_write_1(sc, SCA_IER0, 0);
356 sca_write_1(sc, SCA_IER1, 0);
357 sca_write_1(sc, SCA_IER2, 0);
358
359 /*
360 * configure interrupt system
361 */
362 sca_write_1(sc, SCA_ITCR,
363 SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
364 #if 0
	/* these are for the interrupt ack cycle which we don't use */
366 sca_write_1(sc, SCA_IVR, 0x40);
367 sca_write_1(sc, SCA_IMVR, 0x40);
368 #endif
369
370 /*
371 * set wait control register to zero wait states
372 */
373 sca_write_1(sc, SCA_PABR0, 0);
374 sca_write_1(sc, SCA_PABR1, 0);
375 sca_write_1(sc, SCA_WCRL, 0);
376 sca_write_1(sc, SCA_WCRM, 0);
377 sca_write_1(sc, SCA_WCRH, 0);
378
379 /*
380 * disable DMA and reset status
381 */
382 sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);
383
384 /*
385 * disable transmit DMA for all channels
386 */
387 sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
388 sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
389 sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
390 sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
391 sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
392 sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
393 sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
394 sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
395
396 /*
397 * enable DMA based on channel enable flags for each channel
398 */
399 sca_write_1(sc, SCA_DMER, SCA_DMER_EN);
400
401 /*
402 * Should check to see if the chip is responding, but for now
403 * assume it is.
404 */
405 }
406
407 /*
408 * initialize the port and attach it to the networking layer
409 */
410 void
411 sca_port_attach(struct sca_softc *sc, u_int port)
412 {
413 struct timeval now;
414 sca_port_t *scp = &sc->sc_ports[port];
415 struct ifnet *ifp;
416 static u_int ntwo_unit = 0;
417
418 scp->sca = sc; /* point back to the parent */
419
420 scp->sp_port = port;
421
422 if (port == 0) {
423 scp->msci_off = SCA_MSCI_OFF_0;
424 scp->dmac_off = SCA_DMAC_OFF_0;
425 if(sc->sc_parent != NULL)
426 ntwo_unit = device_unit(sc->sc_parent) * 2 + 0;
427 else
428 ntwo_unit = 0; /* XXX */
429 } else {
430 scp->msci_off = SCA_MSCI_OFF_1;
431 scp->dmac_off = SCA_DMAC_OFF_1;
432 if(sc->sc_parent != NULL)
433 ntwo_unit = device_unit(sc->sc_parent) * 2 + 1;
434 else
435 ntwo_unit = 1; /* XXX */
436 }
437
438 sca_msci_init(sc, scp);
439 sca_dmac_init(sc, scp);
440
441 /*
442 * attach to the network layer
443 */
444 ifp = &scp->sp_if;
445 snprintf(ifp->if_xname, sizeof(ifp->if_xname), "ntwo%d", ntwo_unit);
446 ifp->if_softc = scp;
447 ifp->if_mtu = SCA_MTU;
448 ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
449 ifp->if_type = IFT_PTPSERIAL;
450 ifp->if_hdrlen = HDLC_HDRLEN;
451 ifp->if_ioctl = sca_ioctl;
452 ifp->if_output = sca_output;
453 ifp->if_watchdog = sca_watchdog;
454 ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
455 scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
456 #ifdef SCA_USE_FASTQ
457 scp->fastq.ifq_maxlen = IFQ_MAXLEN;
458 #endif
459 IFQ_SET_READY(&ifp->if_snd);
460 if_attach(ifp);
461 if_alloc_sadl(ifp);
462 bpf_attach(ifp, DLT_HDLC, HDLC_HDRLEN);
463
464 if (sc->sc_parent == NULL)
465 printf("%s: port %d\n", ifp->if_xname, port);
466 else
467 printf("%s at %s port %d\n",
468 ifp->if_xname, device_xname(sc->sc_parent), port);
469
470 /*
471 * reset the last seen times on the cisco keepalive protocol
472 */
473 getmicrotime(&now);
474 scp->cka_lasttx = now.tv_usec;
475 scp->cka_lastrx = 0;
476 }
477
478 #if 0
479 /*
480 * returns log2(div), sets 'tmc' for the required freq 'hz'
481 */
482 static u_int8_t
483 sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
484 {
485 u_int32_t tmc, div;
486 u_int32_t clock;
487
488 /* clock hz = (chipclock / tmc) / 2^(div); */
489 /*
490 * TD == tmc * 2^(n)
491 *
492 * note:
493 * 1 <= TD <= 256 TD is inc of 1
494 * 2 <= TD <= 512 TD is inc of 2
495 * 4 <= TD <= 1024 TD is inc of 4
496 * ...
497 * 512 <= TD <= 256*512 TD is inc of 512
498 *
	 * so note there are overlaps.  We lose precision as div
	 * increases, so we wish to minimize div.
501 *
502 * basically we want to do
503 *
504 * tmc = chip / hz, but have tmc <= 256
505 */
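	/*
	 * worked example (using the 9.8304MHz clock assumed below):
	 * hz = 9600 -> tmc = 4915200 / 9600 = 512 > 256, so halve the
	 * clock: div = 1, tmc = 2457600 / 9600 = 256 (encoded as 0).
	 * check: 4915200 / 256 / 2^1 = 9600.
	 */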
506
507 /* assume system clock is 9.8304MHz or 9830400Hz */
	clock = 9830400 >> 1;
509
510 /* round down */
511 div = 0;
512 while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
513 clock >>= 1;
514 div++;
515 }
516 if (clock / tmc > hz)
517 tmc++;
518 if (!tmc)
519 tmc = 1;
520
521 if (div > SCA_RXS_DIV_512) {
522 /* set to maximums */
523 div = SCA_RXS_DIV_512;
524 tmc = 0;
525 }
526
527 *tmcp = (tmc & 0xFF); /* 0 == 256 */
528 return (div & 0xFF);
529 }
530 #endif
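
/*
 * Illustrative sketch only (the helper above is compiled out, and this
 * function name is hypothetical): a board attachment could use it to
 * derive the internal clock settings that sca_msci_init() consumes.
 */
#if 0
static void
sca_port_set_internal_clock(sca_port_t *scp, u_int32_t hz)
{
	u_int8_t div, tmc;

	/* compute divider and time constant for the requested rate */
	div = sca_msci_get_baud_rate_values(hz, &tmc);
	scp->sp_tmc = tmc;	/* time constant; 0 == 256 */
	scp->sp_rxs = SCA_RXS_CLK_INTERNAL | div;
	scp->sp_txs = SCA_TXS_CLK_INTERNAL | div;
}
#endif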
531
532 /*
533 * initialize the port's MSCI
534 */
535 static void
536 sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
537 {
538 /* reset the channel */
539 msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);
540
541 msci_write_1(scp, SCA_MD00,
542 ( SCA_MD0_CRC_1
543 | SCA_MD0_CRC_CCITT
544 | SCA_MD0_CRC_ENABLE
545 | SCA_MD0_MODE_HDLC));
546 #if 0
547 /* immediately send receive reset so the above takes */
548 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
549 #endif
550
551 msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
552 msci_write_1(scp, SCA_MD20,
553 (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));
554
555 /* be safe and do it again */
556 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
557
558 /* setup underrun and idle control, and initial RTS state */
559 msci_write_1(scp, SCA_CTL0,
560 (SCA_CTL_IDLC_PATTERN
561 | SCA_CTL_UDRNC_AFTER_FCS
562 | SCA_CTL_RTS_LOW));
563
564 /* reset the transmitter */
565 msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
566
567 /*
568 * set the clock sources
569 */
570 msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
571 msci_write_1(scp, SCA_TXS0, scp->sp_txs);
572 msci_write_1(scp, SCA_TMC0, scp->sp_tmc);
573
574 /* set external clock generate as requested */
575 sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);
576
577 /*
578 * XXX don't pay attention to CTS or CD changes right now. I can't
579 * simulate one, and the transmitter will try to transmit even if
580 * CD isn't there anyway, so nothing bad SHOULD happen.
581 */
582 #if 0
583 msci_write_1(scp, SCA_IE00, 0);
584 msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
585 #else
586 /* this would deliver transmitter underrun to ST1/ISR1 */
587 msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
588 msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
589 #endif
590 msci_write_1(scp, SCA_IE20, 0);
591
592 msci_write_1(scp, SCA_FIE0, 0);
593
594 msci_write_1(scp, SCA_SA00, 0);
595 msci_write_1(scp, SCA_SA10, 0);
596
597 msci_write_1(scp, SCA_IDL0, 0x7e);
598
599 msci_write_1(scp, SCA_RRC0, 0x0e);
600 /* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * The correct values here are important for avoiding underruns.
	 * When the fifo level is at or below TRC0, txrdy is asserted,
	 * which starts the dmac transfer into the fifo; once the fifo
	 * holds TRC1 + 1 or more bytes, txrdy is cleared, which stops DMA.
	 *
	 * Thus, if we are using a very fast clock that empties the fifo
	 * quickly, delays in the dmac starting to fill the fifo can
	 * lead to underruns, so we want a fairly full fifo to still
	 * cause the dmac to start.  For cards with on-board RAM this
	 * has no effect on system performance.  For cards that DMA
	 * to/from system memory it will cause more, shorter
	 * bus accesses rather than fewer, longer ones.
	 */
615 msci_write_1(scp, SCA_TRC00, 0x00);
616 msci_write_1(scp, SCA_TRC10, 0x1f);
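
	/*
	 * e.g. with TRC0 = 0x00 the dmac is only started once the fifo
	 * has drained; if that proves too late, sca_msci_intr() bumps
	 * TRC0 (toward its 0x1f maximum) on each underrun so refills
	 * begin while the fifo is fuller.
	 */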
617 }
618
619 /*
620 * Take the memory for the port and construct two circular linked lists of
621 * descriptors (one tx, one rx) and set the pointers in these descriptors
622 * to point to the buffer space for this port.
623 */
624 static void
625 sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
626 {
627 sca_desc_t *desc;
628 u_int32_t desc_p;
629 u_int32_t buf_p;
630 int i;
631
632 if (sc->sc_usedma)
633 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
634 BUS_DMASYNC_PREWRITE);
635 else {
636 /*
637 * XXX assumes that all tx desc and bufs in same page
638 */
639 sc->scu_page_on(sc);
640 sc->scu_set_page(sc, scp->sp_txdesc_p);
641 }
642
643 desc = scp->sp_txdesc;
644 desc_p = scp->sp_txdesc_p;
645 buf_p = scp->sp_txbuf_p;
646 scp->sp_txcur = 0;
647 scp->sp_txinuse = 0;
648
649 #ifdef DEBUG
650 /* make sure that we won't wrap */
651 if ((desc_p & 0xffff0000) !=
652 ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architectural boundary");
654 if ((buf_p & 0xff000000) !=
655 ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architectural boundary");
657 #endif
658
659 for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
660 /*
		 * desc_p points to the physical address of the NEXT desc
662 */
663 desc_p += sizeof(sca_desc_t);
664
665 sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
666 sca_desc_write_bufp(sc, desc, buf_p);
667 sca_desc_write_buflen(sc, desc, SCA_BSIZE);
668 sca_desc_write_stat(sc, desc, 0);
669
670 desc++; /* point to the next descriptor */
671 buf_p += SCA_BSIZE;
672 }
673
674 /*
675 * "heal" the circular list by making the last entry point to the
676 * first.
677 */
678 sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);
679
680 /*
681 * Now, initialize the transmit DMA logic
682 *
683 * CPB == chain pointer base address
684 */
685 dmac_write_1(scp, SCA_DSR1, 0);
686 dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
687 dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
688 /* XXX1
689 dmac_write_1(scp, SCA_DIR1,
690 (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
691 */
692 dmac_write_1(scp, SCA_DIR1,
693 (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
694 dmac_write_1(scp, SCA_CPB1,
695 (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));
696
697 /*
698 * now, do the same thing for receive descriptors
699 *
700 * XXX assumes that all rx desc and bufs in same page
701 */
702 if (!sc->sc_usedma)
703 sc->scu_set_page(sc, scp->sp_rxdesc_p);
704
705 desc = scp->sp_rxdesc;
706 desc_p = scp->sp_rxdesc_p;
707 buf_p = scp->sp_rxbuf_p;
708
709 #ifdef DEBUG
710 /* make sure that we won't wrap */
711 if ((desc_p & 0xffff0000) !=
712 ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architectural boundary");
714 if ((buf_p & 0xff000000) !=
715 ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architectural boundary");
717 #endif
718
719 for (i = 0 ; i < scp->sp_nrxdesc; i++) {
720 /*
		 * desc_p points to the physical address of the NEXT desc
722 */
723 desc_p += sizeof(sca_desc_t);
724
725 sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
726 sca_desc_write_bufp(sc, desc, buf_p);
727 /* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
728 sca_desc_write_buflen(sc, desc, 0);
729 sca_desc_write_stat(sc, desc, 0);
730
731 desc++; /* point to the next descriptor */
732 buf_p += SCA_BSIZE;
733 }
734
735 /*
736 * "heal" the circular list by making the last entry point to the
737 * first.
738 */
739 sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);
740
741 sca_dmac_rxinit(scp);
742
743 if (sc->sc_usedma)
744 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
745 0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
746 else
747 sc->scu_page_off(sc);
748 }
749
750 /*
751 * reset and reinitialize the receive DMA logic
752 */
753 static void
754 sca_dmac_rxinit(sca_port_t *scp)
755 {
756 /*
757 * ... and the receive DMA logic ...
758 */
759 dmac_write_1(scp, SCA_DSR0, 0); /* disable DMA */
760 dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);
761
762 dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
763 dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);
764
765 /* reset descriptors to initial state */
766 scp->sp_rxstart = 0;
767 scp->sp_rxend = scp->sp_nrxdesc - 1;
768
769 /*
770 * CPB == chain pointer base
771 * CDA == current descriptor address
772 * EDA == error descriptor address (overwrite position)
	 * because cda can't equal eda when starting, we always
	 * have a single buffer gap between cda and eda
775 */
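	/*
	 * e.g. with sp_nrxdesc descriptors, cda starts at desc[0] and eda
	 * at desc[sp_nrxdesc - 1]; the dmac may fill up to but not
	 * including eda, and sca_frame_read_done() advances eda as frames
	 * are consumed.
	 */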
776 dmac_write_1(scp, SCA_CPB0,
777 (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
778 dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
779 dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
780 (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));
781
782 /*
783 * enable receiver DMA
784 */
785 dmac_write_1(scp, SCA_DIR0,
786 (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
787 dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
788 }
789
790 /*
791 * Queue the packet for our start routine to transmit
792 */
793 static int
794 sca_output(
795 struct ifnet *ifp,
796 struct mbuf *m,
797 const struct sockaddr *dst,
798 struct rtentry *rt0)
799 {
800 #ifdef ISO
801 struct hdlc_llc_header *llc;
802 #endif
803 struct hdlc_header *hdlc;
804 struct ifqueue *ifq = NULL;
805 int s, error, len;
806 short mflags;
807 ALTQ_DECL(struct altq_pktattr pktattr;)
808
809 error = 0;
810
811 if ((ifp->if_flags & IFF_UP) != IFF_UP) {
812 error = ENETDOWN;
813 goto bad;
814 }
815
816 /*
817 * If the queueing discipline needs packet classification,
818 * do it before prepending link headers.
819 */
820 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);
821
822 /*
823 * determine address family, and priority for this packet
824 */
825 switch (dst->sa_family) {
826 #ifdef INET
827 case AF_INET:
828 #ifdef SCA_USE_FASTQ
829 if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
830 == IPTOS_LOWDELAY)
831 ifq = &((sca_port_t *)ifp->if_softc)->fastq;
832 #endif
833 /*
834 * Add cisco serial line header. If there is no
835 * space in the first mbuf, allocate another.
836 */
837 M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == NULL)
839 return (ENOBUFS);
840 hdlc = mtod(m, struct hdlc_header *);
841 hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
842 break;
843 #endif
844 #ifdef INET6
845 case AF_INET6:
846 /*
847 * Add cisco serial line header. If there is no
848 * space in the first mbuf, allocate another.
849 */
850 M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == NULL)
852 return (ENOBUFS);
853 hdlc = mtod(m, struct hdlc_header *);
854 hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
855 break;
856 #endif
857 #ifdef ISO
858 case AF_ISO:
859 /*
860 * Add cisco llc serial line header. If there is no
861 * space in the first mbuf, allocate another.
862 */
863 M_PREPEND(m, sizeof(struct hdlc_llc_header), M_DONTWAIT);
		if (m == NULL)
865 return (ENOBUFS);
866 hdlc = mtod(m, struct hdlc_header *);
867 llc = mtod(m, struct hdlc_llc_header *);
868 llc->hl_dsap = llc->hl_ssap = LLC_ISO_LSAP;
869 llc->hl_ffb = 0;
870 break;
871 #endif
872 default:
873 printf("%s: address family %d unsupported\n",
874 ifp->if_xname, dst->sa_family);
875 error = EAFNOSUPPORT;
876 goto bad;
877 }
878
879 /* finish */
880 if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
881 hdlc->h_addr = CISCO_MULTICAST;
882 else
883 hdlc->h_addr = CISCO_UNICAST;
884 hdlc->h_resv = 0;
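
	/*
	 * The frame now leads with the HDLC_HDRLEN-byte cisco header
	 * built above: h_addr, h_resv, and the 16-bit h_proto for the
	 * chosen address family.
	 */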
885
886 /*
887 * queue the packet. If interactive, use the fast queue.
888 */
889 mflags = m->m_flags;
890 len = m->m_pkthdr.len;
891 s = splnet();
892 if (ifq != NULL) {
893 if (IF_QFULL(ifq)) {
894 IF_DROP(ifq);
895 m_freem(m);
896 error = ENOBUFS;
897 } else
898 IF_ENQUEUE(ifq, m);
899 } else
900 IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
901 if (error != 0) {
902 splx(s);
903 ifp->if_oerrors++;
904 ifp->if_collisions++;
905 return (error);
906 }
907 ifp->if_obytes += len;
908 if (mflags & M_MCAST)
909 ifp->if_omcasts++;
910
911 sca_start(ifp);
912 splx(s);
913
914 return (error);
915
916 bad:
917 if (m)
918 m_freem(m);
919 return (error);
920 }
921
922 static int
923 sca_ioctl(struct ifnet *ifp, u_long cmd, void *data)
924 {
925 struct ifreq *ifr;
926 struct ifaddr *ifa;
927 int error;
928 int s;
929
930 s = splnet();
931
932 ifr = (struct ifreq *)data;
933 ifa = (struct ifaddr *)data;
934 error = 0;
935
936 switch (cmd) {
937 case SIOCINITIFADDR:
938 switch(ifa->ifa_addr->sa_family) {
939 #ifdef INET
940 case AF_INET:
941 #endif
942 #ifdef INET6
943 case AF_INET6:
944 #endif
945 #if defined(INET) || defined(INET6)
946 ifp->if_flags |= IFF_UP;
947 sca_port_up(ifp->if_softc);
948 break;
949 #endif
950 default:
951 error = EAFNOSUPPORT;
952 break;
953 }
954 break;
955
956 case SIOCSIFDSTADDR:
957 #ifdef INET
958 if (ifa->ifa_addr->sa_family == AF_INET)
959 break;
960 #endif
961 #ifdef INET6
962 if (ifa->ifa_addr->sa_family == AF_INET6)
963 break;
964 #endif
965 error = EAFNOSUPPORT;
966 break;
967
968 case SIOCADDMULTI:
969 case SIOCDELMULTI:
970 /* XXX need multicast group management code */
		if (ifr == NULL) {
972 error = EAFNOSUPPORT; /* XXX */
973 break;
974 }
975 switch (ifreq_getaddr(cmd, ifr)->sa_family) {
976 #ifdef INET
977 case AF_INET:
978 break;
979 #endif
980 #ifdef INET6
981 case AF_INET6:
982 break;
983 #endif
984 default:
985 error = EAFNOSUPPORT;
986 break;
987 }
988 break;
989
990 case SIOCSIFFLAGS:
991 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
992 break;
993 if (ifr->ifr_flags & IFF_UP) {
994 ifp->if_flags |= IFF_UP;
995 sca_port_up(ifp->if_softc);
996 } else {
997 ifp->if_flags &= ~IFF_UP;
998 sca_port_down(ifp->if_softc);
999 }
1000
1001 break;
1002
1003 default:
1004 error = ifioctl_common(ifp, cmd, data);
1005 }
1006
1007 splx(s);
1008 return error;
1009 }
1010
1011 /*
1012 * start packet transmission on the interface
1013 *
1014 * MUST BE CALLED AT splnet()
1015 */
1016 static void
1017 sca_start(struct ifnet *ifp)
1018 {
1019 sca_port_t *scp = ifp->if_softc;
1020 struct sca_softc *sc = scp->sca;
1021 struct mbuf *m, *mb_head;
1022 sca_desc_t *desc;
1023 u_int8_t *buf, stat;
1024 u_int32_t buf_p;
1025 int nexttx;
1026 int trigger_xmit;
1027 u_int len;
1028
1029 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));
1030
1031 /*
1032 * can't queue when we are full or transmitter is busy
1033 */
1034 #ifdef oldcode
1035 if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
1036 || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
1037 return;
1038 #else
1039 if (scp->sp_txinuse
1040 || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
1041 return;
1042 #endif
1043 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));
1044
1045 /*
1046 * XXX assume that all tx desc and bufs in same page
1047 */
1048 if (sc->sc_usedma)
1049 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1050 0, sc->scu_allocsize,
1051 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1052 else {
1053 sc->scu_page_on(sc);
1054 sc->scu_set_page(sc, scp->sp_txdesc_p);
1055 }
1056
1057 trigger_xmit = 0;
1058
1059 txloop:
1060 IF_DEQUEUE(&scp->linkq, mb_head);
1061 if (mb_head == NULL)
1062 #ifdef SCA_USE_FASTQ
1063 IF_DEQUEUE(&scp->fastq, mb_head);
1064 if (mb_head == NULL)
1065 #endif
1066 IFQ_DEQUEUE(&ifp->if_snd, mb_head);
1067 if (mb_head == NULL)
1068 goto start_xmit;
1069
1070 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
1071 #ifdef oldcode
1072 if (scp->txinuse != 0) {
1073 /* Kill EOT interrupts on the previous descriptor. */
1074 desc = &scp->sp_txdesc[scp->txcur];
1075 stat = sca_desc_read_stat(sc, desc);
1076 sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);
1077
1078 /* Figure out what the next free descriptor is. */
1079 nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1080 } else
1081 nexttx = 0;
1082 #endif /* oldcode */
1083
1084 if (scp->sp_txinuse)
1085 nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1086 else
1087 nexttx = 0;
1088
1089 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));
1090
1091 buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
1092 buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;
1093
1094 /* XXX hoping we can delay the desc write till after we don't drop. */
1095 desc = &scp->sp_txdesc[nexttx];
1096
1097 /* XXX isn't this set already?? */
1098 sca_desc_write_bufp(sc, desc, buf_p);
1099 len = 0;
1100
1101 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));
1102
1103 #if 0 /* uncomment this for a core in cc1 */
1104 X
1105 #endif
1106 /*
1107 * Run through the chain, copying data into the descriptor as we
1108 * go. If it won't fit in one transmission block, drop the packet.
1109 * No, this isn't nice, but most of the time it _will_ fit.
1110 */
1111 for (m = mb_head ; m != NULL ; m = m->m_next) {
1112 if (m->m_len != 0) {
1113 len += m->m_len;
1114 if (len > SCA_BSIZE) {
1115 m_freem(mb_head);
1116 goto txloop;
1117 }
1118 SCA_DPRINTF(SCA_DEBUG_TX,
1119 ("TX: about to mbuf len %d\n", m->m_len));
1120
1121 if (sc->sc_usedma)
1122 memcpy(buf, mtod(m, u_int8_t *), m->m_len);
1123 else
1124 bus_space_write_region_1(sc->scu_memt,
1125 sc->scu_memh, sca_page_addr(sc, buf_p),
1126 mtod(m, u_int8_t *), m->m_len);
1127 buf += m->m_len;
1128 buf_p += m->m_len;
1129 }
1130 }
1131
1132 /* set the buffer, the length, and mark end of frame and end of xfer */
1133 sca_desc_write_buflen(sc, desc, len);
1134 sca_desc_write_stat(sc, desc, SCA_DESC_EOM);
1135
1136 ifp->if_opackets++;
1137
1138 /*
1139 * Pass packet to bpf if there is a listener.
1140 */
1141 bpf_mtap(ifp, mb_head);
1142
1143 m_freem(mb_head);
1144
1145 scp->sp_txcur = nexttx;
1146 scp->sp_txinuse++;
1147 trigger_xmit = 1;
1148
1149 SCA_DPRINTF(SCA_DEBUG_TX,
1150 ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));
1151
1152 /*
	 * XXX didn't this use to limit us to 1?  Multi-descriptor
	 * operation may be untested; sp_ntxdesc used to be hard-coded
	 * to 2, with the claim of a too-hard-to-find bug.
1156 */
1157 #ifdef oldcode
1158 if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
1159 #endif
1160 if (scp->sp_txinuse < scp->sp_ntxdesc)
1161 goto txloop;
1162
1163 start_xmit:
1164 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));
1165
1166 if (trigger_xmit != 0) {
1167 /* set EOT on final descriptor */
1168 desc = &scp->sp_txdesc[scp->sp_txcur];
1169 stat = sca_desc_read_stat(sc, desc);
1170 sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
1171 }
1172
1173 if (sc->sc_usedma)
1174 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
1175 sc->scu_allocsize,
1176 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1177
1178 if (trigger_xmit != 0)
1179 sca_port_starttx(scp);
1180
1181 if (!sc->sc_usedma)
1182 sc->scu_page_off(sc);
1183 }
1184
1185 static void
1186 sca_watchdog(struct ifnet *ifp)
1187 {
1188 }
1189
1190 int
1191 sca_hardintr(struct sca_softc *sc)
1192 {
1193 u_int8_t isr0, isr1, isr2;
1194 int ret;
1195
1196 ret = 0; /* non-zero means we processed at least one interrupt */
1197
1198 SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));
1199
1200 while (1) {
1201 /*
1202 * read SCA interrupts
1203 */
1204 isr0 = sca_read_1(sc, SCA_ISR0);
1205 isr1 = sca_read_1(sc, SCA_ISR1);
1206 isr2 = sca_read_1(sc, SCA_ISR2);
1207
1208 if (isr0 == 0 && isr1 == 0 && isr2 == 0)
1209 break;
1210
1211 SCA_DPRINTF(SCA_DEBUG_INTR,
1212 ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
1213 isr0, isr1, isr2));
1214
1215 /*
1216 * check DMAC interrupt
1217 */
1218 if (isr1 & 0x0f)
1219 ret += sca_dmac_intr(&sc->sc_ports[0],
1220 isr1 & 0x0f);
1221
1222 if (isr1 & 0xf0)
1223 ret += sca_dmac_intr(&sc->sc_ports[1],
1224 (isr1 & 0xf0) >> 4);
1225
1226 /*
		 * msci interrupts
1228 */
1229 if (isr0 & 0x0f)
1230 ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);
1231
1232 if (isr0 & 0xf0)
1233 ret += sca_msci_intr(&sc->sc_ports[1],
1234 (isr0 & 0xf0) >> 4);
1235
1236 #if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
1237 if (isr2)
1238 ret += sca_timer_intr(sc, isr2);
1239 #endif
1240 }
1241
1242 return (ret);
1243 }
1244
1245 static int
1246 sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
1247 {
1248 u_int8_t dsr;
1249 int ret;
1250
1251 ret = 0;
1252
1253 /*
1254 * Check transmit channel
1255 */
1256 if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
1257 SCA_DPRINTF(SCA_DEBUG_INTR,
1258 ("TX INTERRUPT port %d\n", scp->sp_port));
1259
1260 dsr = 1;
1261 while (dsr != 0) {
1262 ret++;
1263 /*
1264 * reset interrupt
1265 */
1266 dsr = dmac_read_1(scp, SCA_DSR1);
1267 dmac_write_1(scp, SCA_DSR1,
1268 dsr | SCA_DSR_DEWD);
1269
1270 /*
1271 * filter out the bits we don't care about
1272 */
1273 dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
1274 if (dsr == 0)
1275 break;
1276
1277 /*
1278 * check for counter overflow
1279 */
1280 if (dsr & SCA_DSR_COF) {
1281 printf("%s: TXDMA counter overflow\n",
1282 scp->sp_if.if_xname);
1283
1284 scp->sp_if.if_flags &= ~IFF_OACTIVE;
1285 scp->sp_txcur = 0;
1286 scp->sp_txinuse = 0;
1287 }
1288
1289 /*
1290 * check for buffer overflow
1291 */
1292 if (dsr & SCA_DSR_BOF) {
1293 printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
1294 scp->sp_if.if_xname,
1295 dmac_read_2(scp, SCA_CDAL1),
1296 dmac_read_2(scp, SCA_EDAL1),
1297 dmac_read_1(scp, SCA_CPB1));
1298
1299 /*
1300 * Yikes. Arrange for a full
1301 * transmitter restart.
1302 */
1303 scp->sp_if.if_flags &= ~IFF_OACTIVE;
1304 scp->sp_txcur = 0;
1305 scp->sp_txinuse = 0;
1306 }
1307
1308 /*
1309 * check for end of transfer, which is not
1310 * an error. It means that all data queued
1311 * was transmitted, and we mark ourself as
1312 * not in use and stop the watchdog timer.
1313 */
1314 if (dsr & SCA_DSR_EOT) {
1315 SCA_DPRINTF(SCA_DEBUG_TX,
1316 ("Transmit completed. cda %x eda %x dsr %x\n",
1317 dmac_read_2(scp, SCA_CDAL1),
1318 dmac_read_2(scp, SCA_EDAL1),
1319 dsr));
1320
1321 scp->sp_if.if_flags &= ~IFF_OACTIVE;
1322 scp->sp_txcur = 0;
1323 scp->sp_txinuse = 0;
1324
1325 /*
1326 * check for more packets
1327 */
1328 sca_start(&scp->sp_if);
1329 }
1330 }
1331 }
1332 /*
1333 * receive channel check
1334 */
1335 if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
1336 SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
1337 (scp == &scp->sca->sc_ports[0] ? 0 : 1)));
1338
1339 dsr = 1;
1340 while (dsr != 0) {
1341 ret++;
1342
1343 dsr = dmac_read_1(scp, SCA_DSR0);
1344 dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);
1345
1346 /*
1347 * filter out the bits we don't care about
1348 */
1349 dsr &= (SCA_DSR_EOM | SCA_DSR_COF
1350 | SCA_DSR_BOF | SCA_DSR_EOT);
1351 if (dsr == 0)
1352 break;
1353
1354 /*
1355 * End of frame
1356 */
1357 if (dsr & SCA_DSR_EOM) {
1358 SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));
1359
1360 sca_get_packets(scp);
1361 }
1362
1363 /*
1364 * check for counter overflow
1365 */
1366 if (dsr & SCA_DSR_COF) {
1367 printf("%s: RXDMA counter overflow\n",
1368 scp->sp_if.if_xname);
1369
1370 sca_dmac_rxinit(scp);
1371 }
1372
1373 /*
1374 * check for end of transfer, which means we
1375 * ran out of descriptors to receive into.
1376 * This means the line is much faster than
1377 * we can handle.
1378 */
1379 if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
1380 printf("%s: RXDMA buffer overflow\n",
1381 scp->sp_if.if_xname);
1382
1383 sca_dmac_rxinit(scp);
1384 }
1385 }
1386 }
1387
1388 return ret;
1389 }
1390
1391 static int
1392 sca_msci_intr(sca_port_t *scp, u_int8_t isr)
1393 {
1394 u_int8_t st1, trc0;
1395
	/* get and clear the specific interrupt -- should act on it :) */
1397 if ((st1 = msci_read_1(scp, SCA_ST10))) {
1398 /* clear the interrupt */
1399 msci_write_1(scp, SCA_ST10, st1);
1400
1401 if (st1 & SCA_ST1_UDRN) {
1402 /* underrun -- try to increase ready control */
1403 trc0 = msci_read_1(scp, SCA_TRC00);
1404 if (trc0 == 0x1f)
1405 printf("TX: underrun - fifo depth maxed\n");
1406 else {
1407 if ((trc0 += 2) > 0x1f)
1408 trc0 = 0x1f;
1409 SCA_DPRINTF(SCA_DEBUG_TX,
1410 ("TX: udrn - incr fifo to %d\n", trc0));
1411 msci_write_1(scp, SCA_TRC00, trc0);
1412 }
1413 }
1414 }
1415 return (0);
1416 }
1417
1418 static void
1419 sca_get_packets(sca_port_t *scp)
1420 {
1421 struct sca_softc *sc;
1422
1423 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));
1424
1425 sc = scp->sca;
1426 if (sc->sc_usedma)
1427 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1428 0, sc->scu_allocsize,
1429 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1430 else {
1431 /*
1432 * XXX this code is unable to deal with rx stuff
1433 * in more than 1 page
1434 */
1435 sc->scu_page_on(sc);
1436 sc->scu_set_page(sc, scp->sp_rxdesc_p);
1437 }
1438
1439 /* process as many frames as are available */
1440 while (sca_frame_avail(scp)) {
1441 sca_frame_process(scp);
1442 sca_frame_read_done(scp);
1443 }
1444
1445 if (sc->sc_usedma)
1446 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1447 0, sc->scu_allocsize,
1448 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1449 else
1450 sc->scu_page_off(sc);
1451 }
1452
1453 /*
1454 * Starting with the first descriptor we wanted to read into, up to but
1455 * not including the current SCA read descriptor, look for a packet.
1456 *
1457 * must be called at splnet()
1458 */
1459 static int
1460 sca_frame_avail(sca_port_t *scp)
1461 {
1462 u_int16_t cda;
1463 u_int32_t desc_p; /* physical address (lower 16 bits) */
1464 sca_desc_t *desc;
1465 u_int8_t rxstat;
1466 int cdaidx, toolong;
1467
1468 /*
1469 * Read the current descriptor from the SCA.
1470 */
1471 cda = dmac_read_2(scp, SCA_CDAL0);
1472
1473 /*
1474 * calculate the index of the current descriptor
1475 */
1476 desc_p = (scp->sp_rxdesc_p & 0xFFFF);
1477 desc_p = cda - desc_p;
1478 cdaidx = desc_p / sizeof(sca_desc_t);
1479
1480 SCA_DPRINTF(SCA_DEBUG_RX,
1481 ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
1482 cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));
1483
	/* the chip's current descriptor must lie within our ring */
1485 if (cdaidx >= scp->sp_nrxdesc)
1486 panic("current descriptor index out of range");
1487
1488 /* see if we have a valid frame available */
1489 toolong = 0;
1490 for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
1491 /*
1492 * We might have a valid descriptor. Set up a pointer
1493 * to the kva address for it so we can more easily examine
1494 * the contents.
1495 */
1496 desc = &scp->sp_rxdesc[scp->sp_rxstart];
1497 rxstat = sca_desc_read_stat(scp->sca, desc);
1498
1499 SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
1500 scp->sp_port, scp->sp_rxstart, rxstat));
1501
1502 SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
1503 scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));
1504
1505 /*
1506 * check for errors
1507 */
1508 if (rxstat & SCA_DESC_ERRORS) {
1509 /*
1510 * consider an error condition the end
1511 * of a frame
1512 */
1513 scp->sp_if.if_ierrors++;
1514 toolong = 0;
1515 continue;
1516 }
1517
1518 /*
1519 * if we aren't skipping overlong frames
1520 * we are done, otherwise reset and look for
1521 * another good frame
1522 */
1523 if (rxstat & SCA_DESC_EOM) {
1524 if (!toolong)
1525 return (1);
1526 toolong = 0;
1527 } else if (!toolong) {
1528 /*
1529 * we currently don't deal with frames
1530 * larger than a single buffer (fixed MTU)
1531 */
1532 scp->sp_if.if_ierrors++;
1533 toolong = 1;
1534 }
1535 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
1536 scp->sp_rxstart));
1537 }
1538
1539 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
1540 return 0;
1541 }
1542
1543 /*
1544 * Pass the packet up to the kernel if it is a packet we want to pay
1545 * attention to.
1546 *
1547 * MUST BE CALLED AT splnet()
1548 */
1549 static void
1550 sca_frame_process(sca_port_t *scp)
1551 {
1552 struct ifqueue *ifq;
1553 struct hdlc_header *hdlc;
1554 struct cisco_pkt *cisco;
1555 sca_desc_t *desc;
1556 struct mbuf *m;
1557 u_int8_t *bufp;
1558 u_int16_t len;
1559 u_int32_t t;
1560
1561 t = time_uptime * 1000;
1562 desc = &scp->sp_rxdesc[scp->sp_rxstart];
1563 bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
1564 len = sca_desc_read_buflen(scp->sca, desc);
1565
1566 SCA_DPRINTF(SCA_DEBUG_RX,
1567 ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
1568 (bus_addr_t)bufp, len));
1569
1570 #if SCA_DEBUG_LEVEL > 0
1571 if (sca_debug & SCA_DEBUG_RXPKT)
1572 sca_frame_print(scp, desc, bufp);
1573 #endif
1574 /*
1575 * skip packets that are too short
1576 */
1577 if (len < sizeof(struct hdlc_header)) {
1578 scp->sp_if.if_ierrors++;
1579 return;
1580 }
1581
1582 m = sca_mbuf_alloc(scp->sca, bufp, len);
1583 if (m == NULL) {
1584 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
1585 return;
1586 }
1587
1588 /*
1589 * read and then strip off the HDLC information
1590 */
1591 m = m_pullup(m, sizeof(struct hdlc_header));
1592 if (m == NULL) {
1593 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
1594 return;
1595 }
1596
1597 bpf_mtap(&scp->sp_if, m);
1598
1599 scp->sp_if.if_ipackets++;
1600
1601 hdlc = mtod(m, struct hdlc_header *);
1602 switch (ntohs(hdlc->h_proto)) {
1603 #ifdef INET
1604 case HDLC_PROTOCOL_IP:
1605 SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
1606 m->m_pkthdr.rcvif = &scp->sp_if;
1607 m->m_pkthdr.len -= sizeof(struct hdlc_header);
1608 m->m_data += sizeof(struct hdlc_header);
1609 m->m_len -= sizeof(struct hdlc_header);
1610 ifq = &ipintrq;
1611 schednetisr(NETISR_IP);
1612 break;
1613 #endif /* INET */
1614 #ifdef INET6
1615 case HDLC_PROTOCOL_IPV6:
1616 SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
1617 m->m_pkthdr.rcvif = &scp->sp_if;
1618 m->m_pkthdr.len -= sizeof(struct hdlc_header);
1619 m->m_data += sizeof(struct hdlc_header);
1620 m->m_len -= sizeof(struct hdlc_header);
1621 ifq = &ip6intrq;
1622 schednetisr(NETISR_IPV6);
1623 break;
1624 #endif /* INET6 */
1625 #ifdef ISO
1626 case HDLC_PROTOCOL_ISO:
1627 if (m->m_pkthdr.len < sizeof(struct hdlc_llc_header))
1628 goto dropit;
1629 m->m_pkthdr.rcvif = &scp->sp_if;
1630 m->m_pkthdr.len -= sizeof(struct hdlc_llc_header);
1631 m->m_data += sizeof(struct hdlc_llc_header);
1632 m->m_len -= sizeof(struct hdlc_llc_header);
1633 ifq = &clnlintrq;
1634 schednetisr(NETISR_ISO);
1635 break;
1636 #endif /* ISO */
1637 case CISCO_KEEPALIVE:
1638 SCA_DPRINTF(SCA_DEBUG_CISCO,
1639 ("Received CISCO keepalive packet\n"));
1640
1641 if (len < CISCO_PKT_LEN) {
1642 SCA_DPRINTF(SCA_DEBUG_CISCO,
1643 ("short CISCO packet %d, wanted %d\n",
1644 len, CISCO_PKT_LEN));
1645 scp->sp_if.if_ierrors++;
1646 goto dropit;
1647 }
1648
1649 m = m_pullup(m, sizeof(struct cisco_pkt));
1650 if (m == NULL) {
1651 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
1652 return;
1653 }
1654
1655 cisco = (struct cisco_pkt *)
1656 (mtod(m, u_int8_t *) + HDLC_HDRLEN);
1657 m->m_pkthdr.rcvif = &scp->sp_if;
1658
1659 switch (ntohl(cisco->type)) {
1660 case CISCO_ADDR_REQ:
1661 printf("Got CISCO addr_req, ignoring\n");
1662 scp->sp_if.if_ierrors++;
1663 goto dropit;
1664
1665 case CISCO_ADDR_REPLY:
1666 printf("Got CISCO addr_reply, ignoring\n");
1667 scp->sp_if.if_ierrors++;
1668 goto dropit;
1669
1670 case CISCO_KEEPALIVE_REQ:
1671
1672 SCA_DPRINTF(SCA_DEBUG_CISCO,
1673 ("Received KA, mseq %d,"
1674 " yseq %d, rel 0x%04x, t0"
1675 " %04x, t1 %04x\n",
1676 ntohl(cisco->par1), ntohl(cisco->par2),
1677 ntohs(cisco->rel), ntohs(cisco->time0),
1678 ntohs(cisco->time1)));
1679
1680 scp->cka_lastrx = ntohl(cisco->par1);
1681 scp->cka_lasttx++;
1682
1683 /*
1684 * schedule the transmit right here.
1685 */
1686 cisco->par2 = cisco->par1;
1687 cisco->par1 = htonl(scp->cka_lasttx);
1688 cisco->time0 = htons((u_int16_t)(t >> 16));
1689 cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));
1690
1691 ifq = &scp->linkq;
1692 if (IF_QFULL(ifq)) {
1693 IF_DROP(ifq);
1694 goto dropit;
1695 }
1696 IF_ENQUEUE(ifq, m);
1697
1698 sca_start(&scp->sp_if);
1699
1700 /* since start may have reset this fix */
1701 if (!scp->sca->sc_usedma) {
1702 scp->sca->scu_set_page(scp->sca,
1703 scp->sp_rxdesc_p);
1704 scp->sca->scu_page_on(scp->sca);
1705 }
1706 return;
1707 default:
1708 SCA_DPRINTF(SCA_DEBUG_CISCO,
1709 ("Unknown CISCO keepalive protocol 0x%04x\n",
1710 ntohl(cisco->type)));
1711
1712 scp->sp_if.if_noproto++;
1713 goto dropit;
1714 }
1715 return;
1716 default:
1717 SCA_DPRINTF(SCA_DEBUG_RX,
1718 ("Unknown/unexpected ethertype 0x%04x\n",
1719 ntohs(hdlc->h_proto)));
1720 scp->sp_if.if_noproto++;
1721 goto dropit;
1722 }
1723
1724 /* queue the packet */
1725 if (!IF_QFULL(ifq)) {
1726 IF_ENQUEUE(ifq, m);
1727 } else {
1728 IF_DROP(ifq);
1729 scp->sp_if.if_iqdrops++;
1730 goto dropit;
1731 }
1732 return;
1733 dropit:
1734 if (m)
1735 m_freem(m);
1736 return;
1737 }
1738
1739 #if SCA_DEBUG_LEVEL > 0
1740 /*
1741 * do a hex dump of the packet received into descriptor "desc" with
1742 * data buffer "p"
1743 */
1744 static void
1745 sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
1746 {
1747 int i;
1748 int nothing_yet = 1;
1749 struct sca_softc *sc;
1750 u_int len;
1751
1752 sc = scp->sca;
	printf("desc va %p: chainp 0x%x bufp 0x%x stat 0x%x len %d\n",
1754 desc,
1755 sca_desc_read_chainp(sc, desc),
1756 sca_desc_read_bufp(sc, desc),
1757 sca_desc_read_stat(sc, desc),
1758 (len = sca_desc_read_buflen(sc, desc)));
1759
1760 for (i = 0 ; i < len && i < 256; i++) {
1761 if (nothing_yet == 1 &&
1762 (sc->sc_usedma ? *p
1763 : bus_space_read_1(sc->scu_memt, sc->scu_memh,
1764 sca_page_addr(sc, p))) == 0) {
1765 p++;
1766 continue;
1767 }
1768 nothing_yet = 0;
1769 if (i % 16 == 0)
1770 printf("\n");
1771 printf("%02x ",
1772 (sc->sc_usedma ? *p
1773 : bus_space_read_1(sc->scu_memt, sc->scu_memh,
1774 sca_page_addr(sc, p))));
1775 p++;
1776 }
1777
1778 if (i % 16 != 1)
1779 printf("\n");
1780 }
1781 #endif
1782
1783 /*
1784 * adjust things because we have just read the current starting
1785 * frame
1786 *
1787 * must be called at splnet()
1788 */
1789 static void
1790 sca_frame_read_done(sca_port_t *scp)
1791 {
1792 u_int16_t edesc_p;
1793
	/* update where our indices are */
1795 scp->sp_rxend = scp->sp_rxstart;
1796 scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;
1797
1798 /* update the error [end] descriptor */
1799 edesc_p = (u_int16_t)scp->sp_rxdesc_p +
1800 (sizeof(sca_desc_t) * scp->sp_rxend);
1801 dmac_write_2(scp, SCA_EDAL0, edesc_p);
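
	/*
	 * e.g. after consuming desc[i]: sp_rxstart becomes (i + 1) mod
	 * sp_nrxdesc, sp_rxend becomes i, and eda now points at desc[i],
	 * returning the just-read buffer to the dmac while preserving the
	 * one-descriptor gap set up in sca_dmac_rxinit().
	 */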
1802 }
1803
1804 /*
1805 * set a port to the "up" state
1806 */
1807 static void
1808 sca_port_up(sca_port_t *scp)
1809 {
1810 struct sca_softc *sc = scp->sca;
1811 struct timeval now;
1812 #if 0
1813 u_int8_t ier0, ier1;
1814 #endif
1815
1816 /*
1817 * reset things
1818 */
1819 #if 0
1820 msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
1821 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
1822 #endif
1823 /*
1824 * clear in-use flag
1825 */
1826 scp->sp_if.if_flags &= ~IFF_OACTIVE;
1827 scp->sp_if.if_flags |= IFF_RUNNING;
1828
1829 /*
1830 * raise DTR
1831 */
1832 sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);
1833
1834 /*
1835 * raise RTS
1836 */
1837 msci_write_1(scp, SCA_CTL0,
1838 (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
1839 | SCA_CTL_RTS_HIGH);
1840
1841 #if 0
1842 /*
1843 * enable interrupts (no timer IER2)
1844 */
1845 ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
1846 | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
1847 ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
1848 | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
1849 if (scp->sp_port == 1) {
1850 ier0 <<= 4;
1851 ier1 <<= 4;
1852 }
1853 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
1854 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
1855 #else
1856 if (scp->sp_port == 0) {
1857 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
1858 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
1859 } else {
1860 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
1861 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
1862 }
1863 #endif
1864
1865 /*
1866 * enable transmit and receive
1867 */
1868 msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
1869 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);
1870
1871 /*
1872 * reset internal state
1873 */
1874 scp->sp_txinuse = 0;
1875 scp->sp_txcur = 0;
1876 getmicrotime(&now);
1877 scp->cka_lasttx = now.tv_usec;
1878 scp->cka_lastrx = 0;
1879 }
1880
1881 /*
1882 * set a port to the "down" state
1883 */
1884 static void
1885 sca_port_down(sca_port_t *scp)
1886 {
1887 struct sca_softc *sc = scp->sca;
1888 #if 0
1889 u_int8_t ier0, ier1;
1890 #endif
1891
1892 /*
1893 * lower DTR
1894 */
1895 sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);
1896
1897 /*
1898 * lower RTS
1899 */
1900 msci_write_1(scp, SCA_CTL0,
1901 (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
1902 | SCA_CTL_RTS_LOW);
1903
1904 /*
1905 * disable interrupts
1906 */
1907 #if 0
1908 ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
1909 | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
1910 ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
1911 | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
1912 if (scp->sp_port == 1) {
1913 ier0 <<= 4;
1914 ier1 <<= 4;
1915 }
1916 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
1917 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
1918 #else
1919 if (scp->sp_port == 0) {
1920 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
1921 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
1922 } else {
1923 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
1924 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
1925 }
1926 #endif
1927
1928 /*
1929 * disable transmit and receive
1930 */
1931 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
1932 msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);
1933
1934 /*
1935 * no, we're not in use anymore
1936 */
1937 scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
1938 }
1939
1940 /*
1941 * disable all DMA and interrupts for all ports at once.
1942 */
1943 void
1944 sca_shutdown(struct sca_softc *sca)
1945 {
1946 /*
1947 * disable DMA and interrupts
1948 */
1949 sca_write_1(sca, SCA_DMER, 0);
1950 sca_write_1(sca, SCA_IER0, 0);
1951 sca_write_1(sca, SCA_IER1, 0);
1952 }
1953
1954 /*
1955 * If there are packets to transmit, start the transmit DMA logic.
1956 */
1957 static void
1958 sca_port_starttx(sca_port_t *scp)
1959 {
1960 u_int32_t startdesc_p, enddesc_p;
1961 int enddesc;
1962
1963 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));
1964
1965 if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
1966 || scp->sp_txinuse == 0)
1967 return;
1968
1969 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));
1970
1971 scp->sp_if.if_flags |= IFF_OACTIVE;
1972
1973 /*
1974 * We have something to do, since we have at least one packet
1975 * waiting, and we are not already marked as active.
1976 */
1977 enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1978 startdesc_p = scp->sp_txdesc_p;
1979 enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;
1980
1981 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
1982 startdesc_p, enddesc_p));
1983
1984 dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
1985 dmac_write_2(scp, SCA_CDAL1,
1986 (u_int16_t)(startdesc_p & 0x0000ffff));
1987
1988 /*
1989 * enable the DMA
1990 */
1991 dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
1992 }
1993
1994 /*
1995 * allocate an mbuf at least long enough to hold "len" bytes.
1996 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
1997 * otherwise let the caller handle copying the data in.
1998 */
1999 static struct mbuf *
2000 sca_mbuf_alloc(struct sca_softc *sc, void *p, u_int len)
2001 {
2002 struct mbuf *m;
2003
2004 /*
2005 * allocate an mbuf and copy the important bits of data
2006 * into it. If the packet won't fit in the header,
2007 * allocate a cluster for it and store it there.
2008 */
2009 MGETHDR(m, M_DONTWAIT, MT_DATA);
2010 if (m == NULL)
2011 return NULL;
2012 if (len > MHLEN) {
2013 if (len > MCLBYTES) {
2014 m_freem(m);
2015 return NULL;
2016 }
2017 MCLGET(m, M_DONTWAIT);
2018 if ((m->m_flags & M_EXT) == 0) {
2019 m_freem(m);
2020 return NULL;
2021 }
2022 }
2023 if (p != NULL) {
2024 /* XXX do we need to sync here? */
2025 if (sc->sc_usedma)
2026 memcpy(mtod(m, void *), p, len);
2027 else
2028 bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
2029 sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
2030 }
2031 m->m_len = len;
2032 m->m_pkthdr.len = len;
2033
2034 return (m);
2035 }
2036
2037 /*
2038 * get the base clock
2039 */
2040 void
2041 sca_get_base_clock(struct sca_softc *sc)
2042 {
2043 struct timeval btv, ctv, dtv;
2044 u_int64_t bcnt;
2045 u_int32_t cnt;
2046 u_int16_t subcnt;
2047
2048 /* disable the timer, set prescale to 0 */
2049 sca_write_1(sc, SCA_TCSR0, 0);
2050 sca_write_1(sc, SCA_TEPR0, 0);
2051
2052 /* reset the counter */
2053 (void)sca_read_1(sc, SCA_TCSR0);
2054 subcnt = sca_read_2(sc, SCA_TCNTL0);
2055
2056 /* count to max */
2057 sca_write_2(sc, SCA_TCONRL0, 0xffff);
2058
2059 cnt = 0;
2060 microtime(&btv);
2061 /* start the timer -- no interrupt enable */
2062 sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
2063 for (;;) {
2064 microtime(&ctv);
2065
2066 /* end around 3/4 of a second */
2067 timersub(&ctv, &btv, &dtv);
2068 if (dtv.tv_usec >= 750000)
2069 break;
2070
2071 /* spin */
2072 while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
2073 ;
2074 /* reset the timer */
2075 (void)sca_read_2(sc, SCA_TCNTL0);
2076 cnt++;
2077 }
2078
2079 /* stop the timer */
2080 sca_write_1(sc, SCA_TCSR0, 0);
2081
2082 subcnt = sca_read_2(sc, SCA_TCNTL0);
2083 /* add the slop in and get the total timer ticks */
2084 cnt = (cnt << 16) | subcnt;
2085
2086 /* cnt is 1/8 the actual time */
2087 bcnt = cnt * 8;
2088 /* make it proportional to 3/4 of a second */
2089 bcnt *= (u_int64_t)750000;
2090 bcnt /= (u_int64_t)dtv.tv_usec;
2091 cnt = bcnt;
2092
2093 /* make it Hz */
2094 cnt *= 4;
2095 cnt /= 3;
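
	/*
	 * i.e. base = (ticks * 8) scaled to exactly 750000us of elapsed
	 * time, then multiplied by 4/3 to convert from counts-per-3/4s
	 * to counts-per-second (Hz).
	 */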
2096
2097 SCA_DPRINTF(SCA_DEBUG_CLOCK,
2098 ("sca: unadjusted base %lu Hz\n", (u_long)cnt));
2099
2100 /*
2101 * round to the nearest 200 -- this allows for +-3 ticks error
2102 */
2103 sc->sc_baseclock = ((cnt + 100) / 200) * 200;
2104 }
2105
2106 /*
2107 * print the information about the clock on the ports
2108 */
2109 void
2110 sca_print_clock_info(struct sca_softc *sc)
2111 {
2112 struct sca_port *scp;
2113 u_int32_t mhz, div;
2114 int i;
2115
2116 printf("%s: base clock %d Hz\n", device_xname(sc->sc_parent),
2117 sc->sc_baseclock);
2118
2119 /* print the information about the port clock selection */
2120 for (i = 0; i < sc->sc_numports; i++) {
2121 scp = &sc->sc_ports[i];
2122 mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
2123 div = scp->sp_rxs & SCA_RXS_DIV_MASK;
2124
2125 printf("%s: rx clock: ", scp->sp_if.if_xname);
2126 switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
2127 case SCA_RXS_CLK_LINE:
2128 printf("line");
2129 break;
2130 case SCA_RXS_CLK_LINE_SN:
2131 printf("line with noise suppression");
2132 break;
2133 case SCA_RXS_CLK_INTERNAL:
2134 printf("internal %d Hz", (mhz >> div));
2135 break;
2136 case SCA_RXS_CLK_ADPLL_OUT:
2137 printf("adpll using internal %d Hz", (mhz >> div));
2138 break;
2139 case SCA_RXS_CLK_ADPLL_IN:
2140 printf("adpll using line clock");
2141 break;
2142 }
2143 printf(" tx clock: ");
2144 div = scp->sp_txs & SCA_TXS_DIV_MASK;
2145 switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
2146 case SCA_TXS_CLK_LINE:
2147 printf("line\n");
2148 break;
2149 case SCA_TXS_CLK_INTERNAL:
2150 printf("internal %d Hz\n", (mhz >> div));
2151 break;
2152 case SCA_TXS_CLK_RXCLK:
2153 printf("rxclock\n");
2154 break;
2155 }
2156 if (scp->sp_eclock)
2157 printf("%s: outputting line clock\n",
2158 scp->sp_if.if_xname);
2159 }
2160 }
2161
2162