/*	$NetBSD: hd64570.c,v 1.15 2001/03/01 00:40:41 itojun Exp $	*/

/*
 * Copyright (c) 1999 Christian E. Hopps
 * Copyright (c) 1998 Vixie Enterprises
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer@flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */

/*
 * TODO:
 *
 *	o  teach the receive logic about errors, and about long frames that
 *	   span more than one input buffer.  (Right now, receive/transmit is
 *	   limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *	   This is currently 1504, which is large enough to hold the HDLC
 *	   header and the packet itself.  Packets which are too long are
 *	   silently dropped on transmit and silently dropped on receive.)
 *	o  write code to handle the msci interrupts, needed only for CD
 *	   and CTS changes.
 *	o  consider switching back to a "queue tx with DMA active" model which
 *	   should help sustain outgoing traffic
 *	o  through clever use of bus_dma*() functions, it should be possible
 *	   to map the mbuf's data area directly into a descriptor transmit
 *	   buffer, removing the need to allocate extra memory.  If, however,
 *	   we run out of descriptors for this, we will need to then allocate
 *	   one large mbuf, copy the fragmented chain into it, and put it onto
 *	   a single descriptor.  (A rough sketch of this idea follows this
 *	   comment.)
 *	o  use bus_dmamap_sync() with the right offset and lengths, rather
 *	   than cheating and always sync'ing the whole region.
 *
 *	o  perhaps allow rx and tx to be in more than one page
 *	   if not using dma.  currently the assumption is that
 *	   rx uses a page and tx uses a page.
 */
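
/*
 * A rough, untested sketch of the bus_dma*() idea from the TODO list
 * above: map the mbuf chain directly for device access instead of
 * copying it into the per-descriptor buffer.  The helper name, the map
 * argument, and the fallback strategy are illustrative assumptions,
 * not part of this driver.
 */
#if 0
static int
sca_txmap_mbuf(struct sca_softc *sc, bus_dmamap_t map, struct mbuf *m)
{
	int error;

	/* load every fragment of the chain into the DMA map at once */
	error = bus_dmamap_load_mbuf(sc->scu_dmat, map, m, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);	/* caller would fall back to the copy path */
	bus_dmamap_sync(sc->scu_dmat, map, 0, m->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);
	/* ... point one tx descriptor at each entry of map->dm_segs[] ... */
	return (0);
}
#endif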

#include "bpfilter.h"
#include "opt_inet.h"
#include "opt_iso.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#endif
#endif

#ifdef ISO
#include <net/if_llc.h>
#include <netiso/iso.h>
#include <netiso/iso_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>

#define SCA_DEBUG_RX	0x0001
#define SCA_DEBUG_TX	0x0002
#define SCA_DEBUG_CISCO	0x0004
#define SCA_DEBUG_DMA	0x0008
#define SCA_DEBUG_RXPKT	0x0010
#define SCA_DEBUG_TXPKT	0x0020
#define SCA_DEBUG_INTR	0x0040
#define SCA_DEBUG_CLOCK	0x0080

#if 0
#define SCA_DEBUG_LEVEL	( 0xFFFF )
#else
#define SCA_DEBUG_LEVEL	0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
	if ((l) & sca_debug) \
		printf x;\
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif

#if 0
#define SCA_USE_FASTQ	/* use a split queue, one for fast traffic */
#endif

static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static void sca_msci_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static int sca_dmac_intr(sca_port_t *, u_int8_t);
static int sca_msci_intr(sca_port_t *, u_int8_t);

static void sca_get_packets(sca_port_t *);
static int sca_frame_avail(sca_port_t *);
static void sca_frame_process(sca_port_t *);
static void sca_frame_read_done(sca_port_t *);

static void sca_port_starttx(sca_port_t *);

static void sca_port_up(sca_port_t *);
static void sca_port_down(sca_port_t *);

static int sca_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
    struct rtentry *));
static int sca_ioctl __P((struct ifnet *, u_long, caddr_t));
static void sca_start __P((struct ifnet *));
static void sca_watchdog __P((struct ifnet *));

static struct mbuf *sca_mbuf_alloc(struct sca_softc *, caddr_t, u_int);

#if SCA_DEBUG_LEVEL > 0
static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif


#define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
#define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
#define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
#define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)

#define	sca_page_addr(sc, addr)	((bus_addr_t)(addr) & (sc)->scu_pagemask)
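
/*
 * An illustrative example of the window arithmetic above (the window
 * size is a property of the attachment, not something fixed here): a
 * card exposing a 16KB shared-memory window would have scu_pagemask
 * 0x3fff, so a descriptor at bus address 0x000c8010 becomes in-window
 * offset 0x0010 once the proper page has been selected.
 */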

static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}

/*
 * read the chain pointer
 */
static inline u_int16_t
sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_chainp);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
}

/*
 * write the chain pointer
 */
static inline void
sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
{
	if (sc->sc_usedma)
		(dp)->sd_chainp = cp;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_chainp), cp);
}

/*
 * read the buffer pointer
 */
static inline u_int32_t
sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
{
	u_int32_t address;

	if (sc->sc_usedma)
		address = dp->sd_bufp | dp->sd_hbufp << 16;
	else {
		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
	}
	return (address);
}

/*
 * write the buffer pointer
 */
static inline void
sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
{
	if (sc->sc_usedma) {
		dp->sd_bufp = bufp & 0xFFFF;
		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
	} else {
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
		    bufp & 0xFFFF);
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
		    (bufp & 0x00FF0000) >> 16);
	}
}

/*
 * read the buffer length
 */
static inline u_int16_t
sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_buflen);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
}

/*
 * write the buffer length
 */
static inline void
sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
{
	if (sc->sc_usedma)
		(dp)->sd_buflen = len;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_buflen), len);
}

/*
 * read the descriptor status
 */
static inline u_int8_t
sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_stat);
	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
}

/*
 * write the descriptor status
 */
static inline void
sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
{
	if (sc->sc_usedma)
		(dp)->sd_stat = stat;
	else
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
		    stat);
}

void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check:  check number of ports.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can't handle fewer than 1 or more than 2 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
#if 0
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);
#endif

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}
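
/*
 * One possible shape for such a check, mirroring the timer usage in
 * sca_get_base_clock() below: run timer 0 briefly and see whether its
 * counter advances.  This is an unverified sketch, not tested on
 * hardware.
 */
#if 0
static int
sca_probe_chip(struct sca_softc *sc)
{
	u_int16_t before, after;

	sca_write_2(sc, SCA_TCONRL0, 0xffff);		/* count to max */
	before = sca_read_2(sc, SCA_TCNTL0);
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);	/* start timer 0 */
	DELAY(100);
	after = sca_read_2(sc, SCA_TCNTL0);
	sca_write_1(sc, SCA_TCSR0, 0);			/* stop it again */
	return (before != after);
}
#endif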

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;  /* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if (sc->sc_parent != NULL)
			ntwo_unit = sc->sc_parent->dv_unit * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if (sc->sc_parent != NULL)
			ntwo_unit = sc->sc_parent->dv_unit * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	sprintf(ifp->if_xname, "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PTPSERIAL;
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(ifp, DLT_HDLC, HDLC_HDRLEN);
#endif

	if (sc->sc_parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		       ifp->if_xname, sc->sc_parent->dv_xname, port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

#if 0
/*
 * returns log2(div), sets 'tmc' for the required freq 'hz'
 */
static u_int8_t
sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
{
	u_int32_t tmc, div;
	u_int32_t clock;

	/* clock hz = (chipclock / tmc) / 2^(div); */
	/*
	 * TD == tmc * 2^(n)
	 *
	 * note:
	 *	1 <= TD <= 256		TD is inc of 1
	 *	2 <= TD <= 512		TD is inc of 2
	 *	4 <= TD <= 1024		TD is inc of 4
	 *	...
	 *	512 <= TD <= 256*512	TD is inc of 512
	 *
	 * so note there are overlaps.  We lose precision
	 * as div increases, so we wish to minimize div.
	 *
	 * basically we want to do
	 *
	 *	tmc = chip / hz, but have tmc <= 256
	 */

	/* assume system clock is 9.8304MHz or 9830400Hz */
	clock = 9830400 >> 1;

	/* round down */
	div = 0;
	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
		clock >>= 1;
		div++;
	}
	if (clock / tmc > hz)
		tmc++;
	if (!tmc)
		tmc = 1;

	if (div > SCA_RXS_DIV_512) {
		/* set to maximums */
		div = SCA_RXS_DIV_512;
		tmc = 0;
	}

	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
	return (div & 0xFF);
}
#endif
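
/*
 * A worked example of the disabled function above, using its assumed
 * 9.8304MHz crystal (4915200Hz after the >> 1): for hz = 9600 the
 * first pass computes tmc = 4915200 / 9600 = 512 > 256, so the clock
 * is halved once (div = 1); then tmc = 2457600 / 9600 = 256, encoded
 * as *tmcp = 0.  The achieved rate is (4915200 / 256) / 2^1 = 9600Hz
 * exactly.
 */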

/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	/* reset the channel */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);

	msci_write_1(scp, SCA_MD00,
		     (  SCA_MD0_CRC_1
		      | SCA_MD0_CRC_CCITT
		      | SCA_MD0_CRC_ENABLE
		      | SCA_MD0_MODE_HDLC));
#if 0
	/* immediately send receive reset so the above takes */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif

	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
		     (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));

	/* be safe and do it again */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/* setup underrun and idle control, and initial RTS state */
	msci_write_1(scp, SCA_CTL0,
		     (SCA_CTL_IDLC_PATTERN
		     | SCA_CTL_UDRNC_AFTER_FCS
		     | SCA_CTL_RTS_LOW));

	/* reset the transmitter */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * set the clock sources
	 */
	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);

	/* set external clock generate as requested */
	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
#if 0
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
#else
	/* this would deliver transmitter underrun to ST1/ISR1 */
	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
#endif
	msci_write_1(scp, SCA_IE20, 0);

	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	/* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * The correct values here are important for avoiding underruns.
	 * For any fifo depth less than or equal to TRC0, txrdy is
	 * activated, which starts the dmac transfer into the fifo.
	 * For fifo depth >= TRC1 + 1, txrdy is cleared, which stops dma.
	 *
	 * Thus if we are using a very fast clock that empties the fifo
	 * quickly, delays in the dmac starting to fill the fifo can
	 * lead to underruns, so we want a fairly full fifo to still
	 * cause the dmac to start.  For cards with on-board ram this
	 * has no effect on system performance.  For cards that dma
	 * to/from system memory it will cause more, shorter,
	 * bus accesses rather than fewer longer ones.
	 */
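	/*
	 * Concretely, with the values written below (TRC0 = 0x00 and
	 * TRC1 = 0x1f) against the chip's 32-byte transmit fifo: txrdy
	 * asserts once the fifo drains empty and stays asserted until
	 * the dmac has refilled it to TRC1 + 1 = 32 bytes, i.e. every
	 * refill is a complete one.  The underrun handler in
	 * sca_msci_intr() raises TRC0 when this proves too late for a
	 * fast line.
	 */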
	msci_write_1(scp, SCA_TRC00, 0x00);
	msci_write_1(scp, SCA_TRC10, 0x1f);
}

/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
		    BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX assumes that all tx desc and bufs in same page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	desc = scp->sp_txdesc;
	desc_p = scp->sp_txdesc_p;
	buf_p = scp->sp_txbuf_p;
	scp->sp_txcur = 0;
	scp->sp_txinuse = 0;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	/* XXX1
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	 */
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
		     (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 *
	 * XXX assumes that all rx desc and bufs in same page
	 */
	if (!sc->sc_usedma)
		sc->scu_set_page(sc, scp->sp_rxdesc_p);

	desc = scp->sp_rxdesc;
	desc_p = scp->sp_rxdesc_p;
	buf_p = scp->sp_rxbuf_p;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
		sca_desc_write_buflen(sc, desc, 0);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);

	sca_dmac_rxinit(scp);

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/* reset descriptors to initial state */
	scp->sp_rxstart = 0;
	scp->sp_rxend = scp->sp_nrxdesc - 1;

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 * Because cda can't equal eda when starting, we always
	 * have a single buffer gap between cda and eda.
	 */
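	/*
	 * For example, with sp_nrxdesc == 8 the ring starts out as
	 *
	 *	cda -> desc0 desc1 ... desc6 desc7 <- eda
	 *
	 * so the dmac may fill descriptors 0 through 6 but stops short
	 * of eda; sca_frame_read_done() advances eda as frames are
	 * consumed, preserving the one-descriptor gap.
	 */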
	dmac_write_1(scp, SCA_CPB0,
	    (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
		     (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(ifp, m, dst, rt0)
	struct ifnet *ifp;
	struct mbuf *m;
	struct sockaddr *dst;
	struct rtentry *rt0;
{
#ifdef ISO
	struct hdlc_llc_header *llc;
#endif
	struct hdlc_header *hdlc;
	struct ifqueue *ifq = NULL;
	int s, error, len;
	short mflags;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	error = 0;
	ifp->if_lastchange = time;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

	/*
	 * If the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef SCA_USE_FASTQ
		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
		    == IPTOS_LOWDELAY)
			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
#endif
		/*
		 * Add cisco serial line header.  If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/*
		 * Add cisco serial line header.  If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
		break;
#endif
#ifdef ISO
	case AF_ISO:
		/*
		 * Add cisco llc serial line header.  If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_llc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		llc = mtod(m, struct hdlc_llc_header *);
		llc->hl_dsap = llc->hl_ssap = LLC_ISO_LSAP;
		llc->hl_ffb = 0;
		break;
#endif
	default:
		printf("%s: address family %d unsupported\n",
		       ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/* finish */
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->h_addr = CISCO_MULTICAST;
	else
		hdlc->h_addr = CISCO_UNICAST;
	hdlc->h_resv = 0;

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	mflags = m->m_flags;
	len = m->m_pkthdr.len;
	s = splnet();
	if (ifq != NULL) {
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			m_freem(m);
			error = ENOBUFS;
		} else
			IF_ENQUEUE(ifq, m);
	} else
		IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
	if (error != 0) {
		splx(s);
		ifp->if_oerrors++;
		ifp->if_collisions++;
		return (error);
	}
	ifp->if_obytes += len;
	ifp->if_lastchange = time;
	if (mflags & M_MCAST)
		ifp->if_omcasts++;

	sca_start(ifp);
	splx(s);

	return (error);

bad:
	if (m)
		m_freem(m);
	return (error);
}
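
/*
 * For reference, the 4-byte cisco hdlc header prepended above looks
 * like this on the wire (the llc variant used for ISO adds the
 * dsap/ssap/ffb bytes after it):
 *
 *	+--------+--------+------------------+
 *	| h_addr | h_resv |     h_proto      |
 *	| 1 byte | 1 byte | 2 bytes, big-end |
 *	+--------+--------+------------------+
 *
 * where h_addr is CISCO_UNICAST or CISCO_MULTICAST and h_proto is an
 * ethertype-style value such as HDLC_PROTOCOL_IP.
 */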

static int
sca_ioctl(ifp, cmd, addr)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t addr;
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)addr;
	ifa = (struct ifaddr *)addr;
	error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
#endif
#ifdef INET6
		case AF_INET6:
#endif
#if defined(INET) || defined(INET6)
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFDSTADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			break;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			break;
#endif
		error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX need multicast group management code */
		if (ifr == 0) {
			error = EAFNOSUPPORT;		/* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if (ifr->ifr_flags & IFF_UP) {
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
		} else {
			ifp->if_flags &= ~IFF_UP;
			sca_port_down(ifp->if_softc);
		}

		break;

	default:
		error = EINVAL;
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(ifp)
	struct ifnet *ifp;
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, stat;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;
	u_int len;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));

	/*
	 * can't queue when we are full or transmitter is busy
	 */
#ifdef oldcode
	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#else
	if (scp->sp_txinuse
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#endif
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));

	/*
	 * XXX assume that all tx desc and bufs in same page
	 */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	trigger_xmit = 0;

 txloop:
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IF_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
#ifdef oldcode
	if (scp->txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->sp_txdesc[scp->txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
X
#endif
	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to mbuf len %d\n", m->m_len));

			if (sc->sc_usedma)
				bcopy(mtod(m, u_int8_t *), buf, m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	ifp->if_opackets++;

#if NBPFILTER > 0
	/*
	 * Pass packet to bpf if there is a listener.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, mb_head);
#endif

	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX so didn't this use to limit us to 1?!  Multi-descriptor
	 * transmit may be untested; sp_ntxdesc used to be hard coded
	 * to 2 with the claim of a too-hard-to-find bug.
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

 start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}

static void
sca_watchdog(ifp)
	struct ifnet *ifp;
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int ret;

	ret = 0;  /* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
			     isr0, isr1, isr2));

		/*
		 * check DMAC interrupt
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
					     isr1 & 0x0f);

		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
					     (isr1 & 0xf0) >> 4);

		/*
		 * MSCI interrupts
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);

		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
			    (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}

static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
				     dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				       scp->sp_if.if_xname,
				       dmac_read_2(scp, SCA_CDAL1),
				       dmac_read_2(scp, SCA_EDAL1),
				       dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error.  It means that all data queued
			 * was transmitted, and we mark ourself as
			 * not in use and stop the watchdog timer.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("Transmit completed. cda %x eda %x dsr %x\n",
				     dmac_read_2(scp, SCA_CDAL1),
				     dmac_read_2(scp, SCA_EDAL1),
				     dsr));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;

				/*
				 * check for more packets
				 */
				sca_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));

		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
				| SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}

static int
sca_msci_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t st1, trc0;

	/* get and clear the specific interrupt -- should act on it :)*/
	if ((st1 = msci_read_1(scp, SCA_ST10))) {
		/* clear the interrupt */
		msci_write_1(scp, SCA_ST10, st1);

		if (st1 & SCA_ST1_UDRN) {
			/* underrun -- try to increase ready control */
			trc0 = msci_read_1(scp, SCA_TRC00);
			if (trc0 == 0x1f)
				printf("TX: underrun - fifo depth maxed\n");
			else {
				if ((trc0 += 2) > 0x1f)
					trc0 = 0x1f;
				SCA_DPRINTF(SCA_DEBUG_TX,
				   ("TX: udrn - incr fifo to %d\n", trc0));
				msci_write_1(scp, SCA_TRC00, trc0);
			}
		}
	}
	return (0);
}

static void
sca_get_packets(sca_port_t *scp)
{
	struct sca_softc *sc;

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));

	sc = scp->sca;
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX this code is unable to deal with rx stuff
		 * in more than 1 page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_rxdesc_p);
	}

	/* process as many frames as are available */
	while (sca_frame_avail(scp)) {
		sca_frame_process(scp);
		sca_frame_read_done(scp);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 *
 * must be called at splnet()
 */
static int
sca_frame_avail(sca_port_t *scp)
{
	struct sca_softc *sc;
	u_int16_t cda;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;
	int cdaidx, toolong;

	/*
	 * Read the current descriptor from the SCA.
	 */
	sc = scp->sca;
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
	desc_p = cda - desc_p;
	cdaidx = desc_p / sizeof(sca_desc_t);
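	/*
	 * Example of the index math above, assuming the 8-byte
	 * sca_desc_t layout: if sp_rxdesc_p ends in 0x4800 and the
	 * dmac reports cda == 0x4820, then desc_p becomes 0x20 and
	 * cdaidx = 0x20 / 8 = 4, so descriptors sp_rxstart through 3
	 * are complete and may be examined.
	 */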

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));

	/* note confusion */
	if (cdaidx >= scp->sp_nrxdesc)
		panic("current descriptor index out of range");

	/* see if we have a valid frame available */
	toolong = 0;
	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->sp_rxdesc[scp->sp_rxstart];
		rxstat = sca_desc_read_stat(scp->sca, desc);

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
		    scp->sp_port, scp->sp_rxstart, rxstat));

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS) {
			/*
			 * consider an error condition the end
			 * of a frame
			 */
			scp->sp_if.if_ierrors++;
			toolong = 0;
			continue;
		}

		/*
		 * if we aren't skipping overlong frames
		 * we are done, otherwise reset and look for
		 * another good frame
		 */
		if (rxstat & SCA_DESC_EOM) {
			if (!toolong)
				return (1);
			toolong = 0;
		} else if (!toolong) {
			/*
			 * we currently don't deal with frames
			 * larger than a single buffer (fixed MTU)
			 */
			scp->sp_if.if_ierrors++;
			toolong = 1;
		}
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
		    scp->sp_rxstart));
	}

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
	return 0;
}

/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp)
{
	struct ifqueue *ifq;
	struct hdlc_header *hdlc;
	struct cisco_pkt *cisco;
	sca_desc_t *desc;
	struct mbuf *m;
	u_int8_t *bufp;
	u_int16_t len;
	u_int32_t t;

	t = (time.tv_sec - boottime.tv_sec) * 1000;
	desc = &scp->sp_rxdesc[scp->sp_rxstart];
	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
	len = sca_desc_read_buflen(scp->sca, desc);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
	    (bus_addr_t)bufp, len));

#if SCA_DEBUG_LEVEL > 0
	if (sca_debug & SCA_DEBUG_RXPKT)
		sca_frame_print(scp, desc, bufp);
#endif
	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(struct hdlc_header)) {
		scp->sp_if.if_ierrors++;
		return;
	}

	m = sca_mbuf_alloc(scp->sca, bufp, len);
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
		return;
	}

	/*
	 * read and then strip off the HDLC information
	 */
	m = m_pullup(m, sizeof(struct hdlc_header));
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
		return;
	}

#if NBPFILTER > 0
	if (scp->sp_if.if_bpf)
		bpf_mtap(scp->sp_if.if_bpf, m);
#endif

	scp->sp_if.if_ipackets++;
	scp->sp_if.if_lastchange = time;

	hdlc = mtod(m, struct hdlc_header *);
	switch (ntohs(hdlc->h_proto)) {
#ifdef INET
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ipintrq;
		schednetisr(NETISR_IP);
		break;
#endif	/* INET */
#ifdef INET6
	case HDLC_PROTOCOL_IPV6:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IPv6 packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ip6intrq;
		schednetisr(NETISR_IPV6);
		break;
#endif	/* INET6 */
#ifdef ISO
	case HDLC_PROTOCOL_ISO:
		if (m->m_pkthdr.len < sizeof(struct hdlc_llc_header))
			goto dropit;
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_llc_header);
		m->m_data += sizeof(struct hdlc_llc_header);
		m->m_len -= sizeof(struct hdlc_llc_header);
		ifq = &clnlintrq;
		schednetisr(NETISR_ISO);
		break;
#endif	/* ISO */
	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("short CISCO packet %d, wanted %d\n",
				     len, CISCO_PKT_LEN));
			scp->sp_if.if_ierrors++;
			goto dropit;
		}

		m = m_pullup(m, sizeof(struct cisco_pkt));
		if (m == NULL) {
			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
			return;
		}

		cisco = (struct cisco_pkt *)
		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
		m->m_pkthdr.rcvif = &scp->sp_if;

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_KEEPALIVE_REQ:

			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Received KA, mseq %d,"
				     " yseq %d, rel 0x%04x, t0"
				     " %04x, t1 %04x\n",
				     ntohl(cisco->par1), ntohl(cisco->par2),
				     ntohs(cisco->rel), ntohs(cisco->time0),
				     ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.
			 */
			cisco->par2 = cisco->par1;
			cisco->par1 = htonl(scp->cka_lasttx);
			cisco->time0 = htons((u_int16_t)(t >> 16));
			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				goto dropit;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			/* since start may have reset this fix */
			if (!scp->sca->sc_usedma) {
				scp->sca->scu_set_page(scp->sca,
				    scp->sp_rxdesc_p);
				scp->sca->scu_page_on(scp->sca);
			}
			return;
		default:
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Unknown CISCO keepalive protocol 0x%04x\n",
				     ntohl(cisco->type)));

			scp->sp_if.if_noproto++;
			goto dropit;
		}
		return;
	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
			    ("Unknown/unexpected ethertype 0x%04x\n",
			     ntohs(hdlc->h_proto)));
		scp->sp_if.if_noproto++;
		goto dropit;
	}

	/* queue the packet */
	if (!IF_QFULL(ifq)) {
		IF_ENQUEUE(ifq, m);
	} else {
		IF_DROP(ifq);
		scp->sp_if.if_iqdrops++;
		goto dropit;
	}
	return;
dropit:
	if (m)
		m_freem(m);
	return;
}

#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;
	struct sca_softc *sc;
	u_int len;

	sc = scp->sca;
	printf("desc va %p: chainp 0x%x bufp 0x%x stat 0x%x len %d\n",
	       desc,
	       sca_desc_read_chainp(sc, desc),
	       sca_desc_read_bufp(sc, desc),
	       sca_desc_read_stat(sc, desc),
	       (len = sca_desc_read_buflen(sc, desc)));

	for (i = 0 ; i < len && i < 256; i++) {
		if (nothing_yet == 1 &&
		    (sc->sc_usedma ? *p
			: bus_space_read_1(sc->scu_memt, sc->scu_memh,
			sca_page_addr(sc, p))) == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ",
		       (sc->sc_usedma ? *p
			: bus_space_read_1(sc->scu_memt, sc->scu_memh,
			sca_page_addr(sc, p))));
		p++;
	}

	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * adjust things because we have just read the current starting
 * frame
 *
 * must be called at splnet()
 */
static void
sca_frame_read_done(sca_port_t *scp)
{
	u_int16_t edesc_p;

	/* update where our indices are */
	scp->sp_rxend = scp->sp_rxstart;
	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;

	/* update the error [end] descriptor */
	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
	    (sizeof(sca_desc_t) * scp->sp_rxend);
	dmac_write_2(scp, SCA_EDAL0, edesc_p);
}

/*
 * set a port to the "up" state
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
	scp->sp_if.if_flags |= IFF_RUNNING;

	/*
	 * raise DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	     | SCA_CTL_RTS_HIGH);

#if 0
	/*
	 * enable interrupts (no timer IER2)
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}
#endif

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->sp_txinuse = 0;
	scp->sp_txcur = 0;
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * set a port to the "down" state
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * lower DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	     | SCA_CTL_RTS_LOW);

	/*
	 * disable interrupts
	 */
#if 0
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}
#endif

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	struct sca_softc *sc;
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	sc = scp->sca;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->sp_txinuse == 0)
		return;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));

	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	startdesc_p = scp->sp_txdesc_p;
	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
	    startdesc_p, enddesc_p));

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
		     (u_int16_t)(startdesc_p & 0x0000ffff));

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 */
static struct mbuf *
sca_mbuf_alloc(struct sca_softc *sc, caddr_t p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL) {
		/* XXX do we need to sync here? */
		if (sc->sc_usedma)
			bcopy(p, mtod(m, caddr_t), len);
		else
			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
	}
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}

/*
 * get the base clock
 */
void
sca_get_base_clock(struct sca_softc *sc)
{
	struct timeval btv, ctv, dtv;
	u_int64_t bcnt;
	u_int32_t cnt;
	u_int16_t subcnt;

	/* disable the timer, set prescale to 0 */
	sca_write_1(sc, SCA_TCSR0, 0);
	sca_write_1(sc, SCA_TEPR0, 0);

	/* reset the counter */
	(void)sca_read_1(sc, SCA_TCSR0);
	subcnt = sca_read_2(sc, SCA_TCNTL0);

	/* count to max */
	sca_write_2(sc, SCA_TCONRL0, 0xffff);

	cnt = 0;
	microtime(&btv);
	/* start the timer -- no interrupt enable */
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
	for (;;) {
		microtime(&ctv);

		/* end around 3/4 of a second */
		timersub(&ctv, &btv, &dtv);
		if (dtv.tv_usec >= 750000)
			break;

		/* spin */
		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
			;
		/* reset the timer */
		(void)sca_read_2(sc, SCA_TCNTL0);
		cnt++;
	}

	/* stop the timer */
	sca_write_1(sc, SCA_TCSR0, 0);

	subcnt = sca_read_2(sc, SCA_TCNTL0);
	/* add the slop in and get the total timer ticks */
	cnt = (cnt << 16) | subcnt;

	/* cnt is 1/8 the actual time */
	bcnt = cnt * 8;
	/* make it proportional to 3/4 of a second */
	bcnt *= (u_int64_t)750000;
	bcnt /= (u_int64_t)dtv.tv_usec;
	cnt = bcnt;

	/* make it Hz */
	cnt *= 4;
	cnt /= 3;

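	/*
	 * Worked example of the arithmetic above: a 9.8304MHz base
	 * clock ticks the timer at 9830400 / 8 = 1228800Hz, so a full
	 * 3/4-second run counts cnt = 921600.  Then bcnt = 921600 * 8
	 * = 7372800 (the 750000 / tv_usec scaling is ~1 here), and
	 * cnt * 4 / 3 recovers 9830400Hz.
	 */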
	SCA_DPRINTF(SCA_DEBUG_CLOCK,
	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));

	/*
	 * round to the nearest 200 -- this allows for +-3 ticks error
	 */
	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
}

/*
 * print the information about the clock on the ports
 */
void
sca_print_clock_info(struct sca_softc *sc)
{
	struct sca_port *scp;
	u_int32_t mhz, div;
	int i;

	printf("%s: base clock %d Hz\n", sc->sc_parent->dv_xname,
	    sc->sc_baseclock);

	/* print the information about the port clock selection */
	for (i = 0; i < sc->sc_numports; i++) {
		scp = &sc->sc_ports[i];
		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
		div = scp->sp_rxs & SCA_RXS_DIV_MASK;

		printf("%s: rx clock: ", scp->sp_if.if_xname);
		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
		case SCA_RXS_CLK_LINE:
			printf("line");
			break;
		case SCA_RXS_CLK_LINE_SN:
			printf("line with noise suppression");
			break;
		case SCA_RXS_CLK_INTERNAL:
			printf("internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_OUT:
			printf("adpll using internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_IN:
			printf("adpll using line clock");
			break;
		}
		printf("  tx clock: ");
		div = scp->sp_txs & SCA_TXS_DIV_MASK;
		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
		case SCA_TXS_CLK_LINE:
			printf("line\n");
			break;
		case SCA_TXS_CLK_INTERNAL:
			printf("internal %d Hz\n", (mhz >> div));
			break;
		case SCA_TXS_CLK_RXCLK:
			printf("rxclock\n");
			break;
		}
		if (scp->sp_eclock)
			printf("%s: outputting line clock\n",
			       scp->sp_if.if_xname);
	}
}