/*	$NetBSD: hd64570.c,v 1.54 2018/06/26 06:48:00 msaitoh Exp $	*/

/*
 * Copyright (c) 1999 Christian E. Hopps
 * Copyright (c) 1998 Vixie Enterprises
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer (at) flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */

/*
 * TODO:
 *
 * o teach the receive logic about errors, and about long frames that
 *   span more than one input buffer.  (Right now, receive/transmit is
 *   limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *   This is currently 1504, which is large enough to hold the HDLC
 *   header and the packet itself.  Packets which are too long are
 *   silently dropped on both transmit and receive.)
 * o write code to handle the msci interrupts, needed only for CD
 *   and CTS changes.
 * o consider switching back to a "queue tx with DMA active" model which
 *   should help sustain outgoing traffic
 * o through clever use of bus_dma*() functions, it should be possible
 *   to map the mbuf's data area directly into a descriptor transmit
 *   buffer, removing the need to allocate extra memory.  If, however,
 *   we run out of descriptors for this, we will need to then allocate
 *   one large mbuf, copy the fragmented chain into it, and put it onto
 *   a single descriptor.
 * o use bus_dmamap_sync() with the right offset and lengths, rather
 *   than cheating and always sync'ing the whole region.
 *
 * o perhaps allow rx and tx to be in more than one page
 *   if not using DMA.  currently the assumption is that
 *   rx uses a page and tx uses a page.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.54 2018/06/26 06:48:00 msaitoh Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#endif
#endif

#include <net/bpf.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>

#define SCA_DEBUG_RX	0x0001
#define SCA_DEBUG_TX	0x0002
#define SCA_DEBUG_CISCO	0x0004
#define SCA_DEBUG_DMA	0x0008
#define SCA_DEBUG_RXPKT	0x0010
#define SCA_DEBUG_TXPKT	0x0020
#define SCA_DEBUG_INTR	0x0040
#define SCA_DEBUG_CLOCK	0x0080

#if 0
#define SCA_DEBUG_LEVEL	( 0xFFFF )
#else
#define SCA_DEBUG_LEVEL	0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
	if ((l) & sca_debug) \
		printf x;\
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif
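
/*
 * Usage note: the second argument to SCA_DPRINTF is a complete,
 * parenthesized printf() argument list, e.g.
 *
 *	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));
 *
 * which lets the debug variant expand to a plain "printf x;" without
 * relying on variadic macros.
 */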

#if 0
#define SCA_USE_FASTQ		/* use a split queue, one for fast traffic */
#endif

static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static void sca_msci_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static int sca_dmac_intr(sca_port_t *, u_int8_t);
static int sca_msci_intr(sca_port_t *, u_int8_t);

static void sca_get_packets(sca_port_t *);
static int sca_frame_avail(sca_port_t *);
static void sca_frame_process(sca_port_t *);
static void sca_frame_read_done(sca_port_t *);

static void sca_port_starttx(sca_port_t *);

static void sca_port_up(sca_port_t *);
static void sca_port_down(sca_port_t *);

static int sca_output(struct ifnet *, struct mbuf *, const struct sockaddr *,
    const struct rtentry *);
static int sca_ioctl(struct ifnet *, u_long, void *);
static void sca_start(struct ifnet *);
static void sca_watchdog(struct ifnet *);

static struct mbuf *sca_mbuf_alloc(struct sca_softc *, void *, u_int);

#if SCA_DEBUG_LEVEL > 0
static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif


#define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
#define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
#define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
#define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)

#define	sca_page_addr(sc, addr)	((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)
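
/*
 * sca_page_addr() yields the offset of "addr" within the currently
 * selected memory window.  Illustration (the mask value here is an
 * assumption, not something this file fixes): with a 16KB window,
 * scu_pagemask would be 0x3fff, so a descriptor at bus address
 * 0x4e0010 becomes window offset 0x0010 once scu_set_page() has
 * selected the page containing it.
 */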

static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}

#if SCA_DEBUG_LEVEL > 0
/*
 * read the chain pointer
 */
static inline u_int16_t
sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_chainp);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
}
#endif

/*
 * write the chain pointer
 */
static inline void
sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
{
	if (sc->sc_usedma)
		(dp)->sd_chainp = cp;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_chainp), cp);
}

#if SCA_DEBUG_LEVEL > 0
/*
 * read the buffer pointer
 */
static inline u_int32_t
sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
{
	u_int32_t address;

	if (sc->sc_usedma)
		address = dp->sd_bufp | dp->sd_hbufp << 16;
	else {
		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
	}
	return (address);
}
#endif

/*
 * write the buffer pointer
 */
static inline void
sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
{
	if (sc->sc_usedma) {
		dp->sd_bufp = bufp & 0xFFFF;
		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
	} else {
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
		    bufp & 0xFFFF);
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
		    (bufp & 0x00FF0000) >> 16);
	}
}

/*
 * read the buffer length
 */
static inline u_int16_t
sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_buflen);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
}

/*
 * write the buffer length
 */
static inline void
sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
{
	if (sc->sc_usedma)
		(dp)->sd_buflen = len;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_buflen), len);
}

/*
 * read the descriptor status
 */
static inline u_int8_t
sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_stat);
	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
}

/*
 * write the descriptor status
 */
static inline void
sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
{
	if (sc->sc_usedma)
		(dp)->sd_stat = stat;
	else
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
		    stat);
}

void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check:  check number of ports.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can't handle fewer than 1 or more than 2 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
#if 0
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);
#endif

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	struct timeval now;
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;  /* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if (sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if (sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PTPSERIAL;
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	if_alloc_sadl(ifp);
	bpf_attach(ifp, DLT_HDLC, HDLC_HDRLEN);
	bpf_mtap_softint_init(ifp);

	if (sc->sc_parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		       ifp->if_xname, device_xname(sc->sc_parent), port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

#if 0
/*
 * returns log2(div), sets 'tmc' for the required freq 'hz'
 */
static u_int8_t
sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
{
	u_int32_t tmc, div;
	u_int32_t clock;

	/* clock hz = (chipclock / tmc) / 2^(div); */
	/*
	 * TD == tmc * 2^(n)
	 *
	 * note:
	 * 1 <= TD <= 256		TD is inc of 1
	 * 2 <= TD <= 512		TD is inc of 2
	 * 4 <= TD <= 1024		TD is inc of 4
	 * ...
	 * 512 <= TD <= 256*512		TD is inc of 512
	 *
	 * so note there are overlaps.  We lose precision
	 * as div increases, so we wish to minimize div.
	 *
	 * basically we want to do
	 *
	 * tmc = chip / hz, but have tmc <= 256
	 */

	/* assume system clock is 9.8304MHz or 9830400Hz */
	clock = 9830400 >> 1;

	/* round down */
	div = 0;
	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
		clock >>= 1;
		div++;
	}
	if (clock / tmc > hz)
		tmc++;
	if (!tmc)
		tmc = 1;

	if (div > SCA_RXS_DIV_512) {
		/* set to maximums */
		div = SCA_RXS_DIV_512;
		tmc = 0;
	}

	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
	return (div & 0xFF);
}
#endif
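
/*
 * Illustrative walk-through of the (compiled-out) routine above,
 * using the 9.8304MHz system clock it assumes: a request for
 * hz == 9600 starts with clock = 4915200, so tmc = 512 > 256 and the
 * loop halves clock to 2457600 with div = 1; the next pass yields
 * tmc = 256 with 2457600 / 256 == 9600, so the loop exits, *tmcp is
 * set to 0 (which the chip interprets as 256), and 1 (divide by 2)
 * is returned.
 */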

/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	/* reset the channel */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);

	msci_write_1(scp, SCA_MD00,
	    (SCA_MD0_CRC_1
	    | SCA_MD0_CRC_CCITT
	    | SCA_MD0_CRC_ENABLE
	    | SCA_MD0_MODE_HDLC));
#if 0
	/* immediately send receive reset so the above takes effect */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif

	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
	    (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));

	/* be safe and do it again */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/* setup underrun and idle control, and initial RTS state */
	msci_write_1(scp, SCA_CTL0,
	    (SCA_CTL_IDLC_PATTERN
	    | SCA_CTL_UDRNC_AFTER_FCS
	    | SCA_CTL_RTS_LOW));

	/* reset the transmitter */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * set the clock sources
	 */
	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);

	/* set external clock generate as requested */
	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
#if 0
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
#else
	/* this would deliver transmitter underrun to ST1/ISR1 */
	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
#endif
	msci_write_1(scp, SCA_IE20, 0);

	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	/* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * The correct values here are important for avoiding underruns.
	 * For any FIFO fill level less than or equal to TRC0, TXRDY is
	 * asserted, which will start the DMAC transfer to the FIFO.
	 * Once the FIFO holds TRC1 + 1 or more bytes, TXRDY is cleared,
	 * which will stop DMA.
	 *
	 * Thus if we are using a very fast clock that empties the FIFO
	 * quickly, delays in the DMAC starting to fill the FIFO can
	 * lead to underruns, so we want a fairly full FIFO to still
	 * cause the DMAC to start.  For cards with on-board RAM this
	 * has no effect on system performance.  For cards that DMA
	 * to/from system memory it will cause more, shorter,
	 * bus accesses rather than fewer longer ones.
	 */
	msci_write_1(scp, SCA_TRC00, 0x00);
	msci_write_1(scp, SCA_TRC10, 0x1f);
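
	/*
	 * With the values above (an illustration, assuming the MSCI's
	 * 32-stage transmit FIFO): TRC0 = 0x00 means the DMAC is asked
	 * for data only once the FIFO has fully drained, and TRC1 = 0x1f
	 * keeps the transfer running until all 32 stages are in use, so
	 * each DMA burst refills the FIFO completely.
	 */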
}

/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
		    BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX assumes that all tx desc and bufs in same page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	desc = scp->sp_txdesc;
	desc_p = scp->sp_txdesc_p;
	buf_p = scp->sp_txbuf_p;
	scp->sp_txcur = 0;
	scp->sp_txinuse = 0;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
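	/*
	 * Note for the reader: the chain pointers written above carry
	 * only the low 16 bits of each descriptor's physical address;
	 * CPB supplies the upper 8 bits of the 24-bit address, which is
	 * why the DEBUG checks above insist that the ring never crosses
	 * a 64KB boundary.
	 */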
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	/* XXX1
	dmac_write_1(scp, SCA_DIR1,
	    (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	 */
	dmac_write_1(scp, SCA_DIR1,
	    (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
	    (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 *
	 * XXX assumes that all rx desc and bufs in same page
	 */
	if (!sc->sc_usedma)
		sc->scu_set_page(sc, scp->sp_rxdesc_p);

	desc = scp->sp_rxdesc;
	desc_p = scp->sp_rxdesc_p;
	buf_p = scp->sp_rxbuf_p;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
		sca_desc_write_buflen(sc, desc, 0);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);

	sca_dmac_rxinit(scp);

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/* reset descriptors to initial state */
	scp->sp_rxstart = 0;
	scp->sp_rxend = scp->sp_nrxdesc - 1;

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 * because cda can't be eda when starting we always
	 * have a single buffer gap between cda and eda
	 */
	dmac_write_1(scp, SCA_CPB0,
	    (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));
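
	/*
	 * e.g. (illustration): with sp_nrxdesc == 32, CDA starts at
	 * descriptor 0 and EDA at descriptor 31, so the DMAC may fill
	 * descriptors 0..30; sca_frame_read_done() advances EDA as
	 * frames are consumed, maintaining the one-descriptor gap.
	 */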

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
	    (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(
    struct ifnet *ifp,
    struct mbuf *m,
    const struct sockaddr *dst,
    const struct rtentry *rt0)
{
	struct hdlc_header *hdlc;
	struct ifqueue *ifq = NULL;
	int s, error, len;
	short mflags;

	error = 0;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

	/*
	 * If the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef SCA_USE_FASTQ
		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
		    == IPTOS_LOWDELAY)
			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
#endif
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
		break;
#endif
	default:
		printf("%s: address family %d unsupported\n",
		       ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/* finish */
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->h_addr = CISCO_MULTICAST;
	else
		hdlc->h_addr = CISCO_UNICAST;
	hdlc->h_resv = 0;

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	mflags = m->m_flags;
	len = m->m_pkthdr.len;
	s = splnet();
	if (ifq != NULL) {
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			m_freem(m);
			error = ENOBUFS;
		} else
			IF_ENQUEUE(ifq, m);
	} else
		IFQ_ENQUEUE(&ifp->if_snd, m, error);
	if (error != 0) {
		splx(s);
		ifp->if_oerrors++;
		ifp->if_collisions++;
		return (error);
	}
	ifp->if_obytes += len;
	if (mflags & M_MCAST)
		ifp->if_omcasts++;

	sca_start(ifp);
	splx(s);

	return (error);

bad:
	if (m)
		m_freem(m);
	return (error);
}

static int
sca_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *)data;
	error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
#endif
#ifdef INET6
		case AF_INET6:
#endif
#if defined(INET) || defined(INET6)
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFDSTADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			break;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			break;
#endif
		error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX need multicast group management code */
		if (ifr == NULL) {
			error = EAFNOSUPPORT;		/* XXX */
			break;
		}
		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifr->ifr_flags & IFF_UP) {
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
		} else {
			ifp->if_flags &= ~IFF_UP;
			sca_port_down(ifp->if_softc);
		}

		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(struct ifnet *ifp)
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, stat;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;
	u_int len;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));

	/*
	 * can't queue when we are full or transmitter is busy
	 */
#ifdef oldcode
	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#else
	if (scp->sp_txinuse
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#endif
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));

	/*
	 * XXX assume that all tx desc and bufs in same page
	 */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	trigger_xmit = 0;

 txloop:
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
#ifdef oldcode
	if (scp->txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->sp_txdesc[scp->txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
X
#endif
	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to mbuf len %d\n", m->m_len));

			if (sc->sc_usedma)
				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	ifp->if_opackets++;

	/*
	 * Pass packet to bpf if there is a listener.
	 */
	bpf_mtap(ifp, mb_head, BPF_D_OUT);

	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX so didn't this use to limit us to 1?! - multi may be
	 * untested.  sp_ntxdesc used to be hard-coded to 2 with the
	 * claim of a too-hard-to-find bug.
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

 start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}

static void
sca_watchdog(struct ifnet *ifp)
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int ret;

	ret = 0;  /* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
		    isr0, isr1, isr2));

		/*
		 * check DMAC interrupt
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
			    isr1 & 0x0f);

		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
			    (isr1 & 0xf0) >> 4);

		/*
		 * msci interrupts
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);

		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
			    (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}

static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
			    dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				    scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				    scp->sp_if.if_xname,
				    dmac_read_2(scp, SCA_CDAL1),
				    dmac_read_2(scp, SCA_EDAL1),
				    dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error.  It means that all data queued
			 * was transmitted, and we mark ourself as
			 * not in use and stop the watchdog timer.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("Transmit completed. cda %x eda %x dsr %x\n",
				    dmac_read_2(scp, SCA_CDAL1),
				    dmac_read_2(scp, SCA_EDAL1),
				    dsr));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;

				/*
				 * check for more packets
				 */
				if_schedule_deferred_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));

		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
			    | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				    scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				    scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}

static int
sca_msci_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t st1, trc0;

	/* get and clear the specific interrupt -- should act on it :)*/
	if ((st1 = msci_read_1(scp, SCA_ST10))) {
		/* clear the interrupt */
		msci_write_1(scp, SCA_ST10, st1);

		if (st1 & SCA_ST1_UDRN) {
			/* underrun -- try to increase ready control */
			trc0 = msci_read_1(scp, SCA_TRC00);
			if (trc0 == 0x1f)
				printf("TX: underrun - fifo depth maxed\n");
			else {
				if ((trc0 += 2) > 0x1f)
					trc0 = 0x1f;
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("TX: udrn - incr fifo to %d\n", trc0));
				msci_write_1(scp, SCA_TRC00, trc0);
			}
		}
	}
	return (0);
}

static void
sca_get_packets(sca_port_t *scp)
{
	struct sca_softc *sc;

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));

	sc = scp->sca;
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX this code is unable to deal with rx stuff
		 * in more than 1 page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_rxdesc_p);
	}

	/* process as many frames as are available */
	while (sca_frame_avail(scp)) {
		sca_frame_process(scp);
		sca_frame_read_done(scp);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 *
 * must be called at splnet()
 */
static int
sca_frame_avail(sca_port_t *scp)
{
	u_int16_t cda;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;
	int cdaidx, toolong;

	/*
	 * Read the current descriptor from the SCA.
	 */
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
	desc_p = cda - desc_p;
	cdaidx = desc_p / sizeof(sca_desc_t);
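
	/*
	 * e.g. (illustration): if CDAL0 reads back the ring base's low
	 * word plus 3 * sizeof(sca_desc_t), the chip is currently
	 * filling descriptor 3, and complete frames may be waiting in
	 * descriptors sp_rxstart .. 2.
	 */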

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));

	/* note confusion */
	if (cdaidx >= scp->sp_nrxdesc)
		panic("current descriptor index out of range");

	/* see if we have a valid frame available */
	toolong = 0;
	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->sp_rxdesc[scp->sp_rxstart];
		rxstat = sca_desc_read_stat(scp->sca, desc);

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
		    scp->sp_port, scp->sp_rxstart, rxstat));

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS) {
			/*
			 * consider an error condition the end
			 * of a frame
			 */
			scp->sp_if.if_ierrors++;
			toolong = 0;
			continue;
		}

		/*
		 * if we aren't skipping overlong frames
		 * we are done, otherwise reset and look for
		 * another good frame
		 */
		if (rxstat & SCA_DESC_EOM) {
			if (!toolong)
				return (1);
			toolong = 0;
		} else if (!toolong) {
			/*
			 * we currently don't deal with frames
			 * larger than a single buffer (fixed MTU)
			 */
			scp->sp_if.if_ierrors++;
			toolong = 1;
		}
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
		    scp->sp_rxstart));
	}

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
	return 0;
}

/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp)
{
	pktqueue_t *pktq = NULL;
	struct ifqueue *ifq = NULL;
	struct hdlc_header *hdlc;
	struct cisco_pkt *cisco;
	sca_desc_t *desc;
	struct mbuf *m;
	u_int8_t *bufp;
	u_int16_t len;
	u_int32_t t;
	int isr = 0;

	t = time_uptime * 1000;
	desc = &scp->sp_rxdesc[scp->sp_rxstart];
	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
	len = sca_desc_read_buflen(scp->sca, desc);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
	    (bus_addr_t)bufp, len));

#if SCA_DEBUG_LEVEL > 0
	if (sca_debug & SCA_DEBUG_RXPKT)
		sca_frame_print(scp, desc, bufp);
#endif
	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(struct hdlc_header)) {
		scp->sp_if.if_ierrors++;
		return;
	}

	m = sca_mbuf_alloc(scp->sca, bufp, len);
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
		return;
	}

	/*
	 * read and then strip off the HDLC information
	 */
	m = m_pullup(m, sizeof(struct hdlc_header));
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
		return;
	}

	bpf_mtap_softint(&scp->sp_if, m);

	scp->sp_if.if_ipackets++;

	hdlc = mtod(m, struct hdlc_header *);
	switch (ntohs(hdlc->h_proto)) {
#ifdef INET
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m_set_rcvif(m, &scp->sp_if);
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		pktq = ip_pktq;
		break;
#endif	/* INET */
#ifdef INET6
	case HDLC_PROTOCOL_IPV6:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IPv6 packet\n"));
		m_set_rcvif(m, &scp->sp_if);
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		pktq = ip6_pktq;
		break;
#endif	/* INET6 */
	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
		    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("short CISCO packet %d, wanted %d\n",
			    len, CISCO_PKT_LEN));
			scp->sp_if.if_ierrors++;
			goto dropit;
		}

		m = m_pullup(m, sizeof(struct cisco_pkt));
		if (m == NULL) {
			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
			return;
		}

		cisco = (struct cisco_pkt *)
		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
		m_set_rcvif(m, &scp->sp_if);

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_KEEPALIVE_REQ:

			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received KA, mseq %d,"
			    " yseq %d, rel 0x%04x, t0"
			    " %04x, t1 %04x\n",
			    ntohl(cisco->par1), ntohl(cisco->par2),
			    ntohs(cisco->rel), ntohs(cisco->time0),
			    ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.
			 */
			cisco->par2 = cisco->par1;
			cisco->par1 = htonl(scp->cka_lasttx);
			cisco->time0 = htons((u_int16_t)(t >> 16));
			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				goto dropit;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			/* since start may have reset this fix */
			if (!scp->sca->sc_usedma) {
				scp->sca->scu_set_page(scp->sca,
				    scp->sp_rxdesc_p);
				scp->sca->scu_page_on(scp->sca);
			}
			return;
		default:
			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Unknown CISCO keepalive protocol 0x%04x\n",
			    ntohl(cisco->type)));

			scp->sp_if.if_noproto++;
			goto dropit;
		}
		return;
	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
		    ("Unknown/unexpected ethertype 0x%04x\n",
		    ntohs(hdlc->h_proto)));
		scp->sp_if.if_noproto++;
		goto dropit;
	}

	/* Queue the packet */
	if (__predict_true(pktq)) {
		if (__predict_false(!pktq_enqueue(pktq, m, 0))) {
			scp->sp_if.if_iqdrops++;
			goto dropit;
		}
		return;
	}
	if (!IF_QFULL(ifq)) {
		IF_ENQUEUE(ifq, m);
		schednetisr(isr);
	} else {
		IF_DROP(ifq);
		scp->sp_if.if_iqdrops++;
		goto dropit;
	}
	return;
dropit:
	if (m)
		m_freem(m);
	return;
}

#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;
	struct sca_softc *sc;
	u_int len;

	sc = scp->sca;
	printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n",
	    desc,
	    sca_desc_read_chainp(sc, desc),
	    sca_desc_read_bufp(sc, desc),
	    sca_desc_read_stat(sc, desc),
	    (len = sca_desc_read_buflen(sc, desc)));

	for (i = 0 ; i < len && i < 256; i++) {
		if (nothing_yet == 1 &&
		    (sc->sc_usedma ? *p
		    : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, p))) == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ",
		    (sc->sc_usedma ? *p
		    : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, p))));
		p++;
	}

	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * adjust things because we have just read the current starting
 * frame
 *
 * must be called at splnet()
 */
static void
sca_frame_read_done(sca_port_t *scp)
{
	u_int16_t edesc_p;

	/* update where our indices are */
	scp->sp_rxend = scp->sp_rxstart;
	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;

	/* update the error [end] descriptor */
	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
	    (sizeof(sca_desc_t) * scp->sp_rxend);
	dmac_write_2(scp, SCA_EDAL0, edesc_p);
}

/*
 * set a port to the "up" state
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
	struct timeval now;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
	scp->sp_if.if_flags |= IFF_RUNNING;

	/*
	 * raise DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_HIGH);

#if 0
	/*
	 * enable interrupts (no timer IER2)
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}
#endif

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->sp_txinuse = 0;
	scp->sp_txcur = 0;
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * set a port to the "down" state
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * lower DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_LOW);

	/*
	 * disable interrupts
	 */
#if 0
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}
#endif

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->sp_txinuse == 0)
		return;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));

	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	startdesc_p = scp->sp_txdesc_p;
	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
	    startdesc_p, enddesc_p));

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
	    (u_int16_t)(startdesc_p & 0x0000ffff));

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 */
static struct mbuf *
sca_mbuf_alloc(struct sca_softc *sc, void *p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL) {
		/* XXX do we need to sync here? */
		if (sc->sc_usedma)
			memcpy(mtod(m, void *), p, len);
		else
			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
	}
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}

/*
 * get the base clock
 */
void
sca_get_base_clock(struct sca_softc *sc)
{
	struct timeval btv, ctv, dtv;
	u_int64_t bcnt;
	u_int32_t cnt;
	u_int16_t subcnt;

	/* disable the timer, set prescale to 0 */
	sca_write_1(sc, SCA_TCSR0, 0);
	sca_write_1(sc, SCA_TEPR0, 0);

	/* reset the counter */
	(void)sca_read_1(sc, SCA_TCSR0);
	subcnt = sca_read_2(sc, SCA_TCNTL0);

	/* count to max */
	sca_write_2(sc, SCA_TCONRL0, 0xffff);

	cnt = 0;
	microtime(&btv);
	/* start the timer -- no interrupt enable */
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
	for (;;) {
		microtime(&ctv);

		/* end around 3/4 of a second */
		timersub(&ctv, &btv, &dtv);
		if (dtv.tv_usec >= 750000)
			break;

		/* spin */
		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
			;
		/* reset the timer */
		(void)sca_read_2(sc, SCA_TCNTL0);
		cnt++;
	}

	/* stop the timer */
	sca_write_1(sc, SCA_TCSR0, 0);

	subcnt = sca_read_2(sc, SCA_TCNTL0);
	/* add the slop in and get the total timer ticks */
	cnt = (cnt << 16) | subcnt;

	/* cnt is 1/8 the actual time */
	bcnt = cnt * 8;
	/* make it proportional to 3/4 of a second */
	bcnt *= (u_int64_t)750000;
	bcnt /= (u_int64_t)dtv.tv_usec;
	cnt = bcnt;

	/* make it Hz */
	cnt *= 4;
	cnt /= 3;
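
	/*
	 * (ticks per 3/4 second) * 4/3 == ticks per second.  e.g. if
	 * the scaled tick count over the 750ms window comes to 7372800,
	 * the computed base is 9830400Hz -- the 9.8304MHz crystal
	 * assumed elsewhere in this file.
	 */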

	SCA_DPRINTF(SCA_DEBUG_CLOCK,
	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));

	/*
	 * round to the nearest 200 -- this allows for +-3 ticks error
	 */
	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
}

/*
 * print the information about the clock on the ports
 */
void
sca_print_clock_info(struct sca_softc *sc)
{
	struct sca_port *scp;
	u_int32_t mhz, div;
	int i;

	printf("%s: base clock %d Hz\n", device_xname(sc->sc_parent),
	    sc->sc_baseclock);

	/* print the information about the port clock selection */
	for (i = 0; i < sc->sc_numports; i++) {
		scp = &sc->sc_ports[i];
		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
		div = scp->sp_rxs & SCA_RXS_DIV_MASK;

		printf("%s: rx clock: ", scp->sp_if.if_xname);
		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
		case SCA_RXS_CLK_LINE:
			printf("line");
			break;
		case SCA_RXS_CLK_LINE_SN:
			printf("line with noise suppression");
			break;
		case SCA_RXS_CLK_INTERNAL:
			printf("internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_OUT:
			printf("adpll using internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_IN:
			printf("adpll using line clock");
			break;
		}
		printf(" tx clock: ");
		div = scp->sp_txs & SCA_TXS_DIV_MASK;
		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
		case SCA_TXS_CLK_LINE:
			printf("line\n");
			break;
		case SCA_TXS_CLK_INTERNAL:
			printf("internal %d Hz\n", (mhz >> div));
			break;
		case SCA_TXS_CLK_RXCLK:
			printf("rxclock\n");
			break;
		}
		if (scp->sp_eclock)
			printf("%s: outputting line clock\n",
			    scp->sp_if.if_xname);
	}
}