/*	$NetBSD: hd64570.c,v 1.13 2000/12/18 20:32:08 thorpej Exp $	*/

/*
 * Copyright (c) 1999 Christian E. Hopps
 * Copyright (c) 1998 Vixie Enterprises
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer (at) flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */

/*
 * TODO:
 *
 *	o  teach the receive logic about errors, and about long frames that
 *	   span more than one input buffer.  (Right now, receive/transmit is
 *	   limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *	   This is currently 1504, which is large enough to hold the HDLC
 *	   header and the packet itself.  Packets which are too long are
 *	   silently dropped on transmit and silently dropped on receive.)
 *	o  write code to handle the msci interrupts, needed only for CD
 *	   and CTS changes.
 *	o  consider switching back to a "queue tx with DMA active" model which
 *	   should help sustain outgoing traffic
 *	o  through clever use of bus_dma*() functions, it should be possible
 *	   to map the mbuf's data area directly into a descriptor transmit
 *	   buffer, removing the need to allocate extra memory.  If, however,
 *	   we run out of descriptors for this, we will need to then allocate
 *	   one large mbuf, copy the fragmented chain into it, and put it onto
 *	   a single descriptor.
 *	o  use bus_dmamap_sync() with the right offset and lengths, rather
 *	   than cheating and always sync'ing the whole region.  (A sketch of
 *	   the per-descriptor variant follows this comment block.)
 *
 *	o  perhaps allow rx and tx to be in more than one page
 *	   if not using dma.  currently the assumption is that
 *	   rx uses a page and tx uses a page.
 */
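
/*
 * Illustrative sketch for the bus_dmamap_sync() TODO item above: sync
 * only the descriptor and buffer about to change instead of the whole
 * shared region.  This is an untested sketch, not driver code; the
 * function name and the "base" layout assumption (tx descriptors first,
 * then tx buffers, at offset "base" within the DMA map) are hypothetical.
 */
#if 0
static void
sca_sync_txdesc(struct sca_softc *sc, sca_port_t *scp,
    bus_size_t base, int idx, int ops)
{
	/* sync just the one descriptor ... */
	bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
	    base + idx * sizeof(sca_desc_t), sizeof(sca_desc_t), ops);
	/* ... and just the one transmit buffer it points at */
	bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
	    base + scp->sp_ntxdesc * sizeof(sca_desc_t) + idx * SCA_BSIZE,
	    SCA_BSIZE, ops);
}
#endif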

#include "bpfilter.h"
#include "opt_inet.h"
#include "opt_iso.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#ifdef ISO
#include <net/if_llc.h>
#include <netiso/iso.h>
#include <netiso/iso_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>

#define SCA_DEBUG_RX	0x0001
#define SCA_DEBUG_TX	0x0002
#define SCA_DEBUG_CISCO	0x0004
#define SCA_DEBUG_DMA	0x0008
#define SCA_DEBUG_RXPKT	0x0010
#define SCA_DEBUG_TXPKT	0x0020
#define SCA_DEBUG_INTR	0x0040
#define SCA_DEBUG_CLOCK	0x0080

#if 0
#define SCA_DEBUG_LEVEL	( 0xFFFF )
#else
#define SCA_DEBUG_LEVEL	0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
	if ((l) & sca_debug) \
		printf x;\
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif

#if 0
#define SCA_USE_FASTQ		/* use a split queue, one for fast traffic */
#endif

static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static void sca_msci_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static int sca_dmac_intr(sca_port_t *, u_int8_t);
static int sca_msci_intr(sca_port_t *, u_int8_t);

static void sca_get_packets(sca_port_t *);
static int sca_frame_avail(sca_port_t *);
static void sca_frame_process(sca_port_t *);
static void sca_frame_read_done(sca_port_t *);

static void sca_port_starttx(sca_port_t *);

static void sca_port_up(sca_port_t *);
static void sca_port_down(sca_port_t *);

static int sca_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
			   struct rtentry *));
static int sca_ioctl __P((struct ifnet *, u_long, caddr_t));
static void sca_start __P((struct ifnet *));
static void sca_watchdog __P((struct ifnet *));

static struct mbuf *sca_mbuf_alloc(struct sca_softc *, caddr_t, u_int);

#if SCA_DEBUG_LEVEL > 0
static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif


#define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
#define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
#define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
#define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)

#define	sca_page_addr(sc, addr)	((bus_addr_t)(addr) & (sc)->scu_pagemask)

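/*
 * Illustrative sketch (not compiled, not part of the driver) of the
 * addressing model behind sca_page_addr(): on cards without DMA the
 * chip's memory is reached through a page window, so a read selects
 * the page first and then uses the in-page offset.  The helper name
 * is hypothetical; the pattern mirrors the page_on / set_page /
 * page_off calls used throughout this file.
 */
#if 0
static u_int8_t
sca_example_peek(struct sca_softc *sc, u_int32_t addr)
{
	u_int8_t v;

	sc->scu_page_on(sc);
	sc->scu_set_page(sc, addr);		/* select the page */
	v = bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, addr));		/* offset within the page */
	sc->scu_page_off(sc);
	return (v);
}
#endif
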
static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}

/*
 * read the chain pointer
 */
static inline u_int16_t
sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_chainp);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
}

/*
 * write the chain pointer
 */
static inline void
sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
{
	if (sc->sc_usedma)
		(dp)->sd_chainp = cp;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_chainp), cp);
}

/*
 * read the buffer pointer
 */
static inline u_int32_t
sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
{
	u_int32_t address;

	if (sc->sc_usedma)
		address = dp->sd_bufp | dp->sd_hbufp << 16;
	else {
		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
	}
	return (address);
}

/*
 * write the buffer pointer
 */
static inline void
sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
{
	if (sc->sc_usedma) {
		dp->sd_bufp = bufp & 0xFFFF;
		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
	} else {
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
		    bufp & 0xFFFF);
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
		    (bufp & 0x00FF0000) >> 16);
	}
}

/*
 * read the buffer length
 */
static inline u_int16_t
sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_buflen);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
}

/*
 * write the buffer length
 */
static inline void
sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
{
	if (sc->sc_usedma)
		(dp)->sd_buflen = len;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_buflen), len);
}

/*
 * read the descriptor status
 */
static inline u_int8_t
sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_stat);
	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
}

/*
 * write the descriptor status
 */
static inline void
sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
{
	if (sc->sc_usedma)
		(dp)->sd_stat = stat;
	else
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
		    stat);
}

void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check: make sure the number of ports is
	 * in range.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can't handle more than 2 or fewer than 1 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
#if 0
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);
#endif

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;  /* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if (sc->sc_parent != NULL)
			ntwo_unit = sc->sc_parent->dv_unit * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if (sc->sc_parent != NULL)
			ntwo_unit = sc->sc_parent->dv_unit * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	sprintf(ifp->if_xname, "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PTPSERIAL;
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	if_attach(ifp);

#if NBPFILTER > 0
	bpfattach(ifp, DLT_HDLC, HDLC_HDRLEN);
#endif

	if (sc->sc_parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		       ifp->if_xname, sc->sc_parent->dv_xname, port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

#if 0
/*
 * returns log2(div), sets 'tmc' for the required freq 'hz'
 */
static u_int8_t
sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
{
	u_int32_t tmc, div;
	u_int32_t clock;

	/* clock hz = (chipclock / tmc) / 2^(div); */
	/*
	 * TD == tmc * 2^(n)
	 *
	 * note:
	 *	1 <= TD <= 256		TD is inc of 1
	 *	2 <= TD <= 512		TD is inc of 2
	 *	4 <= TD <= 1024		TD is inc of 4
	 *	...
	 *	512 <= TD <= 256*512	TD is inc of 512
	 *
	 * so note there are overlaps.  We lose precision as div
	 * increases, so we wish to minimize div.
	 *
	 * basically we want to do
	 *
	 * tmc = chip / hz, but have tmc <= 256
	 */

	/* assume system clock is 9.8304 MHz or 9830400 Hz */
	clock = 9830400 >> 1;

	/* round down */
	div = 0;
	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
		clock >>= 1;
		div++;
	}
	if (clock / tmc > hz)
		tmc++;
	if (!tmc)
		tmc = 1;

	if (div > SCA_RXS_DIV_512) {
		/* set to maximums */
		div = SCA_RXS_DIV_512;
		tmc = 0;
	}

	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
	return (div & 0xFF);
}
#endif
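
/*
 * A worked example for the disabled routine above, kept as a sketch
 * rather than driver code: asking for hz = 9600 with the 9.8304 MHz
 * crystal starts at clock = 4915200; 4915200 / 9600 = 512 > 256, so
 * the clock is halved once (div = 1) and tmc settles at 256 (encoded
 * as 0), i.e. 9600 = (9830400 / 2) / 256 / 2^1.  The function below
 * and its use of SCA_RXS_CLK_INTERNAL / SCA_TXS_CLK_INTERNAL are
 * hypothetical glue, not an existing interface of this driver.
 */
#if 0
static void
sca_example_set_9600(sca_port_t *scp)
{
	u_int8_t tmc, div;

	div = sca_msci_get_baud_rate_values(9600, &tmc); /* div = 1, tmc = 0 */
	scp->sp_tmc = tmc;
	scp->sp_rxs = SCA_RXS_CLK_INTERNAL | div;
	scp->sp_txs = SCA_TXS_CLK_INTERNAL | div;
}
#endif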

/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	/* reset the channel */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);

	msci_write_1(scp, SCA_MD00,
		     (  SCA_MD0_CRC_1
		      | SCA_MD0_CRC_CCITT
		      | SCA_MD0_CRC_ENABLE
		      | SCA_MD0_MODE_HDLC));
#if 0
	/* immediately send receive reset so the above takes */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif

	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
		     (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));

	/* be safe and do it again */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/* setup underrun and idle control, and initial RTS state */
	msci_write_1(scp, SCA_CTL0,
		     (SCA_CTL_IDLC_PATTERN
		     | SCA_CTL_UDRNC_AFTER_FCS
		     | SCA_CTL_RTS_LOW));

	/* reset the transmitter */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * set the clock sources
	 */
	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);

	/* set external clock generate as requested */
	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
#if 0
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
#else
	/* this would deliver transmitter underrun to ST1/ISR1 */
	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
#endif
	msci_write_1(scp, SCA_IE20, 0);

	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	/* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * The correct values here are important for avoiding underruns.
	 * For any FIFO fill level less than or equal to TRC0, TXRDY is
	 * activated, which starts the dmac transfer into the FIFO; once
	 * the FIFO holds TRC1 + 1 bytes or more, TXRDY is cleared, which
	 * stops the dma.
	 *
	 * Thus if we are using a very fast clock that empties the FIFO
	 * quickly, delays in the dmac starting to fill the FIFO can
	 * lead to underruns, so we want a fairly full FIFO to still
	 * cause the dmac to start.  For cards with on-board RAM this
	 * has no effect on system performance.  For cards that DMA
	 * to/from system memory it will cause more, shorter,
	 * bus accesses rather than fewer longer ones.
	 */
	msci_write_1(scp, SCA_TRC00, 0x00);
	msci_write_1(scp, SCA_TRC10, 0x1f);
}

/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
		    BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX assumes that all tx desc and bufs in same page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	desc = scp->sp_txdesc;
	desc_p = scp->sp_txdesc_p;
	buf_p = scp->sp_txbuf_p;
	scp->sp_txcur = 0;
	scp->sp_txinuse = 0;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	/* XXX1
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	 */
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
		     (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 *
	 * XXX assumes that all rx desc and bufs in same page
	 */
	if (!sc->sc_usedma)
		sc->scu_set_page(sc, scp->sp_rxdesc_p);

	desc = scp->sp_rxdesc;
	desc_p = scp->sp_rxdesc_p;
	buf_p = scp->sp_rxbuf_p;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
		sca_desc_write_buflen(sc, desc, 0);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);

	sca_dmac_rxinit(scp);

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}
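
/*
 * A sketch (not compiled, not part of the driver) of the ring invariant
 * that sca_dmac_init() establishes: each descriptor's chain pointer
 * holds the low 16 bits of the physical address of the next descriptor,
 * and the last descriptor chains back to the first.  The function name
 * is hypothetical; it only restates the construction above as a check.
 */
#if 0
static void
sca_example_check_txring(struct sca_softc *sc, sca_port_t *scp)
{
	u_int16_t expect;
	int i;

	for (i = 0; i < scp->sp_ntxdesc; i++) {
		expect = (scp->sp_txdesc_p + sizeof(sca_desc_t)
		    * ((i + 1) % scp->sp_ntxdesc)) & 0x0000ffff;
		if (sca_desc_read_chainp(sc, &scp->sp_txdesc[i]) != expect)
			panic("sca: tx ring is not circular");
	}
}
#endif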

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/* reset descriptors to initial state */
	scp->sp_rxstart = 0;
	scp->sp_rxend = scp->sp_nrxdesc - 1;

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 * Because CDA can't equal EDA when starting, we always keep a
	 * single-buffer gap between CDA and EDA.
	 */
	dmac_write_1(scp, SCA_CPB0,
	    (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
	    (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(ifp, m, dst, rt0)
	struct ifnet *ifp;
	struct mbuf *m;
	struct sockaddr *dst;
	struct rtentry *rt0;
{
#ifdef ISO
	struct hdlc_llc_header *llc;
#endif
	struct hdlc_header *hdlc;
	struct ifqueue *ifq = NULL;
	int s, error, len;
	short mflags;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	error = 0;
	ifp->if_lastchange = time;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

	/*
	 * If the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef SCA_USE_FASTQ
		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
		    == IPTOS_LOWDELAY)
			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
#endif
		/*
		 * Add cisco serial line header.  If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
		break;
#endif
#ifdef ISO
	case AF_ISO:
		/*
		 * Add cisco llc serial line header.  If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_llc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		llc = mtod(m, struct hdlc_llc_header *);
		llc->hl_dsap = llc->hl_ssap = LLC_ISO_LSAP;
		llc->hl_ffb = 0;
		break;
#endif
	default:
		printf("%s: address family %d unsupported\n",
		       ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/* finish */
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->h_addr = CISCO_MULTICAST;
	else
		hdlc->h_addr = CISCO_UNICAST;
	hdlc->h_resv = 0;

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	mflags = m->m_flags;
	len = m->m_pkthdr.len;
	s = splnet();
	if (ifq != NULL) {
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			m_freem(m);
			error = ENOBUFS;
		} else
			IF_ENQUEUE(ifq, m);
	} else
		IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
	if (error != 0) {
		splx(s);
		ifp->if_oerrors++;
		ifp->if_collisions++;
		return (error);
	}
	ifp->if_obytes += len;
	ifp->if_lastchange = time;
	if (mflags & M_MCAST)
		ifp->if_omcasts++;

	sca_start(ifp);
	splx(s);

	return (error);

 bad:
	if (m)
		m_freem(m);
	return (error);
}

static int
sca_ioctl(ifp, cmd, addr)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t addr;
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)addr;
	ifa = (struct ifaddr *)addr;
	error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
		} else
#endif
			error = EAFNOSUPPORT;
		break;

	case SIOCSIFDSTADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family != AF_INET)
			error = EAFNOSUPPORT;
#else
		error = EAFNOSUPPORT;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifr == 0) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if (ifr->ifr_flags & IFF_UP) {
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
		} else {
			ifp->if_flags &= ~IFF_UP;
			sca_port_down(ifp->if_softc);
		}

		break;

	default:
		error = EINVAL;
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(ifp)
	struct ifnet *ifp;
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, stat;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;
	u_int len;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));

	/*
	 * can't queue when we are full or transmitter is busy
	 */
#ifdef oldcode
	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#else
	if (scp->sp_txinuse
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#endif
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));

	/*
	 * XXX assume that all tx desc and bufs in same page
	 */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	trigger_xmit = 0;

 txloop:
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IF_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
#ifdef oldcode
	if (scp->txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->sp_txdesc[scp->txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
X
#endif
	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to mbuf len %d\n", m->m_len));

			if (sc->sc_usedma)
				bcopy(mtod(m, u_int8_t *), buf, m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	ifp->if_opackets++;

#if NBPFILTER > 0
	/*
	 * Pass packet to bpf if there is a listener.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, mb_head);
#endif

	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX so didn't this used to limit us to 1?! - multi may be untested
	 * sp_ntxdesc used to be hard coded to 2 with claim of a too hard
	 * to find bug
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

 start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}

static void
sca_watchdog(ifp)
	struct ifnet *ifp;
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int ret;

	ret = 0;  /* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
			     isr0, isr1, isr2));

		/*
		 * check DMAC interrupt
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
					     isr1 & 0x0f);

		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
					     (isr1 & 0xf0) >> 4);

		/*
		 * msci interrupts
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);

		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
			    (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}

static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
				     dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				       scp->sp_if.if_xname,
				       dmac_read_2(scp, SCA_CDAL1),
				       dmac_read_2(scp, SCA_EDAL1),
				       dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error.  It means that all data queued
			 * was transmitted, and we mark ourselves as
			 * not in use and stop the watchdog timer.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
			    ("Transmit completed. cda %x eda %x dsr %x\n",
				    dmac_read_2(scp, SCA_CDAL1),
				    dmac_read_2(scp, SCA_EDAL1),
				    dsr));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;

				/*
				 * check for more packets
				 */
				sca_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));

		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
				| SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}

static int
sca_msci_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t st1, trc0;

	/* get and clear the specific interrupt -- should act on it :)*/
	if ((st1 = msci_read_1(scp, SCA_ST10))) {
		/* clear the interrupt */
		msci_write_1(scp, SCA_ST10, st1);

		if (st1 & SCA_ST1_UDRN) {
			/* underrun -- try to increase ready control */
			trc0 = msci_read_1(scp, SCA_TRC00);
			if (trc0 == 0x1f)
				printf("TX: underrun - fifo depth maxed\n");
			else {
				if ((trc0 += 2) > 0x1f)
					trc0 = 0x1f;
				SCA_DPRINTF(SCA_DEBUG_TX,
				   ("TX: udrn - incr fifo to %d\n", trc0));
				msci_write_1(scp, SCA_TRC00, trc0);
			}
		}
	}
	return (0);
}

static void
sca_get_packets(sca_port_t *scp)
{
	struct sca_softc *sc;

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));

	sc = scp->sca;
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX this code is unable to deal with rx stuff
		 * in more than 1 page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_rxdesc_p);
	}

	/* process as many frames as are available */
	while (sca_frame_avail(scp)) {
		sca_frame_process(scp);
		sca_frame_read_done(scp);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 *
 * must be called at splnet()
 */
static int
sca_frame_avail(sca_port_t *scp)
{
	struct sca_softc *sc;
	u_int16_t cda;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;
	int cdaidx, toolong;

	/*
	 * Read the current descriptor from the SCA.
	 */
	sc = scp->sca;
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
	desc_p = cda - desc_p;
	cdaidx = desc_p / sizeof(sca_desc_t);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));

	/* note confusion */
	if (cdaidx >= scp->sp_nrxdesc)
		panic("current descriptor index out of range");

	/* see if we have a valid frame available */
	toolong = 0;
	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->sp_rxdesc[scp->sp_rxstart];
		rxstat = sca_desc_read_stat(scp->sca, desc);

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
		    scp->sp_port, scp->sp_rxstart, rxstat));

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS) {
			/*
			 * consider an error condition the end
			 * of a frame
			 */
			scp->sp_if.if_ierrors++;
			toolong = 0;
			continue;
		}

		/*
		 * if we aren't skipping overlong frames
		 * we are done, otherwise reset and look for
		 * another good frame
		 */
		if (rxstat & SCA_DESC_EOM) {
			if (!toolong)
				return (1);
			toolong = 0;
		} else if (!toolong) {
			/*
			 * we currently don't deal with frames
			 * larger than a single buffer (fixed MTU)
			 */
			scp->sp_if.if_ierrors++;
			toolong = 1;
		}
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
		    scp->sp_rxstart));
	}

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
	return 0;
}
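
/*
 * Worked example (hypothetical helper, not driver code) of the
 * CDA-to-index math above: if the rx ring starts at a physical address
 * whose low 16 bits are 0x4000 and the chip reports CDA = 0x4000 +
 * 3 * sizeof(sca_desc_t), then cdaidx = 3, and descriptors sp_rxstart
 * through 2 are ours to examine.
 */
#if 0
static int
sca_example_cda_index(sca_port_t *scp, u_int16_t cda)
{
	return ((cda - (scp->sp_rxdesc_p & 0xffff)) / sizeof(sca_desc_t));
}
#endif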

/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp)
{
	struct ifqueue *ifq;
	struct hdlc_header *hdlc;
	struct cisco_pkt *cisco;
	sca_desc_t *desc;
	struct mbuf *m;
	u_int8_t *bufp;
	u_int16_t len;
	u_int32_t t;

	t = (time.tv_sec - boottime.tv_sec) * 1000;
	desc = &scp->sp_rxdesc[scp->sp_rxstart];
	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
	len = sca_desc_read_buflen(scp->sca, desc);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
	    (bus_addr_t)bufp, len));

#if SCA_DEBUG_LEVEL > 0
	if (sca_debug & SCA_DEBUG_RXPKT)
		sca_frame_print(scp, desc, bufp);
#endif
	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(struct hdlc_header)) {
		scp->sp_if.if_ierrors++;
		return;
	}

	m = sca_mbuf_alloc(scp->sca, bufp, len);
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
		return;
	}

	/*
	 * read and then strip off the HDLC information
	 */
	m = m_pullup(m, sizeof(struct hdlc_header));
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
		return;
	}

#if NBPFILTER > 0
	if (scp->sp_if.if_bpf)
		bpf_mtap(scp->sp_if.if_bpf, m);
#endif

	scp->sp_if.if_ipackets++;
	scp->sp_if.if_lastchange = time;

	hdlc = mtod(m, struct hdlc_header *);
	switch (ntohs(hdlc->h_proto)) {
#ifdef INET
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ipintrq;
		schednetisr(NETISR_IP);
		break;
#endif	/* INET */
#ifdef ISO
	case HDLC_PROTOCOL_ISO:
		if (m->m_pkthdr.len < sizeof(struct hdlc_llc_header))
			goto dropit;
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_llc_header);
		m->m_data += sizeof(struct hdlc_llc_header);
		m->m_len -= sizeof(struct hdlc_llc_header);
		ifq = &clnlintrq;
		schednetisr(NETISR_ISO);
		break;
#endif	/* ISO */
	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("short CISCO packet %d, wanted %d\n",
				     len, CISCO_PKT_LEN));
			scp->sp_if.if_ierrors++;
			goto dropit;
		}

		m = m_pullup(m, sizeof(struct cisco_pkt));
		if (m == NULL) {
			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
			return;
		}

		cisco = (struct cisco_pkt *)
		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
		m->m_pkthdr.rcvif = &scp->sp_if;

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_KEEPALIVE_REQ:

			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Received KA, mseq %d,"
				     " yseq %d, rel 0x%04x, t0"
				     " %04x, t1 %04x\n",
				     ntohl(cisco->par1), ntohl(cisco->par2),
				     ntohs(cisco->rel), ntohs(cisco->time0),
				     ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.
			 */
			cisco->par2 = cisco->par1;
			cisco->par1 = htonl(scp->cka_lasttx);
			cisco->time0 = htons((u_int16_t)(t >> 16));
			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				goto dropit;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			/* since start may have reset this fix */
			if (!scp->sca->sc_usedma) {
				scp->sca->scu_set_page(scp->sca,
				    scp->sp_rxdesc_p);
				scp->sca->scu_page_on(scp->sca);
			}
			return;
		default:
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Unknown CISCO keepalive protocol 0x%04x\n",
				     ntohl(cisco->type)));

			scp->sp_if.if_noproto++;
			goto dropit;
		}
		return;
	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
			    ("Unknown/unexpected ethertype 0x%04x\n",
			     ntohs(hdlc->h_proto)));
		scp->sp_if.if_noproto++;
		goto dropit;
	}

	/* queue the packet */
	if (!IF_QFULL(ifq)) {
		IF_ENQUEUE(ifq, m);
	} else {
		IF_DROP(ifq);
		scp->sp_if.if_iqdrops++;
		goto dropit;
	}
	return;
 dropit:
	if (m)
		m_freem(m);
	return;
}

#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;
	struct sca_softc *sc;
	u_int len;

	sc = scp->sca;
	printf("desc va %p: chainp 0x%x bufp 0x%x stat 0x%x len %d\n",
	       desc,
	       sca_desc_read_chainp(sc, desc),
	       sca_desc_read_bufp(sc, desc),
	       sca_desc_read_stat(sc, desc),
	       (len = sca_desc_read_buflen(sc, desc)));

	for (i = 0 ; i < len && i < 256; i++) {
		if (nothing_yet == 1 &&
		    (sc->sc_usedma ? *p
			: bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, p))) == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ",
		    (sc->sc_usedma ? *p
		    : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, p))));
		p++;
	}

	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * adjust things because we have just read the current starting
 * frame
 *
 * must be called at splnet()
 */
static void
sca_frame_read_done(sca_port_t *scp)
{
	u_int16_t edesc_p;

	/* update where our indices are */
	scp->sp_rxend = scp->sp_rxstart;
	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;

	/* update the error [end] descriptor */
	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
	    (sizeof(sca_desc_t) * scp->sp_rxend);
	dmac_write_2(scp, SCA_EDAL0, edesc_p);
}

/*
 * set a port to the "up" state
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
	scp->sp_if.if_flags |= IFF_RUNNING;

	/*
	 * raise DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	     | SCA_CTL_RTS_HIGH);

#if 0
	/*
	 * enable interrupts (no timer IER2)
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}
#endif

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->sp_txinuse = 0;
	scp->sp_txcur = 0;
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * set a port to the "down" state
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * lower DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	     | SCA_CTL_RTS_LOW);

	/*
	 * disable interrupts
	 */
#if 0
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}
#endif

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	struct sca_softc *sc;
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	sc = scp->sca;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->sp_txinuse == 0)
		return;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));

	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	startdesc_p = scp->sp_txdesc_p;
	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
	    startdesc_p, enddesc_p));

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
		     (u_int16_t)(startdesc_p & 0x0000ffff));

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 */
static struct mbuf *
sca_mbuf_alloc(struct sca_softc *sc, caddr_t p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL) {
		/* XXX do we need to sync here? */
		if (sc->sc_usedma)
			bcopy(p, mtod(m, caddr_t), len);
		else
			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
	}
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}

/*
 * get the base clock
 */
void
sca_get_base_clock(struct sca_softc *sc)
{
	struct timeval btv, ctv, dtv;
	u_int64_t bcnt;
	u_int32_t cnt;
	u_int16_t subcnt;

	/* disable the timer, set prescale to 0 */
	sca_write_1(sc, SCA_TCSR0, 0);
	sca_write_1(sc, SCA_TEPR0, 0);

	/* reset the counter */
	(void)sca_read_1(sc, SCA_TCSR0);
	subcnt = sca_read_2(sc, SCA_TCNTL0);

	/* count to max */
	sca_write_2(sc, SCA_TCONRL0, 0xffff);

	cnt = 0;
	microtime(&btv);
	/* start the timer -- no interrupt enable */
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
	for (;;) {
		microtime(&ctv);

		/* end around 3/4 of a second */
		timersub(&ctv, &btv, &dtv);
		if (dtv.tv_usec >= 750000)
			break;

		/* spin */
		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
			;
		/* reset the timer */
		(void)sca_read_2(sc, SCA_TCNTL0);
		cnt++;
	}

	/* stop the timer */
	sca_write_1(sc, SCA_TCSR0, 0);

	subcnt = sca_read_2(sc, SCA_TCNTL0);
	/* add the slop in and get the total timer ticks */
	cnt = (cnt << 16) | subcnt;

	/* cnt is 1/8 the actual time */
	bcnt = cnt * 8;
	/* make it proportional to 3/4 of a second */
	bcnt *= (u_int64_t)750000;
	bcnt /= (u_int64_t)dtv.tv_usec;
	cnt = bcnt;

	/* make it Hz */
	cnt *= 4;
	cnt /= 3;

	SCA_DPRINTF(SCA_DEBUG_CLOCK,
	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));

	/*
	 * round to the nearest 200 -- this allows for +-3 ticks error
	 */
	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
}
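
/*
 * Worked example (hypothetical helper, not driver code) of the scaling
 * done in sca_get_base_clock(): with a 9.8304 MHz base clock the timer,
 * which ticks at baseclock/8, accumulates 9830400 / 8 * 3 / 4 = 921600
 * ticks in a perfect 3/4-second window, and the arithmetic below
 * recovers 9830400 exactly; sca_example_scale(921600, 750000) == 9830400.
 */
#if 0
static u_int32_t
sca_example_scale(u_int32_t cnt, u_int32_t usec)
{
	u_int64_t bcnt;

	bcnt = (u_int64_t)cnt * 8;		/* timer ticks -> clock ticks */
	bcnt = bcnt * 750000 / usec;		/* normalize to 3/4 second */
	return ((u_int32_t)(bcnt * 4 / 3));	/* 3/4 second -> one second */
}
#endif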

/*
 * print the information about the clock on the ports
 */
void
sca_print_clock_info(struct sca_softc *sc)
{
	struct sca_port *scp;
	u_int32_t mhz, div;
	int i;

	printf("%s: base clock %d Hz\n", sc->sc_parent->dv_xname,
	    sc->sc_baseclock);

	/* print the information about the port clock selection */
	for (i = 0; i < sc->sc_numports; i++) {
		scp = &sc->sc_ports[i];
		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
		div = scp->sp_rxs & SCA_RXS_DIV_MASK;

		printf("%s: rx clock: ", scp->sp_if.if_xname);
		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
		case SCA_RXS_CLK_LINE:
			printf("line");
			break;
		case SCA_RXS_CLK_LINE_SN:
			printf("line with noise suppression");
			break;
		case SCA_RXS_CLK_INTERNAL:
			printf("internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_OUT:
			printf("adpll using internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_IN:
			printf("adpll using line clock");
			break;
		}
		printf("  tx clock: ");
		div = scp->sp_txs & SCA_TXS_DIV_MASK;
		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
		case SCA_TXS_CLK_LINE:
			printf("line\n");
			break;
		case SCA_TXS_CLK_INTERNAL:
			printf("internal %d Hz\n", (mhz >> div));
			break;
		case SCA_TXS_CLK_RXCLK:
			printf("rxclock\n");
			break;
		}
		if (scp->sp_eclock)
			printf("%s: outputting line clock\n",
			       scp->sp_if.if_xname);
	}
}