1 1.56 andvar /* $NetBSD: hd64570.c,v 1.56 2021/08/17 22:00:31 andvar Exp $ */
2 1.1 explorer
3 1.1 explorer /*
4 1.8 chopps * Copyright (c) 1999 Christian E. Hopps
5 1.1 explorer * Copyright (c) 1998 Vixie Enterprises
6 1.1 explorer * All rights reserved.
7 1.1 explorer *
8 1.1 explorer * Redistribution and use in source and binary forms, with or without
9 1.1 explorer * modification, are permitted provided that the following conditions
10 1.1 explorer * are met:
11 1.1 explorer *
12 1.1 explorer * 1. Redistributions of source code must retain the above copyright
13 1.1 explorer * notice, this list of conditions and the following disclaimer.
14 1.1 explorer * 2. Redistributions in binary form must reproduce the above copyright
15 1.1 explorer * notice, this list of conditions and the following disclaimer in the
16 1.1 explorer * documentation and/or other materials provided with the distribution.
17 1.1 explorer * 3. Neither the name of Vixie Enterprises nor the names
18 1.1 explorer * of its contributors may be used to endorse or promote products derived
19 1.1 explorer * from this software without specific prior written permission.
20 1.1 explorer *
21 1.1 explorer * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
22 1.1 explorer * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
23 1.1 explorer * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 1.1 explorer * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 1.1 explorer * DISCLAIMED. IN NO EVENT SHALL VIXIE ENTERPRISES OR
26 1.1 explorer * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 1.1 explorer * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 1.1 explorer * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29 1.1 explorer * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 1.1 explorer * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 1.1 explorer * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 1.1 explorer * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 1.1 explorer * SUCH DAMAGE.
34 1.1 explorer *
35 1.1 explorer * This software has been written for Vixie Enterprises by Michael Graff
36 1.1 explorer * <explorer (at) flame.org>. To learn more about Vixie Enterprises, see
37 1.1 explorer * ``http://www.vix.com''.
38 1.7 erh */
39 1.7 erh
40 1.7 erh /*
41 1.1 explorer * TODO:
42 1.1 explorer *
43 1.1 explorer * o teach the receive logic about errors, and about long frames that
44 1.1 explorer * span more than one input buffer. (Right now, receive/transmit is
45 1.1 explorer * limited to one descriptor's buffer space, which is MTU + 4 bytes.
46 1.1 explorer * This is currently 1504, which is large enough to hold the HDLC
47 1.1 explorer * header and the packet itself. Packets which are too long are
48 1.1 explorer * silently dropped on both transmit and receive.)
49 1.1 explorer * o write code to handle the msci interrupts, needed only for CD
50 1.1 explorer * and CTS changes.
51 1.1 explorer * o consider switching back to a "queue tx with DMA active" model which
52 1.1 explorer * should help sustain outgoing traffic
53 1.1 explorer * o through clever use of bus_dma*() functions, it should be possible
54 1.1 explorer * to map the mbuf's data area directly into a descriptor transmit
55 1.1 explorer * buffer, removing the need to allocate extra memory. If, however,
56 1.1 explorer * we run out of descriptors for this, we will need to then allocate
57 1.1 explorer * one large mbuf, copy the fragmented chain into it, and put it onto
58 1.1 explorer * a single descriptor.
59 1.1 explorer * o use bus_dmamap_sync() with the right offset and lengths, rather
60 1.1 explorer * than cheating and always sync'ing the whole region.
61 1.8 chopps *
62 1.8 chopps * o perhaps allow rx and tx to be in more than one page
63 1.23 wiz * if not using DMA. Currently the assumption is that
64 1.8 chopps * rx and tx each use exactly one page.
65 1.1 explorer */
66 1.20 lukem
67 1.20 lukem #include <sys/cdefs.h>
68 1.56 andvar __KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.56 2021/08/17 22:00:31 andvar Exp $");
69 1.1 explorer
70 1.9 chopps #include "opt_inet.h"
71 1.1 explorer
72 1.1 explorer #include <sys/param.h>
73 1.1 explorer #include <sys/systm.h>
74 1.1 explorer #include <sys/device.h>
75 1.1 explorer #include <sys/mbuf.h>
76 1.1 explorer #include <sys/socket.h>
77 1.1 explorer #include <sys/sockio.h>
78 1.1 explorer #include <sys/kernel.h>
79 1.1 explorer
80 1.1 explorer #include <net/if.h>
81 1.1 explorer #include <net/if_types.h>
82 1.1 explorer #include <net/netisr.h>
83 1.1 explorer
84 1.15 itojun #if defined(INET) || defined(INET6)
85 1.1 explorer #include <netinet/in.h>
86 1.1 explorer #include <netinet/in_systm.h>
87 1.1 explorer #include <netinet/in_var.h>
88 1.1 explorer #include <netinet/ip.h>
89 1.15 itojun #ifdef INET6
90 1.15 itojun #include <netinet6/in6_var.h>
91 1.15 itojun #endif
92 1.9 chopps #endif
93 1.9 chopps
94 1.1 explorer #include <net/bpf.h>
95 1.1 explorer
96 1.38 ad #include <sys/cpu.h>
97 1.38 ad #include <sys/bus.h>
98 1.38 ad #include <sys/intr.h>
99 1.1 explorer
100 1.1 explorer #include <dev/pci/pcivar.h>
101 1.1 explorer #include <dev/pci/pcireg.h>
102 1.1 explorer #include <dev/pci/pcidevs.h>
103 1.1 explorer
104 1.1 explorer #include <dev/ic/hd64570reg.h>
105 1.1 explorer #include <dev/ic/hd64570var.h>
106 1.1 explorer
107 1.1 explorer #define SCA_DEBUG_RX 0x0001
108 1.1 explorer #define SCA_DEBUG_TX 0x0002
109 1.1 explorer #define SCA_DEBUG_CISCO 0x0004
110 1.1 explorer #define SCA_DEBUG_DMA 0x0008
111 1.1 explorer #define SCA_DEBUG_RXPKT 0x0010
112 1.1 explorer #define SCA_DEBUG_TXPKT 0x0020
113 1.1 explorer #define SCA_DEBUG_INTR 0x0040
114 1.8 chopps #define SCA_DEBUG_CLOCK 0x0080
115 1.1 explorer
116 1.1 explorer #if 0
117 1.8 chopps #define SCA_DEBUG_LEVEL ( 0xFFFF )
118 1.1 explorer #else
119 1.1 explorer #define SCA_DEBUG_LEVEL 0
120 1.1 explorer #endif
121 1.1 explorer
122 1.1 explorer u_int32_t sca_debug = SCA_DEBUG_LEVEL;
123 1.1 explorer
124 1.1 explorer #if SCA_DEBUG_LEVEL > 0
125 1.1 explorer #define SCA_DPRINTF(l, x) do { \
126 1.1 explorer if ((l) & sca_debug) \
127 1.1 explorer printf x;\
128 1.1 explorer } while (0)
129 1.1 explorer #else
130 1.1 explorer #define SCA_DPRINTF(l, x)
131 1.1 explorer #endif
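
/*
 * Usage (a sketch mirroring the calls made later in this file): the
 * second argument is a fully parenthesized printf() argument list, so
 * the macro can forward a variable number of arguments, e.g.
 *
 *	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: got %d bytes\n", len));
 *
 * Nothing is printed unless SCA_DEBUG_LEVEL is non-zero at compile time
 * and the matching SCA_DEBUG_* bit is set in sca_debug at run time.
 */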
132 1.1 explorer
133 1.1 explorer #if 0
134 1.1 explorer #define SCA_USE_FASTQ /* use a split queue, one for fast traffic */
135 1.1 explorer #endif
136 1.1 explorer
137 1.1 explorer static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
138 1.1 explorer static inline u_int8_t msci_read_1(sca_port_t *, u_int);
139 1.1 explorer
140 1.1 explorer static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
141 1.1 explorer static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
142 1.1 explorer static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
143 1.1 explorer static inline u_int16_t dmac_read_2(sca_port_t *, u_int);
144 1.1 explorer
145 1.1 explorer static void sca_msci_init(struct sca_softc *, sca_port_t *);
146 1.1 explorer static void sca_dmac_init(struct sca_softc *, sca_port_t *);
147 1.1 explorer static void sca_dmac_rxinit(sca_port_t *);
148 1.1 explorer
149 1.1 explorer static int sca_dmac_intr(sca_port_t *, u_int8_t);
150 1.8 chopps static int sca_msci_intr(sca_port_t *, u_int8_t);
151 1.1 explorer
152 1.1 explorer static void sca_get_packets(sca_port_t *);
153 1.8 chopps static int sca_frame_avail(sca_port_t *);
154 1.8 chopps static void sca_frame_process(sca_port_t *);
155 1.8 chopps static void sca_frame_read_done(sca_port_t *);
156 1.1 explorer
157 1.1 explorer static void sca_port_starttx(sca_port_t *);
158 1.1 explorer
159 1.1 explorer static void sca_port_up(sca_port_t *);
160 1.1 explorer static void sca_port_down(sca_port_t *);
161 1.1 explorer
162 1.35 dyoung static int sca_output(struct ifnet *, struct mbuf *, const struct sockaddr *,
163 1.49 ozaki const struct rtentry *);
164 1.36 christos static int sca_ioctl(struct ifnet *, u_long, void *);
165 1.28 perry static void sca_start(struct ifnet *);
166 1.28 perry static void sca_watchdog(struct ifnet *);
167 1.1 explorer
168 1.36 christos static struct mbuf *sca_mbuf_alloc(struct sca_softc *, void *, u_int);
169 1.1 explorer
170 1.1 explorer #if SCA_DEBUG_LEVEL > 0
171 1.1 explorer static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
172 1.1 explorer #endif
173 1.1 explorer
174 1.1 explorer
175 1.8 chopps #define sca_read_1(sc, reg) (sc)->sc_read_1(sc, reg)
176 1.8 chopps #define sca_read_2(sc, reg) (sc)->sc_read_2(sc, reg)
177 1.8 chopps #define sca_write_1(sc, reg, val) (sc)->sc_write_1(sc, reg, val)
178 1.8 chopps #define sca_write_2(sc, reg, val) (sc)->sc_write_2(sc, reg, val)
179 1.1 explorer
180 1.19 mrg #define sca_page_addr(sc, addr) ((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)
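
/*
 * Example (illustrative only; the real window size and scu_pagemask come
 * from the bus attachment): assuming a 16KB window, scu_pagemask would be
 * 0x3fff, so a descriptor at card address 0x0002a010 gives
 * sca_page_addr(sc, 0x0002a010) == 0x2010, the offset within the window
 * once scu_set_page() has selected the page containing that address.
 */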
181 1.1 explorer
182 1.1 explorer static inline void
183 1.1 explorer msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
184 1.1 explorer {
185 1.1 explorer sca_write_1(scp->sca, scp->msci_off + reg, val);
186 1.1 explorer }
187 1.1 explorer
188 1.1 explorer static inline u_int8_t
189 1.1 explorer msci_read_1(sca_port_t *scp, u_int reg)
190 1.1 explorer {
191 1.1 explorer return sca_read_1(scp->sca, scp->msci_off + reg);
192 1.1 explorer }
193 1.1 explorer
194 1.1 explorer static inline void
195 1.1 explorer dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
196 1.1 explorer {
197 1.1 explorer sca_write_1(scp->sca, scp->dmac_off + reg, val);
198 1.1 explorer }
199 1.1 explorer
200 1.1 explorer static inline void
201 1.1 explorer dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
202 1.1 explorer {
203 1.1 explorer sca_write_2(scp->sca, scp->dmac_off + reg, val);
204 1.1 explorer }
205 1.1 explorer
206 1.1 explorer static inline u_int8_t
207 1.1 explorer dmac_read_1(sca_port_t *scp, u_int reg)
208 1.1 explorer {
209 1.1 explorer return sca_read_1(scp->sca, scp->dmac_off + reg);
210 1.1 explorer }
211 1.1 explorer
212 1.1 explorer static inline u_int16_t
213 1.1 explorer dmac_read_2(sca_port_t *scp, u_int reg)
214 1.1 explorer {
215 1.1 explorer return sca_read_2(scp->sca, scp->dmac_off + reg);
216 1.1 explorer }
217 1.1 explorer
218 1.45 joerg #if SCA_DEBUG_LEVEL > 0
219 1.8 chopps /*
220 1.8 chopps * read the chain pointer
221 1.8 chopps */
222 1.8 chopps static inline u_int16_t
223 1.8 chopps sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
224 1.8 chopps {
225 1.8 chopps if (sc->sc_usedma)
226 1.8 chopps return ((dp)->sd_chainp);
227 1.8 chopps return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
228 1.8 chopps sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
229 1.8 chopps }
230 1.45 joerg #endif
231 1.8 chopps
232 1.8 chopps /*
233 1.8 chopps * write the chain pointer
234 1.8 chopps */
235 1.8 chopps static inline void
236 1.8 chopps sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
237 1.8 chopps {
238 1.8 chopps if (sc->sc_usedma)
239 1.8 chopps (dp)->sd_chainp = cp;
240 1.8 chopps else
241 1.8 chopps bus_space_write_2(sc->scu_memt, sc->scu_memh,
242 1.8 chopps sca_page_addr(sc, dp)
243 1.8 chopps + offsetof(struct sca_desc, sd_chainp), cp);
244 1.8 chopps }
245 1.8 chopps
246 1.45 joerg #if SCA_DEBUG_LEVEL > 0
247 1.8 chopps /*
248 1.8 chopps * read the buffer pointer
249 1.8 chopps */
250 1.8 chopps static inline u_int32_t
251 1.8 chopps sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
252 1.8 chopps {
253 1.8 chopps u_int32_t address;
254 1.8 chopps
255 1.8 chopps if (sc->sc_usedma)
256 1.8 chopps address = dp->sd_bufp | dp->sd_hbufp << 16;
257 1.8 chopps else {
258 1.8 chopps address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
259 1.8 chopps sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
260 1.8 chopps address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
261 1.8 chopps sca_page_addr(sc, dp)
262 1.8 chopps + offsetof(struct sca_desc, sd_hbufp)) << 16;
263 1.8 chopps }
264 1.8 chopps return (address);
265 1.8 chopps }
266 1.45 joerg #endif
267 1.8 chopps
268 1.8 chopps /*
269 1.8 chopps * write the buffer pointer
270 1.8 chopps */
271 1.8 chopps static inline void
272 1.8 chopps sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
273 1.8 chopps {
274 1.8 chopps if (sc->sc_usedma) {
275 1.8 chopps dp->sd_bufp = bufp & 0xFFFF;
276 1.8 chopps dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
277 1.8 chopps } else {
278 1.8 chopps bus_space_write_2(sc->scu_memt, sc->scu_memh,
279 1.8 chopps sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
280 1.8 chopps bufp & 0xFFFF);
281 1.8 chopps bus_space_write_1(sc->scu_memt, sc->scu_memh,
282 1.8 chopps sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
283 1.8 chopps (bufp & 0x00FF0000) >> 16);
284 1.8 chopps }
285 1.8 chopps }
286 1.8 chopps
287 1.8 chopps /*
288 1.8 chopps * read the buffer length
289 1.8 chopps */
290 1.8 chopps static inline u_int16_t
291 1.8 chopps sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
292 1.8 chopps {
293 1.8 chopps if (sc->sc_usedma)
294 1.8 chopps return ((dp)->sd_buflen);
295 1.8 chopps return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
296 1.8 chopps sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
297 1.8 chopps }
298 1.29 perry
299 1.8 chopps /*
300 1.8 chopps * write the buffer length
301 1.8 chopps */
302 1.8 chopps static inline void
303 1.8 chopps sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
304 1.8 chopps {
305 1.8 chopps if (sc->sc_usedma)
306 1.8 chopps (dp)->sd_buflen = len;
307 1.8 chopps else
308 1.8 chopps bus_space_write_2(sc->scu_memt, sc->scu_memh,
309 1.8 chopps sca_page_addr(sc, dp)
310 1.8 chopps + offsetof(struct sca_desc, sd_buflen), len);
311 1.8 chopps }
312 1.8 chopps
313 1.8 chopps /*
314 1.8 chopps * read the descriptor status
315 1.8 chopps */
316 1.8 chopps static inline u_int8_t
317 1.8 chopps sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
318 1.1 explorer {
319 1.8 chopps if (sc->sc_usedma)
320 1.8 chopps return ((dp)->sd_stat);
321 1.8 chopps return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
322 1.8 chopps sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
323 1.8 chopps }
324 1.1 explorer
325 1.8 chopps /*
326 1.8 chopps * write the descriptor status
327 1.8 chopps */
328 1.8 chopps static inline void
329 1.8 chopps sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
330 1.8 chopps {
331 1.8 chopps if (sc->sc_usedma)
332 1.8 chopps (dp)->sd_stat = stat;
333 1.8 chopps else
334 1.8 chopps bus_space_write_1(sc->scu_memt, sc->scu_memh,
335 1.8 chopps sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
336 1.8 chopps stat);
337 1.8 chopps }
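
/*
 * Taken together, the accessors above let descriptor setup read the same
 * for both memory models; a sketch of what sca_dmac_init() below does
 * for each descriptor in a ring:
 *
 *	sca_desc_write_chainp(sc, desc, next_desc_p & 0x0000ffff);
 *	sca_desc_write_bufp(sc, desc, buf_p);
 *	sca_desc_write_buflen(sc, desc, SCA_BSIZE);
 *	sca_desc_write_stat(sc, desc, 0);
 *
 * With sc_usedma set the writes hit host memory directly; otherwise they
 * go through bus_space into the currently selected window page.
 */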
338 1.1 explorer
339 1.8 chopps void
340 1.8 chopps sca_init(struct sca_softc *sc)
341 1.8 chopps {
342 1.1 explorer /*
343 1.8 chopps * Do a little sanity check: verify the number of ports.
344 1.1 explorer */
345 1.8 chopps if (sc->sc_numports < 1 || sc->sc_numports > 2)
346 1.8 chopps panic("sca can't handle more than 2 or fewer than 1 ports");
347 1.1 explorer
348 1.1 explorer /*
349 1.1 explorer * disable DMA and MSCI interrupts
350 1.1 explorer */
351 1.1 explorer sca_write_1(sc, SCA_DMER, 0);
352 1.1 explorer sca_write_1(sc, SCA_IER0, 0);
353 1.1 explorer sca_write_1(sc, SCA_IER1, 0);
354 1.1 explorer sca_write_1(sc, SCA_IER2, 0);
355 1.1 explorer
356 1.1 explorer /*
357 1.1 explorer * configure interrupt system
358 1.1 explorer */
359 1.8 chopps sca_write_1(sc, SCA_ITCR,
360 1.8 chopps SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
361 1.8 chopps #if 0
362 1.8 chopps /* these are for the interrupt ack cycle which we don't use */
363 1.1 explorer sca_write_1(sc, SCA_IVR, 0x40);
364 1.1 explorer sca_write_1(sc, SCA_IMVR, 0x40);
365 1.8 chopps #endif
366 1.1 explorer
367 1.1 explorer /*
368 1.1 explorer * set wait control register to zero wait states
369 1.1 explorer */
370 1.1 explorer sca_write_1(sc, SCA_PABR0, 0);
371 1.1 explorer sca_write_1(sc, SCA_PABR1, 0);
372 1.1 explorer sca_write_1(sc, SCA_WCRL, 0);
373 1.1 explorer sca_write_1(sc, SCA_WCRM, 0);
374 1.1 explorer sca_write_1(sc, SCA_WCRH, 0);
375 1.1 explorer
376 1.1 explorer /*
377 1.1 explorer * disable DMA and reset status
378 1.1 explorer */
379 1.1 explorer sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);
380 1.1 explorer
381 1.1 explorer /*
382 1.1 explorer * disable transmit DMA for all channels
383 1.1 explorer */
384 1.1 explorer sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
385 1.1 explorer sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
386 1.1 explorer sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
387 1.1 explorer sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
388 1.1 explorer sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
389 1.1 explorer sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
390 1.1 explorer sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
391 1.1 explorer sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
392 1.1 explorer
393 1.1 explorer /*
394 1.1 explorer * enable DMA based on channel enable flags for each channel
395 1.1 explorer */
396 1.1 explorer sca_write_1(sc, SCA_DMER, SCA_DMER_EN);
397 1.1 explorer
398 1.1 explorer /*
399 1.1 explorer * Should check to see if the chip is responding, but for now
400 1.1 explorer * assume it is.
401 1.1 explorer */
402 1.1 explorer }
403 1.1 explorer
404 1.1 explorer /*
405 1.1 explorer * initialize the port and attach it to the networking layer
406 1.1 explorer */
407 1.1 explorer void
408 1.1 explorer sca_port_attach(struct sca_softc *sc, u_int port)
409 1.1 explorer {
410 1.32 kardel struct timeval now;
411 1.1 explorer sca_port_t *scp = &sc->sc_ports[port];
412 1.1 explorer struct ifnet *ifp;
413 1.1 explorer static u_int ntwo_unit = 0;
414 1.1 explorer
415 1.1 explorer scp->sca = sc; /* point back to the parent */
416 1.1 explorer
417 1.1 explorer scp->sp_port = port;
418 1.1 explorer
419 1.1 explorer if (port == 0) {
420 1.1 explorer scp->msci_off = SCA_MSCI_OFF_0;
421 1.1 explorer scp->dmac_off = SCA_DMAC_OFF_0;
422 1.8 chopps if (sc->sc_parent != NULL)
423 1.31 thorpej ntwo_unit = device_unit(sc->sc_parent) * 2 + 0;
424 1.4 tls else
425 1.4 tls ntwo_unit = 0; /* XXX */
426 1.1 explorer } else {
427 1.1 explorer scp->msci_off = SCA_MSCI_OFF_1;
428 1.1 explorer scp->dmac_off = SCA_DMAC_OFF_1;
429 1.8 chopps if (sc->sc_parent != NULL)
430 1.31 thorpej ntwo_unit = device_unit(sc->sc_parent) * 2 + 1;
431 1.4 tls else
432 1.4 tls ntwo_unit = 1; /* XXX */
433 1.1 explorer }
434 1.1 explorer
435 1.1 explorer sca_msci_init(sc, scp);
436 1.1 explorer sca_dmac_init(sc, scp);
437 1.1 explorer
438 1.1 explorer /*
439 1.1 explorer * attach to the network layer
440 1.1 explorer */
441 1.1 explorer ifp = &scp->sp_if;
442 1.27 itojun snprintf(ifp->if_xname, sizeof(ifp->if_xname), "ntwo%d", ntwo_unit);
443 1.1 explorer ifp->if_softc = scp;
444 1.1 explorer ifp->if_mtu = SCA_MTU;
445 1.1 explorer ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
446 1.9 chopps ifp->if_type = IFT_PTPSERIAL;
447 1.1 explorer ifp->if_hdrlen = HDLC_HDRLEN;
448 1.1 explorer ifp->if_ioctl = sca_ioctl;
449 1.1 explorer ifp->if_output = sca_output;
450 1.1 explorer ifp->if_watchdog = sca_watchdog;
451 1.1 explorer ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
452 1.1 explorer scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
453 1.1 explorer #ifdef SCA_USE_FASTQ
454 1.1 explorer scp->fastq.ifq_maxlen = IFQ_MAXLEN;
455 1.1 explorer #endif
456 1.21 itojun IFQ_SET_READY(&ifp->if_snd);
457 1.1 explorer if_attach(ifp);
458 1.53 ozaki if_deferred_start_init(ifp, NULL);
459 1.14 thorpej if_alloc_sadl(ifp);
460 1.43 joerg bpf_attach(ifp, DLT_HDLC, HDLC_HDRLEN);
461 1.52 ozaki bpf_mtap_softint_init(ifp);
462 1.1 explorer
463 1.8 chopps if (sc->sc_parent == NULL)
464 1.1 explorer printf("%s: port %d\n", ifp->if_xname, port);
465 1.1 explorer else
466 1.1 explorer printf("%s at %s port %d\n",
467 1.39 cegger ifp->if_xname, device_xname(sc->sc_parent), port);
468 1.1 explorer
469 1.1 explorer /*
470 1.1 explorer * reset the last seen times on the cisco keepalive protocol
471 1.1 explorer */
472 1.32 kardel getmicrotime(&now);
473 1.32 kardel scp->cka_lasttx = now.tv_usec;
474 1.1 explorer scp->cka_lastrx = 0;
475 1.1 explorer }
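
/*
 * A bus front-end is expected to fill in the softc's access methods and
 * memory model before handing the chip to this code; a rough sketch
 * (the ntwo_* helper names are hypothetical, the sc_* members are the
 * ones this file uses):
 *
 *	sc->sc_read_1 = ntwo_sca_read_1;
 *	sc->sc_write_1 = ntwo_sca_write_1;
 *	sc->sc_read_2 = ntwo_sca_read_2;
 *	sc->sc_write_2 = ntwo_sca_write_2;
 *	sc->sc_numports = 2;
 *	sc->sc_usedma = 1;	(or provide scu_page_on/off/set_page)
 *	sca_init(sc);
 *	for (i = 0; i < sc->sc_numports; i++)
 *		sca_port_attach(sc, i);
 *
 * The per-port clock fields (sp_rxs, sp_txs, sp_tmc, sp_eclock) and
 * sc_clock_callback are likewise expected to be set up by the front end
 * before sca_port_attach() initializes the MSCI.
 */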
476 1.1 explorer
477 1.8 chopps #if 0
478 1.8 chopps /*
479 1.8 chopps * returns log2(div), sets 'tmc' for the required freq 'hz'
480 1.8 chopps */
481 1.8 chopps static u_int8_t
482 1.8 chopps sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
483 1.8 chopps {
484 1.8 chopps u_int32_t tmc, div;
485 1.8 chopps u_int32_t clock;
486 1.8 chopps
487 1.8 chopps /* clock hz = (chipclock / tmc) / 2^(div); */
488 1.8 chopps /*
489 1.8 chopps * TD == tmc * 2^(n)
490 1.8 chopps *
491 1.8 chopps * note:
492 1.8 chopps * 1 <= TD <= 256 TD is inc of 1
493 1.8 chopps * 2 <= TD <= 512 TD is inc of 2
494 1.8 chopps * 4 <= TD <= 1024 TD is inc of 4
495 1.8 chopps * ...
496 1.29 perry * 512 <= TD <= 256*512 TD is inc of 512
497 1.8 chopps *
498 1.8 chopps * so note there are overlaps. We lose precision
499 1.8 chopps * as div increases, so we wish to minimize div.
500 1.8 chopps *
501 1.8 chopps * basically we want to do
502 1.8 chopps *
503 1.8 chopps * tmc = chip / hz, but have tmc <= 256
504 1.8 chopps */
505 1.8 chopps
506 1.22 tsutsui /* assume system clock is 9.8304MHz or 9830400Hz */
507 1.8 chopps clock = 9830400 >> 1;
508 1.8 chopps
509 1.8 chopps /* round down */
510 1.8 chopps div = 0;
511 1.8 chopps while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
512 1.8 chopps clock >>= 1;
513 1.8 chopps div++;
514 1.8 chopps }
515 1.8 chopps if (clock / tmc > hz)
516 1.8 chopps tmc++;
517 1.8 chopps if (!tmc)
518 1.8 chopps tmc = 1;
519 1.8 chopps
520 1.8 chopps if (div > SCA_RXS_DIV_512) {
521 1.8 chopps /* set to maximums */
522 1.8 chopps div = SCA_RXS_DIV_512;
523 1.8 chopps tmc = 0;
524 1.8 chopps }
525 1.8 chopps
526 1.8 chopps *tmcp = (tmc & 0xFF); /* 0 == 256 */
527 1.8 chopps return (div & 0xFF);
528 1.8 chopps }
529 1.8 chopps #endif
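
/*
 * Worked example for the (unused) routine above, under its own 9.8304MHz
 * assumption: for hz = 64000, clock = 9830400 >> 1 = 4915200, so
 * tmc = 4915200 / 64000 = 76 with div = 0; since 4915200 / 76 = 64673 is
 * still above 64000, tmc is bumped to 77, giving an actual bit clock of
 * 4915200 / 77 (about 63834Hz), with div = 0 returned.
 */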
530 1.8 chopps
531 1.1 explorer /*
532 1.1 explorer * initialize the port's MSCI
533 1.1 explorer */
534 1.1 explorer static void
535 1.1 explorer sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
536 1.1 explorer {
537 1.8 chopps /* reset the channel */
538 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);
539 1.8 chopps
540 1.1 explorer msci_write_1(scp, SCA_MD00,
541 1.1 explorer ( SCA_MD0_CRC_1
542 1.1 explorer | SCA_MD0_CRC_CCITT
543 1.1 explorer | SCA_MD0_CRC_ENABLE
544 1.1 explorer | SCA_MD0_MODE_HDLC));
545 1.8 chopps #if 0
546 1.8 chopps /* immediately send receive reset so the above takes */
547 1.8 chopps msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
548 1.8 chopps #endif
549 1.8 chopps
550 1.1 explorer msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
551 1.1 explorer msci_write_1(scp, SCA_MD20,
552 1.8 chopps (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));
553 1.1 explorer
554 1.8 chopps /* be safe and do it again */
555 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
556 1.8 chopps
557 1.8 chopps /* setup underrun and idle control, and initial RTS state */
558 1.1 explorer msci_write_1(scp, SCA_CTL0,
559 1.8 chopps (SCA_CTL_IDLC_PATTERN
560 1.8 chopps | SCA_CTL_UDRNC_AFTER_FCS
561 1.8 chopps | SCA_CTL_RTS_LOW));
562 1.8 chopps
563 1.8 chopps /* reset the transmitter */
564 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
565 1.1 explorer
566 1.1 explorer /*
567 1.8 chopps * set the clock sources
568 1.1 explorer */
569 1.8 chopps msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
570 1.8 chopps msci_write_1(scp, SCA_TXS0, scp->sp_txs);
571 1.8 chopps msci_write_1(scp, SCA_TMC0, scp->sp_tmc);
572 1.8 chopps
573 1.8 chopps /* set external clock generation as requested */
574 1.8 chopps sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);
575 1.1 explorer
576 1.1 explorer /*
577 1.1 explorer * XXX don't pay attention to CTS or CD changes right now. I can't
578 1.1 explorer * simulate one, and the transmitter will try to transmit even if
579 1.1 explorer * CD isn't there anyway, so nothing bad SHOULD happen.
580 1.1 explorer */
581 1.8 chopps #if 0
582 1.1 explorer msci_write_1(scp, SCA_IE00, 0);
583 1.1 explorer msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
584 1.8 chopps #else
585 1.8 chopps /* this would deliver transmitter underrun to ST1/ISR1 */
586 1.8 chopps msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
587 1.8 chopps msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
588 1.8 chopps #endif
589 1.1 explorer msci_write_1(scp, SCA_IE20, 0);
590 1.8 chopps
591 1.1 explorer msci_write_1(scp, SCA_FIE0, 0);
592 1.1 explorer
593 1.1 explorer msci_write_1(scp, SCA_SA00, 0);
594 1.1 explorer msci_write_1(scp, SCA_SA10, 0);
595 1.1 explorer
596 1.1 explorer msci_write_1(scp, SCA_IDL0, 0x7e);
597 1.1 explorer
598 1.1 explorer msci_write_1(scp, SCA_RRC0, 0x0e);
599 1.8 chopps /* msci_write_1(scp, SCA_TRC00, 0x10); */
600 1.8 chopps /*
601 1.8 chopps * The correct values here are important for avoiding underruns:
602 1.8 chopps * while the fifo holds TRC0 bytes or fewer, txrdy is asserted,
603 1.8 chopps * which starts the dmac transfer into the fifo; once the fifo
604 1.23 wiz * holds TRC1 + 1 bytes or more, txrdy is cleared, which stops DMA.
605 1.8 chopps *
606 1.8 chopps * Thus, if we are using a very fast clock that empties the fifo
607 1.8 chopps * quickly, delays in the dmac starting to refill the fifo can
608 1.8 chopps * lead to underruns, so we want a fairly full fifo to still
609 1.8 chopps * cause the dmac to start. For cards with on-board RAM this
610 1.8 chopps * has no effect on system performance. For cards that DMA
611 1.8 chopps * to/from system memory it will cause more, shorter,
612 1.8 chopps * bus accesses rather than fewer, longer ones.
613 1.8 chopps */
614 1.8 chopps msci_write_1(scp, SCA_TRC00, 0x00);
615 1.1 explorer msci_write_1(scp, SCA_TRC10, 0x1f);
616 1.1 explorer }
617 1.1 explorer
618 1.1 explorer /*
619 1.1 explorer * Take the memory for the port and construct two circular linked lists of
620 1.1 explorer * descriptors (one tx, one rx) and set the pointers in these descriptors
621 1.1 explorer * to point to the buffer space for this port.
622 1.1 explorer */
623 1.1 explorer static void
624 1.1 explorer sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
625 1.1 explorer {
626 1.1 explorer sca_desc_t *desc;
627 1.1 explorer u_int32_t desc_p;
628 1.1 explorer u_int32_t buf_p;
629 1.1 explorer int i;
630 1.1 explorer
631 1.8 chopps if (sc->sc_usedma)
632 1.8 chopps bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
633 1.8 chopps BUS_DMASYNC_PREWRITE);
634 1.8 chopps else {
635 1.8 chopps /*
636 1.8 chopps * XXX assumes that all tx desc and bufs in same page
637 1.8 chopps */
638 1.8 chopps sc->scu_page_on(sc);
639 1.8 chopps sc->scu_set_page(sc, scp->sp_txdesc_p);
640 1.8 chopps }
641 1.1 explorer
642 1.8 chopps desc = scp->sp_txdesc;
643 1.8 chopps desc_p = scp->sp_txdesc_p;
644 1.8 chopps buf_p = scp->sp_txbuf_p;
645 1.8 chopps scp->sp_txcur = 0;
646 1.8 chopps scp->sp_txinuse = 0;
647 1.8 chopps
648 1.8 chopps #ifdef DEBUG
649 1.8 chopps /* make sure that we won't wrap */
650 1.8 chopps if ((desc_p & 0xffff0000) !=
651 1.8 chopps ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
652 1.25 wiz panic("sca: tx descriptors cross architectural boundary");
653 1.8 chopps if ((buf_p & 0xff000000) !=
654 1.8 chopps ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
655 1.25 wiz panic("sca: tx buffers cross architectural boundary");
656 1.8 chopps #endif
657 1.1 explorer
658 1.8 chopps for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
659 1.1 explorer /*
660 1.56 andvar * desc_p points to the physical address of the NEXT desc
661 1.1 explorer */
662 1.1 explorer desc_p += sizeof(sca_desc_t);
663 1.1 explorer
664 1.8 chopps sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
665 1.8 chopps sca_desc_write_bufp(sc, desc, buf_p);
666 1.8 chopps sca_desc_write_buflen(sc, desc, SCA_BSIZE);
667 1.8 chopps sca_desc_write_stat(sc, desc, 0);
668 1.1 explorer
669 1.1 explorer desc++; /* point to the next descriptor */
670 1.1 explorer buf_p += SCA_BSIZE;
671 1.1 explorer }
672 1.1 explorer
673 1.1 explorer /*
674 1.1 explorer * "heal" the circular list by making the last entry point to the
675 1.1 explorer * first.
676 1.1 explorer */
677 1.8 chopps sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);
678 1.1 explorer
679 1.1 explorer /*
680 1.1 explorer * Now, initialize the transmit DMA logic
681 1.1 explorer *
682 1.1 explorer * CPB == chain pointer base address
683 1.1 explorer */
684 1.1 explorer dmac_write_1(scp, SCA_DSR1, 0);
685 1.1 explorer dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
686 1.1 explorer dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
687 1.8 chopps /* XXX1
688 1.1 explorer dmac_write_1(scp, SCA_DIR1,
689 1.1 explorer (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
690 1.8 chopps */
691 1.8 chopps dmac_write_1(scp, SCA_DIR1,
692 1.8 chopps (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
693 1.1 explorer dmac_write_1(scp, SCA_CPB1,
694 1.8 chopps (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));
695 1.1 explorer
696 1.1 explorer /*
697 1.1 explorer * now, do the same thing for receive descriptors
698 1.8 chopps *
699 1.8 chopps * XXX assumes that all rx desc and bufs in same page
700 1.1 explorer */
701 1.8 chopps if (!sc->sc_usedma)
702 1.8 chopps sc->scu_set_page(sc, scp->sp_rxdesc_p);
703 1.1 explorer
704 1.8 chopps desc = scp->sp_rxdesc;
705 1.8 chopps desc_p = scp->sp_rxdesc_p;
706 1.8 chopps buf_p = scp->sp_rxbuf_p;
707 1.8 chopps
708 1.8 chopps #ifdef DEBUG
709 1.8 chopps /* make sure that we won't wrap */
710 1.8 chopps if ((desc_p & 0xffff0000) !=
711 1.8 chopps ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
712 1.25 wiz panic("sca: rx descriptors cross architectural boundary");
713 1.8 chopps if ((buf_p & 0xff000000) !=
714 1.8 chopps ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
715 1.25 wiz panic("sca: rx buffers cross architectural boundary");
716 1.8 chopps #endif
717 1.8 chopps
718 1.8 chopps for (i = 0 ; i < scp->sp_nrxdesc; i++) {
719 1.1 explorer /*
720 1.56 andvar * desc_p points to the physical address of the NEXT desc
721 1.1 explorer */
722 1.1 explorer desc_p += sizeof(sca_desc_t);
723 1.1 explorer
724 1.8 chopps sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
725 1.8 chopps sca_desc_write_bufp(sc, desc, buf_p);
726 1.8 chopps /* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
727 1.8 chopps sca_desc_write_buflen(sc, desc, 0);
728 1.8 chopps sca_desc_write_stat(sc, desc, 0);
729 1.1 explorer
730 1.1 explorer desc++; /* point to the next descriptor */
731 1.1 explorer buf_p += SCA_BSIZE;
732 1.1 explorer }
733 1.1 explorer
734 1.1 explorer /*
735 1.1 explorer * "heal" the circular list by making the last entry point to the
736 1.1 explorer * first.
737 1.1 explorer */
738 1.8 chopps sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);
739 1.1 explorer
740 1.1 explorer sca_dmac_rxinit(scp);
741 1.1 explorer
742 1.8 chopps if (sc->sc_usedma)
743 1.8 chopps bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
744 1.8 chopps 0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
745 1.8 chopps else
746 1.8 chopps sc->scu_page_off(sc);
747 1.1 explorer }
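
/*
 * The result, for either direction, is a ring like this (a sketch drawn
 * with four descriptors for illustration; the real counts are
 * sp_ntxdesc and sp_nrxdesc):
 *
 *	desc[0] -> desc[1] -> desc[2] -> desc[3] -+
 *	   ^                                      |
 *	   +--------------------------------------+
 *
 * where each sd_chainp holds the low 16 bits of the next descriptor's
 * physical address, each sd_bufp points at its own SCA_BSIZE buffer, and
 * the upper byte of the 24-bit descriptor address is programmed once
 * into the CPB register.
 */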
748 1.1 explorer
749 1.1 explorer /*
750 1.1 explorer * reset and reinitialize the receive DMA logic
751 1.1 explorer */
752 1.1 explorer static void
753 1.1 explorer sca_dmac_rxinit(sca_port_t *scp)
754 1.1 explorer {
755 1.1 explorer /*
756 1.1 explorer * ... and the receive DMA logic ...
757 1.1 explorer */
758 1.1 explorer dmac_write_1(scp, SCA_DSR0, 0); /* disable DMA */
759 1.1 explorer dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);
760 1.1 explorer
761 1.1 explorer dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
762 1.1 explorer dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);
763 1.1 explorer
764 1.8 chopps /* reset descriptors to initial state */
765 1.8 chopps scp->sp_rxstart = 0;
766 1.8 chopps scp->sp_rxend = scp->sp_nrxdesc - 1;
767 1.8 chopps
768 1.1 explorer /*
769 1.1 explorer * CPB == chain pointer base
770 1.1 explorer * CDA == current descriptor address
771 1.1 explorer * EDA == error descriptor address (overwrite position)
772 1.8 chopps * because cda can't be eda when starting, we always
773 1.8 chopps * have a single buffer gap between cda and eda
774 1.1 explorer */
775 1.1 explorer dmac_write_1(scp, SCA_CPB0,
776 1.8 chopps (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
777 1.8 chopps dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
778 1.8 chopps dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
779 1.8 chopps (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));
780 1.1 explorer
781 1.1 explorer /*
782 1.1 explorer * enable receiver DMA
783 1.1 explorer */
784 1.29 perry dmac_write_1(scp, SCA_DIR0,
785 1.1 explorer (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
786 1.1 explorer dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
787 1.1 explorer }
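
/*
 * A concrete picture of the state set up above (a sketch with
 * sp_nrxdesc == 4 for illustration): CDA points at desc[0] and EDA at
 * desc[3], so the DMAC may fill desc[0] through desc[2] before reaching
 * the overwrite position; the overwrite position is advanced later as
 * received frames are consumed (see sca_frame_read_done()).
 */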
788 1.1 explorer
789 1.1 explorer /*
790 1.1 explorer * Queue the packet for our start routine to transmit
791 1.1 explorer */
792 1.1 explorer static int
793 1.33 christos sca_output(
794 1.33 christos struct ifnet *ifp,
795 1.33 christos struct mbuf *m,
796 1.35 dyoung const struct sockaddr *dst,
797 1.49 ozaki const struct rtentry *rt0)
798 1.1 explorer {
799 1.9 chopps struct hdlc_header *hdlc;
800 1.13 thorpej struct ifqueue *ifq = NULL;
801 1.13 thorpej int s, error, len;
802 1.13 thorpej short mflags;
803 1.1 explorer
804 1.1 explorer error = 0;
805 1.1 explorer
806 1.1 explorer if ((ifp->if_flags & IFF_UP) != IFF_UP) {
807 1.1 explorer error = ENETDOWN;
808 1.1 explorer goto bad;
809 1.1 explorer }
810 1.1 explorer
811 1.13 thorpej /*
812 1.13 thorpej * If the queueing discipline needs packet classification,
813 1.13 thorpej * do it before prepending link headers.
814 1.13 thorpej */
815 1.48 knakahar IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
816 1.1 explorer
817 1.1 explorer /*
818 1.1 explorer * determine address family, and priority for this packet
819 1.1 explorer */
820 1.1 explorer switch (dst->sa_family) {
821 1.9 chopps #ifdef INET
822 1.1 explorer case AF_INET:
823 1.1 explorer #ifdef SCA_USE_FASTQ
824 1.9 chopps if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
825 1.9 chopps == IPTOS_LOWDELAY)
826 1.9 chopps ifq = &((sca_port_t *)ifp->if_softc)->fastq;
827 1.1 explorer #endif
828 1.9 chopps /*
829 1.9 chopps * Add cisco serial line header. If there is no
830 1.9 chopps * space in the first mbuf, allocate another.
831 1.29 perry */
832 1.9 chopps M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
833 1.9 chopps if (m == NULL)
834 1.9 chopps return (ENOBUFS);
835 1.9 chopps hdlc = mtod(m, struct hdlc_header *);
836 1.9 chopps hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
837 1.29 perry break;
838 1.9 chopps #endif
839 1.15 itojun #ifdef INET6
840 1.15 itojun case AF_INET6:
841 1.15 itojun /*
842 1.15 itojun * Add cisco serial line header. If there is no
843 1.15 itojun * space in the first mbuf, allocate another.
844 1.29 perry */
845 1.15 itojun M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
846 1.15 itojun if (m == NULL)
847 1.15 itojun return (ENOBUFS);
848 1.15 itojun hdlc = mtod(m, struct hdlc_header *);
849 1.15 itojun hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
850 1.29 perry break;
851 1.15 itojun #endif
852 1.1 explorer default:
853 1.1 explorer printf("%s: address family %d unsupported\n",
854 1.1 explorer ifp->if_xname, dst->sa_family);
855 1.1 explorer error = EAFNOSUPPORT;
856 1.1 explorer goto bad;
857 1.1 explorer }
858 1.1 explorer
859 1.9 chopps /* finish */
860 1.1 explorer if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
861 1.9 chopps hdlc->h_addr = CISCO_MULTICAST;
862 1.1 explorer else
863 1.9 chopps hdlc->h_addr = CISCO_UNICAST;
864 1.9 chopps hdlc->h_resv = 0;
865 1.1 explorer
866 1.1 explorer /*
867 1.1 explorer * queue the packet. If interactive, use the fast queue.
868 1.1 explorer */
869 1.13 thorpej mflags = m->m_flags;
870 1.13 thorpej len = m->m_pkthdr.len;
871 1.2 mycroft s = splnet();
872 1.13 thorpej if (ifq != NULL) {
873 1.13 thorpej if (IF_QFULL(ifq)) {
874 1.13 thorpej IF_DROP(ifq);
875 1.13 thorpej m_freem(m);
876 1.13 thorpej error = ENOBUFS;
877 1.13 thorpej } else
878 1.13 thorpej IF_ENQUEUE(ifq, m);
879 1.13 thorpej } else
880 1.48 knakahar IFQ_ENQUEUE(&ifp->if_snd, m, error);
881 1.55 thorpej net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
882 1.13 thorpej if (error != 0) {
883 1.55 thorpej if_statinc_ref(nsr, if_oerrors);
884 1.55 thorpej if_statinc_ref(nsr, if_collisions);
885 1.55 thorpej IF_STAT_PUTREF(ifp);
886 1.13 thorpej splx(s);
887 1.13 thorpej return (error);
888 1.1 explorer }
889 1.55 thorpej if_statadd_ref(nsr, if_obytes, len);
890 1.13 thorpej if (mflags & M_MCAST)
891 1.55 thorpej if_statinc_ref(nsr, if_omcasts);
892 1.55 thorpej IF_STAT_PUTREF(ifp);
893 1.1 explorer
894 1.1 explorer sca_start(ifp);
895 1.1 explorer splx(s);
896 1.1 explorer
897 1.1 explorer return (error);
898 1.1 explorer
899 1.1 explorer bad:
900 1.1 explorer if (m)
901 1.1 explorer m_freem(m);
902 1.1 explorer return (error);
903 1.1 explorer }
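
/*
 * The header prepended above is the small Cisco-style serial line header
 * (struct hdlc_header, declared in the driver's headers); a summary of
 * how sca_output() fills it, with the conventional Cisco HDLC values
 * shown for illustration only:
 *
 *	h_addr  - CISCO_UNICAST or CISCO_MULTICAST (conventionally
 *	          0x0f / 0x8f)
 *	h_resv  - 0
 *	h_proto - HDLC_PROTOCOL_IP or HDLC_PROTOCOL_IPV6, stored in
 *	          network byte order (conventionally the ethertype
 *	          values 0x0800 and 0x86dd)
 */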
904 1.1 explorer
905 1.1 explorer static int
906 1.41 dsl sca_ioctl(struct ifnet *ifp, u_long cmd, void *data)
907 1.1 explorer {
908 1.1 explorer struct ifreq *ifr;
909 1.1 explorer struct ifaddr *ifa;
910 1.1 explorer int error;
911 1.1 explorer int s;
912 1.1 explorer
913 1.2 mycroft s = splnet();
914 1.1 explorer
915 1.40 dyoung ifr = (struct ifreq *)data;
916 1.40 dyoung ifa = (struct ifaddr *)data;
917 1.1 explorer error = 0;
918 1.1 explorer
919 1.1 explorer switch (cmd) {
920 1.40 dyoung case SIOCINITIFADDR:
921 1.15 itojun switch(ifa->ifa_addr->sa_family) {
922 1.9 chopps #ifdef INET
923 1.15 itojun case AF_INET:
924 1.15 itojun #endif
925 1.15 itojun #ifdef INET6
926 1.15 itojun case AF_INET6:
927 1.15 itojun #endif
928 1.15 itojun #if defined(INET) || defined(INET6)
929 1.9 chopps ifp->if_flags |= IFF_UP;
930 1.1 explorer sca_port_up(ifp->if_softc);
931 1.15 itojun break;
932 1.9 chopps #endif
933 1.15 itojun default:
934 1.1 explorer error = EAFNOSUPPORT;
935 1.15 itojun break;
936 1.15 itojun }
937 1.1 explorer break;
938 1.1 explorer
939 1.1 explorer case SIOCSIFDSTADDR:
940 1.9 chopps #ifdef INET
941 1.15 itojun if (ifa->ifa_addr->sa_family == AF_INET)
942 1.15 itojun break;
943 1.15 itojun #endif
944 1.15 itojun #ifdef INET6
945 1.15 itojun if (ifa->ifa_addr->sa_family == AF_INET6)
946 1.15 itojun break;
947 1.15 itojun #endif
948 1.9 chopps error = EAFNOSUPPORT;
949 1.1 explorer break;
950 1.1 explorer
951 1.1 explorer case SIOCADDMULTI:
952 1.1 explorer case SIOCDELMULTI:
953 1.15 itojun /* XXX need multicast group management code */
954 1.1 explorer if (ifr == NULL) {
955 1.1 explorer error = EAFNOSUPPORT; /* XXX */
956 1.1 explorer break;
957 1.1 explorer }
958 1.37 dyoung switch (ifreq_getaddr(cmd, ifr)->sa_family) {
959 1.1 explorer #ifdef INET
960 1.1 explorer case AF_INET:
961 1.1 explorer break;
962 1.1 explorer #endif
963 1.15 itojun #ifdef INET6
964 1.15 itojun case AF_INET6:
965 1.15 itojun break;
966 1.15 itojun #endif
967 1.1 explorer default:
968 1.1 explorer error = EAFNOSUPPORT;
969 1.1 explorer break;
970 1.1 explorer }
971 1.1 explorer break;
972 1.1 explorer
973 1.1 explorer case SIOCSIFFLAGS:
974 1.40 dyoung if ((error = ifioctl_common(ifp, cmd, data)) != 0)
975 1.40 dyoung break;
976 1.9 chopps if (ifr->ifr_flags & IFF_UP) {
977 1.9 chopps ifp->if_flags |= IFF_UP;
978 1.1 explorer sca_port_up(ifp->if_softc);
979 1.9 chopps } else {
980 1.9 chopps ifp->if_flags &= ~IFF_UP;
981 1.1 explorer sca_port_down(ifp->if_softc);
982 1.9 chopps }
983 1.1 explorer
984 1.1 explorer break;
985 1.1 explorer
986 1.1 explorer default:
987 1.40 dyoung error = ifioctl_common(ifp, cmd, data);
988 1.1 explorer }
989 1.1 explorer
990 1.1 explorer splx(s);
991 1.1 explorer return error;
992 1.1 explorer }
993 1.1 explorer
994 1.1 explorer /*
995 1.1 explorer * start packet transmission on the interface
996 1.1 explorer *
997 1.2 mycroft * MUST BE CALLED AT splnet()
998 1.1 explorer */
999 1.1 explorer static void
1000 1.41 dsl sca_start(struct ifnet *ifp)
1001 1.1 explorer {
1002 1.1 explorer sca_port_t *scp = ifp->if_softc;
1003 1.1 explorer struct sca_softc *sc = scp->sca;
1004 1.1 explorer struct mbuf *m, *mb_head;
1005 1.1 explorer sca_desc_t *desc;
1006 1.8 chopps u_int8_t *buf, stat;
1007 1.1 explorer u_int32_t buf_p;
1008 1.6 erh int nexttx;
1009 1.1 explorer int trigger_xmit;
1010 1.8 chopps u_int len;
1011 1.8 chopps
1012 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));
1013 1.1 explorer
1014 1.1 explorer /*
1015 1.1 explorer * can't queue when we are full or transmitter is busy
1016 1.1 explorer */
1017 1.8 chopps #ifdef oldcode
1018 1.8 chopps if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
1019 1.8 chopps || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
1020 1.8 chopps return;
1021 1.8 chopps #else
1022 1.8 chopps if (scp->sp_txinuse
1023 1.1 explorer || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
1024 1.1 explorer return;
1025 1.8 chopps #endif
1026 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));
1027 1.1 explorer
1028 1.8 chopps /*
1029 1.8 chopps * XXX assume that all tx desc and bufs in same page
1030 1.8 chopps */
1031 1.8 chopps if (sc->sc_usedma)
1032 1.8 chopps bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1033 1.8 chopps 0, sc->scu_allocsize,
1034 1.8 chopps BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1035 1.8 chopps else {
1036 1.8 chopps sc->scu_page_on(sc);
1037 1.8 chopps sc->scu_set_page(sc, scp->sp_txdesc_p);
1038 1.8 chopps }
1039 1.1 explorer
1040 1.1 explorer trigger_xmit = 0;
1041 1.1 explorer
1042 1.1 explorer txloop:
1043 1.1 explorer IF_DEQUEUE(&scp->linkq, mb_head);
1044 1.1 explorer if (mb_head == NULL)
1045 1.1 explorer #ifdef SCA_USE_FASTQ
1046 1.1 explorer IF_DEQUEUE(&scp->fastq, mb_head);
1047 1.1 explorer if (mb_head == NULL)
1048 1.1 explorer #endif
1049 1.21 itojun IFQ_DEQUEUE(&ifp->if_snd, mb_head);
1050 1.1 explorer if (mb_head == NULL)
1051 1.1 explorer goto start_xmit;
1052 1.1 explorer
1053 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
1054 1.8 chopps #ifdef oldcode
1055 1.1 explorer if (scp->txinuse != 0) {
1056 1.6 erh /* Kill EOT interrupts on the previous descriptor. */
1057 1.8 chopps desc = &scp->sp_txdesc[scp->txcur];
1058 1.8 chopps stat = sca_desc_read_stat(sc, desc);
1059 1.8 chopps sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);
1060 1.6 erh
1061 1.6 erh /* Figure out what the next free descriptor is. */
1062 1.8 chopps nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1063 1.6 erh } else
1064 1.6 erh nexttx = 0;
1065 1.8 chopps #endif /* oldcode */
1066 1.8 chopps
1067 1.8 chopps if (scp->sp_txinuse)
1068 1.8 chopps nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1069 1.8 chopps else
1070 1.8 chopps nexttx = 0;
1071 1.8 chopps
1072 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));
1073 1.8 chopps
1074 1.8 chopps buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
1075 1.8 chopps buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;
1076 1.8 chopps
1077 1.8 chopps /* XXX hoping we can delay the desc write till after we don't drop. */
1078 1.8 chopps desc = &scp->sp_txdesc[nexttx];
1079 1.8 chopps
1080 1.8 chopps /* XXX isn't this set already?? */
1081 1.8 chopps sca_desc_write_bufp(sc, desc, buf_p);
1082 1.8 chopps len = 0;
1083 1.6 erh
1084 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));
1085 1.1 explorer
1086 1.8 chopps #if 0 /* uncomment this for a core in cc1 */
1087 1.8 chopps X
1088 1.8 chopps #endif
1089 1.1 explorer /*
1090 1.1 explorer * Run through the chain, copying data into the descriptor as we
1091 1.1 explorer * go. If it won't fit in one transmission block, drop the packet.
1092 1.1 explorer * No, this isn't nice, but most of the time it _will_ fit.
1093 1.1 explorer */
1094 1.1 explorer for (m = mb_head ; m != NULL ; m = m->m_next) {
1095 1.1 explorer if (m->m_len != 0) {
1096 1.8 chopps len += m->m_len;
1097 1.8 chopps if (len > SCA_BSIZE) {
1098 1.1 explorer m_freem(mb_head);
1099 1.1 explorer goto txloop;
1100 1.1 explorer }
1101 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX,
1102 1.8 chopps ("TX: about to mbuf len %d\n", m->m_len));
1103 1.8 chopps
1104 1.8 chopps if (sc->sc_usedma)
1105 1.18 thorpej memcpy(buf, mtod(m, u_int8_t *), m->m_len);
1106 1.8 chopps else
1107 1.8 chopps bus_space_write_region_1(sc->scu_memt,
1108 1.8 chopps sc->scu_memh, sca_page_addr(sc, buf_p),
1109 1.8 chopps mtod(m, u_int8_t *), m->m_len);
1110 1.1 explorer buf += m->m_len;
1111 1.8 chopps buf_p += m->m_len;
1112 1.1 explorer }
1113 1.1 explorer }
1114 1.1 explorer
1115 1.8 chopps /* set the buffer, the length, and mark end of frame and end of xfer */
1116 1.8 chopps sca_desc_write_buflen(sc, desc, len);
1117 1.8 chopps sca_desc_write_stat(sc, desc, SCA_DESC_EOM);
1118 1.8 chopps
1119 1.55 thorpej if_statinc(ifp, if_opackets);
1120 1.1 explorer
1121 1.1 explorer /*
1122 1.1 explorer * Pass packet to bpf if there is a listener.
1123 1.1 explorer */
1124 1.54 msaitoh bpf_mtap(ifp, mb_head, BPF_D_OUT);
1125 1.1 explorer
1126 1.1 explorer m_freem(mb_head);
1127 1.1 explorer
1128 1.8 chopps scp->sp_txcur = nexttx;
1129 1.8 chopps scp->sp_txinuse++;
1130 1.1 explorer trigger_xmit = 1;
1131 1.1 explorer
1132 1.1 explorer SCA_DPRINTF(SCA_DEBUG_TX,
1133 1.8 chopps ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));
1134 1.1 explorer
1135 1.8 chopps /*
1136 1.8 chopps * XXX so didn't this use to limit us to 1?! - multi may be untested;
1137 1.8 chopps * sp_ntxdesc used to be hard coded to 2 with a claim of a
1138 1.8 chopps * too-hard-to-find bug
1139 1.8 chopps */
1140 1.8 chopps #ifdef oldcode
1141 1.8 chopps if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
1142 1.8 chopps #endif
1143 1.8 chopps if (scp->sp_txinuse < scp->sp_ntxdesc)
1144 1.1 explorer goto txloop;
1145 1.1 explorer
1146 1.1 explorer start_xmit:
1147 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));
1148 1.8 chopps
1149 1.8 chopps if (trigger_xmit != 0) {
1150 1.8 chopps /* set EOT on final descriptor */
1151 1.8 chopps desc = &scp->sp_txdesc[scp->sp_txcur];
1152 1.8 chopps stat = sca_desc_read_stat(sc, desc);
1153 1.8 chopps sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
1154 1.8 chopps }
1155 1.8 chopps
1156 1.8 chopps if (sc->sc_usedma)
1157 1.8 chopps bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
1158 1.8 chopps sc->scu_allocsize,
1159 1.8 chopps BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1160 1.1 explorer
1161 1.1 explorer if (trigger_xmit != 0)
1162 1.1 explorer sca_port_starttx(scp);
1163 1.8 chopps
1164 1.8 chopps if (!sc->sc_usedma)
1165 1.8 chopps sc->scu_page_off(sc);
1166 1.1 explorer }
1167 1.1 explorer
1168 1.1 explorer static void
1169 1.34 christos sca_watchdog(struct ifnet *ifp)
1170 1.1 explorer {
1171 1.1 explorer }
1172 1.1 explorer
1173 1.1 explorer int
1174 1.1 explorer sca_hardintr(struct sca_softc *sc)
1175 1.1 explorer {
1176 1.1 explorer u_int8_t isr0, isr1, isr2;
1177 1.1 explorer int ret;
1178 1.1 explorer
1179 1.1 explorer ret = 0; /* non-zero means we processed at least one interrupt */
1180 1.1 explorer
1181 1.8 chopps SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));
1182 1.8 chopps
1183 1.1 explorer while (1) {
1184 1.1 explorer /*
1185 1.1 explorer * read SCA interrupts
1186 1.1 explorer */
1187 1.1 explorer isr0 = sca_read_1(sc, SCA_ISR0);
1188 1.1 explorer isr1 = sca_read_1(sc, SCA_ISR1);
1189 1.1 explorer isr2 = sca_read_1(sc, SCA_ISR2);
1190 1.1 explorer
1191 1.1 explorer if (isr0 == 0 && isr1 == 0 && isr2 == 0)
1192 1.1 explorer break;
1193 1.1 explorer
1194 1.1 explorer SCA_DPRINTF(SCA_DEBUG_INTR,
1195 1.1 explorer ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
1196 1.1 explorer isr0, isr1, isr2));
1197 1.1 explorer
1198 1.1 explorer /*
1199 1.8 chopps * check DMAC interrupt
1200 1.1 explorer */
1201 1.1 explorer if (isr1 & 0x0f)
1202 1.1 explorer ret += sca_dmac_intr(&sc->sc_ports[0],
1203 1.1 explorer isr1 & 0x0f);
1204 1.8 chopps
1205 1.1 explorer if (isr1 & 0xf0)
1206 1.1 explorer ret += sca_dmac_intr(&sc->sc_ports[1],
1207 1.8 chopps (isr1 & 0xf0) >> 4);
1208 1.8 chopps
1209 1.8 chopps /*
1210 1.8 chopps * msci interrupts
1211 1.8 chopps */
1212 1.8 chopps if (isr0 & 0x0f)
1213 1.8 chopps ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);
1214 1.1 explorer
1215 1.8 chopps if (isr0 & 0xf0)
1216 1.8 chopps ret += sca_msci_intr(&sc->sc_ports[1],
1217 1.8 chopps (isr0 & 0xf0) >> 4);
1218 1.1 explorer
1219 1.1 explorer #if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
1220 1.1 explorer if (isr2)
1221 1.1 explorer ret += sca_timer_intr(sc, isr2);
1222 1.1 explorer #endif
1223 1.1 explorer }
1224 1.1 explorer
1225 1.1 explorer return (ret);
1226 1.1 explorer }
1227 1.1 explorer
1228 1.1 explorer static int
1229 1.1 explorer sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
1230 1.1 explorer {
1231 1.1 explorer u_int8_t dsr;
1232 1.1 explorer int ret;
1233 1.1 explorer
1234 1.1 explorer ret = 0;
1235 1.1 explorer
1236 1.1 explorer /*
1237 1.1 explorer * Check transmit channel
1238 1.1 explorer */
1239 1.8 chopps if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
1240 1.1 explorer SCA_DPRINTF(SCA_DEBUG_INTR,
1241 1.8 chopps ("TX INTERRUPT port %d\n", scp->sp_port));
1242 1.1 explorer
1243 1.1 explorer dsr = 1;
1244 1.1 explorer while (dsr != 0) {
1245 1.1 explorer ret++;
1246 1.1 explorer /*
1247 1.1 explorer * reset interrupt
1248 1.1 explorer */
1249 1.1 explorer dsr = dmac_read_1(scp, SCA_DSR1);
1250 1.1 explorer dmac_write_1(scp, SCA_DSR1,
1251 1.1 explorer dsr | SCA_DSR_DEWD);
1252 1.1 explorer
1253 1.1 explorer /*
1254 1.1 explorer * filter out the bits we don't care about
1255 1.1 explorer */
1256 1.1 explorer dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
1257 1.1 explorer if (dsr == 0)
1258 1.1 explorer break;
1259 1.1 explorer
1260 1.1 explorer /*
1261 1.1 explorer * check for counter overflow
1262 1.1 explorer */
1263 1.1 explorer if (dsr & SCA_DSR_COF) {
1264 1.1 explorer printf("%s: TXDMA counter overflow\n",
1265 1.1 explorer scp->sp_if.if_xname);
1266 1.29 perry
1267 1.1 explorer scp->sp_if.if_flags &= ~IFF_OACTIVE;
1268 1.8 chopps scp->sp_txcur = 0;
1269 1.8 chopps scp->sp_txinuse = 0;
1270 1.1 explorer }
1271 1.1 explorer
1272 1.1 explorer /*
1273 1.1 explorer * check for buffer overflow
1274 1.1 explorer */
1275 1.1 explorer if (dsr & SCA_DSR_BOF) {
1276 1.1 explorer printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
1277 1.1 explorer scp->sp_if.if_xname,
1278 1.1 explorer dmac_read_2(scp, SCA_CDAL1),
1279 1.1 explorer dmac_read_2(scp, SCA_EDAL1),
1280 1.1 explorer dmac_read_1(scp, SCA_CPB1));
1281 1.1 explorer
1282 1.1 explorer /*
1283 1.1 explorer * Yikes. Arrange for a full
1284 1.1 explorer * transmitter restart.
1285 1.1 explorer */
1286 1.1 explorer scp->sp_if.if_flags &= ~IFF_OACTIVE;
1287 1.8 chopps scp->sp_txcur = 0;
1288 1.8 chopps scp->sp_txinuse = 0;
1289 1.1 explorer }
1290 1.1 explorer
1291 1.1 explorer /*
1292 1.1 explorer * check for end of transfer, which is not
1293 1.1 explorer * an error. It means that all data queued
1294 1.1 explorer * was transmitted, and we mark ourself as
1295 1.1 explorer * not in use and stop the watchdog timer.
1296 1.1 explorer */
1297 1.1 explorer if (dsr & SCA_DSR_EOT) {
1298 1.1 explorer SCA_DPRINTF(SCA_DEBUG_TX,
1299 1.8 chopps ("Transmit completed. cda %x eda %x dsr %x\n",
1300 1.8 chopps dmac_read_2(scp, SCA_CDAL1),
1301 1.8 chopps dmac_read_2(scp, SCA_EDAL1),
1302 1.8 chopps dsr));
1303 1.1 explorer
1304 1.1 explorer scp->sp_if.if_flags &= ~IFF_OACTIVE;
1305 1.8 chopps scp->sp_txcur = 0;
1306 1.8 chopps scp->sp_txinuse = 0;
1307 1.1 explorer
1308 1.1 explorer /*
1309 1.1 explorer * check for more packets
1310 1.1 explorer */
1311 1.53 ozaki if_schedule_deferred_start(&scp->sp_if);
1312 1.1 explorer }
1313 1.1 explorer }
1314 1.1 explorer }
1315 1.1 explorer /*
1316 1.1 explorer * receive channel check
1317 1.1 explorer */
1318 1.8 chopps if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
1319 1.8 chopps SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
1320 1.8 chopps (scp == &scp->sca->sc_ports[0] ? 0 : 1)));
1321 1.1 explorer
1322 1.1 explorer dsr = 1;
1323 1.1 explorer while (dsr != 0) {
1324 1.1 explorer ret++;
1325 1.1 explorer
1326 1.1 explorer dsr = dmac_read_1(scp, SCA_DSR0);
1327 1.1 explorer dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);
1328 1.1 explorer
1329 1.1 explorer /*
1330 1.1 explorer * filter out the bits we don't care about
1331 1.1 explorer */
1332 1.1 explorer dsr &= (SCA_DSR_EOM | SCA_DSR_COF
1333 1.1 explorer | SCA_DSR_BOF | SCA_DSR_EOT);
1334 1.1 explorer if (dsr == 0)
1335 1.1 explorer break;
1336 1.1 explorer
1337 1.1 explorer /*
1338 1.1 explorer * End of frame
1339 1.1 explorer */
1340 1.1 explorer if (dsr & SCA_DSR_EOM) {
1341 1.1 explorer SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));
1342 1.1 explorer
1343 1.1 explorer sca_get_packets(scp);
1344 1.1 explorer }
1345 1.1 explorer
1346 1.1 explorer /*
1347 1.1 explorer * check for counter overflow
1348 1.1 explorer */
1349 1.1 explorer if (dsr & SCA_DSR_COF) {
1350 1.1 explorer printf("%s: RXDMA counter overflow\n",
1351 1.1 explorer scp->sp_if.if_xname);
1352 1.1 explorer
1353 1.1 explorer sca_dmac_rxinit(scp);
1354 1.1 explorer }
1355 1.1 explorer
1356 1.1 explorer /*
1357 1.1 explorer * check for end of transfer, which means we
1358 1.1 explorer * ran out of descriptors to receive into.
1359 1.1 explorer * This means the line is much faster than
1360 1.1 explorer * we can handle.
1361 1.1 explorer */
1362 1.1 explorer if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
1363 1.1 explorer printf("%s: RXDMA buffer overflow\n",
1364 1.1 explorer scp->sp_if.if_xname);
1365 1.1 explorer
1366 1.1 explorer sca_dmac_rxinit(scp);
1367 1.1 explorer }
1368 1.1 explorer }
1369 1.1 explorer }
1370 1.1 explorer
1371 1.1 explorer return ret;
1372 1.1 explorer }
1373 1.1 explorer
1374 1.1 explorer static int
1375 1.34 christos sca_msci_intr(sca_port_t *scp, u_int8_t isr)
1376 1.1 explorer {
1377 1.8 chopps u_int8_t st1, trc0;
1378 1.1 explorer
1379 1.8 chopps /* get and clear the specific interrupt -- should act on it :) */
1380 1.8 chopps if ((st1 = msci_read_1(scp, SCA_ST10))) {
1381 1.8 chopps /* clear the interrupt */
1382 1.8 chopps msci_write_1(scp, SCA_ST10, st1);
1383 1.8 chopps
1384 1.8 chopps if (st1 & SCA_ST1_UDRN) {
1385 1.8 chopps /* underrun -- try to increase ready control */
1386 1.8 chopps trc0 = msci_read_1(scp, SCA_TRC00);
1387 1.8 chopps if (trc0 == 0x1f)
1388 1.16 wiz printf("TX: underrun - fifo depth maxed\n");
1389 1.8 chopps else {
1390 1.8 chopps if ((trc0 += 2) > 0x1f)
1391 1.8 chopps trc0 = 0x1f;
1392 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX,
1393 1.8 chopps ("TX: udrn - incr fifo to %d\n", trc0));
1394 1.8 chopps msci_write_1(scp, SCA_TRC00, trc0);
1395 1.8 chopps }
1396 1.8 chopps }
1397 1.8 chopps }
1398 1.8 chopps return (0);
1399 1.1 explorer }
1400 1.1 explorer
1401 1.1 explorer static void
1402 1.1 explorer sca_get_packets(sca_port_t *scp)
1403 1.1 explorer {
1404 1.8 chopps struct sca_softc *sc;
1405 1.1 explorer
1406 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));
1407 1.1 explorer
1408 1.8 chopps sc = scp->sca;
1409 1.8 chopps if (sc->sc_usedma)
1410 1.8 chopps bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1411 1.8 chopps 0, sc->scu_allocsize,
1412 1.8 chopps BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1413 1.8 chopps else {
1414 1.8 chopps /*
1415 1.8 chopps * XXX this code is unable to deal with rx stuff
1416 1.8 chopps * in more than 1 page
1417 1.8 chopps */
1418 1.8 chopps sc->scu_page_on(sc);
1419 1.8 chopps sc->scu_set_page(sc, scp->sp_rxdesc_p);
1420 1.8 chopps }
1421 1.1 explorer
1422 1.8 chopps /* process as many frames as are available */
1423 1.8 chopps while (sca_frame_avail(scp)) {
1424 1.8 chopps sca_frame_process(scp);
1425 1.8 chopps sca_frame_read_done(scp);
1426 1.1 explorer }
1427 1.1 explorer
1428 1.8 chopps if (sc->sc_usedma)
1429 1.8 chopps bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1430 1.8 chopps 0, sc->scu_allocsize,
1431 1.8 chopps BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1432 1.8 chopps else
1433 1.8 chopps sc->scu_page_off(sc);
1434 1.1 explorer }
1435 1.1 explorer
1436 1.1 explorer /*
1437 1.1 explorer * Starting with the first descriptor we wanted to read into, up to but
1438 1.1 explorer * not including the current SCA read descriptor, look for a packet.
1439 1.8 chopps *
1440 1.8 chopps * must be called at splnet()
1441 1.1 explorer */
1442 1.1 explorer static int
1443 1.8 chopps sca_frame_avail(sca_port_t *scp)
1444 1.1 explorer {
1445 1.8 chopps u_int16_t cda;
1446 1.8 chopps u_int32_t desc_p; /* physical address (lower 16 bits) */
1447 1.8 chopps sca_desc_t *desc;
1448 1.8 chopps u_int8_t rxstat;
1449 1.8 chopps int cdaidx, toolong;
1450 1.1 explorer
1451 1.1 explorer /*
1452 1.1 explorer * Read the current descriptor from the SCA.
1453 1.1 explorer */
1454 1.1 explorer cda = dmac_read_2(scp, SCA_CDAL0);
1455 1.1 explorer
1456 1.1 explorer /*
1457 1.1 explorer * calculate the index of the current descriptor
1458 1.1 explorer */
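	/*
	 * CDAL holds only the low 16 bits of the current descriptor's
	 * physical address, so compare it against the low 16 bits of the
	 * ring's base; the byte offset divided by the descriptor size is
	 * the index into the ring.
	 */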
1459 1.8 chopps desc_p = (scp->sp_rxdesc_p & 0xFFFF);
1460 1.8 chopps desc_p = cda - desc_p;
1461 1.1 explorer cdaidx = desc_p / sizeof(sca_desc_t);
1462 1.1 explorer
1463 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX,
1464 1.8 chopps ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
1465 1.8 chopps cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));
1466 1.8 chopps
1467 1.8 chopps /* sanity check: the descriptor index must lie within the ring */
1468 1.8 chopps if (cdaidx >= scp->sp_nrxdesc)
1469 1.8 chopps panic("current descriptor index out of range");
1470 1.8 chopps
1471 1.8 chopps /* see if we have a valid frame available */
1472 1.8 chopps toolong = 0;
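	/*
	 * Walk the ring from sp_rxstart up to, but not including, the
	 * descriptor the DMAC is currently filling.  Every descriptor we
	 * pass over without returning is handed back via
	 * sca_frame_read_done(), which serves as the loop's increment.
	 */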
1473 1.8 chopps for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
1474 1.1 explorer /*
1475 1.1 explorer * We might have a valid descriptor. Set up a pointer
1476 1.1 explorer * to the kva address for it so we can more easily examine
1477 1.1 explorer * the contents.
1478 1.1 explorer */
1479 1.8 chopps desc = &scp->sp_rxdesc[scp->sp_rxstart];
1480 1.8 chopps rxstat = sca_desc_read_stat(scp->sca, desc);
1481 1.8 chopps
1482 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
1483 1.8 chopps scp->sp_port, scp->sp_rxstart, rxstat));
1484 1.1 explorer
1485 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
1486 1.8 chopps scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));
1487 1.1 explorer
1488 1.1 explorer /*
1489 1.1 explorer * check for errors
1490 1.1 explorer */
1491 1.8 chopps if (rxstat & SCA_DESC_ERRORS) {
1492 1.8 chopps /*
1493 1.8 chopps * treat an error condition as the
1494 1.8 chopps * end of a frame
1495 1.8 chopps */
1496 1.55 thorpej if_statinc(&scp->sp_if, if_ierrors);
1497 1.8 chopps toolong = 0;
1498 1.8 chopps continue;
1499 1.8 chopps }
1500 1.1 explorer
1501 1.1 explorer /*
1502 1.8 chopps * if this descriptor ends a frame and we are not in the
1503 1.8 chopps * middle of skipping an overlong one, we are done; otherwise
1504 1.8 chopps * reset and look for another good frame
1505 1.1 explorer */
1506 1.1 explorer if (rxstat & SCA_DESC_EOM) {
1507 1.8 chopps if (!toolong)
1508 1.8 chopps return (1);
1509 1.8 chopps toolong = 0;
1510 1.8 chopps } else if (!toolong) {
1511 1.8 chopps /*
1512 1.8 chopps * we currently don't deal with frames
1513 1.8 chopps * larger than a single buffer (fixed MTU)
1514 1.8 chopps */
1515 1.55 thorpej if_statinc(&scp->sp_if, if_ierrors);
1516 1.8 chopps toolong = 1;
1517 1.1 explorer }
1518 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
1519 1.8 chopps scp->sp_rxstart));
1520 1.1 explorer }
1521 1.1 explorer
1522 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
1523 1.1 explorer return 0;
1524 1.1 explorer }
1525 1.1 explorer
1526 1.1 explorer /*
1527 1.1 explorer * Pass the packet up to the kernel if it is a packet we want to pay
1528 1.1 explorer * attention to.
1529 1.1 explorer *
1530 1.2 mycroft * MUST BE CALLED AT splnet()
1531 1.1 explorer */
1532 1.1 explorer static void
1533 1.8 chopps sca_frame_process(sca_port_t *scp)
1534 1.1 explorer {
1535 1.47 rmind pktqueue_t *pktq = NULL;
1536 1.47 rmind struct ifqueue *ifq = NULL;
1537 1.9 chopps struct hdlc_header *hdlc;
1538 1.9 chopps struct cisco_pkt *cisco;
1539 1.8 chopps sca_desc_t *desc;
1540 1.8 chopps struct mbuf *m;
1541 1.8 chopps u_int8_t *bufp;
1542 1.8 chopps u_int16_t len;
1543 1.8 chopps u_int32_t t;
1544 1.46 msaitoh int isr = 0;
1545 1.8 chopps
1546 1.32 kardel t = time_uptime * 1000;
1547 1.8 chopps desc = &scp->sp_rxdesc[scp->sp_rxstart];
1548 1.8 chopps bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
1549 1.8 chopps len = sca_desc_read_buflen(scp->sca, desc);
1550 1.8 chopps
1551 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX,
1552 1.8 chopps ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
1553 1.8 chopps (bus_addr_t)bufp, len));
1554 1.1 explorer
1555 1.8 chopps #if SCA_DEBUG_LEVEL > 0
1556 1.8 chopps if (sca_debug & SCA_DEBUG_RXPKT)
1557 1.8 chopps sca_frame_print(scp, desc, bufp);
1558 1.8 chopps #endif
1559 1.1 explorer /*
1560 1.1 explorer * skip packets that are too short
1561 1.1 explorer */
1562 1.9 chopps if (len < sizeof(struct hdlc_header)) {
1563 1.55 thorpej if_statinc(&scp->sp_if, if_ierrors);
1564 1.1 explorer return;
1565 1.9 chopps }
1566 1.1 explorer
1567 1.8 chopps m = sca_mbuf_alloc(scp->sca, bufp, len);
1568 1.8 chopps if (m == NULL) {
1569 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
1570 1.8 chopps return;
1571 1.8 chopps }
1572 1.1 explorer
1573 1.1 explorer /*
1574 1.1 explorer * read and then strip off the HDLC information
1575 1.1 explorer */
1576 1.9 chopps m = m_pullup(m, sizeof(struct hdlc_header));
1577 1.8 chopps if (m == NULL) {
1578 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
1579 1.9 chopps return;
1580 1.8 chopps }
1581 1.8 chopps
1582 1.52 ozaki bpf_mtap_softint(&scp->sp_if, m);
1583 1.1 explorer
1584 1.55 thorpej if_statinc(&scp->sp_if, if_ipackets);
1585 1.1 explorer
1586 1.9 chopps hdlc = mtod(m, struct hdlc_header *);
1587 1.9 chopps switch (ntohs(hdlc->h_proto)) {
1588 1.9 chopps #ifdef INET
1589 1.1 explorer case HDLC_PROTOCOL_IP:
1590 1.1 explorer SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
1591 1.50 ozaki m_set_rcvif(m, &scp->sp_if);
1592 1.9 chopps m->m_pkthdr.len -= sizeof(struct hdlc_header);
1593 1.9 chopps m->m_data += sizeof(struct hdlc_header);
1594 1.9 chopps m->m_len -= sizeof(struct hdlc_header);
1595 1.47 rmind pktq = ip_pktq;
1596 1.9 chopps break;
1597 1.9 chopps #endif /* INET */
1598 1.15 itojun #ifdef INET6
1599 1.15 itojun case HDLC_PROTOCOL_IPV6:
1600 1.15 itojun SCA_DPRINTF(SCA_DEBUG_RX, ("Received IPv6 packet\n"));
1601 1.50 ozaki m_set_rcvif(m, &scp->sp_if);
1602 1.15 itojun m->m_pkthdr.len -= sizeof(struct hdlc_header);
1603 1.15 itojun m->m_data += sizeof(struct hdlc_header);
1604 1.15 itojun m->m_len -= sizeof(struct hdlc_header);
1605 1.47 rmind pktq = ip6_pktq;
1606 1.15 itojun break;
1607 1.15 itojun #endif /* INET6 */
1608 1.1 explorer case CISCO_KEEPALIVE:
1609 1.1 explorer SCA_DPRINTF(SCA_DEBUG_CISCO,
1610 1.1 explorer ("Received CISCO keepalive packet\n"));
1611 1.1 explorer
1612 1.1 explorer if (len < CISCO_PKT_LEN) {
1613 1.1 explorer SCA_DPRINTF(SCA_DEBUG_CISCO,
1614 1.1 explorer ("short CISCO packet %d, wanted %d\n",
1615 1.1 explorer len, CISCO_PKT_LEN));
1616 1.55 thorpej if_statinc(&scp->sp_if, if_ierrors);
1617 1.9 chopps goto dropit;
1618 1.1 explorer }
1619 1.1 explorer
1620 1.9 chopps m = m_pullup(m, sizeof(struct cisco_pkt));
1621 1.8 chopps if (m == NULL) {
1622 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
1623 1.9 chopps return;
1624 1.8 chopps }
1625 1.1 explorer
1626 1.9 chopps cisco = (struct cisco_pkt *)
1627 1.9 chopps (mtod(m, u_int8_t *) + HDLC_HDRLEN);
1628 1.50 ozaki m_set_rcvif(m, &scp->sp_if);
1629 1.1 explorer
1630 1.1 explorer switch (ntohl(cisco->type)) {
1631 1.1 explorer case CISCO_ADDR_REQ:
1632 1.1 explorer printf("Got CISCO addr_req, ignoring\n");
1633 1.55 thorpej if_statinc(&scp->sp_if, if_ierrors);
1634 1.9 chopps goto dropit;
1635 1.1 explorer
1636 1.1 explorer case CISCO_ADDR_REPLY:
1637 1.1 explorer printf("Got CISCO addr_reply, ignoring\n");
1638 1.55 thorpej if_statinc(&scp->sp_if, if_ierrors);
1639 1.9 chopps goto dropit;
1640 1.1 explorer
1641 1.1 explorer case CISCO_KEEPALIVE_REQ:
1642 1.8 chopps
1643 1.1 explorer SCA_DPRINTF(SCA_DEBUG_CISCO,
1644 1.1 explorer ("Received KA, mseq %d,"
1645 1.1 explorer " yseq %d, rel 0x%04x, t0"
1646 1.1 explorer " %04x, t1 %04x\n",
1647 1.1 explorer ntohl(cisco->par1), ntohl(cisco->par2),
1648 1.1 explorer ntohs(cisco->rel), ntohs(cisco->time0),
1649 1.1 explorer ntohs(cisco->time1)));
1650 1.1 explorer
1651 1.1 explorer scp->cka_lastrx = ntohl(cisco->par1);
1652 1.1 explorer scp->cka_lasttx++;
1653 1.1 explorer
1654 1.1 explorer /*
1655 1.1 explorer * schedule the transmit right here.
1656 1.1 explorer */
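			/*
			 * Build the reply in the received mbuf: par2 echoes the
			 * peer's sequence number, par1 carries our own, and
			 * time0/time1 hold the high and low 16 bits of the
			 * uptime in milliseconds computed at the top of this
			 * function.
			 */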
1657 1.8 chopps cisco->par2 = cisco->par1;
1658 1.8 chopps cisco->par1 = htonl(scp->cka_lasttx);
1659 1.8 chopps cisco->time0 = htons((u_int16_t)(t >> 16));
1660 1.8 chopps cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));
1661 1.1 explorer
1662 1.1 explorer ifq = &scp->linkq;
1663 1.1 explorer if (IF_QFULL(ifq)) {
1664 1.1 explorer IF_DROP(ifq);
1665 1.9 chopps goto dropit;
1666 1.1 explorer }
1667 1.1 explorer IF_ENQUEUE(ifq, m);
1668 1.1 explorer
1669 1.1 explorer sca_start(&scp->sp_if);
1670 1.1 explorer
1671 1.8 chopps /* sca_start() may have switched memory pages; restore ours */
1672 1.8 chopps if (!scp->sca->sc_usedma) {
1673 1.8 chopps scp->sca->scu_set_page(scp->sca,
1674 1.8 chopps scp->sp_rxdesc_p);
1675 1.8 chopps scp->sca->scu_page_on(scp->sca);
1676 1.8 chopps }
1677 1.9 chopps return;
1678 1.1 explorer default:
1679 1.1 explorer SCA_DPRINTF(SCA_DEBUG_CISCO,
1680 1.1 explorer ("Unknown CISCO keepalive protocol 0x%04x\n",
1681 1.1 explorer ntohl(cisco->type)));
1682 1.29 perry
1683 1.55 thorpej if_statinc(&scp->sp_if, if_noproto);
1684 1.9 chopps goto dropit;
1685 1.1 explorer }
1686 1.9 chopps return;
1687 1.1 explorer default:
1688 1.1 explorer SCA_DPRINTF(SCA_DEBUG_RX,
1689 1.1 explorer ("Unknown/unexpected ethertype 0x%04x\n",
1690 1.9 chopps ntohs(hdlc->h_proto)));
1691 1.55 thorpej if_statinc(&scp->sp_if, if_noproto);
1692 1.9 chopps goto dropit;
1693 1.9 chopps }
1694 1.9 chopps
1695 1.47 rmind /* Queue the packet */
1696 1.47 rmind if (__predict_true(pktq)) {
1697 1.47 rmind if (__predict_false(!pktq_enqueue(pktq, m, 0))) {
1698 1.55 thorpej if_statinc(&scp->sp_if, if_iqdrops);
1699 1.47 rmind goto dropit;
1700 1.47 rmind }
1701 1.47 rmind return;
1702 1.47 rmind }
1703 1.9 chopps if (!IF_QFULL(ifq)) {
1704 1.9 chopps IF_ENQUEUE(ifq, m);
1705 1.46 msaitoh schednetisr(isr);
1706 1.9 chopps } else {
1707 1.9 chopps IF_DROP(ifq);
1708 1.55 thorpej if_statinc(&scp->sp_if, if_iqdrops);
1709 1.9 chopps goto dropit;
1710 1.9 chopps }
1711 1.9 chopps return;
1712 1.9 chopps dropit:
1713 1.9 chopps if (m)
1714 1.8 chopps m_freem(m);
1715 1.9 chopps return;
1716 1.1 explorer }
1717 1.1 explorer
1718 1.1 explorer #if SCA_DEBUG_LEVEL > 0
1719 1.1 explorer /*
1720 1.1 explorer * do a hex dump of the packet received into descriptor "desc" with
1721 1.1 explorer * data buffer "p"
1722 1.1 explorer */
1723 1.1 explorer static void
1724 1.1 explorer sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
1725 1.1 explorer {
1726 1.1 explorer int i;
1727 1.1 explorer int nothing_yet = 1;
1728 1.8 chopps struct sca_softc *sc;
1729 1.8 chopps u_int len;
1730 1.1 explorer
1731 1.8 chopps sc = scp->sca;
1732 1.8 chopps printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n",
1733 1.8 chopps desc,
1734 1.8 chopps sca_desc_read_chainp(sc, desc),
1735 1.8 chopps sca_desc_read_bufp(sc, desc),
1736 1.8 chopps sca_desc_read_stat(sc, desc),
1737 1.8 chopps (len = sca_desc_read_buflen(sc, desc)));
1738 1.8 chopps
1739 1.8 chopps for (i = 0 ; i < len && i < 256; i++) {
1740 1.8 chopps if (nothing_yet == 1 &&
1741 1.8 chopps (sc->sc_usedma ? *p
1742 1.8 chopps : bus_space_read_1(sc->scu_memt, sc->scu_memh,
1743 1.8 chopps sca_page_addr(sc, p))) == 0) {
1744 1.1 explorer p++;
1745 1.1 explorer continue;
1746 1.1 explorer }
1747 1.1 explorer nothing_yet = 0;
1748 1.1 explorer if (i % 16 == 0)
1749 1.1 explorer printf("\n");
1750 1.29 perry printf("%02x ",
1751 1.8 chopps (sc->sc_usedma ? *p
1752 1.8 chopps : bus_space_read_1(sc->scu_memt, sc->scu_memh,
1753 1.8 chopps sca_page_addr(sc, p))));
1754 1.8 chopps p++;
1755 1.1 explorer }
1756 1.1 explorer
1757 1.1 explorer if (i % 16 != 1)
1758 1.1 explorer printf("\n");
1759 1.1 explorer }
1760 1.1 explorer #endif
1761 1.1 explorer
1762 1.1 explorer /*
1763 1.26 wiz  * advance the receive ring state now that the frame at the current
1764 1.8 chopps  * start descriptor has been consumed
1765 1.8 chopps *
1766 1.8 chopps * must be called at splnet()
1767 1.1 explorer */
1768 1.1 explorer static void
1769 1.8 chopps sca_frame_read_done(sca_port_t *scp)
1770 1.1 explorer {
1771 1.8 chopps u_int16_t edesc_p;
1772 1.1 explorer
1773 1.8 chopps /* update where our indices are */
1774 1.8 chopps scp->sp_rxend = scp->sp_rxstart;
1775 1.8 chopps scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;
1776 1.8 chopps
1777 1.8 chopps /* update the end (error) descriptor address -- the DMAC will not advance past this slot */
1778 1.8 chopps edesc_p = (u_int16_t)scp->sp_rxdesc_p +
1779 1.8 chopps (sizeof(sca_desc_t) * scp->sp_rxend);
1780 1.8 chopps dmac_write_2(scp, SCA_EDAL0, edesc_p);
1781 1.1 explorer }
1782 1.1 explorer
1783 1.1 explorer /*
1784 1.1 explorer * set a port to the "up" state
1785 1.1 explorer */
1786 1.1 explorer static void
1787 1.1 explorer sca_port_up(sca_port_t *scp)
1788 1.1 explorer {
1789 1.1 explorer struct sca_softc *sc = scp->sca;
1790 1.32 kardel struct timeval now;
1791 1.8 chopps #if 0
1792 1.8 chopps u_int8_t ier0, ier1;
1793 1.8 chopps #endif
1794 1.1 explorer
1795 1.1 explorer /*
1796 1.1 explorer * reset things
1797 1.1 explorer */
1798 1.1 explorer #if 0
1799 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
1800 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
1801 1.1 explorer #endif
1802 1.1 explorer /*
1803 1.1 explorer * clear in-use flag
1804 1.1 explorer */
1805 1.1 explorer scp->sp_if.if_flags &= ~IFF_OACTIVE;
1806 1.8 chopps scp->sp_if.if_flags |= IFF_RUNNING;
1807 1.1 explorer
1808 1.1 explorer /*
1809 1.1 explorer * raise DTR
1810 1.1 explorer */
1811 1.8 chopps sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);
1812 1.1 explorer
1813 1.1 explorer /*
1814 1.1 explorer * raise RTS
1815 1.1 explorer */
1816 1.1 explorer msci_write_1(scp, SCA_CTL0,
1817 1.8 chopps (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
1818 1.8 chopps | SCA_CTL_RTS_HIGH);
1819 1.1 explorer
1820 1.8 chopps #if 0
1821 1.1 explorer /*
1822 1.8 chopps * enable interrupts (no timer IER2)
1823 1.1 explorer */
1824 1.8 chopps ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
1825 1.8 chopps | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
1826 1.8 chopps ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
1827 1.8 chopps | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
1828 1.8 chopps if (scp->sp_port == 1) {
1829 1.8 chopps ier0 <<= 4;
1830 1.8 chopps ier1 <<= 4;
1831 1.8 chopps }
1832 1.8 chopps sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
1833 1.8 chopps sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
1834 1.8 chopps #else
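	/*
	 * Port 0's MSCI and DMAC enable bits sit in the low nibble of
	 * IER0/IER1 and port 1's in the high nibble (compare the
	 * shifted-by-4 masks in the disabled block above), so just OR in
	 * the whole nibble.
	 */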
1835 1.1 explorer if (scp->sp_port == 0) {
1836 1.1 explorer sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
1837 1.1 explorer sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
1838 1.29 perry } else {
1839 1.1 explorer sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
1840 1.1 explorer sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
1841 1.1 explorer }
1842 1.8 chopps #endif
1843 1.1 explorer
1844 1.1 explorer /*
1845 1.1 explorer * enable transmit and receive
1846 1.1 explorer */
1847 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
1848 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);
1849 1.1 explorer
1850 1.1 explorer /*
1851 1.1 explorer * reset internal state
1852 1.1 explorer */
1853 1.8 chopps scp->sp_txinuse = 0;
1854 1.8 chopps scp->sp_txcur = 0;
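	/*
	 * seed the keepalive TX sequence number from the clock,
	 * presumably so it differs from one link bounce to the next
	 */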
1855 1.32 kardel getmicrotime(&now);
1856 1.32 kardel scp->cka_lasttx = now.tv_usec;
1857 1.1 explorer scp->cka_lastrx = 0;
1858 1.1 explorer }
1859 1.1 explorer
1860 1.1 explorer /*
1861 1.1 explorer * set a port to the "down" state
1862 1.1 explorer */
1863 1.1 explorer static void
1864 1.1 explorer sca_port_down(sca_port_t *scp)
1865 1.1 explorer {
1866 1.1 explorer struct sca_softc *sc = scp->sca;
1867 1.8 chopps #if 0
1868 1.8 chopps u_int8_t ier0, ier1;
1869 1.8 chopps #endif
1870 1.1 explorer
1871 1.1 explorer /*
1872 1.1 explorer * lower DTR
1873 1.1 explorer */
1874 1.8 chopps sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);
1875 1.1 explorer
1876 1.1 explorer /*
1877 1.1 explorer * lower RTS
1878 1.1 explorer */
1879 1.1 explorer msci_write_1(scp, SCA_CTL0,
1880 1.8 chopps (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
1881 1.8 chopps | SCA_CTL_RTS_LOW);
1882 1.1 explorer
1883 1.1 explorer /*
1884 1.1 explorer * disable interrupts
1885 1.1 explorer */
1886 1.8 chopps #if 0
1887 1.8 chopps ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
1888 1.8 chopps | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
1889 1.8 chopps ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
1890 1.8 chopps | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
1891 1.8 chopps if (scp->sp_port == 1) {
1892 1.8 chopps ier0 <<= 4;
1893 1.8 chopps ier1 <<= 4;
1894 1.8 chopps }
1895 1.8 chopps sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
1896 1.8 chopps sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
1897 1.8 chopps #else
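	/* clear this port's IER0/IER1 nibble (the mirror of sca_port_up) */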
1898 1.1 explorer if (scp->sp_port == 0) {
1899 1.1 explorer sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
1900 1.1 explorer sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
1901 1.29 perry } else {
1902 1.1 explorer sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
1903 1.1 explorer sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
1904 1.1 explorer }
1905 1.8 chopps #endif
1906 1.1 explorer
1907 1.1 explorer /*
1908 1.1 explorer * disable transmit and receive
1909 1.1 explorer */
1910 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
1911 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);
1912 1.1 explorer
1913 1.1 explorer /*
1914 1.1 explorer * no, we're not in use anymore
1915 1.1 explorer */
1916 1.8 chopps scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
1917 1.1 explorer }
1918 1.1 explorer
1919 1.1 explorer /*
1920 1.1 explorer * disable all DMA and interrupts for all ports at once.
1921 1.1 explorer */
1922 1.1 explorer void
1923 1.1 explorer sca_shutdown(struct sca_softc *sca)
1924 1.1 explorer {
1925 1.1 explorer /*
1926 1.1 explorer * disable DMA and interrupts
1927 1.1 explorer */
1928 1.1 explorer sca_write_1(sca, SCA_DMER, 0);
1929 1.1 explorer sca_write_1(sca, SCA_IER0, 0);
1930 1.1 explorer sca_write_1(sca, SCA_IER1, 0);
1931 1.1 explorer }
1932 1.1 explorer
1933 1.1 explorer /*
1934 1.1 explorer * If there are packets to transmit, start the transmit DMA logic.
1935 1.1 explorer */
1936 1.1 explorer static void
1937 1.1 explorer sca_port_starttx(sca_port_t *scp)
1938 1.1 explorer {
1939 1.1 explorer u_int32_t startdesc_p, enddesc_p;
1940 1.1 explorer int enddesc;
1941 1.1 explorer
1942 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));
1943 1.8 chopps
1944 1.1 explorer if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
1945 1.8 chopps || scp->sp_txinuse == 0)
1946 1.1 explorer return;
1947 1.8 chopps
1948 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));
1949 1.8 chopps
1950 1.1 explorer scp->sp_if.if_flags |= IFF_OACTIVE;
1951 1.1 explorer
1952 1.1 explorer /*
1953 1.1 explorer * We have something to do, since we have at least one packet
1954 1.1 explorer * waiting, and we are not already marked as active.
1955 1.1 explorer */
1956 1.8 chopps enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1957 1.8 chopps startdesc_p = scp->sp_txdesc_p;
1958 1.8 chopps enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;
1959 1.1 explorer
1960 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
1961 1.8 chopps startdesc_p, enddesc_p));
1962 1.1 explorer
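	/*
	 * Only the low 16 bits of the descriptor addresses fit in the
	 * CDAL/EDAL registers; the ring is assumed to sit inside a single
	 * 64KB region whose upper address bits were programmed when the
	 * DMAC was set up.
	 */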
1963 1.1 explorer dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
1964 1.1 explorer dmac_write_2(scp, SCA_CDAL1,
1965 1.1 explorer (u_int16_t)(startdesc_p & 0x0000ffff));
1966 1.1 explorer
1967 1.1 explorer /*
1968 1.1 explorer * enable the DMA
1969 1.1 explorer */
1970 1.1 explorer dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
1971 1.1 explorer }
1972 1.1 explorer
1973 1.1 explorer /*
1974 1.1 explorer * allocate an mbuf at least long enough to hold "len" bytes.
1975 1.1 explorer * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
1976 1.1 explorer * otherwise let the caller handle copying the data in.
1977 1.1 explorer */
1978 1.1 explorer static struct mbuf *
1979 1.36 christos sca_mbuf_alloc(struct sca_softc *sc, void *p, u_int len)
1980 1.1 explorer {
1981 1.1 explorer struct mbuf *m;
1982 1.1 explorer
1983 1.1 explorer /*
1984 1.1 explorer * allocate an mbuf and copy the important bits of data
1985 1.1 explorer * into it. If the packet won't fit in the header,
1986 1.1 explorer * allocate a cluster for it and store it there.
1987 1.1 explorer */
1988 1.1 explorer MGETHDR(m, M_DONTWAIT, MT_DATA);
1989 1.1 explorer if (m == NULL)
1990 1.1 explorer return NULL;
1991 1.1 explorer if (len > MHLEN) {
1992 1.1 explorer if (len > MCLBYTES) {
1993 1.1 explorer m_freem(m);
1994 1.1 explorer return NULL;
1995 1.1 explorer }
1996 1.1 explorer MCLGET(m, M_DONTWAIT);
1997 1.1 explorer if ((m->m_flags & M_EXT) == 0) {
1998 1.1 explorer m_freem(m);
1999 1.1 explorer return NULL;
2000 1.1 explorer }
2001 1.1 explorer }
2002 1.8 chopps if (p != NULL) {
2003 1.8 chopps /* XXX do we need to sync here? */
2004 1.8 chopps if (sc->sc_usedma)
2005 1.36 christos memcpy(mtod(m, void *), p, len);
2006 1.8 chopps else
2007 1.8 chopps bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
2008 1.8 chopps sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
2009 1.8 chopps }
2010 1.1 explorer m->m_len = len;
2011 1.1 explorer m->m_pkthdr.len = len;
2012 1.1 explorer
2013 1.1 explorer return (m);
2014 1.1 explorer }
2015 1.8 chopps
2016 1.8 chopps /*
2017 1.8 chopps * get the base clock
2018 1.8 chopps */
2019 1.29 perry void
2020 1.8 chopps sca_get_base_clock(struct sca_softc *sc)
2021 1.8 chopps {
2022 1.8 chopps struct timeval btv, ctv, dtv;
2023 1.8 chopps u_int64_t bcnt;
2024 1.8 chopps u_int32_t cnt;
2025 1.8 chopps u_int16_t subcnt;
2026 1.8 chopps
2027 1.8 chopps /* disable the timer, set prescale to 0 */
2028 1.8 chopps sca_write_1(sc, SCA_TCSR0, 0);
2029 1.8 chopps sca_write_1(sc, SCA_TEPR0, 0);
2030 1.8 chopps
2031 1.8 chopps /* reset the counter */
2032 1.8 chopps (void)sca_read_1(sc, SCA_TCSR0);
2033 1.8 chopps subcnt = sca_read_2(sc, SCA_TCNTL0);
2034 1.8 chopps
2035 1.8 chopps /* count to max */
2036 1.8 chopps sca_write_2(sc, SCA_TCONRL0, 0xffff);
2037 1.8 chopps
2038 1.8 chopps cnt = 0;
2039 1.8 chopps microtime(&btv);
2040 1.8 chopps /* start the timer -- no interrupt enable */
2041 1.8 chopps sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
2042 1.8 chopps for (;;) {
2043 1.8 chopps microtime(&ctv);
2044 1.8 chopps
2045 1.8 chopps /* end around 3/4 of a second */
2046 1.8 chopps timersub(&ctv, &btv, &dtv);
2047 1.8 chopps if (dtv.tv_usec >= 750000)
2048 1.8 chopps break;
2049 1.8 chopps
2050 1.8 chopps /* spin */
2051 1.8 chopps while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
2052 1.8 chopps ;
2053 1.8 chopps /* reset the timer */
2054 1.8 chopps (void)sca_read_2(sc, SCA_TCNTL0);
2055 1.8 chopps cnt++;
2056 1.8 chopps }
2057 1.8 chopps
2058 1.8 chopps /* stop the timer */
2059 1.8 chopps sca_write_1(sc, SCA_TCSR0, 0);
2060 1.8 chopps
2061 1.8 chopps subcnt = sca_read_2(sc, SCA_TCNTL0);
2062 1.8 chopps /* add the slop in and get the total timer ticks */
2063 1.8 chopps cnt = (cnt << 16) | subcnt;
2064 1.8 chopps
2065 1.8 chopps /* cnt is 1/8 the actual time */
2066 1.8 chopps bcnt = cnt * 8;
2067 1.8 chopps /* make it proportional to 3/4 of a second */
2068 1.8 chopps bcnt *= (u_int64_t)750000;
2069 1.8 chopps bcnt /= (u_int64_t)dtv.tv_usec;
2070 1.8 chopps cnt = bcnt;
2071 1.8 chopps
2072 1.8 chopps /* make it Hz */
2073 1.8 chopps cnt *= 4;
2074 1.8 chopps cnt /= 3;
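	/*
	 * Putting the steps together: the counter ticks at 1/8 of the
	 * base clock, so base_Hz = ticks * 8 / elapsed_seconds.  The code
	 * above multiplies by 8, normalizes the total to exactly 750000us
	 * of elapsed time, and then multiplies by 4/3 to turn a
	 * 3/4-second count into a per-second figure.
	 */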
2075 1.8 chopps
2076 1.8 chopps SCA_DPRINTF(SCA_DEBUG_CLOCK,
2077 1.8 chopps ("sca: unadjusted base %lu Hz\n", (u_long)cnt));
2078 1.8 chopps
2079 1.8 chopps /*
2080 1.8 chopps * round to the nearest 200 -- this allows for +-3 ticks error
2081 1.8 chopps */
2082 1.8 chopps sc->sc_baseclock = ((cnt + 100) / 200) * 200;
2083 1.8 chopps }
2084 1.8 chopps
2085 1.8 chopps /*
2086 1.8 chopps * print the information about the clock on the ports
2087 1.8 chopps */
2088 1.8 chopps void
2089 1.8 chopps sca_print_clock_info(struct sca_softc *sc)
2090 1.8 chopps {
2091 1.8 chopps struct sca_port *scp;
2092 1.8 chopps u_int32_t mhz, div;
2093 1.8 chopps int i;
2094 1.8 chopps
2095 1.39 cegger printf("%s: base clock %d Hz\n", device_xname(sc->sc_parent),
2096 1.8 chopps sc->sc_baseclock);
2097 1.8 chopps
2098 1.8 chopps /* print the information about the port clock selection */
2099 1.8 chopps for (i = 0; i < sc->sc_numports; i++) {
2100 1.8 chopps scp = &sc->sc_ports[i];
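		/*
		 * despite its name, "mhz" holds a rate in Hz; a TMC value
		 * of 0 acts as a divisor of 256
		 */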
2101 1.8 chopps mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
2102 1.8 chopps div = scp->sp_rxs & SCA_RXS_DIV_MASK;
2103 1.8 chopps
2104 1.8 chopps printf("%s: rx clock: ", scp->sp_if.if_xname);
2105 1.8 chopps switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
2106 1.8 chopps case SCA_RXS_CLK_LINE:
2107 1.8 chopps printf("line");
2108 1.8 chopps break;
2109 1.8 chopps case SCA_RXS_CLK_LINE_SN:
2110 1.8 chopps printf("line with noise suppression");
2111 1.8 chopps break;
2112 1.8 chopps case SCA_RXS_CLK_INTERNAL:
2113 1.8 chopps printf("internal %d Hz", (mhz >> div));
2114 1.8 chopps break;
2115 1.8 chopps case SCA_RXS_CLK_ADPLL_OUT:
2116 1.8 chopps printf("adpll using internal %d Hz", (mhz >> div));
2117 1.8 chopps break;
2118 1.8 chopps case SCA_RXS_CLK_ADPLL_IN:
2119 1.8 chopps printf("adpll using line clock");
2120 1.8 chopps break;
2121 1.8 chopps }
2122 1.8 chopps printf(" tx clock: ");
2123 1.8 chopps div = scp->sp_txs & SCA_TXS_DIV_MASK;
2124 1.8 chopps switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
2125 1.8 chopps case SCA_TXS_CLK_LINE:
2126 1.8 chopps printf("line\n");
2127 1.8 chopps break;
2128 1.8 chopps case SCA_TXS_CLK_INTERNAL:
2129 1.8 chopps printf("internal %d Hz\n", (mhz >> div));
2130 1.8 chopps break;
2131 1.8 chopps case SCA_TXS_CLK_RXCLK:
2132 1.8 chopps printf("rxclock\n");
2133 1.8 chopps break;
2134 1.8 chopps }
2135 1.8 chopps if (scp->sp_eclock)
2136 1.8 chopps printf("%s: outputting line clock\n",
2137 1.8 chopps scp->sp_if.if_xname);
2138 1.8 chopps }
2139 1.8 chopps }
2140 1.8 chopps
2141