/*	$NetBSD: ralink_eth.c,v 1.13 2017/02/20 08:25:57 ozaki-r Exp $	*/
2 /*-
3 * Copyright (c) 2011 CradlePoint Technology, Inc.
4 * All rights reserved.
5 *
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY CRADLEPOINT TECHNOLOGY, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* ralink_eth.c -- Ralink Ethernet Driver */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: ralink_eth.c,v 1.13 2017/02/20 08:25:57 ozaki-r Exp $");
33
34 #include <sys/param.h>
35 #include <sys/bus.h>
36 #include <sys/callout.h>
37 #include <sys/device.h>
38 #include <sys/endian.h>
39 #include <sys/errno.h>
40 #include <sys/ioctl.h>
41 #include <sys/intr.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/socket.h>
46 #include <sys/systm.h>
47
48 #include <uvm/uvm_extern.h>
49
50 #include <net/if.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_ether.h>
54 #include <net/if_vlanvar.h>
55
56 #include <net/bpf.h>
57
58 #include <dev/mii/mii.h>
59 #include <dev/mii/miivar.h>
60 #include <dev/mii/mii_bitbang.h>
61
62 #include <mips/ralink/ralink_var.h>
63 #include <mips/ralink/ralink_reg.h>
64 #if 0
65 #define CPDEBUG /* XXX TMP DEBUG FIXME */
66 #define RALINK_ETH_DEBUG /* XXX TMP DEBUG FIXME */
67 #define ENABLE_RALINK_DEBUG_ERROR 1
68 #define ENABLE_RALINK_DEBUG_MISC 1
69 #define ENABLE_RALINK_DEBUG_INFO 1
70 #define ENABLE_RALINK_DEBUG_FORCE 1
71 #define ENABLE_RALINK_DEBUG_REG 1
72 #endif
73 #include <mips/ralink/ralink_debug.h>
74
75
/*
 * PDMA RX Descriptor Format
 *
 * Hardware receive descriptor: one buffer pointer plus two packed
 * status/info words.  The RXD_* macros below extract fields from
 * rxd_info1 and rxd_info2.
 */
struct ralink_rx_desc {
	uint32_t data_ptr;	/* DMA address of the receive buffer */
	uint32_t rxd_info1;	/* lengths, last-segment flags, DMA-done */
#define RXD_LEN1(x)	(((x) >> 0) & 0x3fff)	/* buffer 1 byte count (bits 0-13) */
#define RXD_LAST1	(1 << 14)		/* buffer 1 is last segment */
#define RXD_LEN0(x)	(((x) >> 16) & 0x3fff)	/* buffer 0 byte count (bits 16-29) */
#define RXD_LAST0	(1 << 30)		/* buffer 0 is last segment */
#define RXD_DDONE	(1 << 31)		/* descriptor done: owned by CPU */
	uint32_t unused;	/* second buffer pointer slot, unused here */
	uint32_t rxd_info2;	/* FoE/port/checksum status */
#define RXD_FOE(x)	(((x) >> 0) & 0x3fff)	/* FoE table entry index (bits 0-13) */
#define RXD_FVLD	(1 << 14)		/* FoE entry valid */
#define RXD_INFO(x)	(((x) >> 16) & 0xff)	/* info field (bits 16-23) */
#define RXD_PORT(x)	(((x) >> 24) & 0x7)	/* ingress port number (bits 24-26) */
#define RXD_INFO_CPU	(1 << 27)		/* packet destined for CPU */
#define RXD_L4_FAIL	(1 << 28)		/* TCP/UDP checksum failed */
#define RXD_IP_FAIL	(1 << 29)		/* IP checksum failed */
#define RXD_L4_VLD	(1 << 30)		/* TCP/UDP checksum checked */
#define RXD_IP_VLD	(1 << 31)		/* IP checksum checked */
};
97
/*
 * PDMA TX Descriptor Format
 *
 * Hardware transmit descriptor: two buffer pointers plus two packed
 * control words.  The TXD_* macros below build fields of txd_info1
 * and txd_info2.
 */
struct ralink_tx_desc {
	uint32_t data_ptr0;	/* DMA address of first transmit buffer */
	uint32_t txd_info1;	/* lengths, last-segment flags, DMA-done */
#define TXD_LEN1(x)	(((x) & 0x3fff) << 0)	/* buffer 1 byte count (bits 0-13) */
#define TXD_LAST1	(1 << 14)		/* buffer 1 is last segment */
#define TXD_BURST	(1 << 15)		/* burst mode */
#define TXD_LEN0(x)	(((x) & 0x3fff) << 16)	/* buffer 0 byte count (bits 16-29) */
#define TXD_LAST0	(1 << 30)		/* buffer 0 is last segment */
#define TXD_DDONE	(1 << 31)		/* descriptor done: owned by CPU */
	uint32_t data_ptr1;	/* DMA address of second transmit buffer */
	uint32_t txd_info2;	/* VLAN/queue/port/offload control */
#define TXD_VIDX(x)	(((x) & 0xf) << 0)	/* VLAN table index */
#define TXD_VPRI(x)	(((x) & 0x7) << 4)	/* VLAN priority */
#define TXD_VEN		(1 << 7)		/* VLAN insertion enable */
#define TXD_SIDX(x)	(((x) & 0xf) << 8)	/* SA table index */
#define TXD_SEN(x)	(1 << 13)		/* SA insertion enable;
						 * NOTE(review): argument is ignored */
#define TXD_QN(x)	(((x) & 0x7) << 16)	/* transmit queue number */
#define TXD_PN(x)	(((x) & 0x7) << 24)	/* destination port number */
#define TXD_PN_CPU	0			/* port: CPU */
#define TXD_PN_GDMA1	1			/* port: GDMA1 */
#define TXD_PN_GDMA2	2			/* port: GDMA2 */
#define TXD_TCP_EN	(1 << 29)		/* TCP checksum offload */
#define TXD_UDP_EN	(1 << 30)		/* UDP checksum offload */
#define TXD_IP_EN	(1 << 31)		/* IP checksum offload */
};
124
/* TODO:
 * try to scale number of descriptors with size of memory
 * these numbers may have a significant impact on performance/memory/mbuf usage
 */
#if RTMEMSIZE >= 64
#define RALINK_ETH_NUM_RX_DESC 256
#define RALINK_ETH_NUM_TX_DESC 256
#else
#define RALINK_ETH_NUM_RX_DESC 64
#define RALINK_ETH_NUM_TX_DESC 64
#endif
/* maximum segments per packet */
#define RALINK_ETH_MAX_TX_SEGS 1
138
/*
 * define a struct for ease of dma memory allocation
 * (both rings are allocated and DMA-mapped as one contiguous object)
 */
struct ralink_descs {
	struct ralink_rx_desc rxdesc[RALINK_ETH_NUM_RX_DESC];
	struct ralink_tx_desc txdesc[RALINK_ETH_NUM_TX_DESC];
};
144
/* Software state for transmit jobs. */
struct ralink_eth_txstate {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_idx;			/* the index in txdesc ring that */
					/*    this state is tracking */
	SIMPLEQ_ENTRY(ralink_eth_txstate) txs_q;	/* free/dirty queue linkage */
};

/* queue head type for the free and dirty tx-state lists */
SIMPLEQ_HEAD(ralink_eth_txsq, ralink_eth_txstate);
155
156 /*
157 * Software state for receive jobs.
158 */
159 struct ralink_eth_rxstate {
160 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
161 bus_dmamap_t rxs_dmamap; /* our DMA map */
162 };
163
/* per-device driver state */
typedef struct ralink_eth_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_memt;	/* bus space tag */
	bus_space_handle_t sc_sy_memh;	/* handle at SYSCTL_BASE */
	bus_space_handle_t sc_fe_memh;	/* handle at FRAME_ENGINE_BASE */
	bus_space_handle_t sc_sw_memh;	/* handle at ETH_SW_BASE */
	int sc_sy_size;			/* size of Sysctl regs space */
	int sc_fe_size;			/* size of Frame Engine regs space */
	int sc_sw_size;			/* size of Ether Switch regs space */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	void *sc_ih;			/* interrupt handle; NULL when inactive */

	/* tx/rx dma mapping */
	bus_dma_segment_t sc_dseg;	/* descriptor-ring memory segment */
	int sc_ndseg;			/* number of segments actually allocated */
	bus_dmamap_t sc_pdmamap;	/* PDMA DMA map */
#define sc_pdma sc_pdmamap->dm_segs[0].ds_addr	/* bus address of the rings */

	struct ralink_descs *sc_descs;	/* KVA of the descriptor rings */
#define sc_rxdesc sc_descs->rxdesc
#define sc_txdesc sc_descs->txdesc

#define RALINK_MIN_BUF 64
	char ralink_zero_buf[RALINK_MIN_BUF];	/* zero padding for short packets */

	struct ralink_eth_txstate sc_txstate[RALINK_ETH_NUM_TX_DESC];
	struct ralink_eth_rxstate sc_rxstate[RALINK_ETH_NUM_RX_DESC];

	struct ralink_eth_txsq sc_txfreeq;	/* free Tx descsofts */
	struct ralink_eth_txsq sc_txdirtyq;	/* dirty Tx descsofts */

	struct ethercom sc_ethercom;	/* ethernet common data */
	u_int sc_pending_tx;		/* count of tx jobs handed to hardware */

	/* mii */
	struct mii_data sc_mii;
	struct callout sc_tick_callout;	/* periodic mii tick */

	/* event counters, attached in ralink_eth_attach() */
	struct evcnt sc_evcnt_spurious_intr;
	struct evcnt sc_evcnt_rxintr;
	struct evcnt sc_evcnt_rxintr_skip_len;
	struct evcnt sc_evcnt_rxintr_skip_tag_none;
	struct evcnt sc_evcnt_rxintr_skip_tag_inval;
	struct evcnt sc_evcnt_rxintr_skip_inact;
	struct evcnt sc_evcnt_txintr;
	struct evcnt sc_evcnt_input;
	struct evcnt sc_evcnt_output;
	struct evcnt sc_evcnt_watchdog;
	struct evcnt sc_evcnt_wd_reactivate;
	struct evcnt sc_evcnt_wd_tx;
	struct evcnt sc_evcnt_wd_spurious;
	struct evcnt sc_evcnt_add_rxbuf_hdr_fail;
	struct evcnt sc_evcnt_add_rxbuf_mcl_fail;
} ralink_eth_softc_t;
218
219 /* alignment so the IP header is aligned */
220 #define RALINK_ETHER_ALIGN 2
221
222 /* device functions */
223 static int ralink_eth_match(device_t, cfdata_t, void *);
224 static void ralink_eth_attach(device_t, device_t, void *);
225 static int ralink_eth_detach(device_t, int);
226 static int ralink_eth_activate(device_t, enum devact);
227
228 /* local driver functions */
229 static void ralink_eth_hw_init(ralink_eth_softc_t *);
230 static int ralink_eth_intr(void *);
231 static void ralink_eth_reset(ralink_eth_softc_t *);
232 static void ralink_eth_rxintr(ralink_eth_softc_t *);
233 static void ralink_eth_txintr(ralink_eth_softc_t *);
234
235 /* partition functions */
236 static int ralink_eth_enable(ralink_eth_softc_t *);
237 static void ralink_eth_disable(ralink_eth_softc_t *);
238
239 /* ifnet functions */
240 static int ralink_eth_init(struct ifnet *);
241 static void ralink_eth_rxdrain(ralink_eth_softc_t *);
242 static void ralink_eth_stop(struct ifnet *, int);
243 static int ralink_eth_add_rxbuf(ralink_eth_softc_t *, int);
244 static void ralink_eth_start(struct ifnet *);
245 static void ralink_eth_watchdog(struct ifnet *);
246 static int ralink_eth_ioctl(struct ifnet *, u_long, void *);
247
248 /* mii functions */
249 #if defined(RT3050) || defined(RT3052)
250 static void ralink_eth_mdio_enable(ralink_eth_softc_t *, bool);
251 #endif
252 static void ralink_eth_mii_statchg(struct ifnet *);
253 static void ralink_eth_mii_tick(void *);
254 static int ralink_eth_mii_read(device_t, int, int);
255 static void ralink_eth_mii_write(device_t, int, int, int);
256
257 CFATTACH_DECL_NEW(reth, sizeof(struct ralink_eth_softc),
258 ralink_eth_match, ralink_eth_attach, ralink_eth_detach,
259 ralink_eth_activate);
260
261 static inline uint32_t
262 sy_read(const ralink_eth_softc_t *sc, const bus_size_t off)
263 {
264 return bus_space_read_4(sc->sc_memt, sc->sc_sy_memh, off);
265 }
266
267 static inline void
268 sy_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val)
269 {
270 bus_space_write_4(sc->sc_memt, sc->sc_sy_memh, off, val);
271 }
272
273 static inline uint32_t
274 fe_read(const ralink_eth_softc_t *sc, const bus_size_t off)
275 {
276 return bus_space_read_4(sc->sc_memt, sc->sc_fe_memh, off);
277 }
278
279 static inline void
280 fe_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val)
281 {
282 bus_space_write_4(sc->sc_memt, sc->sc_fe_memh, off, val);
283 }
284
285 static inline uint32_t
286 sw_read(const ralink_eth_softc_t *sc, const bus_size_t off)
287 {
288 return bus_space_read_4(sc->sc_memt, sc->sc_sw_memh, off);
289 }
290
291 static inline void
292 sw_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val)
293 {
294 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, off, val);
295 }
296
297 /*
298 * ralink_eth_match
299 */
300 int
301 ralink_eth_match(device_t parent, cfdata_t cf, void *aux)
302 {
303 return 1;
304 }
305
306 /*
307 * ralink_eth_attach
308 */
309 void
310 ralink_eth_attach(device_t parent, device_t self, void *aux)
311 {
312 ralink_eth_softc_t * const sc = device_private(self);
313 const struct mainbus_attach_args *ma = aux;
314 int error;
315 int i;
316
317 aprint_naive(": Ralink Ethernet\n");
318 aprint_normal(": Ralink Ethernet\n");
319
320 evcnt_attach_dynamic(&sc->sc_evcnt_spurious_intr, EVCNT_TYPE_INTR, NULL,
321 device_xname(self), "spurious intr");
322 evcnt_attach_dynamic(&sc->sc_evcnt_rxintr, EVCNT_TYPE_INTR, NULL,
323 device_xname(self), "rxintr");
324 evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_len,
325 EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr,
326 device_xname(self), "rxintr skip: no room for VLAN header");
327 evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_none,
328 EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr,
329 device_xname(self), "rxintr skip: no VLAN tag");
330 evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_inval,
331 EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr,
332 device_xname(self), "rxintr skip: invalid VLAN tag");
333 evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_inact,
334 EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr,
335 device_xname(self), "rxintr skip: partition inactive");
336 evcnt_attach_dynamic(&sc->sc_evcnt_txintr, EVCNT_TYPE_INTR, NULL,
337 device_xname(self), "txintr");
338 evcnt_attach_dynamic(&sc->sc_evcnt_input, EVCNT_TYPE_INTR, NULL,
339 device_xname(self), "input");
340 evcnt_attach_dynamic(&sc->sc_evcnt_output, EVCNT_TYPE_INTR, NULL,
341 device_xname(self), "output");
342 evcnt_attach_dynamic(&sc->sc_evcnt_watchdog, EVCNT_TYPE_INTR, NULL,
343 device_xname(self), "watchdog");
344 evcnt_attach_dynamic(&sc->sc_evcnt_wd_tx,
345 EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog,
346 device_xname(self), "watchdog TX timeout");
347 evcnt_attach_dynamic(&sc->sc_evcnt_wd_spurious,
348 EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog,
349 device_xname(self), "watchdog spurious");
350 evcnt_attach_dynamic(&sc->sc_evcnt_wd_reactivate,
351 EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog,
352 device_xname(self), "watchdog reactivate");
353 evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_hdr_fail,
354 EVCNT_TYPE_INTR, NULL,
355 device_xname(self), "add rxbuf hdr fail");
356 evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_mcl_fail,
357 EVCNT_TYPE_INTR, NULL,
358 device_xname(self), "add rxbuf mcl fail");
359
360 /*
361 * In order to obtain unique initial Ethernet address on a host,
362 * do some randomisation using the current uptime. It's not meant
363 * for anything but avoiding hard-coding an address.
364 */
365 #ifdef RALINK_ETH_MACADDR
366 uint8_t enaddr[ETHER_ADDR_LEN];
367 ether_aton_r(enaddr, sizeof(enaddr), ___STRING(RALINK_ETH_MACADDR));
368 #else
369 uint8_t enaddr[ETHER_ADDR_LEN] = { 0x00, 0x30, 0x44, 0x00, 0x00, 0x00 };
370 #endif
371
372 sc->sc_dev = self;
373 sc->sc_dmat = ma->ma_dmat;
374 sc->sc_memt = ma->ma_memt;
375 sc->sc_sy_size = 0x10000;
376 sc->sc_fe_size = 0x10000;
377 sc->sc_sw_size = 0x08000;
378
379 /*
380 * map the registers
381 *
382 * we map the Sysctl, Frame Engine and Ether Switch registers
383 * seperately so we can use the defined register offsets sanely
384 */
385 if ((error = bus_space_map(sc->sc_memt, RA_SYSCTL_BASE,
386 sc->sc_sy_size, 0, &sc->sc_sy_memh)) != 0) {
387 aprint_error_dev(self, "unable to map Sysctl registers, "
388 "error=%d\n", error);
389 goto fail_0a;
390 }
391 if ((error = bus_space_map(sc->sc_memt, RA_FRAME_ENGINE_BASE,
392 sc->sc_fe_size, 0, &sc->sc_fe_memh)) != 0) {
393 aprint_error_dev(self, "unable to map Frame Engine registers, "
394 "error=%d\n", error);
395 goto fail_0b;
396 }
397 if ((error = bus_space_map(sc->sc_memt, RA_ETH_SW_BASE,
398 sc->sc_sw_size, 0, &sc->sc_sw_memh)) != 0) {
399 aprint_error_dev(self, "unable to map Ether Switch registers, "
400 "error=%d\n", error);
401 goto fail_0c;
402 }
403
404 /* Allocate desc structures, and create & load the DMA map for them */
405 if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ralink_descs),
406 PAGE_SIZE, 0, &sc->sc_dseg, 1, &sc->sc_ndseg, 0)) != 0) {
407 aprint_error_dev(self, "unable to allocate transmit descs, "
408 "error=%d\n", error);
409 goto fail_1;
410 }
411
412 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg,
413 sizeof(struct ralink_descs), (void **)&sc->sc_descs,
414 BUS_DMA_COHERENT)) != 0) {
415 aprint_error_dev(self, "unable to map control data, "
416 "error=%d\n", error);
417 goto fail_2;
418 }
419
420 if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ralink_descs),
421 1, sizeof(struct ralink_descs), 0, 0, &sc->sc_pdmamap)) != 0) {
422 aprint_error_dev(self, "unable to create control data DMA map, "
423 "error=%d\n", error);
424 goto fail_3;
425 }
426
427 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_pdmamap, sc->sc_descs,
428 sizeof(struct ralink_descs), NULL, 0)) != 0) {
429 aprint_error_dev(self, "unable to load control data DMA map, "
430 "error=%d\n", error);
431 goto fail_4;
432 }
433
434 /* Create the transmit buffer DMA maps. */
435 for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) {
436 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
437 RALINK_ETH_MAX_TX_SEGS, MCLBYTES, 0, 0,
438 &sc->sc_txstate[i].txs_dmamap)) != 0) {
439 aprint_error_dev(self,
440 "unable to create tx DMA map %d, error=%d\n",
441 i, error);
442 goto fail_5;
443 }
444 }
445
446 /* Create the receive buffer DMA maps. */
447 for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) {
448 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
449 MCLBYTES, 0, 0, &sc->sc_rxstate[i].rxs_dmamap)) != 0) {
450 aprint_error_dev(self,
451 "unable to create rx DMA map %d, error=%d\n",
452 i, error);
453 goto fail_6;
454 }
455 sc->sc_rxstate[i].rxs_mbuf = NULL;
456 }
457
458 /* this is a zero buffer used for zero'ing out short packets */
459 memset(sc->ralink_zero_buf, 0, RALINK_MIN_BUF);
460
461 /* setup some address in hardware */
462 fe_write(sc, RA_FE_GDMA1_MAC_LSB,
463 (enaddr[5] | (enaddr[4] << 8) |
464 (enaddr[3] << 16) | (enaddr[2] << 24)));
465 fe_write(sc, RA_FE_GDMA1_MAC_MSB,
466 (enaddr[1] | (enaddr[0] << 8)));
467
468 /*
469 * iterate through ports
470 * slickrock must use specific non-linear sequence
471 * others are linear
472 */
473 struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
474
475 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
476
477 /*
478 * Initialize our media structures.
479 * This may probe the PHY, if present.
480 */
481 sc->sc_mii.mii_ifp = ifp;
482 sc->sc_mii.mii_readreg = ralink_eth_mii_read;
483 sc->sc_mii.mii_writereg = ralink_eth_mii_write;
484 sc->sc_mii.mii_statchg = ralink_eth_mii_statchg;
485 sc->sc_ethercom.ec_mii = &sc->sc_mii;
486 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
487 ether_mediastatus);
488 mii_attach(sc->sc_dev, &sc->sc_mii, ~0, MII_PHY_ANY, MII_OFFSET_ANY,
489 MIIF_FORCEANEG|MIIF_DOPAUSE|MIIF_NOISOLATE);
490
491 if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
492 #if 1
493 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|
494 IFM_FDX|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE, 0, NULL);
495 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|
496 IFM_FDX|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE);
497 #else
498 ifmedia_add(&sc->sc_mii.mii_media,
499 IFM_ETHER|IFM_MANUAL, 0, NULL);
500 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
501 #endif
502 } else {
503 /* Ensure we mask ok for the switch multiple phy's */
504 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
505 }
506
507 ifp->if_softc = sc;
508 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
509 ifp->if_init = ralink_eth_init;
510 ifp->if_start = ralink_eth_start;
511 ifp->if_ioctl = ralink_eth_ioctl;
512 ifp->if_stop = ralink_eth_stop;
513 ifp->if_watchdog = ralink_eth_watchdog;
514 IFQ_SET_READY(&ifp->if_snd);
515
516 /* We can support 802.1Q VLAN-sized frames. */
517 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
518
519 /* We support IPV4 CRC Offload */
520 ifp->if_capabilities |=
521 (IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
522 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
523 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx);
524
525 /* Attach the interface. */
526 if_attach(ifp);
527 if_deferred_start_init(ifp, NULL);
528 ether_ifattach(ifp, enaddr);
529
530 /* init our mii ticker */
531 callout_init(&sc->sc_tick_callout, 0);
532 callout_reset(&sc->sc_tick_callout, hz, ralink_eth_mii_tick, sc);
533
534 return;
535
536 /*
537 * Free any resources we've allocated during the failed attach
538 * attempt. Do this in reverse order and fall through.
539 */
540 fail_6:
541 for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) {
542 if (sc->sc_rxstate[i].rxs_dmamap != NULL)
543 bus_dmamap_destroy(sc->sc_dmat,
544 sc->sc_rxstate[i].rxs_dmamap);
545 }
546 fail_5:
547 for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) {
548 if (sc->sc_txstate[i].txs_dmamap != NULL)
549 bus_dmamap_destroy(sc->sc_dmat,
550 sc->sc_txstate[i].txs_dmamap);
551 }
552 bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap);
553 fail_4:
554 bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap);
555 fail_3:
556 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs,
557 sizeof(struct ralink_descs));
558 fail_2:
559 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg);
560 fail_1:
561 bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size);
562 fail_0c:
563 bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size);
564 fail_0b:
565 bus_space_unmap(sc->sc_memt, sc->sc_sy_memh, sc->sc_fe_size);
566 fail_0a:
567 return;
568 }
569
570 /*
571 * ralink_eth_activate:
572 *
573 * Handle device activation/deactivation requests.
574 */
575 int
576 ralink_eth_activate(device_t self, enum devact act)
577 {
578 ralink_eth_softc_t * const sc = device_private(self);
579 int error = 0;
580 int s;
581
582 s = splnet();
583 switch (act) {
584 case DVACT_DEACTIVATE:
585 if_deactivate(&sc->sc_ethercom.ec_if);
586 break;
587 }
588 splx(s);
589
590 return error;
591 }
592
593 /*
594 * ralink_eth_partition_enable
595 */
596 static int
597 ralink_eth_enable(ralink_eth_softc_t *sc)
598 {
599 RALINK_DEBUG_FUNC_ENTRY();
600
601 if (sc->sc_ih != NULL) {
602 RALINK_DEBUG(RALINK_DEBUG_MISC, "%s() already active",
603 __func__);
604 return EALREADY;
605 }
606
607 sc->sc_pending_tx = 0;
608
609 int s = splnet();
610 ralink_eth_hw_init(sc);
611 sc->sc_ih = ra_intr_establish(RA_IRQ_FENGINE,
612 ralink_eth_intr, sc, 1);
613 splx(s);
614 if (sc->sc_ih == NULL) {
615 RALINK_DEBUG(RALINK_DEBUG_ERROR,
616 "%s: unable to establish interrupt\n",
617 device_xname(sc->sc_dev));
618 return EIO;
619 }
620
621 return 0;
622 }
623
624 /*
625 * ralink_eth_partition_disable
626 */
627 static void
628 ralink_eth_disable(ralink_eth_softc_t *sc)
629 {
630 RALINK_DEBUG_FUNC_ENTRY();
631
632 int s = splnet();
633 ralink_eth_rxdrain(sc);
634 ra_intr_disestablish(sc->sc_ih);
635 sc->sc_ih = NULL;
636
637 /* stop the mii ticker */
638 callout_stop(&sc->sc_tick_callout);
639
640 /* quiesce the block */
641 ralink_eth_reset(sc);
642 splx(s);
643 }
644
645 /*
646 * ralink_eth_detach
647 */
648 static int
649 ralink_eth_detach(device_t self, int flags)
650 {
651 RALINK_DEBUG_FUNC_ENTRY();
652 ralink_eth_softc_t * const sc = device_private(self);
653 struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
654 struct ralink_eth_rxstate *rxs;
655 struct ralink_eth_txstate *txs;
656 int i;
657
658 ralink_eth_disable(sc);
659 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
660 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
661 ether_ifdetach(ifp);
662 if_detach(ifp);
663
664 for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) {
665 rxs = &sc->sc_rxstate[i];
666 if (rxs->rxs_mbuf != NULL) {
667 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
668 m_freem(rxs->rxs_mbuf);
669 rxs->rxs_mbuf = NULL;
670 }
671 bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap);
672 }
673
674 for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) {
675 txs = &sc->sc_txstate[i];
676 if (txs->txs_mbuf != NULL) {
677 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
678 m_freem(txs->txs_mbuf);
679 txs->txs_mbuf = NULL;
680 }
681 bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap);
682 }
683
684 bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap);
685 bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap);
686 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs,
687 sizeof(struct ralink_descs));
688 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg);
689
690 bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size);
691 bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size);
692
693 return 0;
694 }
695
696 /*
697 * ralink_eth_reset
698 */
699 static void
700 ralink_eth_reset(ralink_eth_softc_t *sc)
701 {
702 RALINK_DEBUG_FUNC_ENTRY();
703 uint32_t r;
704
705 /* Reset the frame engine */
706 r = sy_read(sc, RA_SYSCTL_RST);
707 r |= RST_FE;
708 sy_write(sc, RA_SYSCTL_RST, r);
709 r ^= RST_FE;
710 sy_write(sc, RA_SYSCTL_RST, r);
711
712 /* Wait until the PDMA is quiescent */
713 for (;;) {
714 r = fe_read(sc, RA_FE_PDMA_GLOBAL_CFG);
715 if (r & FE_PDMA_GLOBAL_CFG_RX_DMA_BUSY) {
716 aprint_normal_dev(sc->sc_dev, "RX DMA BUSY\n");
717 continue;
718 }
719 if (r & FE_PDMA_GLOBAL_CFG_TX_DMA_BUSY) {
720 aprint_normal_dev(sc->sc_dev, "TX DMA BUSY\n");
721 continue;
722 }
723 break;
724 }
725 }
726
727 /*
728 * ralink_eth_hw_init
729 */
730 static void
731 ralink_eth_hw_init(ralink_eth_softc_t *sc)
732 {
733 RALINK_DEBUG_FUNC_ENTRY();
734 struct ralink_eth_txstate *txs;
735 uint32_t r;
736 int i;
737
738 /* reset to a known good state */
739 ralink_eth_reset(sc);
740
741 #if defined(RT3050) || defined(RT3052) || defined(MT7628)
742 /* Bring the switch to a sane default state (from linux driver) */
743 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SGC2,
744 0x00000000);
745 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PFC1,
746 0x00405555); /* check VLAN tag on port forward */
747 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VLANI0,
748 0x00002001);
749 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC0,
750 0x00001002);
751 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC1,
752 0x00001001);
753 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC2,
754 0x00001001);
755 #if defined(MT7628)
756 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VMSC0,
757 0xffffffff);
758 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC0,
759 0x10007f7f);
760 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC2,
761 0x00007f7f);
762 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FTC2,
763 0x0002500c);
764 #else
765 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VMSC0,
766 0xffff417e);
767 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC0,
768 0x00007f7f);
769 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC2,
770 0x00007f3f);
771 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FTC2,
772 0x00d6500c);
773 #endif
774 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SWGC,
775 0x0008a301); /* hashing algorithm=XOR48 */
776 /* aging interval=300sec */
777 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SOCPC,
778 0x02404040);
779 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPORT,
780 0x3f502b28); /* Change polling Ext PHY Addr=0x0 */
781 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPA,
782 0x00000000);
783
784 /* do some mii magic TODO: define these registers/bits */
785 /* lower down PHY 10Mbps mode power */
786 /* select local register */
787 ralink_eth_mii_write(sc->sc_dev, 0, 31, 0x8000);
788
789 for (i=0; i < 5; i++) {
790 /* set TX10 waveform coefficient */
791 ralink_eth_mii_write(sc->sc_dev, i, 26, 0x1601);
792
793 /* set TX100/TX10 AD/DA current bias */
794 ralink_eth_mii_write(sc->sc_dev, i, 29, 0x7058);
795
796 /* set TX100 slew rate control */
797 ralink_eth_mii_write(sc->sc_dev, i, 30, 0x0018);
798 }
799
800 /* PHY IOT */
801
802 /* select global register */
803 ralink_eth_mii_write(sc->sc_dev, 0, 31, 0x0);
804
805 /* tune TP_IDL tail and head waveform */
806 ralink_eth_mii_write(sc->sc_dev, 0, 22, 0x052f);
807
808 /* set TX10 signal amplitude threshold to minimum */
809 ralink_eth_mii_write(sc->sc_dev, 0, 17, 0x0fe0);
810
811 /* set squelch amplitude to higher threshold */
812 ralink_eth_mii_write(sc->sc_dev, 0, 18, 0x40ba);
813
814 /* longer TP_IDL tail length */
815 ralink_eth_mii_write(sc->sc_dev, 0, 14, 0x65);
816
817 /* select local register */
818 ralink_eth_mii_write(sc->sc_dev, 0, 31, 0x8000);
819 #else
820 /* GE1 + GigSW */
821 fe_write(sc, RA_FE_MDIO_CFG1,
822 MDIO_CFG_PHY_ADDR(0x1f) |
823 MDIO_CFG_BP_EN |
824 MDIO_CFG_FORCE_CFG |
825 MDIO_CFG_SPEED(MDIO_CFG_SPEED_1000M) |
826 MDIO_CFG_FULL_DUPLEX |
827 MDIO_CFG_FC_TX |
828 MDIO_CFG_FC_RX |
829 MDIO_CFG_TX_CLK_MODE(MDIO_CFG_TX_CLK_MODE_3COM));
830 #endif
831
832 /*
833 * TODO: QOS - RT3052 has 4 TX queues for QOS,
834 * forgoing for 1 for simplicity
835 */
836
837 /*
838 * Allocate DMA accessible memory for TX/RX descriptor rings
839 */
840
841 /* Initialize the TX queues. */
842 SIMPLEQ_INIT(&sc->sc_txfreeq);
843 SIMPLEQ_INIT(&sc->sc_txdirtyq);
844
845 /* Initialize the TX descriptor ring. */
846 memset(sc->sc_txdesc, 0, sizeof(sc->sc_txdesc));
847 for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) {
848
849 sc->sc_txdesc[i].txd_info1 = TXD_LAST0 | TXD_DDONE;
850
851 /* setup the freeq as well */
852 txs = &sc->sc_txstate[i];
853 txs->txs_mbuf = NULL;
854 txs->txs_idx = i;
855 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
856 }
857
858 /*
859 * Flush the TX descriptors
860 * - TODO: can we just access descriptors via KSEG1
861 * to avoid the flush?
862 */
863 bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap,
864 (int)&sc->sc_txdesc - (int)sc->sc_descs, sizeof(sc->sc_txdesc),
865 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
866
867 /* Initialize the RX descriptor ring */
868 memset(sc->sc_rxdesc, 0, sizeof(sc->sc_rxdesc));
869 for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) {
870 if (ralink_eth_add_rxbuf(sc, i)) {
871 panic("Can't allocate rx mbuf\n");
872 }
873 }
874
875 /*
876 * Flush the RX descriptors
877 * - TODO: can we just access descriptors via KSEG1
878 * to avoid the flush?
879 */
880 bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap,
881 (int)&sc->sc_rxdesc - (int)sc->sc_descs, sizeof(sc->sc_rxdesc),
882 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
883
884 /* Clear the PDMA state */
885 r = fe_read(sc, RA_FE_PDMA_GLOBAL_CFG);
886 r &= 0xff;
887 fe_write(sc, RA_FE_PDMA_GLOBAL_CFG, r);
888 (void) fe_read(sc, RA_FE_PDMA_GLOBAL_CFG);
889
890 #if !defined(MT7628)
891 /* Setup the PDMA VLAN ID's */
892 fe_write(sc, RA_FE_VLAN_ID_0001, 0x00010000);
893 fe_write(sc, RA_FE_VLAN_ID_0203, 0x00030002);
894 fe_write(sc, RA_FE_VLAN_ID_0405, 0x00050004);
895 fe_write(sc, RA_FE_VLAN_ID_0607, 0x00070006);
896 fe_write(sc, RA_FE_VLAN_ID_0809, 0x00090008);
897 fe_write(sc, RA_FE_VLAN_ID_1011, 0x000b000a);
898 fe_write(sc, RA_FE_VLAN_ID_1213, 0x000d000c);
899 fe_write(sc, RA_FE_VLAN_ID_1415, 0x000f000e);
900 #endif
901
902 /* Give the TX and TX rings to the chip. */
903 fe_write(sc, RA_FE_PDMA_TX0_PTR,
904 htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_txdesc)));
905 fe_write(sc, RA_FE_PDMA_TX0_COUNT, htole32(RALINK_ETH_NUM_TX_DESC));
906 fe_write(sc, RA_FE_PDMA_TX0_CPU_IDX, 0);
907 #if !defined(MT7628)
908 fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_TX0);
909 #endif
910
911 fe_write(sc, RA_FE_PDMA_RX0_PTR,
912 htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_rxdesc)));
913 fe_write(sc, RA_FE_PDMA_RX0_COUNT, htole32(RALINK_ETH_NUM_RX_DESC));
914 fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX,
915 htole32(RALINK_ETH_NUM_RX_DESC - 1));
916 #if !defined(MT7628)
917 fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_RX0);
918 #endif
919 fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX,
920 htole32(RALINK_ETH_NUM_RX_DESC - 1));
921
922 /* Start PDMA */
923 fe_write(sc, RA_FE_PDMA_GLOBAL_CFG,
924 FE_PDMA_GLOBAL_CFG_TX_WB_DDONE |
925 FE_PDMA_GLOBAL_CFG_RX_DMA_EN |
926 FE_PDMA_GLOBAL_CFG_TX_DMA_EN |
927 FE_PDMA_GLOBAL_CFG_BURST_SZ_4);
928
929 /* Setup the clock for the Frame Engine */
930 #if defined(MT7628)
931 fe_write(sc, RA_FE_SDM_CON, 0x8100);
932 #else
933 fe_write(sc, RA_FE_GLOBAL_CFG,
934 FE_GLOBAL_CFG_EXT_VLAN(0x8100) |
935 FE_GLOBAL_CFG_US_CLK(RA_BUS_FREQ / 1000000) |
936 FE_GLOBAL_CFG_L2_SPACE(0x8));
937 #endif
938
939 /* Turn on all interrupts */
940 #if defined(MT7628)
941 fe_write(sc, RA_FE_INT_MASK,
942 RA_FE_INT_RX_DONE_INT1 |
943 RA_FE_INT_RX_DONE_INT0 |
944 RA_FE_INT_TX_DONE_INT3 |
945 RA_FE_INT_TX_DONE_INT2 |
946 RA_FE_INT_TX_DONE_INT1 |
947 RA_FE_INT_TX_DONE_INT0);
948 #else
949 fe_write(sc, RA_FE_INT_ENABLE,
950 FE_INT_RX | FE_INT_TX3 | FE_INT_TX2 | FE_INT_TX1 | FE_INT_TX0);
951 #endif
952
953 /*
954 * Configure GDMA forwarding
955 * - default all packets to CPU
956 * - Turn on auto-CRC
957 */
958 #if 0
959 fe_write(sc, RA_FE_GDMA1_FWD_CFG,
960 (FE_GDMA_FWD_CFG_DIS_TX_CRC | FE_GDMA_FWD_CFG_DIS_TX_PAD));
961 #endif
962
963 #if !defined(MT7628)
964 fe_write(sc, RA_FE_GDMA1_FWD_CFG,
965 FE_GDMA_FWD_CFG_JUMBO_LEN(MCLBYTES/1024) |
966 FE_GDMA_FWD_CFG_STRIP_RX_CRC |
967 FE_GDMA_FWD_CFG_IP4_CRC_EN |
968 FE_GDMA_FWD_CFG_TCP_CRC_EN |
969 FE_GDMA_FWD_CFG_UDP_CRC_EN);
970 #endif
971
972 /* CDMA also needs CRCs turned on */
973 #if !defined(MT7628)
974 r = fe_read(sc, RA_FE_CDMA_CSG_CFG);
975 r |= (FE_CDMA_CSG_CFG_IP4_CRC_EN | FE_CDMA_CSG_CFG_UDP_CRC_EN |
976 FE_CDMA_CSG_CFG_TCP_CRC_EN);
977 fe_write(sc, RA_FE_CDMA_CSG_CFG, r);
978 #endif
979
980 /* Configure Flow Control Thresholds */
981 #if defined(MT7628)
982 sw_write(sc, RA_ETH_SW_FCT0,
983 RA_ETH_SW_FCT0_FC_RLS_TH(0xc8) |
984 RA_ETH_SW_FCT0_FC_SET_TH(0xa0) |
985 RA_ETH_SW_FCT0_DROP_RLS_TH(0x78) |
986 RA_ETH_SW_FCT0_DROP_SET_TH(0x50));
987 sw_write(sc, RA_ETH_SW_FCT1,
988 RA_ETH_SW_FCT1_PORT_TH(0x14));
989 #elif defined(RT3883)
990 fe_write(sc, RA_FE_PSE_FQ_CFG,
991 FE_PSE_FQ_MAX_COUNT(0xff) |
992 FE_PSE_FQ_FC_RELEASE(0x90) |
993 FE_PSE_FQ_FC_ASSERT(0x80));
994 #else
995 fe_write(sc, RA_FE_PSE_FQ_CFG,
996 FE_PSE_FQ_MAX_COUNT(0x80) |
997 FE_PSE_FQ_FC_RELEASE(0x50) |
998 FE_PSE_FQ_FC_ASSERT(0x40));
999 #endif
1000
1001 #ifdef RALINK_ETH_DEBUG
1002 #ifdef RA_FE_MDIO_CFG1
1003 printf("FE_MDIO_CFG1: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG1));
1004 #endif
1005 #ifdef RA_FE_MDIO_CFG2
1006 printf("FE_MDIO_CFG2: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG2));
1007 #endif
1008 printf("FE_PDMA_TX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_TX0_PTR));
1009 printf("FE_PDMA_TX0_COUNT: %08x\n",
1010 fe_read(sc, RA_FE_PDMA_TX0_COUNT));
1011 printf("FE_PDMA_TX0_CPU_IDX: %08x\n",
1012 fe_read(sc, RA_FE_PDMA_TX0_CPU_IDX));
1013 printf("FE_PDMA_TX0_DMA_IDX: %08x\n",
1014 fe_read(sc, RA_FE_PDMA_TX0_DMA_IDX));
1015 printf("FE_PDMA_RX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_RX0_PTR));
1016 printf("FE_PDMA_RX0_COUNT: %08x\n",
1017 fe_read(sc, RA_FE_PDMA_RX0_COUNT));
1018 printf("FE_PDMA_RX0_CPU_IDX: %08x\n",
1019 fe_read(sc, RA_FE_PDMA_RX0_CPU_IDX));
1020 printf("FE_PDMA_RX0_DMA_IDX: %08x\n",
1021 fe_read(sc, RA_FE_PDMA_RX0_DMA_IDX));
1022 printf("FE_PDMA_GLOBAL_CFG: %08x\n",
1023 fe_read(sc, RA_FE_PDMA_GLOBAL_CFG));
1024 #ifdef RA_FE_GLOBAL_CFG
1025 printf("FE_GLOBAL_CFG: %08x\n", fe_read(sc, RA_FE_GLOBAL_CFG));
1026 #endif
1027 #ifdef RA_FE_GDMA1_FWD_CFG
1028 printf("FE_GDMA1_FWD_CFG: %08x\n",
1029 fe_read(sc, RA_FE_GDMA1_FWD_CFG));
1030 #endif
1031 #ifdef RA_FE_CDMA_CSG_CFG
1032 printf("FE_CDMA_CSG_CFG: %08x\n", fe_read(sc, RA_FE_CDMA_CSG_CFG));
1033 #endif
1034 #ifdef RA_FE_PSE_FQ_CFG
1035 printf("FE_PSE_FQ_CFG: %08x\n", fe_read(sc, RA_FE_PSE_FQ_CFG));
1036 #endif
1037 #endif
1038
1039 /* Force PSE Reset to get everything finalized */
1040 #if defined(MT7628)
1041 #else
1042 fe_write(sc, RA_FE_GLOBAL_RESET, FE_GLOBAL_RESET_PSE);
1043 fe_write(sc, RA_FE_GLOBAL_RESET, 0);
1044 #endif
1045 }
1046
1047 /*
1048 * ralink_eth_init
1049 */
1050 static int
1051 ralink_eth_init(struct ifnet *ifp)
1052 {
1053 RALINK_DEBUG_FUNC_ENTRY();
1054 ralink_eth_softc_t * const sc = ifp->if_softc;
1055 int error;
1056
1057 error = ralink_eth_enable(sc);
1058 if (!error) {
1059 /* Note that the interface is now running. */
1060 ifp->if_flags |= IFF_RUNNING;
1061 ifp->if_flags &= ~IFF_OACTIVE;
1062 }
1063
1064 return error;
1065 }
1066
1067 /*
1068 * ralink_eth_rxdrain
1069 *
1070 * Drain the receive queue.
1071 */
1072 static void
1073 ralink_eth_rxdrain(ralink_eth_softc_t *sc)
1074 {
1075 RALINK_DEBUG_FUNC_ENTRY();
1076
1077 for (int i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) {
1078 struct ralink_eth_rxstate *rxs = &sc->sc_rxstate[i];
1079 if (rxs->rxs_mbuf != NULL) {
1080 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1081 m_freem(rxs->rxs_mbuf);
1082 rxs->rxs_mbuf = NULL;
1083 }
1084 }
1085 }
1086
1087 /*
1088 * ralink_eth_stop
1089 */
1090 static void
1091 ralink_eth_stop(struct ifnet *ifp, int disable)
1092 {
1093 RALINK_DEBUG_FUNC_ENTRY();
1094 ralink_eth_softc_t * const sc = ifp->if_softc;
1095
1096 ralink_eth_disable(sc);
1097
1098 /* Mark the interface down and cancel the watchdog timer. */
1099 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1100 ifp->if_timer = 0;
1101 }
1102
1103 /*
1104 * ralink_eth_add_rxbuf
1105 */
1106 static int
1107 ralink_eth_add_rxbuf(ralink_eth_softc_t *sc, int idx)
1108 {
1109 RALINK_DEBUG_FUNC_ENTRY();
1110 struct ralink_eth_rxstate * const rxs = &sc->sc_rxstate[idx];
1111 struct mbuf *m;
1112 int error;
1113
1114 MGETHDR(m, M_DONTWAIT, MT_DATA);
1115 if (m == NULL) {
1116 printf("MGETHDR failed\n");
1117 sc->sc_evcnt_add_rxbuf_hdr_fail.ev_count++;
1118 return ENOBUFS;
1119 }
1120
1121 MCLGET(m, M_DONTWAIT);
1122 if ((m->m_flags & M_EXT) == 0) {
1123 m_freem(m);
1124 printf("MCLGET failed\n");
1125 sc->sc_evcnt_add_rxbuf_mcl_fail.ev_count++;
1126 return ENOBUFS;
1127 }
1128
1129 m->m_data = m->m_ext.ext_buf;
1130 rxs->rxs_mbuf = m;
1131
1132 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
1133 m->m_ext.ext_size, NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
1134 if (error) {
1135 aprint_error_dev(sc->sc_dev, "can't load rx DMA map %d, "
1136 "error=%d\n", idx, error);
1137 panic(__func__); /* XXX */
1138 }
1139
1140 sc->sc_rxdesc[idx].data_ptr = MIPS_KSEG0_TO_PHYS(
1141 rxs->rxs_dmamap->dm_segs[0].ds_addr + RALINK_ETHER_ALIGN);
1142 sc->sc_rxdesc[idx].rxd_info1 = RXD_LAST0;
1143
1144 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1145 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1146
1147 return 0;
1148 }
1149
1150
1151 /*
1152 * ralink_eth_start
1153 */
1154 static void
1155 ralink_eth_start(struct ifnet *ifp)
1156 {
1157 RALINK_DEBUG_FUNC_ENTRY();
1158 ralink_eth_softc_t * const sc = ifp->if_softc;
1159 struct mbuf *m0, *m = NULL;
1160 struct ralink_eth_txstate *txs;
1161 bus_dmamap_t dmamap;
1162 int tx_cpu_idx;
1163 int error;
1164 int s;
1165
1166 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1167 return;
1168
1169 s = splnet();
1170
1171 tx_cpu_idx = fe_read(sc, RA_FE_PDMA_TX0_CPU_IDX);
1172
1173 /*
1174 * Loop through the send queue, setting up transmit descriptors
1175 * until we drain the queue, or use up all available
1176 * transmit descriptors.
1177 */
1178 while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL) {
1179 /* Grab a packet off the queue. */
1180 IFQ_POLL(&ifp->if_snd, m0);
1181 if (m0 == NULL)
1182 break;
1183
1184 dmamap = txs->txs_dmamap;
1185
1186 if (m0->m_pkthdr.len < RALINK_MIN_BUF) {
1187 int padlen = 64 - m0->m_pkthdr.len;
1188 m_copyback(m0, m0->m_pkthdr.len, padlen,
1189 sc->ralink_zero_buf);
1190 /* TODO : need some checking here */
1191 }
1192
1193 /*
1194 * Do we need to align the buffer
1195 * or does the DMA map load fail?
1196 */
1197 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1198 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1199
1200 /* Allocate a new mbuf for re-alignment */
1201 MGETHDR(m, M_DONTWAIT, MT_DATA);
1202 if (m == NULL) {
1203 aprint_error_dev(sc->sc_dev,
1204 "unable to allocate aligned Tx mbuf\n");
1205 break;
1206 }
1207 MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
1208 if (m0->m_pkthdr.len > MHLEN) {
1209 MCLGET(m, M_DONTWAIT);
1210 if ((m->m_flags & M_EXT) == 0) {
1211 aprint_error_dev(sc->sc_dev,
1212 "unable to allocate Tx cluster\n");
1213 m_freem(m);
1214 break;
1215 }
1216 }
1217 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
1218 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1219 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m,
1220 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1221 if (error) {
1222 aprint_error_dev(sc->sc_dev,
1223 "unable to load Tx buffer error=%d\n",
1224 error);
1225 m_freem(m);
1226 break;
1227 }
1228 }
1229
1230 IFQ_DEQUEUE(&ifp->if_snd, m0);
1231 /* did we copy the buffer out already? */
1232 if (m != NULL) {
1233 m_freem(m0);
1234 m0 = m;
1235 }
1236
1237 /* Sync the DMA map. */
1238 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1239 BUS_DMASYNC_PREWRITE);
1240
1241 /* Initialize the transmit descriptor */
1242 sc->sc_txdesc[tx_cpu_idx].data_ptr0 =
1243 MIPS_KSEG0_TO_PHYS(dmamap->dm_segs[0].ds_addr);
1244 sc->sc_txdesc[tx_cpu_idx].txd_info1 =
1245 TXD_LEN0(dmamap->dm_segs[0].ds_len) | TXD_LAST0;
1246 sc->sc_txdesc[tx_cpu_idx].txd_info2 =
1247 TXD_QN(3) | TXD_PN(TXD_PN_GDMA1);
1248 sc->sc_txdesc[tx_cpu_idx].txd_info2 = TXD_QN(3) |
1249 TXD_PN(TXD_PN_GDMA1) | TXD_VEN |
1250 // TXD_VIDX(pt->vlan_id) |
1251 TXD_TCP_EN | TXD_UDP_EN | TXD_IP_EN;
1252
1253 RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n",
1254 tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].data_ptr0,
1255 sc->sc_txdesc[tx_cpu_idx].data_ptr0);
1256 RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n",
1257 tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].txd_info1,
1258 sc->sc_txdesc[tx_cpu_idx].txd_info1);
1259 RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n",
1260 tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].data_ptr1,
1261 sc->sc_txdesc[tx_cpu_idx].data_ptr1);
1262 RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n",
1263 tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].txd_info2,
1264 sc->sc_txdesc[tx_cpu_idx].txd_info2);
1265
1266 /* sync the descriptor we're using. */
1267 bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap,
1268 (int)&sc->sc_txdesc[tx_cpu_idx] - (int)sc->sc_descs,
1269 sizeof(struct ralink_tx_desc),
1270 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1271
1272 /*
1273 * Store a pointer to the packet so we can free it later,
1274 * and remember what txdirty will be once the packet is
1275 * done.
1276 */
1277 txs->txs_mbuf = m0;
1278 sc->sc_pending_tx++;
1279 if (txs->txs_idx != tx_cpu_idx) {
1280 panic("txs_idx doesn't match %d != %d\n",
1281 txs->txs_idx, tx_cpu_idx);
1282 }
1283
1284 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1285 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1286
1287 /* Pass the packet to any BPF listeners. */
1288 bpf_mtap(ifp, m0);
1289
1290 /* Set a watchdog timer in case the chip flakes out. */
1291 ifp->if_timer = 5;
1292
1293 tx_cpu_idx = (tx_cpu_idx + 1) % RALINK_ETH_NUM_TX_DESC;
1294
1295 /* Write back the tx_cpu_idx */
1296 fe_write(sc, RA_FE_PDMA_TX0_CPU_IDX, tx_cpu_idx);
1297 }
1298
1299 if (txs == NULL) {
1300 /* No more slots left; notify upper layer. */
1301 ifp->if_flags |= IFF_OACTIVE;
1302 }
1303
1304 splx(s);
1305 }
1306
1307 /*
1308 * ralink_eth_watchdog
1309 *
1310 * Watchdog timer handler.
1311 */
1312 static void
1313 ralink_eth_watchdog(struct ifnet *ifp)
1314 {
1315 RALINK_DEBUG_FUNC_ENTRY();
1316 ralink_eth_softc_t * const sc = ifp->if_softc;
1317 bool doing_transmit;
1318
1319 sc->sc_evcnt_watchdog.ev_count++;
1320 doing_transmit = !SIMPLEQ_EMPTY(&sc->sc_txdirtyq);
1321
1322 if (doing_transmit) {
1323 RALINK_DEBUG(RALINK_DEBUG_ERROR, "%s: transmit timeout\n",
1324 ifp->if_xname);
1325 ifp->if_oerrors++;
1326 sc->sc_evcnt_wd_tx.ev_count++;
1327 } else {
1328 RALINK_DEBUG(RALINK_DEBUG_ERROR,
1329 "%s: spurious watchog timeout\n", ifp->if_xname);
1330 sc->sc_evcnt_wd_spurious.ev_count++;
1331 return;
1332 }
1333
1334 sc->sc_evcnt_wd_reactivate.ev_count++;
1335 const int s = splnet();
1336 /* deactive the active partitions, retaining the active information */
1337 ralink_eth_disable(sc);
1338 ralink_eth_enable(sc);
1339 splx(s);
1340
1341 /* Try to get more packets going. */
1342 ralink_eth_start(ifp);
1343 }
1344
1345 /*
1346 * ralink_eth_ioctl
1347 *
1348 * Handle control requests from the operator.
1349 */
1350 static int
1351 ralink_eth_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1352 {
1353 RALINK_DEBUG_FUNC_ENTRY();
1354 struct ifdrv * const ifd = (struct ifdrv *) data;
1355 ralink_eth_softc_t * const sc = ifp->if_softc;
1356 int s, error = 0;
1357
1358 RALINK_DEBUG(RALINK_DEBUG_INFO, "ifp: %p cmd: %lu data: %p\n",
1359 ifp, cmd, data);
1360
1361 s = splnet();
1362
1363 switch (cmd) {
1364 case SIOCSDRVSPEC:
1365 switch (ifd->ifd_cmd) {
1366 #if 0
1367 case ETH_SWITCH_CMD_PORT_MODE:
1368 /* len parameter is the mode */
1369 pt->mode = (int) ifd->ifd_len;
1370 ralink_eth_configure_switch(pt->sc_reth);
1371 break;
1372 #endif
1373 default:
1374 error = EINVAL;
1375 }
1376 break;
1377 default:
1378 error = ether_ioctl(ifp, cmd, data);
1379 if (error == ENETRESET) {
1380 if (ifp->if_flags & IFF_RUNNING) {
1381 /*
1382 * Multicast list has changed. Set the
1383 * hardware filter accordingly.
1384 */
1385 RALINK_DEBUG(RALINK_DEBUG_INFO, "TODO!!!");
1386 #if 0
1387 ralink_eth_filter_setup(sc);
1388 #endif
1389 }
1390 error = 0;
1391 }
1392 break;
1393 }
1394
1395 splx(s);
1396
1397 /* Try to get more packets going. */
1398 if (sc->sc_ih != NULL)
1399 ralink_eth_start(ifp);
1400
1401 return error;
1402 }
1403
1404 /*
1405 * ralink_eth_intr
1406 *
1407 */
1408 static int
1409 ralink_eth_intr(void *arg)
1410 {
1411 RALINK_DEBUG_FUNC_ENTRY();
1412 ralink_eth_softc_t * const sc = arg;
1413
1414 for (u_int n = 0;; n = 1) {
1415 u_int32_t status = fe_read(sc, RA_FE_INT_STATUS);
1416 fe_write(sc, RA_FE_INT_STATUS, ~0);
1417 RALINK_DEBUG(RALINK_DEBUG_REG,"%s() status: 0x%08x\n",
1418 __func__, status);
1419 #if defined(MT7628)
1420 if ((status & (RA_FE_INT_RX_DONE_INT1 | RA_FE_INT_RX_DONE_INT0 |
1421 RA_FE_INT_TX_DONE_INT3 | RA_FE_INT_TX_DONE_INT2 |
1422 RA_FE_INT_TX_DONE_INT1 | RA_FE_INT_TX_DONE_INT0)) == 0) {
1423 if (n == 0)
1424 sc->sc_evcnt_spurious_intr.ev_count++;
1425 return (n != 0);
1426 }
1427
1428 if (status & (RA_FE_INT_RX_DONE_INT1|RA_FE_INT_RX_DONE_INT0))
1429 ralink_eth_rxintr(sc);
1430
1431 if (status & (RA_FE_INT_TX_DONE_INT3 | RA_FE_INT_TX_DONE_INT2 |
1432 RA_FE_INT_TX_DONE_INT1 | RA_FE_INT_TX_DONE_INT0))
1433 ralink_eth_txintr(sc);
1434 #else
1435 if ((status & (FE_INT_RX | FE_INT_TX0)) == 0) {
1436 if (n == 0)
1437 sc->sc_evcnt_spurious_intr.ev_count++;
1438 return (n != 0);
1439 }
1440
1441 if (status & FE_INT_RX)
1442 ralink_eth_rxintr(sc);
1443
1444 if (status & FE_INT_TX0)
1445 ralink_eth_txintr(sc);
1446 #endif
1447 }
1448
1449 /* Try to get more packets going. */
1450 if_schedule_deferred_start(&sc->sc_ethercom.ec_if);
1451
1452 return 1;
1453 }
1454
1455 /*
1456 * ralink_eth_rxintr
1457 */
1458 static void
1459 ralink_eth_rxintr(ralink_eth_softc_t *sc)
1460 {
1461 RALINK_DEBUG_FUNC_ENTRY();
1462 struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
1463 struct ralink_eth_rxstate *rxs;
1464 struct mbuf *m;
1465 int len;
1466 int rx_cpu_idx;
1467
1468 KASSERT(curcpu()->ci_cpl >= IPL_NET);
1469 sc->sc_evcnt_rxintr.ev_count++;
1470 rx_cpu_idx = fe_read(sc, RA_FE_PDMA_RX0_CPU_IDX);
1471
1472 for (;;) {
1473 rx_cpu_idx = (rx_cpu_idx + 1) % RALINK_ETH_NUM_RX_DESC;
1474
1475 rxs = &sc->sc_rxstate[rx_cpu_idx];
1476
1477 bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap,
1478 (int)&sc->sc_rxdesc[rx_cpu_idx] - (int)sc->sc_descs,
1479 sizeof(struct ralink_rx_desc),
1480 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1481
1482 RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n",
1483 rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].data_ptr,
1484 sc->sc_rxdesc[rx_cpu_idx].data_ptr);
1485 RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n",
1486 rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].rxd_info1,
1487 sc->sc_rxdesc[rx_cpu_idx].rxd_info1);
1488 RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n",
1489 rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].unused,
1490 sc->sc_rxdesc[rx_cpu_idx].unused);
1491 RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n",
1492 rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].rxd_info2,
1493 sc->sc_rxdesc[rx_cpu_idx].rxd_info2);
1494
1495 if (!(sc->sc_rxdesc[rx_cpu_idx].rxd_info1 & RXD_DDONE))
1496 break;
1497
1498 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1499 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1500
1501 /*
1502 * No errors; receive the packet.
1503 * Note the chip includes the CRC with every packet.
1504 */
1505 len = RXD_LEN0(sc->sc_rxdesc[rx_cpu_idx].rxd_info1);
1506
1507 RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) packet rx %d bytes\n",
1508 rx_cpu_idx, len);
1509
1510 /*
1511 * Allocate a new mbuf cluster. If that fails, we are
1512 * out of memory, and must drop the packet and recycle
1513 * the buffer that's already attached to this descriptor.
1514 */
1515 m = rxs->rxs_mbuf;
1516 if (ralink_eth_add_rxbuf(sc, rx_cpu_idx) != 0)
1517 break;
1518 m->m_data += RALINK_ETHER_ALIGN;
1519 m->m_pkthdr.len = m->m_len = len;
1520
1521 #ifdef RALINK_ETH_DEBUG
1522 {
1523 struct ether_header *eh = mtod(m, struct ether_header *);
1524 printf("rx: eth_dst: %s ", ether_sprintf(eh->ether_dhost));
1525 printf("rx: eth_src: %s type: 0x%04x \n",
1526 ether_sprintf(eh->ether_shost), ntohs(eh->ether_type));
1527 printf("0x14: %08x\n", *(volatile unsigned int *)(0xb0110014));
1528 printf("0x98: %08x\n", *(volatile unsigned int *)(0xb0110098));
1529
1530 unsigned char * s = mtod(m, unsigned char *);
1531 for (int j = 0; j < 32; j++)
1532 printf("%02x%c", *(s + j),
1533 (j == 15 || j == 31) ? '\n' : ' ');
1534 }
1535 #endif
1536
1537 /*
1538 * claim the buffer here since we can't do it at
1539 * allocation time due to the SW partitions
1540 */
1541 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
1542
1543 /* push it up the inteface */
1544 m_set_rcvif(m, ifp);
1545
1546 #ifdef RALINK_ETH_DEBUG
1547 {
1548 struct ether_header *eh = mtod(m, struct ether_header *);
1549 printf("rx: eth_dst: %s ", ether_sprintf(eh->ether_dhost));
1550 printf("rx: eth_src: %s type: 0x%04x\n",
1551 ether_sprintf(eh->ether_shost), ntohs(eh->ether_type));
1552 printf("0x14: %08x\n", *(volatile unsigned int *)(0xb0110014));
1553 printf("0x98: %08x\n", *(volatile unsigned int *)(0xb0110098));
1554
1555 unsigned char * s = mtod(m, unsigned char *);
1556 for (int j = 0; j < 32; j++)
1557 printf("%02x%c", *(s + j),
1558 (j == 15 || j == 31) ? '\n' : ' ');
1559 }
1560 #endif
1561
1562 /*
1563 * XXX: M_CSUM_TCPv4 and M_CSUM_UDPv4 do not currently work when
1564 * using PF's ROUTETO option for load balancing.
1565 */
1566 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1567
1568 /* Pass it on. */
1569 sc->sc_evcnt_input.ev_count++;
1570 if_percpuq_enqueue(ifp->if_percpuq, m);
1571
1572 fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX, rx_cpu_idx);
1573 }
1574 }
1575
1576 /*
1577 * ralink_eth_txintr
1578 */
1579 static void
1580 ralink_eth_txintr(ralink_eth_softc_t *sc)
1581 {
1582 RALINK_DEBUG_FUNC_ENTRY();
1583 struct ralink_eth_txstate *txs;
1584
1585 KASSERT(curcpu()->ci_cpl >= IPL_NET);
1586 sc->sc_evcnt_txintr.ev_count++;
1587
1588 /*
1589 * Go through our Tx list and free mbufs for those
1590 * frames that have been transmitted.
1591 */
1592 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1593 bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap,
1594 (int)&sc->sc_txdesc[txs->txs_idx] - (int)sc->sc_descs,
1595 sizeof(struct ralink_tx_desc),
1596 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1597
1598 RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n",
1599 txs->txs_idx, (int)&sc->sc_txdesc[txs->txs_idx].data_ptr0,
1600 sc->sc_txdesc[txs->txs_idx].data_ptr0);
1601 RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n",
1602 txs->txs_idx, (int)&sc->sc_txdesc[txs->txs_idx].txd_info1,
1603 sc->sc_txdesc[txs->txs_idx].txd_info1);
1604 RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n",
1605 txs->txs_idx, (int)&sc->sc_txdesc[txs->txs_idx].data_ptr1,
1606 sc->sc_txdesc[txs->txs_idx].data_ptr1);
1607 RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n",
1608 txs->txs_idx, (int)&sc->sc_txdesc[txs->txs_idx].txd_info2,
1609 sc->sc_txdesc[txs->txs_idx].txd_info2);
1610
1611 /* we're finished if the current tx isn't done */
1612 if (!(sc->sc_txdesc[txs->txs_idx].txd_info1 & TXD_DDONE))
1613 break;
1614
1615 RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) transmitted\n",
1616 txs->txs_idx);
1617
1618 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1619
1620 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0,
1621 txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1622 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1623 m_freem(txs->txs_mbuf);
1624 txs->txs_mbuf = NULL;
1625
1626 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1627
1628 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1629 ifp->if_flags &= ~IFF_OACTIVE;
1630 ifp->if_opackets++;
1631 sc->sc_evcnt_output.ev_count++;
1632
1633 if (--sc->sc_pending_tx == 0)
1634 ifp->if_timer = 0;
1635 }
1636 }
1637
1638 /*
1639 * ralink_eth_mdio_enable
1640 */
1641 #if defined(RT3050) || defined(RT3052)
1642 static void
1643 ralink_eth_mdio_enable(ralink_eth_softc_t *sc, bool enable)
1644 {
1645 uint32_t data = sy_read(sc, RA_SYSCTL_GPIOMODE);
1646
1647 if (enable)
1648 data &= ~GPIOMODE_MDIO;
1649 else
1650 data |= GPIOMODE_MDIO;
1651
1652 sy_write(sc, RA_SYSCTL_GPIOMODE, data);
1653 }
1654 #else
1655 #define ralink_eth_mdio_enable(sc, enable)
1656 #endif
1657
1658 /*
1659 * ralink_eth_mii_statchg
1660 */
1661 static void
1662 ralink_eth_mii_statchg(struct ifnet *ifp)
1663 {
1664 #if 0
1665 ralink_eth_softc_t * const sc = ifp->if_softc;
1666
1667 #endif
1668 }
1669
1670 /*
1671 * ralink_eth_mii_tick
1672 *
1673 * One second timer, used to tick the MIIs.
1674 */
1675 static void
1676 ralink_eth_mii_tick(void *arg)
1677 {
1678 ralink_eth_softc_t * const sc = arg;
1679
1680 const int s = splnet();
1681 mii_tick(&sc->sc_mii);
1682 splx(s);
1683
1684 callout_reset(&sc->sc_tick_callout, hz, ralink_eth_mii_tick, sc);
1685 }
1686
1687 /*
1688 * ralink_eth_mii_read
1689 */
1690 static int
1691 ralink_eth_mii_read(device_t self, int phy_addr, int phy_reg)
1692 {
1693 ralink_eth_softc_t *sc = device_private(self);
1694 KASSERT(sc != NULL);
1695 #if 0
1696 printf("%s() phy_addr: %d phy_reg: %d\n", __func__, phy_addr, phy_reg);
1697 #endif
1698 #if defined(RT3050) || defined(RT3052) || defined(MT7628)
1699 if (phy_addr > 5)
1700 return 0;
1701 #endif
1702
1703 /* We enable mdio gpio purpose register, and disable it when exit. */
1704 ralink_eth_mdio_enable(sc, true);
1705
1706 /*
1707 * make sure previous read operation is complete
1708 * TODO: timeout (linux uses jiffies to measure 5 seconds)
1709 */
1710 for (;;) {
1711 /* rd_rdy: read operation is complete */
1712 #if defined(RT3050) || defined(RT3052) || defined(MT7628)
1713 if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) == 0)
1714 break;
1715 #else
1716 if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0)
1717 break;
1718 #endif
1719 }
1720
1721 #if defined(RT3050) || defined(RT3052) || defined(MT7628)
1722 sw_write(sc, RA_ETH_SW_PCTL0,
1723 PCTL0_RD_CMD | PCTL0_REG(phy_reg) | PCTL0_ADDR(phy_addr));
1724 #else
1725 fe_write(sc, RA_FE_MDIO_ACCESS,
1726 MDIO_ACCESS_PHY_ADDR(phy_addr) | MDIO_ACCESS_REG(phy_reg));
1727 fe_write(sc, RA_FE_MDIO_ACCESS,
1728 MDIO_ACCESS_PHY_ADDR(phy_addr) | MDIO_ACCESS_REG(phy_reg) |
1729 MDIO_ACCESS_TRG);
1730 #endif
1731
1732 /*
1733 * make sure read operation is complete
1734 * TODO: timeout (linux uses jiffies to measure 5 seconds)
1735 */
1736 for (;;) {
1737 #if defined(RT3050) || defined(RT3052) || defined(MT7628)
1738 if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) != 0) {
1739 int data = PCTL1_RD_VAL(
1740 sw_read(sc, RA_ETH_SW_PCTL1));
1741 ralink_eth_mdio_enable(sc, false);
1742 return data;
1743 }
1744 #else
1745 if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0) {
1746 int data = MDIO_ACCESS_DATA(
1747 fe_read(sc, RA_FE_MDIO_ACCESS));
1748 ralink_eth_mdio_enable(sc, false);
1749 return data;
1750 }
1751 #endif
1752 }
1753 }
1754
1755 /*
1756 * ralink_eth_mii_write
1757 */
1758 static void
1759 ralink_eth_mii_write(device_t self, int phy_addr, int phy_reg, int val)
1760 {
1761 ralink_eth_softc_t *sc = device_private(self);
1762 KASSERT(sc != NULL);
1763 #if 0
1764 printf("%s() phy_addr: %d phy_reg: %d val: 0x%04x\n",
1765 __func__, phy_addr, phy_reg, val);
1766 #endif
1767 ralink_eth_mdio_enable(sc, true);
1768
1769 /*
1770 * make sure previous write operation is complete
1771 * TODO: timeout (linux uses jiffies to measure 5 seconds)
1772 */
1773 for (;;) {
1774 #if defined(RT3050) || defined(RT3052) || defined(MT7628)
1775 if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) == 0)
1776 break;
1777 #else
1778 if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0)
1779 break;
1780 #endif
1781 }
1782
1783 #if defined(RT3050) || defined(RT3052) || defined(MT7628)
1784 sw_write(sc, RA_ETH_SW_PCTL0,
1785 PCTL0_WR_CMD | PCTL0_WR_VAL(val) | PCTL0_REG(phy_reg) |
1786 PCTL0_ADDR(phy_addr));
1787 #else
1788 fe_write(sc, RA_FE_MDIO_ACCESS,
1789 MDIO_ACCESS_WR | MDIO_ACCESS_PHY_ADDR(phy_addr) |
1790 MDIO_ACCESS_REG(phy_reg) | MDIO_ACCESS_DATA(val));
1791 fe_write(sc, RA_FE_MDIO_ACCESS,
1792 MDIO_ACCESS_WR | MDIO_ACCESS_PHY_ADDR(phy_addr) |
1793 MDIO_ACCESS_REG(phy_reg) | MDIO_ACCESS_DATA(val) |
1794 MDIO_ACCESS_TRG);
1795 #endif
1796
1797
1798 /* make sure write operation is complete */
1799 for (;;) {
1800 #if defined(RT3050) || defined(RT3052) || defined(MT7628)
1801 if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_WR_DONE) != 0) {
1802 ralink_eth_mdio_enable(sc, false);
1803 return;
1804 }
1805 #else
1806 if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0){
1807 ralink_eth_mdio_enable(sc, false);
1808 return;
1809 }
1810 #endif
1811 }
1812 }
1813