1 /* $NetBSD: if_mvxpe.c,v 1.37 2021/12/05 07:57:38 msaitoh Exp $ */
2 /*
3 * Copyright (c) 2015 Internet Initiative Japan Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: if_mvxpe.c,v 1.37 2021/12/05 07:57:38 msaitoh Exp $");
29
30 #include "opt_multiprocessor.h"
31
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/callout.h>
35 #include <sys/device.h>
36 #include <sys/endian.h>
37 #include <sys/errno.h>
38 #include <sys/evcnt.h>
39 #include <sys/kernel.h>
40 #include <sys/kmem.h>
41 #include <sys/mutex.h>
42 #include <sys/sockio.h>
43 #include <sys/sysctl.h>
44 #include <sys/syslog.h>
45 #include <sys/rndsource.h>
46
47 #include <net/if.h>
48 #include <net/if_ether.h>
49 #include <net/if_media.h>
50 #include <net/bpf.h>
51
52 #include <netinet/in.h>
53 #include <netinet/in_systm.h>
54 #include <netinet/ip.h>
55
56 #include <dev/mii/mii.h>
57 #include <dev/mii/miivar.h>
58
59 #include <dev/marvell/marvellreg.h>
60 #include <dev/marvell/marvellvar.h>
61 #include <dev/marvell/mvxpbmvar.h>
62 #include <dev/marvell/if_mvxpereg.h>
63 #include <dev/marvell/if_mvxpevar.h>
64
65 #include "locators.h"
66
67 #if BYTE_ORDER == BIG_ENDIAN
68 #error "BIG ENDIAN not supported"
69 #endif
70
71 #ifdef MVXPE_DEBUG
72 #define STATIC /* nothing */
73 #else
74 #define STATIC static
75 #endif
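/*
 * Note: when MVXPE_DEBUG is defined, STATIC expands to nothing so that the
 * driver's internal functions keep external linkage and remain visible to
 * the kernel debugger.
 */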
76
77 /* autoconf(9) */
78 STATIC int mvxpe_match(device_t, struct cfdata *, void *);
79 STATIC void mvxpe_attach(device_t, device_t, void *);
80 STATIC int mvxpe_evcnt_attach(struct mvxpe_softc *);
81 CFATTACH_DECL_NEW(mvxpe_mbus, sizeof(struct mvxpe_softc),
82 mvxpe_match, mvxpe_attach, NULL, NULL);
83 STATIC void mvxpe_sc_lock(struct mvxpe_softc *);
84 STATIC void mvxpe_sc_unlock(struct mvxpe_softc *);
85
86 /* MII */
87 STATIC int mvxpe_miibus_readreg(device_t, int, int, uint16_t *);
88 STATIC int mvxpe_miibus_writereg(device_t, int, int, uint16_t);
89 STATIC void mvxpe_miibus_statchg(struct ifnet *);
90
91 /* Address Decoding Window */
92 STATIC void mvxpe_wininit(struct mvxpe_softc *, enum marvell_tags *);
93
94 /* Device Register Initialization */
95 STATIC int mvxpe_initreg(struct ifnet *);
96
97 /* Descriptor Ring Control for each queue */
98 STATIC void *mvxpe_dma_memalloc(struct mvxpe_softc *, bus_dmamap_t *, size_t);
99 STATIC int mvxpe_ring_alloc_queue(struct mvxpe_softc *, int);
100 STATIC void mvxpe_ring_dealloc_queue(struct mvxpe_softc *, int);
101 STATIC void mvxpe_ring_init_queue(struct mvxpe_softc *, int);
102 STATIC void mvxpe_ring_flush_queue(struct mvxpe_softc *, int);
103 STATIC void mvxpe_ring_sync_rx(struct mvxpe_softc *, int, int, int, int);
104 STATIC void mvxpe_ring_sync_tx(struct mvxpe_softc *, int, int, int, int);
105
106 /* Rx/Tx Queue Control */
107 STATIC int mvxpe_rx_queue_init(struct ifnet *, int);
108 STATIC int mvxpe_tx_queue_init(struct ifnet *, int);
109 STATIC int mvxpe_rx_queue_enable(struct ifnet *, int);
110 STATIC int mvxpe_tx_queue_enable(struct ifnet *, int);
111 STATIC void mvxpe_rx_lockq(struct mvxpe_softc *, int);
112 STATIC void mvxpe_rx_unlockq(struct mvxpe_softc *, int);
113 STATIC void mvxpe_tx_lockq(struct mvxpe_softc *, int);
114 STATIC void mvxpe_tx_unlockq(struct mvxpe_softc *, int);
115
116 /* Interrupt Handlers */
117 STATIC void mvxpe_disable_intr(struct mvxpe_softc *);
118 STATIC void mvxpe_enable_intr(struct mvxpe_softc *);
119 STATIC int mvxpe_rxtxth_intr(void *);
120 STATIC int mvxpe_misc_intr(void *);
121 STATIC int mvxpe_rxtx_intr(void *);
122 STATIC void mvxpe_tick(void *);
123
124 /* struct ifnet and mii callbacks */
125 STATIC void mvxpe_start(struct ifnet *);
126 STATIC int mvxpe_ioctl(struct ifnet *, u_long, void *);
127 STATIC int mvxpe_init(struct ifnet *);
128 STATIC void mvxpe_stop(struct ifnet *, int);
129 STATIC void mvxpe_watchdog(struct ifnet *);
130 STATIC int mvxpe_ifflags_cb(struct ethercom *);
131 STATIC int mvxpe_mediachange(struct ifnet *);
132 STATIC void mvxpe_mediastatus(struct ifnet *, struct ifmediareq *);
133
134 /* Link State Notify */
135 STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc);
136 STATIC void mvxpe_linkup(struct mvxpe_softc *);
137 STATIC void mvxpe_linkdown(struct mvxpe_softc *);
138 STATIC void mvxpe_linkreset(struct mvxpe_softc *);
139
140 /* Tx Subroutines */
141 STATIC int mvxpe_tx_queue_select(struct mvxpe_softc *, struct mbuf *);
142 STATIC int mvxpe_tx_queue(struct mvxpe_softc *, struct mbuf *, int);
143 STATIC void mvxpe_tx_set_csumflag(struct ifnet *,
144 struct mvxpe_tx_desc *, struct mbuf *);
145 STATIC void mvxpe_tx_complete(struct mvxpe_softc *, uint32_t);
146 STATIC void mvxpe_tx_queue_complete(struct mvxpe_softc *, int);
147
148 /* Rx Subroutines */
149 STATIC void mvxpe_rx(struct mvxpe_softc *, uint32_t);
150 STATIC void mvxpe_rx_queue(struct mvxpe_softc *, int, int);
151 STATIC int mvxpe_rx_queue_select(struct mvxpe_softc *, uint32_t, int *);
152 STATIC void mvxpe_rx_refill(struct mvxpe_softc *, uint32_t);
153 STATIC void mvxpe_rx_queue_refill(struct mvxpe_softc *, int);
154 STATIC int mvxpe_rx_queue_add(struct mvxpe_softc *, int);
155 STATIC void mvxpe_rx_set_csumflag(struct ifnet *,
156 struct mvxpe_rx_desc *, struct mbuf *);
157
158 /* MAC address filter */
159 STATIC uint8_t mvxpe_crc8(const uint8_t *, size_t);
160 STATIC void mvxpe_filter_setup(struct mvxpe_softc *);
161
162 /* sysctl(9) */
163 STATIC int sysctl_read_mib(SYSCTLFN_PROTO);
164 STATIC int sysctl_clear_mib(SYSCTLFN_PROTO);
165 STATIC int sysctl_set_queue_length(SYSCTLFN_PROTO);
166 STATIC int sysctl_set_queue_rxthtime(SYSCTLFN_PROTO);
167 STATIC void sysctl_mvxpe_init(struct mvxpe_softc *);
168
169 /* MIB */
170 STATIC void mvxpe_clear_mib(struct mvxpe_softc *);
171 STATIC void mvxpe_update_mib(struct mvxpe_softc *);
172
173 /* for Debug */
174 STATIC void mvxpe_dump_txdesc(struct mvxpe_tx_desc *, int) __attribute__((__unused__));
175 STATIC void mvxpe_dump_rxdesc(struct mvxpe_rx_desc *, int) __attribute__((__unused__));
176
177 STATIC int mvxpe_root_num;
178 STATIC kmutex_t mii_mutex;
179 STATIC int mii_init = 0;
180 #ifdef MVXPE_DEBUG
181 STATIC int mvxpe_debug = MVXPE_DEBUG;
182 #endif
183
184 /*
185 * List of MIB registers and names
186 */
187 STATIC struct mvxpe_mib_def {
188 uint32_t regnum;
189 int reg64;
190 const char *sysctl_name;
191 const char *desc;
192 int ext;
193 #define MVXPE_MIBEXT_IF_OERRORS 1
194 #define MVXPE_MIBEXT_IF_IERRORS 2
195 #define MVXPE_MIBEXT_IF_COLLISIONS 3
196 } mvxpe_mib_list[] = {
197 {MVXPE_MIB_RX_GOOD_OCT, 1, "rx_good_oct",
198 "Good Octets Rx", 0},
199 {MVXPE_MIB_RX_BAD_OCT, 0, "rx_bad_oct",
200 "Bad Octets Rx", 0},
201 {MVXPE_MIB_TX_MAC_TRNS_ERR, 0, "tx_mac_err",
202 "MAC Transmit Error", MVXPE_MIBEXT_IF_OERRORS},
203 {MVXPE_MIB_RX_GOOD_FRAME, 0, "rx_good_frame",
204 "Good Frames Rx", 0},
205 {MVXPE_MIB_RX_BAD_FRAME, 0, "rx_bad_frame",
206 "Bad Frames Rx", 0},
207 {MVXPE_MIB_RX_BCAST_FRAME, 0, "rx_bcast_frame",
208 "Broadcast Frames Rx", 0},
209 {MVXPE_MIB_RX_MCAST_FRAME, 0, "rx_mcast_frame",
210 "Multicast Frames Rx", 0},
211 {MVXPE_MIB_RX_FRAME64_OCT, 0, "rx_frame_1_64",
212 "Frame Size 1 - 64", 0},
213 {MVXPE_MIB_RX_FRAME127_OCT, 0, "rx_frame_65_127",
214 "Frame Size 65 - 127", 0},
215 {MVXPE_MIB_RX_FRAME255_OCT, 0, "rx_frame_128_255",
216 "Frame Size 128 - 255", 0},
217 {MVXPE_MIB_RX_FRAME511_OCT, 0, "rx_frame_256_511",
218 "Frame Size 256 - 511"},
219 {MVXPE_MIB_RX_FRAME1023_OCT, 0, "rx_frame_512_1023",
220 "Frame Size 512 - 1023", 0},
221 {MVXPE_MIB_RX_FRAMEMAX_OCT, 0, "rx_frame_1024_max",
222 "Frame Size 1024 - Max", 0},
223 {MVXPE_MIB_TX_GOOD_OCT, 1, "tx_good_oct",
224 "Good Octets Tx", 0},
225 {MVXPE_MIB_TX_GOOD_FRAME, 0, "tx_good_frame",
226 "Good Frames Tx", 0},
227 {MVXPE_MIB_TX_EXCES_COL, 0, "tx_exces_collision",
228 "Excessive Collision", MVXPE_MIBEXT_IF_OERRORS},
229 {MVXPE_MIB_TX_MCAST_FRAME, 0, "tx_mcast_frame",
230 "Multicast Frames Tx"},
231 {MVXPE_MIB_TX_BCAST_FRAME, 0, "tx_bcast_frame",
232 "Broadcast Frames Tx"},
233 {MVXPE_MIB_TX_MAC_CTL_ERR, 0, "tx_mac_err",
234 "Unknown MAC Control", 0},
235 {MVXPE_MIB_FC_SENT, 0, "fc_tx",
236 "Flow Control Tx", 0},
237 {MVXPE_MIB_FC_GOOD, 0, "fc_rx_good",
238 "Good Flow Control Rx", 0},
239 {MVXPE_MIB_FC_BAD, 0, "fc_rx_bad",
240 "Bad Flow Control Rx", 0},
241 {MVXPE_MIB_PKT_UNDERSIZE, 0, "pkt_undersize",
242 "Undersized Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
243 {MVXPE_MIB_PKT_FRAGMENT, 0, "pkt_fragment",
244 "Fragmented Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
245 {MVXPE_MIB_PKT_OVERSIZE, 0, "pkt_oversize",
246 "Oversized Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
247 {MVXPE_MIB_PKT_JABBER, 0, "pkt_jabber",
248 "Jabber Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
249 {MVXPE_MIB_MAC_RX_ERR, 0, "mac_rx_err",
250 "MAC Rx Errors", MVXPE_MIBEXT_IF_IERRORS},
251 {MVXPE_MIB_MAC_CRC_ERR, 0, "mac_crc_err",
252 "MAC CRC Errors", MVXPE_MIBEXT_IF_IERRORS},
253 {MVXPE_MIB_MAC_COL, 0, "mac_collision",
254 "MAC Collision", MVXPE_MIBEXT_IF_COLLISIONS},
255 {MVXPE_MIB_MAC_LATE_COL, 0, "mac_late_collision",
256 "MAC Late Collision", MVXPE_MIBEXT_IF_OERRORS},
257 };
258
259 /*
260 * autoconf(9)
261 */
262 /* ARGSUSED */
263 STATIC int
264 mvxpe_match(device_t parent, cfdata_t match, void *aux)
265 {
266 struct marvell_attach_args *mva = aux;
267 bus_size_t pv_off;
268 uint32_t pv;
269
270 if (strcmp(mva->mva_name, match->cf_name) != 0)
271 return 0;
272 if (mva->mva_offset == MVA_OFFSET_DEFAULT)
273 return 0;
274
275 /* check port version */
276 pv_off = mva->mva_offset + MVXPE_PV;
277 pv = bus_space_read_4(mva->mva_iot, mva->mva_ioh, pv_off);
278 if (MVXPE_PV_GET_VERSION(pv) < 0x10)
279 return 0; /* old version is not supported */
280
281 return 1;
282 }
283
284 /* ARGSUSED */
285 STATIC void
286 mvxpe_attach(device_t parent, device_t self, void *aux)
287 {
288 struct mvxpe_softc *sc = device_private(self);
289 struct mii_softc *child;
290 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
291 struct mii_data * const mii = &sc->sc_mii;
292 struct marvell_attach_args *mva = aux;
293 prop_dictionary_t dict;
294 prop_data_t enaddrp = NULL;
295 uint32_t phyaddr, maddrh, maddrl;
296 uint8_t enaddr[ETHER_ADDR_LEN];
297 int q;
298
299 aprint_naive("\n");
300 aprint_normal(": Marvell ARMADA GbE Controller\n");
301 memset(sc, 0, sizeof(*sc));
302 sc->sc_dev = self;
303 sc->sc_port = mva->mva_unit;
304 sc->sc_iot = mva->mva_iot;
305 sc->sc_dmat = mva->mva_dmat;
306 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
307 callout_init(&sc->sc_tick_ch, 0);
308 callout_setfunc(&sc->sc_tick_ch, mvxpe_tick, sc);
309
310 /*
311 * BUS space
312 */
313 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
314 mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
315 aprint_error_dev(self, "Cannot map registers\n");
316 goto fail;
317 }
318 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
319 mva->mva_offset + MVXPE_PORTMIB_BASE, MVXPE_PORTMIB_SIZE,
320 &sc->sc_mibh)) {
321 aprint_error_dev(self,
322 "Cannot map MIB registers\n");
323 goto fail;
324 }
325 sc->sc_version = MVXPE_READ(sc, MVXPE_PV);
326 aprint_normal_dev(self, "Port Version %#x\n", sc->sc_version);
327
328 /*
329 * Buffer Manager (BM) subsystem.
330 */
331 sc->sc_bm = mvxpbm_device(mva);
332 if (sc->sc_bm == NULL) {
333 aprint_error_dev(self, "no Buffer Manager.\n");
334 goto fail;
335 }
336 aprint_normal_dev(self,
337 "Using Buffer Manager: %s\n", mvxpbm_xname(sc->sc_bm));
338 aprint_normal_dev(sc->sc_dev,
339 "%zu kbytes managed buffer, %zu bytes * %u entries allocated.\n",
340 mvxpbm_buf_size(sc->sc_bm) / 1024,
341 mvxpbm_chunk_size(sc->sc_bm), mvxpbm_chunk_count(sc->sc_bm));
342
343 /*
344 * make sure DMA engines are in reset state
345 */
346 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
347 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);
348
349 /*
350 * Address decoding window
351 */
352 mvxpe_wininit(sc, mva->mva_tags);
353
354 /*
355 * MAC address
356 */
357 dict = device_properties(self);
358 if (dict)
359 enaddrp = prop_dictionary_get(dict, "mac-address");
360 if (enaddrp) {
361 memcpy(enaddr, prop_data_data_nocopy(enaddrp), ETHER_ADDR_LEN);
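		/*
		 * The MAC address is split across two registers: MACAH takes
		 * the first four octets and MACAL the last two.  As an
		 * illustration, 00:11:22:33:44:55 would be programmed as
		 * MACAH = 0x00112233 and MACAL = 0x00004455.
		 */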
362 maddrh = enaddr[0] << 24;
363 maddrh |= enaddr[1] << 16;
364 maddrh |= enaddr[2] << 8;
365 maddrh |= enaddr[3];
366 maddrl = enaddr[4] << 8;
367 maddrl |= enaddr[5];
368 MVXPE_WRITE(sc, MVXPE_MACAH, maddrh);
369 MVXPE_WRITE(sc, MVXPE_MACAL, maddrl);
370 }
371 else {
372 /*
373 * Even if enaddr is not found in the dictionary, the port may
374 * already have been initialized by an IPL program such as U-Boot.
375 */
376 maddrh = MVXPE_READ(sc, MVXPE_MACAH);
377 maddrl = MVXPE_READ(sc, MVXPE_MACAL);
378 if ((maddrh | maddrl) == 0) {
379 aprint_error_dev(self, "No Ethernet address\n");
380 return;
381 }
382 }
383 sc->sc_enaddr[0] = maddrh >> 24;
384 sc->sc_enaddr[1] = maddrh >> 16;
385 sc->sc_enaddr[2] = maddrh >> 8;
386 sc->sc_enaddr[3] = maddrh >> 0;
387 sc->sc_enaddr[4] = maddrl >> 8;
388 sc->sc_enaddr[5] = maddrl >> 0;
389 aprint_normal_dev(self, "Ethernet address %s\n",
390 ether_sprintf(sc->sc_enaddr));
391
392 /*
393 * Register interrupt handlers
394 * XXX: handle Ethernet unit intr. and Error intr.
395 */
396 mvxpe_disable_intr(sc);
397 marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpe_rxtxth_intr, sc);
398
399 /*
400 * MIB buffer allocation
401 */
402 sc->sc_sysctl_mib_size =
403 __arraycount(mvxpe_mib_list) * sizeof(struct mvxpe_sysctl_mib);
404 sc->sc_sysctl_mib = kmem_alloc(sc->sc_sysctl_mib_size, KM_SLEEP);
405 memset(sc->sc_sysctl_mib, 0, sc->sc_sysctl_mib_size);
406
407 /*
408 * Device DMA Buffer allocation
409 */
410 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
411 if (mvxpe_ring_alloc_queue(sc, q) != 0)
412 goto fail;
413 mvxpe_ring_init_queue(sc, q);
414 }
415
416 /*
417 * We can support 802.1Q VLAN-sized frames and jumbo
418 * Ethernet frames.
419 */
420 sc->sc_ethercom.ec_capabilities |=
421 ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
422 ifp->if_softc = sc;
423 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
424 ifp->if_start = mvxpe_start;
425 ifp->if_ioctl = mvxpe_ioctl;
426 ifp->if_init = mvxpe_init;
427 ifp->if_stop = mvxpe_stop;
428 ifp->if_watchdog = mvxpe_watchdog;
429
430 /*
431 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
432 */
433 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx;
434 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx;
435 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx;
436 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx;
437 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
438 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
439 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx;
440 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Rx;
441 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
442 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
443
444 /*
445 * Initialize struct ifnet
446 */
447 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(MVXPE_TX_RING_CNT - 1, IFQ_MAXLEN));
448 IFQ_SET_READY(&ifp->if_snd);
449 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
450
451 /*
452 * Enable DMA engines and initialize device registers.
453 */
454 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
455 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
456 MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
457 mvxpe_sc_lock(sc); /* XXX */
458 mvxpe_filter_setup(sc);
459 mvxpe_sc_unlock(sc);
460 mvxpe_initreg(ifp);
461
462 /*
463 * Now MAC is working, setup MII.
464 */
465 if (mii_init == 0) {
466 /*
467 * The MII bus is shared by all MACs and all PHYs in the SoC;
468 * serializing the bus access keeps it safe.
469 */
470 mutex_init(&mii_mutex, MUTEX_DEFAULT, IPL_NET);
471 mii_init = 1;
472 }
473 mii->mii_ifp = ifp;
474 mii->mii_readreg = mvxpe_miibus_readreg;
475 mii->mii_writereg = mvxpe_miibus_writereg;
476 mii->mii_statchg = mvxpe_miibus_statchg;
477
478 sc->sc_ethercom.ec_mii = mii;
479 ifmedia_init(&mii->mii_media, 0, mvxpe_mediachange, mvxpe_mediastatus);
480 /*
481 * XXX: PHY addressing depends highly on the board design.
482 * We assume the PHY address equals the MAC unit number here,
483 * but some boards may differ.
484 */
485 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, sc->sc_dev->dv_unit, 0);
486 child = LIST_FIRST(&mii->mii_phys);
487 if (child == NULL) {
488 aprint_error_dev(self, "no PHY found!\n");
489 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
490 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
491 } else {
492 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
493 phyaddr = MVXPE_PHYADDR_PHYAD(child->mii_phy);
494 MVXPE_WRITE(sc, MVXPE_PHYADDR, phyaddr);
495 DPRINTSC(sc, 1, "PHYADDR: %#x\n", MVXPE_READ(sc, MVXPE_PHYADDR));
496 }
497
498 /*
499 * Call MI attach routines.
500 */
501 if_attach(ifp);
502 if_deferred_start_init(ifp, NULL);
503
504 ether_ifattach(ifp, sc->sc_enaddr);
505 ether_set_ifflags_cb(&sc->sc_ethercom, mvxpe_ifflags_cb);
506
507 sysctl_mvxpe_init(sc);
508 mvxpe_evcnt_attach(sc);
509 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
510 RND_TYPE_NET, RND_FLAG_DEFAULT);
511
512 return;
513
514 fail:
515 for (q = 0; q < MVXPE_QUEUE_SIZE; q++)
516 mvxpe_ring_dealloc_queue(sc, q);
517 if (sc->sc_sysctl_mib)
518 kmem_free(sc->sc_sysctl_mib, sc->sc_sysctl_mib_size);
519
520 return;
521 }
522
523 STATIC int
524 mvxpe_evcnt_attach(struct mvxpe_softc *sc)
525 {
526 #ifdef MVXPE_EVENT_COUNTERS
527 int q;
528
529 /* Master Interrupt Handler */
530 evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtxth, EVCNT_TYPE_INTR,
531 NULL, device_xname(sc->sc_dev), "RxTxTH Intr.");
532 evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtx, EVCNT_TYPE_INTR,
533 NULL, device_xname(sc->sc_dev), "RxTx Intr.");
534 evcnt_attach_dynamic(&sc->sc_ev.ev_i_misc, EVCNT_TYPE_INTR,
535 NULL, device_xname(sc->sc_dev), "MISC Intr.");
536
537 /* RXTXTH Interrupt */
538 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtxth_txerr, EVCNT_TYPE_INTR,
539 NULL, device_xname(sc->sc_dev), "RxTxTH Tx error summary");
540
541 /* MISC Interrupt */
542 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_phystatuschng, EVCNT_TYPE_INTR,
543 NULL, device_xname(sc->sc_dev), "MISC phy status changed");
544 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_linkchange, EVCNT_TYPE_INTR,
545 NULL, device_xname(sc->sc_dev), "MISC link status changed");
546 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_iae, EVCNT_TYPE_INTR,
547 NULL, device_xname(sc->sc_dev), "MISC internal address error");
548 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxoverrun, EVCNT_TYPE_INTR,
549 NULL, device_xname(sc->sc_dev), "MISC Rx FIFO overrun");
550 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxcrc, EVCNT_TYPE_INTR,
551 NULL, device_xname(sc->sc_dev), "MISC Rx CRC error");
552 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxlargepacket, EVCNT_TYPE_INTR,
553 NULL, device_xname(sc->sc_dev), "MISC Rx too large frame");
554 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txunderrun, EVCNT_TYPE_INTR,
555 NULL, device_xname(sc->sc_dev), "MISC Tx FIFO underrun");
556 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_prbserr, EVCNT_TYPE_INTR,
557 NULL, device_xname(sc->sc_dev), "MISC SERDES loopback test err");
558 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_srse, EVCNT_TYPE_INTR,
559 NULL, device_xname(sc->sc_dev), "MISC SERDES sync error");
560 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txreq, EVCNT_TYPE_INTR,
561 NULL, device_xname(sc->sc_dev), "MISC Tx resource error");
562
563 /* RxTx Interrupt */
564 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rreq, EVCNT_TYPE_INTR,
565 NULL, device_xname(sc->sc_dev), "RxTx Rx resource error");
566 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rpq, EVCNT_TYPE_INTR,
567 NULL, device_xname(sc->sc_dev), "RxTx Rx packet");
568 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_tbrq, EVCNT_TYPE_INTR,
569 NULL, device_xname(sc->sc_dev), "RxTx Tx complete");
570 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rxtxth, EVCNT_TYPE_INTR,
571 NULL, device_xname(sc->sc_dev), "RxTx RxTxTH summary");
572 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_txerr, EVCNT_TYPE_INTR,
573 NULL, device_xname(sc->sc_dev), "RxTx Tx error summary");
574 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_misc, EVCNT_TYPE_INTR,
575 NULL, device_xname(sc->sc_dev), "RxTx MISC summary");
576
577 /* Link */
578 evcnt_attach_dynamic(&sc->sc_ev.ev_link_up, EVCNT_TYPE_MISC,
579 NULL, device_xname(sc->sc_dev), "link up");
580 evcnt_attach_dynamic(&sc->sc_ev.ev_link_down, EVCNT_TYPE_MISC,
581 NULL, device_xname(sc->sc_dev), "link down");
582
583 /* Rx Descriptor */
584 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_ce, EVCNT_TYPE_MISC,
585 NULL, device_xname(sc->sc_dev), "Rx CRC error counter");
586 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_or, EVCNT_TYPE_MISC,
587 NULL, device_xname(sc->sc_dev), "Rx FIFO overrun counter");
588 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_mf, EVCNT_TYPE_MISC,
589 NULL, device_xname(sc->sc_dev), "Rx too large frame counter");
590 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_re, EVCNT_TYPE_MISC,
591 NULL, device_xname(sc->sc_dev), "Rx resource error counter");
592 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_scat, EVCNT_TYPE_MISC,
593 NULL, device_xname(sc->sc_dev), "Rx unexpected scatter bufs");
594
595 /* Tx Descriptor */
596 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_lc, EVCNT_TYPE_MISC,
597 NULL, device_xname(sc->sc_dev), "Tx late collision counter");
598 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_rl, EVCNT_TYPE_MISC,
599 NULL, device_xname(sc->sc_dev), "Tx excess. collision counter");
600 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_ur, EVCNT_TYPE_MISC,
601 NULL, device_xname(sc->sc_dev), "Tx FIFO underrun counter");
602 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_oth, EVCNT_TYPE_MISC,
603 NULL, device_xname(sc->sc_dev), "Tx unknown error counter");
604
605 /* Status Registers */
606 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pdfc, EVCNT_TYPE_MISC,
607 NULL, device_xname(sc->sc_dev), "Rx discard counter");
608 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pofc, EVCNT_TYPE_MISC,
609 NULL, device_xname(sc->sc_dev), "Rx overrun counter");
610 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txbadfcs, EVCNT_TYPE_MISC,
611 NULL, device_xname(sc->sc_dev), "Tx bad FCS counter");
612 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txdropped, EVCNT_TYPE_MISC,
613 NULL, device_xname(sc->sc_dev), "Tx dropped counter");
614 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_lpic, EVCNT_TYPE_MISC,
615 NULL, device_xname(sc->sc_dev), "LP_IDLE counter");
616
617 /* Device Driver Errors */
618 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_wdogsoft, EVCNT_TYPE_MISC,
619 NULL, device_xname(sc->sc_dev), "watchdog timer expired");
620 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txerr, EVCNT_TYPE_MISC,
621 NULL, device_xname(sc->sc_dev), "Tx descriptor alloc failed");
622 #define MVXPE_QUEUE_DESC(q) "Rx success in queue " # q
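	/*
	 * The per-queue description strings live in static arrays because
	 * evcnt(9) stores the group/name pointers rather than copying the
	 * strings, so they must remain valid as long as the counters exist.
	 */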
623 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
624 static const char *rxq_desc[] = {
625 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
626 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
627 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
628 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
629 };
630 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxq[q], EVCNT_TYPE_MISC,
631 NULL, device_xname(sc->sc_dev), rxq_desc[q]);
632 }
633 #undef MVXPE_QUEUE_DESC
634 #define MVXPE_QUEUE_DESC(q) "Tx success in queue " # q
635 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
636 static const char *txq_desc[] = {
637 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
638 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
639 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
640 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
641 };
642 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txq[q], EVCNT_TYPE_MISC,
643 NULL, device_xname(sc->sc_dev), txq_desc[q]);
644 }
645 #undef MVXPE_QUEUE_DESC
646 #define MVXPE_QUEUE_DESC(q) "Rx error in queue " # q
647 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
648 static const char *rxqe_desc[] = {
649 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
650 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
651 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
652 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
653 };
654 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxqe[q], EVCNT_TYPE_MISC,
655 NULL, device_xname(sc->sc_dev), rxqe_desc[q]);
656 }
657 #undef MVXPE_QUEUE_DESC
658 #define MVXPE_QUEUE_DESC(q) "Tx error in queue " # q
659 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
660 static const char *txqe_desc[] = {
661 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
662 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
663 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
664 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
665 };
666 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txqe[q], EVCNT_TYPE_MISC,
667 NULL, device_xname(sc->sc_dev), txqe_desc[q]);
668 }
669 #undef MVXPE_QUEUE_DESC
670
671 #endif /* MVXPE_EVENT_COUNTERS */
672 return 0;
673 }
674
675 STATIC void
676 mvxpe_sc_lock(struct mvxpe_softc *sc)
677 {
678 mutex_enter(&sc->sc_mtx);
679 }
680
681 STATIC void
682 mvxpe_sc_unlock(struct mvxpe_softc *sc)
683 {
684 mutex_exit(&sc->sc_mtx);
685 }
686
687 /*
688 * MII
689 */
690 STATIC int
691 mvxpe_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
692 {
693 struct mvxpe_softc *sc = device_private(dev);
694 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
695 uint32_t smi;
696 int i, rv = 0;
697
698 mutex_enter(&mii_mutex);
699
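	/*
	 * SMI read sequence: wait until the SMI unit is no longer busy,
	 * issue a READ command carrying the PHY and register addresses,
	 * then poll until READVALID is set; the result is in the low 16
	 * data bits of the SMI register.
	 */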
700 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
701 DELAY(1);
702 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
703 break;
704 }
705 if (i == MVXPE_PHY_TIMEOUT) {
706 aprint_error_ifnet(ifp, "SMI busy timeout\n");
707 rv = ETIMEDOUT;
708 goto out;
709 }
710
711 smi =
712 MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) | MVXPE_SMI_OPCODE_READ;
713 MVXPE_WRITE(sc, MVXPE_SMI, smi);
714
715 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
716 DELAY(1);
717 smi = MVXPE_READ(sc, MVXPE_SMI);
718 if (smi & MVXPE_SMI_READVALID) {
719 *val = smi & MVXPE_SMI_DATA_MASK;
720 break;
721 }
722 }
723 DPRINTDEV(dev, 9, "i=%d, timeout=%d\n", i, MVXPE_PHY_TIMEOUT);
724 if (i >= MVXPE_PHY_TIMEOUT)
725 rv = ETIMEDOUT;
726
727 out:
728 mutex_exit(&mii_mutex);
729
730 DPRINTDEV(dev, 9, "phy=%d, reg=%#x, val=%#hx\n", phy, reg, *val);
731
732 return rv;
733 }
734
735 STATIC int
736 mvxpe_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
737 {
738 struct mvxpe_softc *sc = device_private(dev);
739 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
740 uint32_t smi;
741 int i, rv = 0;
742
743 DPRINTDEV(dev, 9, "phy=%d reg=%#x val=%#hx\n", phy, reg, val);
744
745 mutex_enter(&mii_mutex);
746
747 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
748 DELAY(1);
749 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
750 break;
751 }
752 if (i == MVXPE_PHY_TIMEOUT) {
753 aprint_error_ifnet(ifp, "SMI busy timeout\n");
754 rv = ETIMEDOUT;
755 goto out;
756 }
757
758 smi = MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) |
759 MVXPE_SMI_OPCODE_WRITE | (val & MVXPE_SMI_DATA_MASK);
760 MVXPE_WRITE(sc, MVXPE_SMI, smi);
761
762 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
763 DELAY(1);
764 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
765 break;
766 }
767
768 if (i == MVXPE_PHY_TIMEOUT) {
769 aprint_error_ifnet(ifp, "phy write timed out\n");
770 rv = ETIMEDOUT;
771 }
772
773 out:
774 mutex_exit(&mii_mutex);
775
776 return rv;
777 }
778
779 STATIC void
780 mvxpe_miibus_statchg(struct ifnet *ifp)
781 {
782
783 /* nothing to do */
784 }
785
786 /*
787 * Address Decoding Window
788 */
789 STATIC void
790 mvxpe_wininit(struct mvxpe_softc *sc, enum marvell_tags *tags)
791 {
792 device_t pdev = device_parent(sc->sc_dev);
793 uint64_t base;
794 uint32_t en, ac, size;
795 int window, target, attr, rv, i;
796
797 /* First disable all address decode windows */
798 en = MVXPE_BARE_EN_MASK;
799 MVXPE_WRITE(sc, MVXPE_BARE, en);
800
801 ac = 0;
802 for (window = 0, i = 0;
803 tags[i] != MARVELL_TAG_UNDEFINED && window < MVXPE_NWINDOW; i++) {
804 rv = marvell_winparams_by_tag(pdev, tags[i],
805 &target, &attr, &base, &size);
806 if (rv != 0 || size == 0)
807 continue;
808
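		/*
		 * A window base above 4GB needs the high remap register
		 * (bits 63:32 of the base); only the first MVXPE_NREMAP
		 * windows have one, so higher window numbers are skipped.
		 */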
809 if (base > 0xffffffffULL) {
810 if (window >= MVXPE_NREMAP) {
811 aprint_error_dev(sc->sc_dev,
812 "can't remap window %d\n", window);
813 continue;
814 }
815 MVXPE_WRITE(sc, MVXPE_HA(window),
816 (base >> 32) & 0xffffffff);
817 }
818
819 MVXPE_WRITE(sc, MVXPE_BASEADDR(window),
820 MVXPE_BASEADDR_TARGET(target) |
821 MVXPE_BASEADDR_ATTR(attr) |
822 MVXPE_BASEADDR_BASE(base));
823 MVXPE_WRITE(sc, MVXPE_S(window), MVXPE_S_SIZE(size));
824
825 DPRINTSC(sc, 1, "Window %d Base 0x%016llx: Size 0x%08x\n",
826 window, base, size);
827
828 en &= ~(1 << window);
829 /* set full access (r/w) */
830 ac |= MVXPE_EPAP_EPAR(window, MVXPE_EPAP_AC_FA);
831 window++;
832 }
833 /* allow access to the decode windows */
834 MVXPE_WRITE(sc, MVXPE_EPAP, ac);
835
836 MVXPE_WRITE(sc, MVXPE_BARE, en);
837 }
838
839 /*
840 * Device Register Initialization
841 * Reset device registers to the device driver's default values.
842 * The device is not enabled here.
843 */
844 STATIC int
845 mvxpe_initreg(struct ifnet *ifp)
846 {
847 struct mvxpe_softc *sc = ifp->if_softc;
848 int serdes = 0;
849 uint32_t reg;
850 int q, i;
851
852 DPRINTIFNET(ifp, 1, "initializing device register\n");
853
854 /* Init TX/RX Queue Registers */
855 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
856 mvxpe_rx_lockq(sc, q);
857 if (mvxpe_rx_queue_init(ifp, q) != 0) {
858 aprint_error_ifnet(ifp,
859 "initialization failed: cannot initialize queue\n");
860 mvxpe_rx_unlockq(sc, q);
861 return ENOBUFS;
862 }
863 mvxpe_rx_unlockq(sc, q);
864
865 mvxpe_tx_lockq(sc, q);
866 if (mvxpe_tx_queue_init(ifp, q) != 0) {
867 aprint_error_ifnet(ifp,
868 "initialization failed: cannot initialize queue\n");
869 mvxpe_tx_unlockq(sc, q);
870 return ENOBUFS;
871 }
872 mvxpe_tx_unlockq(sc, q);
873 }
874
875 /* Tx MTU Limit */
876 MVXPE_WRITE(sc, MVXPE_TXMTU, MVXPE_MTU);
877
878 /* Check SGMII or SERDES (assume IPL/U-Boot initialized this) */
879 reg = MVXPE_READ(sc, MVXPE_PMACC0);
880 if ((reg & MVXPE_PMACC0_PORTTYPE) != 0)
881 serdes = 1;
882
883 /* Ethernet Unit Control */
884 reg = MVXPE_READ(sc, MVXPE_EUC);
885 reg |= MVXPE_EUC_POLLING;
886 MVXPE_WRITE(sc, MVXPE_EUC, reg);
887
888 /* Auto Negotiation */
889 reg = MVXPE_PANC_MUSTSET; /* must write 0x1 */
890 reg |= MVXPE_PANC_FORCELINKFAIL;/* force link state down */
891 reg |= MVXPE_PANC_ANSPEEDEN; /* interface speed negotiation */
892 reg |= MVXPE_PANC_ANDUPLEXEN; /* negotiate duplex mode */
893 if (serdes) {
894 reg |= MVXPE_PANC_INBANDANEN; /* In Band negotiation */
895 reg |= MVXPE_PANC_INBANDANBYPASSEN; /* bypass negotiation */
896 reg |= MVXPE_PANC_SETFULLDX; /* set full-duplex on failure */
897 }
898 MVXPE_WRITE(sc, MVXPE_PANC, reg);
899
900 /* EEE: Low Power Idle */
901 reg = MVXPE_LPIC0_LILIMIT(MVXPE_LPI_LI);
902 reg |= MVXPE_LPIC0_TSLIMIT(MVXPE_LPI_TS);
903 MVXPE_WRITE(sc, MVXPE_LPIC0, reg);
904
905 reg = MVXPE_LPIC1_TWLIMIT(MVXPE_LPI_TS);
906 MVXPE_WRITE(sc, MVXPE_LPIC1, reg);
907
908 reg = MVXPE_LPIC2_MUSTSET;
909 MVXPE_WRITE(sc, MVXPE_LPIC2, reg);
910
911 /* Port MAC Control set 0 */
912 reg = MVXPE_PMACC0_MUSTSET; /* must write 0x1 */
913 reg &= ~MVXPE_PMACC0_PORTEN; /* port is still disabled */
914 reg |= MVXPE_PMACC0_FRAMESIZELIMIT(MVXPE_MRU);
915 if (serdes)
916 reg |= MVXPE_PMACC0_PORTTYPE;
917 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
918
919 /* Port MAC Control set 1 is only used for loop-back test */
920
921 /* Port MAC Control set 2 */
922 reg = MVXPE_READ(sc, MVXPE_PMACC2);
923 reg &= (MVXPE_PMACC2_PCSEN | MVXPE_PMACC2_RGMIIEN);
924 reg |= MVXPE_PMACC2_MUSTSET;
925 MVXPE_WRITE(sc, MVXPE_PMACC2, reg);
926
927 /* Port MAC Control set 3 is used for IPG tune */
928
929 /* Port MAC Control set 4 is not used */
930
931 /* Port Configuration */
932 /* Use queue 0 only */
933 reg = MVXPE_READ(sc, MVXPE_PXC);
934 reg &= ~(MVXPE_PXC_RXQ_MASK | MVXPE_PXC_RXQARP_MASK |
935 MVXPE_PXC_TCPQ_MASK | MVXPE_PXC_UDPQ_MASK | MVXPE_PXC_BPDUQ_MASK);
936 MVXPE_WRITE(sc, MVXPE_PXC, reg);
937
938 /* Port Configuration Extended: enable Tx CRC generation */
939 reg = MVXPE_READ(sc, MVXPE_PXCX);
940 reg &= ~MVXPE_PXCX_TXCRCDIS;
941 MVXPE_WRITE(sc, MVXPE_PXCX, reg);
942
943 /* clear MIB counter registers (cleared by read) */
944 for (i = 0; i < __arraycount(mvxpe_mib_list); i++)
945 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum));
946
947 /* Set SDC register except IPGINT bits */
948 reg = MVXPE_SDC_RXBSZ_16_64BITWORDS;
949 reg |= MVXPE_SDC_TXBSZ_16_64BITWORDS;
950 reg |= MVXPE_SDC_BLMR;
951 reg |= MVXPE_SDC_BLMT;
952 MVXPE_WRITE(sc, MVXPE_SDC, reg);
953
954 return 0;
955 }
956
957 /*
958 * Descriptor ring control for each queue
959 */
960 STATIC void *
961 mvxpe_dma_memalloc(struct mvxpe_softc *sc, bus_dmamap_t *map, size_t size)
962 {
963 bus_dma_segment_t segs;
964 void *kva = NULL;
965 int nsegs;
966
967 /*
968 * Allocate the descriptor queues.
969 * struct mvxpe_ring_data contains an array of descriptors per queue.
970 */
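	/*
	 * Standard bus_dma(9) sequence: allocate DMA-safe memory, map it
	 * into kernel VA, create a DMA map and load it.  On failure the
	 * steps taken so far are unwound in reverse order (fail3/fail2/
	 * fail1).
	 */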
971 if (bus_dmamem_alloc(sc->sc_dmat,
972 size, PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
973 aprint_error_dev(sc->sc_dev,
974 "can't alloc device memory (%zu bytes)\n", size);
975 return NULL;
976 }
977 if (bus_dmamem_map(sc->sc_dmat,
978 &segs, nsegs, size, &kva, BUS_DMA_NOWAIT)) {
979 aprint_error_dev(sc->sc_dev,
980 "can't map dma buffers (%zu bytes)\n", size);
981 goto fail1;
982 }
983
984 if (bus_dmamap_create(sc->sc_dmat,
985 size, 1, size, 0, BUS_DMA_NOWAIT, map)) {
986 aprint_error_dev(sc->sc_dev, "can't create dma map\n");
987 goto fail2;
988 }
989 if (bus_dmamap_load(sc->sc_dmat,
990 *map, kva, size, NULL, BUS_DMA_NOWAIT)) {
991 aprint_error_dev(sc->sc_dev, "can't load dma map\n");
992 goto fail3;
993 }
994 memset(kva, 0, size);
995 return kva;
996
997 fail3:
998 bus_dmamap_destroy(sc->sc_dmat, *map);
999 memset(map, 0, sizeof(*map));
1000 fail2:
1001 bus_dmamem_unmap(sc->sc_dmat, kva, size);
1002 fail1:
1003 bus_dmamem_free(sc->sc_dmat, &segs, nsegs);
1004 return NULL;
1005 }
1006
1007 STATIC int
1008 mvxpe_ring_alloc_queue(struct mvxpe_softc *sc, int q)
1009 {
1010 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1011 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1012
1013 /*
1014 * MVXPE_RX_RING_CNT and MVXPE_TX_RING_CNT are hard limits on the
1015 * queue length. The real queue length is limited by
1016 * sc->sc_rx_ring[q].rx_queue_len and sc->sc_tx_ring[q].tx_queue_len.
1017 *
1018 * Because reallocating a descriptor ring requires reprogramming the
1019 * DMA registers, we allocate enough descriptors for the hard limit
1020 * of the queue length.
1021 */
1022 rx->rx_descriptors =
1023 mvxpe_dma_memalloc(sc, &rx->rx_descriptors_map,
1024 (sizeof(struct mvxpe_rx_desc) * MVXPE_RX_RING_CNT));
1025 if (rx->rx_descriptors == NULL)
1026 goto fail;
1027
1028 tx->tx_descriptors =
1029 mvxpe_dma_memalloc(sc, &tx->tx_descriptors_map,
1030 (sizeof(struct mvxpe_tx_desc) * MVXPE_TX_RING_CNT));
1031 if (tx->tx_descriptors == NULL)
1032 goto fail;
1033
1034 return 0;
1035 fail:
1036 mvxpe_ring_dealloc_queue(sc, q);
1037 aprint_error_dev(sc->sc_dev, "DMA Ring buffer allocation failure.\n");
1038 return ENOMEM;
1039 }
1040
1041 STATIC void
1042 mvxpe_ring_dealloc_queue(struct mvxpe_softc *sc, int q)
1043 {
1044 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1045 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1046 bus_dma_segment_t *segs;
1047 bus_size_t size;
1048 void *kva;
1049 int nsegs;
1050
1051 /* Rx */
1052 kva = (void *)MVXPE_RX_RING_MEM_VA(sc, q);
1053 if (kva) {
1054 segs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_segs;
1055 nsegs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_nsegs;
1056 size = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_mapsize;
1057
1058 bus_dmamap_unload(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
1059 bus_dmamap_destroy(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
1060 bus_dmamem_unmap(sc->sc_dmat, kva, size);
1061 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
1062 }
1063
1064 /* Tx */
1065 kva = (void *)MVXPE_TX_RING_MEM_VA(sc, q);
1066 if (kva) {
1067 segs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_segs;
1068 nsegs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_nsegs;
1069 size = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_mapsize;
1070
1071 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
1072 bus_dmamap_destroy(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
1073 bus_dmamem_unmap(sc->sc_dmat, kva, size);
1074 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
1075 }
1076
1077 /* Clear all dangling pointers */
1078 memset(rx, 0, sizeof(*rx));
1079 memset(tx, 0, sizeof(*tx));
1080 }
1081
1082 STATIC void
1083 mvxpe_ring_init_queue(struct mvxpe_softc *sc, int q)
1084 {
1085 struct mvxpe_rx_desc *rxd = MVXPE_RX_RING_MEM_VA(sc, q);
1086 struct mvxpe_tx_desc *txd = MVXPE_TX_RING_MEM_VA(sc, q);
1087 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1088 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1089 static const int rx_default_queue_len[] = {
1090 MVXPE_RX_QUEUE_LIMIT_0, MVXPE_RX_QUEUE_LIMIT_1,
1091 MVXPE_RX_QUEUE_LIMIT_2, MVXPE_RX_QUEUE_LIMIT_3,
1092 MVXPE_RX_QUEUE_LIMIT_4, MVXPE_RX_QUEUE_LIMIT_5,
1093 MVXPE_RX_QUEUE_LIMIT_6, MVXPE_RX_QUEUE_LIMIT_7,
1094 };
1095 static const int tx_default_queue_len[] = {
1096 MVXPE_TX_QUEUE_LIMIT_0, MVXPE_TX_QUEUE_LIMIT_1,
1097 MVXPE_TX_QUEUE_LIMIT_2, MVXPE_TX_QUEUE_LIMIT_3,
1098 MVXPE_TX_QUEUE_LIMIT_4, MVXPE_TX_QUEUE_LIMIT_5,
1099 MVXPE_TX_QUEUE_LIMIT_6, MVXPE_TX_QUEUE_LIMIT_7,
1100 };
1101 extern uint32_t mvTclk;
1102 int i;
1103
1104 /* Rx handle */
1105 for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
1106 MVXPE_RX_DESC(sc, q, i) = &rxd[i];
1107 MVXPE_RX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_rx_desc) * i;
1108 MVXPE_RX_PKTBUF(sc, q, i) = NULL;
1109 }
1110 mutex_init(&rx->rx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
1111 rx->rx_dma = rx->rx_cpu = 0;
1112 rx->rx_queue_len = rx_default_queue_len[q];
1113 if (rx->rx_queue_len > MVXPE_RX_RING_CNT)
1114 rx->rx_queue_len = MVXPE_RX_RING_CNT;
1115 rx->rx_queue_th_received = rx->rx_queue_len / MVXPE_RXTH_RATIO;
1116 rx->rx_queue_th_free = rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
1117 rx->rx_queue_th_time = (mvTclk / 1000) / 2; /* 0.5 [ms] */
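	/*
	 * rx_queue_th_time is expressed in TCLK cycles: mvTclk / 1000 is
	 * one millisecond worth of cycles, so half of that is 0.5 ms.
	 * As an illustration, a 250 MHz TCLK gives 125000 cycles here.
	 */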
1118
1119 /* Tx handle */
1120 for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
1121 MVXPE_TX_DESC(sc, q, i) = &txd[i];
1122 MVXPE_TX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_tx_desc) * i;
1123 MVXPE_TX_MBUF(sc, q, i) = NULL;
1124 /* Tx handle needs a DMA map for bus_dmamap_load_mbuf() */
1125 if (bus_dmamap_create(sc->sc_dmat,
1126 mvxpbm_chunk_size(sc->sc_bm),
1127 MVXPE_TX_SEGLIMIT, mvxpbm_chunk_size(sc->sc_bm), 0,
1128 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1129 &MVXPE_TX_MAP(sc, q, i))) {
1130 aprint_error_dev(sc->sc_dev,
1131 "can't create dma map (tx ring %d)\n", i);
1132 }
1133 }
1134 mutex_init(&tx->tx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
1135 tx->tx_dma = tx->tx_cpu = 0;
1136 tx->tx_queue_len = tx_default_queue_len[q];
1137 if (tx->tx_queue_len > MVXPE_TX_RING_CNT)
1138 tx->tx_queue_len = MVXPE_TX_RING_CNT;
1139 tx->tx_used = 0;
1140 tx->tx_queue_th_free = tx->tx_queue_len / MVXPE_TXTH_RATIO;
1141 }
1142
1143 STATIC void
1144 mvxpe_ring_flush_queue(struct mvxpe_softc *sc, int q)
1145 {
1146 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1147 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1148 struct mbuf *m;
1149 int i;
1150
1151 KASSERT_RX_MTX(sc, q);
1152 KASSERT_TX_MTX(sc, q);
1153
1154 /* Rx handle */
1155 for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
1156 if (MVXPE_RX_PKTBUF(sc, q, i) == NULL)
1157 continue;
1158 mvxpbm_free_chunk(MVXPE_RX_PKTBUF(sc, q, i));
1159 MVXPE_RX_PKTBUF(sc, q, i) = NULL;
1160 }
1161 rx->rx_dma = rx->rx_cpu = 0;
1162
1163 /* Tx handle */
1164 for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
1165 m = MVXPE_TX_MBUF(sc, q, i);
1166 if (m == NULL)
1167 continue;
1168 MVXPE_TX_MBUF(sc, q, i) = NULL;
1169 bus_dmamap_sync(sc->sc_dmat,
1170 MVXPE_TX_MAP(sc, q, i), 0, m->m_pkthdr.len,
1171 BUS_DMASYNC_POSTWRITE);
1172 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, i));
1173 m_freem(m);
1174 }
1175 tx->tx_dma = tx->tx_cpu = 0;
1176 tx->tx_used = 0;
1177 }
1178
1179 STATIC void
1180 mvxpe_ring_sync_rx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
1181 {
1182 int wrap;
1183
1184 KASSERT_RX_MTX(sc, q);
1185 KASSERT(count > 0 && count <= MVXPE_RX_RING_CNT);
1186 KASSERT(idx >= 0 && idx < MVXPE_RX_RING_CNT);
1187
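	/*
	 * The range may wrap past the end of the ring; if so it is synced
	 * in two pieces.  As an illustration, with a 256-entry ring,
	 * idx = 250 and count = 20, descriptors 0-13 (the wrapped part)
	 * are synced first, then descriptors 250-255.
	 */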
1188 wrap = (idx + count) - MVXPE_RX_RING_CNT;
1189 if (wrap > 0) {
1190 count -= wrap;
1191 KASSERT(count > 0);
1192 bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
1193 0, sizeof(struct mvxpe_rx_desc) * wrap, ops);
1194 }
1195 bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
1196 MVXPE_RX_DESC_OFF(sc, q, idx),
1197 sizeof(struct mvxpe_rx_desc) * count, ops);
1198 }
1199
1200 STATIC void
1201 mvxpe_ring_sync_tx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
1202 {
1203 int wrap = 0;
1204
1205 KASSERT_TX_MTX(sc, q);
1206 KASSERT(count > 0 && count <= MVXPE_TX_RING_CNT);
1207 KASSERT(idx >= 0 && idx < MVXPE_TX_RING_CNT);
1208
1209 wrap = (idx + count) - MVXPE_TX_RING_CNT;
1210 if (wrap > 0) {
1211 count -= wrap;
1212 bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
1213 0, sizeof(struct mvxpe_tx_desc) * wrap, ops);
1214 }
1215 bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
1216 MVXPE_TX_DESC_OFF(sc, q, idx),
1217 sizeof(struct mvxpe_tx_desc) * count, ops);
1218 }
1219
1220 /*
1221 * Rx/Tx Queue Control
1222 */
1223 STATIC int
1224 mvxpe_rx_queue_init(struct ifnet *ifp, int q)
1225 {
1226 struct mvxpe_softc *sc = ifp->if_softc;
1227 uint32_t reg;
1228
1229 KASSERT_RX_MTX(sc, q);
1230 KASSERT(MVXPE_RX_RING_MEM_PA(sc, q) != 0);
1231
1232 /* descriptor address */
1233 MVXPE_WRITE(sc, MVXPE_PRXDQA(q), MVXPE_RX_RING_MEM_PA(sc, q));
1234
1235 /* Rx buffer size and descriptor ring size */
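	/*
	 * The buffer size (and, below, the packet offset) appear to be
	 * taken by the hardware in units of 8 bytes, hence the ">> 3"
	 * conversions.
	 */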
1236 reg = MVXPE_PRXDQS_BUFFERSIZE(mvxpbm_chunk_size(sc->sc_bm) >> 3);
1237 reg |= MVXPE_PRXDQS_DESCRIPTORSQUEUESIZE(MVXPE_RX_RING_CNT);
1238 MVXPE_WRITE(sc, MVXPE_PRXDQS(q), reg);
1239 DPRINTIFNET(ifp, 1, "PRXDQS(%d): %#x\n",
1240 q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
1241
1242 /* Rx packet offset address */
1243 reg = MVXPE_PRXC_PACKETOFFSET(mvxpbm_packet_offset(sc->sc_bm) >> 3);
1244 MVXPE_WRITE(sc, MVXPE_PRXC(q), reg);
1245 DPRINTIFNET(ifp, 1, "PRXC(%d): %#x\n",
1246 q, MVXPE_READ(sc, MVXPE_PRXC(q)));
1247
1248 /* Rx DMA SNOOP */
1249 reg = MVXPE_PRXSNP_SNOOPNOOFBYTES(MVXPE_MRU);
1250 reg |= MVXPE_PRXSNP_L2DEPOSITNOOFBYTES(MVXPE_MRU);
1251 MVXPE_WRITE(sc, MVXPE_PRXSNP(q), reg);
1252
1253 /* if DMA is not working, register is not updated */
1254 KASSERT(MVXPE_READ(sc, MVXPE_PRXDQA(q)) == MVXPE_RX_RING_MEM_PA(sc, q));
1255 return 0;
1256 }
1257
1258 STATIC int
1259 mvxpe_tx_queue_init(struct ifnet *ifp, int q)
1260 {
1261 struct mvxpe_softc *sc = ifp->if_softc;
1262 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1263 uint32_t reg;
1264
1265 KASSERT_TX_MTX(sc, q);
1266 KASSERT(MVXPE_TX_RING_MEM_PA(sc, q) != 0);
1267
1268 /* descriptor address */
1269 MVXPE_WRITE(sc, MVXPE_PTXDQA(q), MVXPE_TX_RING_MEM_PA(sc, q));
1270
1271 /* Tx threshold, and descriptor ring size */
1272 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
1273 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
1274 MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
1275 DPRINTIFNET(ifp, 1, "PTXDQS(%d): %#x\n",
1276 q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));
1277
1278 /* if DMA is not working, register is not updated */
1279 KASSERT(MVXPE_READ(sc, MVXPE_PTXDQA(q)) == MVXPE_TX_RING_MEM_PA(sc, q));
1280 return 0;
1281 }
1282
1283 STATIC int
1284 mvxpe_rx_queue_enable(struct ifnet *ifp, int q)
1285 {
1286 struct mvxpe_softc *sc = ifp->if_softc;
1287 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1288 uint32_t reg;
1289
1290 KASSERT_RX_MTX(sc, q);
1291
1292 /* Set Rx interrupt threshold */
1293 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
1294 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
1295 MVXPE_WRITE(sc, MVXPE_PRXDQTH(q), reg);
1296
1297 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
1298 MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);
1299
1300 /* Unmask RXTX_TH Intr. */
1301 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1302 reg |= MVXPE_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
1303 reg |= MVXPE_PRXTXTI_RDTAQ(q); /* Rx Descriptor Alert */
1304 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1305
1306 /* Enable Rx queue */
1307 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
1308 reg |= MVXPE_RQC_ENQ(q);
1309 MVXPE_WRITE(sc, MVXPE_RQC, reg);
1310
1311 return 0;
1312 }
1313
1314 STATIC int
1315 mvxpe_tx_queue_enable(struct ifnet *ifp, int q)
1316 {
1317 struct mvxpe_softc *sc = ifp->if_softc;
1318 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1319 uint32_t reg;
1320
1321 KASSERT_TX_MTX(sc, q);
1322
1323 /* Set Tx interrupt threshold */
1324 reg = MVXPE_READ(sc, MVXPE_PTXDQS(q));
1325 reg &= ~MVXPE_PTXDQS_TBT_MASK; /* keep queue size */
1326 reg |= MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
1327 MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
1328
1329 /* Unmask RXTX_TH Intr. */
1330 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1331 reg |= MVXPE_PRXTXTI_TBTCQ(q); /* Tx Threshold cross */
1332 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1333
1334 /* Don't update MVXPE_TQC here, there is no packet yet. */
1335 return 0;
1336 }
1337
1338 STATIC void
1339 mvxpe_rx_lockq(struct mvxpe_softc *sc, int q)
1340 {
1341 KASSERT(q >= 0);
1342 KASSERT(q < MVXPE_QUEUE_SIZE);
1343 mutex_enter(&sc->sc_rx_ring[q].rx_ring_mtx);
1344 }
1345
1346 STATIC void
1347 mvxpe_rx_unlockq(struct mvxpe_softc *sc, int q)
1348 {
1349 KASSERT(q >= 0);
1350 KASSERT(q < MVXPE_QUEUE_SIZE);
1351 mutex_exit(&sc->sc_rx_ring[q].rx_ring_mtx);
1352 }
1353
1354 STATIC void
1355 mvxpe_tx_lockq(struct mvxpe_softc *sc, int q)
1356 {
1357 KASSERT(q >= 0);
1358 KASSERT(q < MVXPE_QUEUE_SIZE);
1359 mutex_enter(&sc->sc_tx_ring[q].tx_ring_mtx);
1360 }
1361
1362 STATIC void
1363 mvxpe_tx_unlockq(struct mvxpe_softc *sc, int q)
1364 {
1365 KASSERT(q >= 0);
1366 KASSERT(q < MVXPE_QUEUE_SIZE);
1367 mutex_exit(&sc->sc_tx_ring[q].tx_ring_mtx);
1368 }
1369
1370 /*
1371 * Interrupt Handlers
1372 */
1373 STATIC void
1374 mvxpe_disable_intr(struct mvxpe_softc *sc)
1375 {
1376 MVXPE_WRITE(sc, MVXPE_EUIM, 0);
1377 MVXPE_WRITE(sc, MVXPE_EUIC, 0);
1378 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, 0);
1379 MVXPE_WRITE(sc, MVXPE_PRXTXTIC, 0);
1380 MVXPE_WRITE(sc, MVXPE_PRXTXIM, 0);
1381 MVXPE_WRITE(sc, MVXPE_PRXTXIC, 0);
1382 MVXPE_WRITE(sc, MVXPE_PMIM, 0);
1383 MVXPE_WRITE(sc, MVXPE_PMIC, 0);
1384 MVXPE_WRITE(sc, MVXPE_PIE, 0);
1385 }
1386
1387 STATIC void
1388 mvxpe_enable_intr(struct mvxpe_softc *sc)
1389 {
1390 uint32_t reg;
1391
1392 /* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
1393 reg = MVXPE_READ(sc, MVXPE_PMIM);
1394 reg |= MVXPE_PMI_PHYSTATUSCHNG;
1395 reg |= MVXPE_PMI_LINKCHANGE;
1396 reg |= MVXPE_PMI_IAE;
1397 reg |= MVXPE_PMI_RXOVERRUN;
1398 reg |= MVXPE_PMI_RXCRCERROR;
1399 reg |= MVXPE_PMI_RXLARGEPACKET;
1400 reg |= MVXPE_PMI_TXUNDRN;
1401 #if 0
1402 /*
1403 * The device may raise false interrupts for SERDES even if the device
1404 * is not configured to use SERDES connection.
1405 */
1406 reg |= MVXPE_PMI_PRBSERROR;
1407 reg |= MVXPE_PMI_SRSE;
1408 #else
1409 reg &= ~MVXPE_PMI_PRBSERROR;
1410 reg &= ~MVXPE_PMI_SRSE;
1411 #endif
1412 reg |= MVXPE_PMI_TREQ_MASK;
1413 MVXPE_WRITE(sc, MVXPE_PMIM, reg);
1414
1415 /* Enable summary bits to check all interrupt causes. */
1416 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1417 reg |= MVXPE_PRXTXTI_PMISCICSUMMARY;
1418 reg |= MVXPE_PRXTXTI_PTXERRORSUMMARY;
1419 reg |= MVXPE_PRXTXTI_PRXTXICSUMMARY;
1420 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1421
1422 /* Enable All Queue Interrupt */
1423 reg = MVXPE_READ(sc, MVXPE_PIE);
1424 reg |= MVXPE_PIE_RXPKTINTRPTENB_MASK;
1425 reg |= MVXPE_PIE_TXPKTINTRPTENB_MASK;
1426 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1427 }
1428
1429 STATIC int
1430 mvxpe_rxtxth_intr(void *arg)
1431 {
1432 struct mvxpe_softc *sc = arg;
1433 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1434 uint32_t ic, queues, datum = 0;
1435
1436 DPRINTSC(sc, 2, "got RXTX_TH_Intr\n");
1437 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtxth);
1438
1439 mvxpe_sc_lock(sc);
1440 ic = MVXPE_READ(sc, MVXPE_PRXTXTIC);
1441 if (ic == 0) {
1442 mvxpe_sc_unlock(sc);
1443 return 0;
1444 }
1445 MVXPE_WRITE(sc, MVXPE_PRXTXTIC, ~ic);
1446 datum = datum ^ ic;
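	/* Accumulate the cause bits; they are mixed into rnd(9) below. */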
1447
1448 DPRINTIFNET(ifp, 2, "PRXTXTIC: %#x\n", ic);
1449
1450 /* ack maintenance interrupts first */
1451 if (ic & MVXPE_PRXTXTI_PTXERRORSUMMARY) {
1452 DPRINTIFNET(ifp, 1, "PRXTXTIC: +PTXERRORSUMMARY\n");
1453 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtxth_txerr);
1454 }
1455 if ((ic & MVXPE_PRXTXTI_PMISCICSUMMARY)) {
1456 DPRINTIFNET(ifp, 2, "PRXTXTIC: +PMISCICSUMMARY\n");
1457 mvxpe_misc_intr(sc);
1458 }
1459 if (ic & MVXPE_PRXTXTI_PRXTXICSUMMARY) {
1460 DPRINTIFNET(ifp, 2, "PRXTXTIC: +PRXTXICSUMMARY\n");
1461 mvxpe_rxtx_intr(sc);
1462 }
1463 if (!(ifp->if_flags & IFF_RUNNING)) {
1464 mvxpe_sc_unlock(sc);
1465 return 1;
1466 }
1467
1468 /* RxTxTH interrupt */
1469 queues = MVXPE_PRXTXTI_GET_RBICTAPQ(ic);
1470 if (queues) {
1471 DPRINTIFNET(ifp, 2, "PRXTXTIC: +RXEOF\n");
1472 mvxpe_rx(sc, queues);
1473 }
1474 queues = MVXPE_PRXTXTI_GET_TBTCQ(ic);
1475 if (queues) {
1476 DPRINTIFNET(ifp, 2, "PRXTXTIC: +TBTCQ\n");
1477 mvxpe_tx_complete(sc, queues);
1478 }
1479 queues = MVXPE_PRXTXTI_GET_RDTAQ(ic);
1480 if (queues) {
1481 DPRINTIFNET(ifp, 2, "PRXTXTIC: +RDTAQ\n");
1482 mvxpe_rx_refill(sc, queues);
1483 }
1484 mvxpe_sc_unlock(sc);
1485
1486 if_schedule_deferred_start(ifp);
1487
1488 rnd_add_uint32(&sc->sc_rnd_source, datum);
1489
1490 return 1;
1491 }
1492
1493 STATIC int
1494 mvxpe_misc_intr(void *arg)
1495 {
1496 struct mvxpe_softc *sc = arg;
1497 #ifdef MVXPE_DEBUG
1498 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1499 #endif
1500 uint32_t ic;
1501 uint32_t datum = 0;
1502 int claimed = 0;
1503
1504 DPRINTSC(sc, 2, "got MISC_INTR\n");
1505 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_misc);
1506
1507 KASSERT_SC_MTX(sc);
1508
1509 for (;;) {
1510 ic = MVXPE_READ(sc, MVXPE_PMIC);
1511 ic &= MVXPE_READ(sc, MVXPE_PMIM);
1512 if (ic == 0)
1513 break;
1514 MVXPE_WRITE(sc, MVXPE_PMIC, ~ic);
1515 datum = datum ^ ic;
1516 claimed = 1;
1517
1518 DPRINTIFNET(ifp, 2, "PMIC=%#x\n", ic);
1519 if (ic & MVXPE_PMI_PHYSTATUSCHNG) {
1520 DPRINTIFNET(ifp, 2, "+PHYSTATUSCHNG\n");
1521 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_phystatuschng);
1522 }
1523 if (ic & MVXPE_PMI_LINKCHANGE) {
1524 DPRINTIFNET(ifp, 2, "+LINKCHANGE\n");
1525 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_linkchange);
1526 mvxpe_linkupdate(sc);
1527 }
1528 if (ic & MVXPE_PMI_IAE) {
1529 DPRINTIFNET(ifp, 2, "+IAE\n");
1530 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_iae);
1531 }
1532 if (ic & MVXPE_PMI_RXOVERRUN) {
1533 DPRINTIFNET(ifp, 2, "+RXOVERRUN\n");
1534 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxoverrun);
1535 }
1536 if (ic & MVXPE_PMI_RXCRCERROR) {
1537 DPRINTIFNET(ifp, 2, "+RXCRCERROR\n");
1538 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxcrc);
1539 }
1540 if (ic & MVXPE_PMI_RXLARGEPACKET) {
1541 DPRINTIFNET(ifp, 2, "+RXLARGEPACKET\n");
1542 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxlargepacket);
1543 }
1544 if (ic & MVXPE_PMI_TXUNDRN) {
1545 DPRINTIFNET(ifp, 2, "+TXUNDRN\n");
1546 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txunderrun);
1547 }
1548 if (ic & MVXPE_PMI_PRBSERROR) {
1549 DPRINTIFNET(ifp, 2, "+PRBSERROR\n");
1550 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_prbserr);
1551 }
1552 if (ic & MVXPE_PMI_TREQ_MASK) {
1553 DPRINTIFNET(ifp, 2, "+TREQ\n");
1554 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txreq);
1555 }
1556 }
1557 if (datum)
1558 rnd_add_uint32(&sc->sc_rnd_source, datum);
1559
1560 return claimed;
1561 }
1562
1563 STATIC int
1564 mvxpe_rxtx_intr(void *arg)
1565 {
1566 struct mvxpe_softc *sc = arg;
1567 #ifdef MVXPE_DEBUG
1568 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1569 #endif
1570 uint32_t datum = 0;
1571 uint32_t prxtxic;
1572 int claimed = 0;
1573
1574 DPRINTSC(sc, 2, "got RXTX_Intr\n");
1575 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtx);
1576
1577 KASSERT_SC_MTX(sc);
1578
1579 for (;;) {
1580 prxtxic = MVXPE_READ(sc, MVXPE_PRXTXIC);
1581 prxtxic &= MVXPE_READ(sc, MVXPE_PRXTXIM);
1582 if (prxtxic == 0)
1583 break;
1584 MVXPE_WRITE(sc, MVXPE_PRXTXIC, ~prxtxic);
1585 datum = datum ^ prxtxic;
1586 claimed = 1;
1587
1588 DPRINTSC(sc, 2, "PRXTXIC: %#x\n", prxtxic);
1589
1590 if (prxtxic & MVXPE_PRXTXI_RREQ_MASK) {
1591 DPRINTIFNET(ifp, 1, "Rx Resource Error.\n");
1592 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rreq);
1593 }
1594 if (prxtxic & MVXPE_PRXTXI_RPQ_MASK) {
1595 DPRINTIFNET(ifp, 1, "Rx Packet in Queue.\n");
1596 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rpq);
1597 }
1598 if (prxtxic & MVXPE_PRXTXI_TBRQ_MASK) {
1599 DPRINTIFNET(ifp, 1, "Tx Buffer Return.\n");
1600 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_tbrq);
1601 }
1602 if (prxtxic & MVXPE_PRXTXI_PRXTXTHICSUMMARY) {
1603 DPRINTIFNET(ifp, 1, "PRXTXTHIC Summary\n");
1604 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rxtxth);
1605 }
1606 if (prxtxic & MVXPE_PRXTXI_PTXERRORSUMMARY) {
1607 DPRINTIFNET(ifp, 1, "PTXERROR Summary\n");
1608 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_txerr);
1609 }
1610 if (prxtxic & MVXPE_PRXTXI_PMISCICSUMMARY) {
1611 DPRINTIFNET(ifp, 1, "PMISCIC Summary\n");
1612 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_misc);
1613 }
1614 }
1615 if (datum)
1616 rnd_add_uint32(&sc->sc_rnd_source, datum);
1617
1618 return claimed;
1619 }
1620
1621 STATIC void
1622 mvxpe_tick(void *arg)
1623 {
1624 struct mvxpe_softc *sc = arg;
1625 struct mii_data *mii = &sc->sc_mii;
1626
1627 mvxpe_sc_lock(sc);
1628
1629 mii_tick(mii);
1630 mii_pollstat(&sc->sc_mii);
1631
1632 /* read MIB registers (cleared by read) */
1633 mvxpe_update_mib(sc);
1634
1635 /* read counter registers (cleared by read) */
1636 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pdfc,
1637 MVXPE_READ(sc, MVXPE_PDFC));
1638 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pofc,
1639 MVXPE_READ(sc, MVXPE_POFC));
1640 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txbadfcs,
1641 MVXPE_READ(sc, MVXPE_TXBADFCS));
1642 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txdropped,
1643 MVXPE_READ(sc, MVXPE_TXDROPPED));
1644 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_lpic,
1645 MVXPE_READ(sc, MVXPE_LPIC));
1646
1647 mvxpe_sc_unlock(sc);
1648
1649 callout_schedule(&sc->sc_tick_ch, hz);
1650 }
1651
1652
1653 /*
1654 * struct ifnet and mii callbacks
1655 */
1656 STATIC void
1657 mvxpe_start(struct ifnet *ifp)
1658 {
1659 struct mvxpe_softc *sc = ifp->if_softc;
1660 struct mbuf *m;
1661 int q;
1662
1663 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) {
1664 DPRINTIFNET(ifp, 1, "not running\n");
1665 return;
1666 }
1667
1668 mvxpe_sc_lock(sc);
1669 if (!MVXPE_IS_LINKUP(sc)) {
1670 /* If Link is DOWN, can't start TX */
1671 DPRINTIFNET(ifp, 1, "link fail\n");
1672 for (;;) {
1673 /*
1674 * Discard all stale packets; they may confuse DAD, ARP, or
1675 * timer-based protocols.
1676 */
1677 IFQ_DEQUEUE(&ifp->if_snd, m);
1678 if (m == NULL)
1679 break;
1680 m_freem(m);
1681 }
1682 mvxpe_sc_unlock(sc);
1683 return;
1684 }
1685 for (;;) {
1686 /*
1687 * Don't use IFQ_POLL(); there is a locking problem between
1688 * IFQ_POLL and IFQ_DEQUEUE on an SMP-enabled networking
1689 * stack.
1690 */
1691 IFQ_DEQUEUE(&ifp->if_snd, m);
1692 if (m == NULL)
1693 break;
1694
1695 q = mvxpe_tx_queue_select(sc, m);
1696 if (q < 0)
1697 break;
1698 /* mutex is held in mvxpe_tx_queue_select() */
1699
1700 if (mvxpe_tx_queue(sc, m, q) != 0) {
1701 DPRINTIFNET(ifp, 1, "cannot add packet to tx ring\n");
1702 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txerr);
1703 mvxpe_tx_unlockq(sc, q);
1704 break;
1705 }
1706 mvxpe_tx_unlockq(sc, q);
1707 KASSERT(sc->sc_tx_ring[q].tx_used >= 0);
1708 KASSERT(sc->sc_tx_ring[q].tx_used <=
1709 sc->sc_tx_ring[q].tx_queue_len);
1710 DPRINTIFNET(ifp, 1, "a packet is added to tx ring\n");
1711 sc->sc_tx_pending++;
1712 if_statinc(ifp, if_opackets);
1713 ifp->if_timer = 1;
1714 sc->sc_wdogsoft = 1;
1715 bpf_mtap(ifp, m, BPF_D_OUT);
1716 }
1717 mvxpe_sc_unlock(sc);
1718
1719 return;
1720 }
1721
1722 STATIC int
1723 mvxpe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1724 {
1725 struct mvxpe_softc *sc = ifp->if_softc;
1726 int error = 0;
1727
1728 switch (cmd) {
1729 default:
1730 DPRINTIFNET(ifp, 2, "mvxpe_ioctl ETHER\n");
1731 error = ether_ioctl(ifp, cmd, data);
1732 if (error == ENETRESET) {
1733 if (ifp->if_flags & IFF_RUNNING) {
1734 mvxpe_sc_lock(sc);
1735 mvxpe_filter_setup(sc);
1736 mvxpe_sc_unlock(sc);
1737 }
1738 error = 0;
1739 }
1740 break;
1741 }
1742
1743 return error;
1744 }
1745
1746 STATIC int
1747 mvxpe_init(struct ifnet *ifp)
1748 {
1749 struct mvxpe_softc *sc = ifp->if_softc;
1750 struct mii_data *mii = &sc->sc_mii;
1751 uint32_t reg;
1752 int q;
1753
1754 mvxpe_sc_lock(sc);
1755
1756 /* Start DMA Engine */
1757 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
1758 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
1759 MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
1760
1761 /* Enable port */
1762 reg = MVXPE_READ(sc, MVXPE_PMACC0);
1763 reg |= MVXPE_PMACC0_PORTEN;
1764 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1765
1766 /* Link up */
1767 mvxpe_linkup(sc);
1768
1769 /* Enable all queues and the interrupts of each queue */
1770 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1771 mvxpe_rx_lockq(sc, q);
1772 mvxpe_rx_queue_enable(ifp, q);
1773 mvxpe_rx_queue_refill(sc, q);
1774 mvxpe_rx_unlockq(sc, q);
1775
1776 mvxpe_tx_lockq(sc, q);
1777 mvxpe_tx_queue_enable(ifp, q);
1778 mvxpe_tx_unlockq(sc, q);
1779 }
1780
1781 /* Enable interrupt */
1782 mvxpe_enable_intr(sc);
1783
1784 /* Start the tick timer */
1785 callout_schedule(&sc->sc_tick_ch, hz);
1786
1787 /* Media check */
1788 mii_mediachg(mii);
1789
1790 ifp->if_flags |= IFF_RUNNING;
1791 ifp->if_flags &= ~IFF_OACTIVE;
1792
1793 mvxpe_sc_unlock(sc);
1794 return 0;
1795 }
1796
1797 /* ARGSUSED */
1798 STATIC void
1799 mvxpe_stop(struct ifnet *ifp, int disable)
1800 {
1801 struct mvxpe_softc *sc = ifp->if_softc;
1802 uint32_t reg;
1803 int q, cnt;
1804
1805 DPRINTIFNET(ifp, 1, "stop device dma and interrupts.\n");
1806
1807 mvxpe_sc_lock(sc);
1808
1809 callout_stop(&sc->sc_tick_ch);
1810
1811 /* Link down */
1812 mvxpe_linkdown(sc);
1813
1814 /* Disable Rx interrupt */
1815 reg = MVXPE_READ(sc, MVXPE_PIE);
1816 reg &= ~MVXPE_PIE_RXPKTINTRPTENB_MASK;
1817 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1818
1819 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1820 reg &= ~MVXPE_PRXTXTI_RBICTAPQ_MASK;
1821 reg &= ~MVXPE_PRXTXTI_RDTAQ_MASK;
1822 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1823
1824 /* Wait for all Rx activity to terminate. */
1825 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
1826 reg = MVXPE_RQC_DIS(reg);
1827 MVXPE_WRITE(sc, MVXPE_RQC, reg);
1828 cnt = 0;
1829 do {
1830 if (cnt >= RX_DISABLE_TIMEOUT) {
1831 aprint_error_ifnet(ifp,
1832 "timeout for RX stopped. rqc 0x%x\n", reg);
1833 break;
1834 }
1835 cnt++;
1836 reg = MVXPE_READ(sc, MVXPE_RQC);
1837 } while (reg & MVXPE_RQC_EN_MASK);
1838
1839 /* Wait for all Tx activity to terminate. */
1840 reg = MVXPE_READ(sc, MVXPE_PIE);
1841 reg &= ~MVXPE_PIE_TXPKTINTRPTENB_MASK;
1842 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1843
1844 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1845 reg &= ~MVXPE_PRXTXTI_TBTCQ_MASK;
1846 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1847
1848 reg = MVXPE_READ(sc, MVXPE_TQC) & MVXPE_TQC_EN_MASK;
1849 reg = MVXPE_TQC_DIS(reg);
1850 MVXPE_WRITE(sc, MVXPE_TQC, reg);
1851 cnt = 0;
1852 do {
1853 if (cnt >= TX_DISABLE_TIMEOUT) {
1854 aprint_error_ifnet(ifp,
1855 "timeout for TX stopped. tqc 0x%x\n", reg);
1856 break;
1857 }
1858 cnt++;
1859 reg = MVXPE_READ(sc, MVXPE_TQC);
1860 } while (reg & MVXPE_TQC_EN_MASK);
1861
1862 /* Wait until the Tx FIFO is empty */
1863 cnt = 0;
1864 do {
1865 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
1866 aprint_error_ifnet(ifp,
1867 "timeout for TX FIFO drained. ps0 0x%x\n", reg);
1868 break;
1869 }
1870 cnt++;
1871 reg = MVXPE_READ(sc, MVXPE_PS0);
1872 } while (!(reg & MVXPE_PS0_TXFIFOEMP) && (reg & MVXPE_PS0_TXINPROG));
1873
1874 /* Reset the MAC Port Enable bit */
1875 reg = MVXPE_READ(sc, MVXPE_PMACC0);
1876 reg &= ~MVXPE_PMACC0_PORTEN;
1877 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1878
1879 /* Disable each queue */
1880 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1881 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1882
1883 mvxpe_rx_lockq(sc, q);
1884 mvxpe_tx_lockq(sc, q);
1885
1886 /* Disable Rx packet buffer refill request */
1887 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
1888 reg |= MVXPE_PRXDQTH_NODT(0);
1889 MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);
1890
1891 if (disable) {
1892 /*
1893 * Hold Reset state of DMA Engine
1894 * (must write 0x0 to restart it)
1895 */
1896 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
1897 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);
1898 mvxpe_ring_flush_queue(sc, q);
1899 }
1900
1901 mvxpe_tx_unlockq(sc, q);
1902 mvxpe_rx_unlockq(sc, q);
1903 }
1904
1905 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1906
1907 mvxpe_sc_unlock(sc);
1908 }
1909
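/*
 * Watchdog: reclaim completed Tx descriptors first, since a Tx
 * completion interrupt may have been lost.  If descriptors are still
 * outstanding, either re-kick the queue (soft watchdog) or reset the
 * link and reinitialize the interface.
 */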
1910 STATIC void
1911 mvxpe_watchdog(struct ifnet *ifp)
1912 {
1913 struct mvxpe_softc *sc = ifp->if_softc;
1914 int q;
1915
1916 mvxpe_sc_lock(sc);
1917
1918 /*
1919 * Reclaim first as there is a possibility of losing Tx completion
1920 * interrupts.
1921 */
1922 mvxpe_tx_complete(sc, 0xff);
1923 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1924 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1925
1926 if (tx->tx_dma != tx->tx_cpu) {
1927 if (sc->sc_wdogsoft) {
1928 /*
1929 * There is a race condition between the CPU and the
1930 * DMA engine: when the DMA engine reaches the end of
1931 * the queue, it clears the MVXPE_TQC_ENQ bit.
1932 * XXX: how about enhanced mode?
1933 */
1934 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
1935 ifp->if_timer = 5;
1936 sc->sc_wdogsoft = 0;
1937 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_wdogsoft);
1938 } else {
1939 aprint_error_ifnet(ifp, "watchdog timeout\n");
1940 if_statinc(ifp, if_oerrors);
1941 mvxpe_linkreset(sc);
1942 mvxpe_sc_unlock(sc);
1943
1944 /* trigger reinitialize sequence */
1945 mvxpe_stop(ifp, 1);
1946 mvxpe_init(ifp);
1947
1948 mvxpe_sc_lock(sc);
1949 }
1950 }
1951 }
1952 mvxpe_sc_unlock(sc);
1953 }
1954
1955 STATIC int
1956 mvxpe_ifflags_cb(struct ethercom *ec)
1957 {
1958 struct ifnet *ifp = &ec->ec_if;
1959 struct mvxpe_softc *sc = ifp->if_softc;
1960 u_short change = ifp->if_flags ^ sc->sc_if_flags;
1961
1962 mvxpe_sc_lock(sc);
1963
1964 if (change != 0)
1965 sc->sc_if_flags = ifp->if_flags;
1966
1967 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
1968 mvxpe_sc_unlock(sc);
1969 return ENETRESET;
1970 }
1971
1972 if ((change & IFF_PROMISC) != 0)
1973 mvxpe_filter_setup(sc);
1974
1975 if ((change & IFF_UP) != 0)
1976 mvxpe_linkreset(sc);
1977
1978 mvxpe_sc_unlock(sc);
1979 return 0;
1980 }
1981
1982 STATIC int
1983 mvxpe_mediachange(struct ifnet *ifp)
1984 {
1985 return ether_mediachange(ifp);
1986 }
1987
1988 STATIC void
1989 mvxpe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1990 {
1991 ether_mediastatus(ifp, ifmr);
1992 }
1993
1994 /*
1995 * Link State Notify
1996 */
1997 STATIC void
mvxpe_linkupdate(struct mvxpe_softc *sc)
1998 {
1999 int linkup; /* bool */
2000
2001 KASSERT_SC_MTX(sc);
2002
2003 /* tell miibus */
2004 mii_pollstat(&sc->sc_mii);
2005
2006 /* syslog */
2007 linkup = MVXPE_IS_LINKUP(sc);
2008 if (sc->sc_linkstate == linkup)
2009 return;
2010
2011 #ifdef DEBUG
2012 log(LOG_DEBUG,
2013 "%s: link %s\n", device_xname(sc->sc_dev), linkup ? "up" : "down");
2014 #endif
2015 if (linkup)
2016 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up);
2017 else
2018 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_down);
2019
2020 sc->sc_linkstate = linkup;
2021 }
2022
2023 STATIC void
2024 mvxpe_linkup(struct mvxpe_softc *sc)
2025 {
2026 uint32_t reg;
2027
2028 KASSERT_SC_MTX(sc);
2029
2030 /* set EEE parameters */
2031 reg = MVXPE_READ(sc, MVXPE_LPIC1);
2032 if (sc->sc_cf.cf_lpi)
2033 reg |= MVXPE_LPIC1_LPIRE;
2034 else
2035 reg &= ~MVXPE_LPIC1_LPIRE;
2036 MVXPE_WRITE(sc, MVXPE_LPIC1, reg);
2037
2038 /* set auto-negotiation parameters */
2039 reg = MVXPE_READ(sc, MVXPE_PANC);
2040 if (sc->sc_cf.cf_fc) {
2041 /* flow control negotiation */
2042 reg |= MVXPE_PANC_PAUSEADV;
2043 reg |= MVXPE_PANC_ANFCEN;
2044 }
2045 else {
2046 reg &= ~MVXPE_PANC_PAUSEADV;
2047 reg &= ~MVXPE_PANC_ANFCEN;
2048 }
2049 reg &= ~MVXPE_PANC_FORCELINKFAIL;
2050 reg &= ~MVXPE_PANC_FORCELINKPASS;
2051 MVXPE_WRITE(sc, MVXPE_PANC, reg);
2052
2053 mii_mediachg(&sc->sc_mii);
2054 }
2055
2056 STATIC void
2057 mvxpe_linkdown(struct mvxpe_softc *sc)
2058 {
2059 struct mii_softc *mii;
2060 uint32_t reg;
2061
2062 KASSERT_SC_MTX(sc);
2063 return;
2064
2065 reg = MVXPE_READ(sc, MVXPE_PANC);
2066 reg |= MVXPE_PANC_FORCELINKFAIL;
2067 reg &= MVXPE_PANC_FORCELINKPASS;
2068 MVXPE_WRITE(sc, MVXPE_PANC, reg);
2069
2070 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2071 if (mii)
2072 mii_phy_down(mii);
2073 }
2074
2075 STATIC void
2076 mvxpe_linkreset(struct mvxpe_softc *sc)
2077 {
2078 struct mii_softc *mii;
2079
2080 KASSERT_SC_MTX(sc);
2081
2082 /* force reset PHY first */
2083 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2084 if (mii)
2085 mii_phy_reset(mii);
2086
2087 /* reinit MAC and PHY */
2088 mvxpe_linkdown(sc);
2089 if ((sc->sc_if_flags & IFF_UP) != 0)
2090 mvxpe_linkup(sc);
2091 }
2092
2093 /*
2094 * Tx Subroutines
2095 */
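/*
 * Select a Tx queue for the mbuf and return with that queue's lock held;
 * only queue 0 is used for now and the caller releases the lock.
 */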
2096 STATIC int
2097 mvxpe_tx_queue_select(struct mvxpe_softc *sc, struct mbuf *m)
2098 {
2099 int q = 0;
2100
2101 /* XXX: get attribute from ALTQ framework? */
2102 mvxpe_tx_lockq(sc, q);
2103 return 0;
2104 }
2105
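/*
 * Enqueue one mbuf on Tx queue q (queue lock held): map the mbuf with
 * the dmamap of the first free descriptor, fill one descriptor per DMA
 * segment (the first carries the checksum command bits and the F flag,
 * the last the L flag), then notify the hardware through PTXSU and
 * restart the queue via TQC.
 */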
2106 STATIC int
2107 mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q)
2108 {
2109 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2110 bus_dma_segment_t *txsegs;
2111 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2112 struct mvxpe_tx_desc *t = NULL;
2113 uint32_t ptxsu;
2114 int txnsegs;
2115 int start, used;
2116 int i;
2117
2118 KASSERT_TX_MTX(sc, q);
2119 KASSERT(tx->tx_used >= 0);
2120 KASSERT(tx->tx_used <= tx->tx_queue_len);
2121
2122 /* load mbuf using dmamap of 1st descriptor */
2123 if (bus_dmamap_load_mbuf(sc->sc_dmat,
2124 MVXPE_TX_MAP(sc, q, tx->tx_cpu), m, BUS_DMA_NOWAIT) != 0) {
2125 m_freem(m);
2126 return ENOBUFS;
2127 }
2128 txsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_segs;
2129 txnsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_nsegs;
2130 if (txnsegs <= 0 || (txnsegs + tx->tx_used) > tx->tx_queue_len) {
2131 /* not enough free descriptors, or the mbuf is broken */
2132 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, tx->tx_cpu));
2133 m_freem(m);
2134 return ENOBUFS;
2135 }
2136 DPRINTSC(sc, 2, "send packet %p descriptor %d\n", m, tx->tx_cpu);
2137 KASSERT(MVXPE_TX_MBUF(sc, q, tx->tx_cpu) == NULL);
2138
2139 /* remember mbuf using 1st descriptor */
2140 MVXPE_TX_MBUF(sc, q, tx->tx_cpu) = m;
2141 bus_dmamap_sync(sc->sc_dmat,
2142 MVXPE_TX_MAP(sc, q, tx->tx_cpu), 0, m->m_pkthdr.len,
2143 BUS_DMASYNC_PREWRITE);
2144
2145 /* load to tx descriptors */
2146 start = tx->tx_cpu;
2147 used = 0;
2148 for (i = 0; i < txnsegs; i++) {
2149 if (__predict_false(txsegs[i].ds_len == 0))
2150 continue;
2151 t = MVXPE_TX_DESC(sc, q, tx->tx_cpu);
2152 t->command = 0;
2153 t->l4ichk = 0;
2154 t->flags = 0;
2155 if (i == 0) {
2156 /* 1st descriptor */
2157 t->command |= MVXPE_TX_CMD_W_PACKET_OFFSET(0);
2158 t->command |= MVXPE_TX_CMD_PADDING;
2159 t->command |= MVXPE_TX_CMD_F;
2160 mvxpe_tx_set_csumflag(ifp, t, m);
2161 }
2162 t->bufptr = txsegs[i].ds_addr;
2163 t->bytecnt = txsegs[i].ds_len;
2164 tx->tx_cpu = tx_counter_adv(tx->tx_cpu, 1);
2165 tx->tx_used++;
2166 used++;
2167 }
2168 /* t is last descriptor here */
2169 KASSERT(t != NULL);
2170 t->command |= MVXPE_TX_CMD_L;
2171
2172 DPRINTSC(sc, 2, "queue %d, %d descriptors used\n", q, used);
2173 #ifdef MVXPE_DEBUG
2174 if (mvxpe_debug > 2)
2175 for (i = start; i <= tx->tx_cpu; i++) {
2176 t = MVXPE_TX_DESC(sc, q, i);
2177 mvxpe_dump_txdesc(t, i);
2178 }
2179 #endif
2180 mvxpe_ring_sync_tx(sc, q, start, used,
2181 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2182
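/*
 * Tell the hardware how many descriptors were written.  The PTXSU
 * NOWD field holds at most 255, so larger updates are split into
 * several register writes.
 */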
2183 while (used > 255) {
2184 ptxsu = MVXPE_PTXSU_NOWD(255);
2185 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2186 used -= 255;
2187 }
2188 if (used > 0) {
2189 ptxsu = MVXPE_PTXSU_NOWD(used);
2190 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2191 }
2192 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
2193
2194 DPRINTSC(sc, 2,
2195 "PTXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQA(q)));
2196 DPRINTSC(sc, 2,
2197 "PTXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));
2198 DPRINTSC(sc, 2,
2199 "PTXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXS(q)));
2200 DPRINTSC(sc, 2,
2201 "PTXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PTXDI(q)));
2202 DPRINTSC(sc, 2, "TQC: %#x\n", MVXPE_READ(sc, MVXPE_TQC));
2203 DPRINTIFNET(ifp, 2,
2204 "Tx: tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
2205 tx->tx_cpu, tx->tx_dma, tx->tx_used);
2206 return 0;
2207 }
2208
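/*
 * Translate the mbuf's checksum-offload requests (M_CSUM_*) into Tx
 * descriptor command bits: L3 protocol, IP header length/offset and
 * the L4 (TCP/UDP) checksum generation mode.
 */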
2209 STATIC void
2210 mvxpe_tx_set_csumflag(struct ifnet *ifp,
2211 struct mvxpe_tx_desc *t, struct mbuf *m)
2212 {
2213 struct ether_header *eh;
2214 int csum_flags;
2215 uint32_t iphl = 0, ipoff = 0;
2216
2217 csum_flags = ifp->if_csum_flags_tx & m->m_pkthdr.csum_flags;
2218
2219 eh = mtod(m, struct ether_header *);
2220 switch (ntohs(eh->ether_type)) {
2221 case ETHERTYPE_IP:
2222 case ETHERTYPE_IPV6:
2223 ipoff = ETHER_HDR_LEN;
2224 break;
2225 case ETHERTYPE_VLAN:
2226 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2227 break;
2228 }
2229
2230 if (csum_flags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2231 iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2232 t->command |= MVXPE_TX_CMD_L3_IP4;
2233 }
2234 else if (csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2235 iphl = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2236 t->command |= MVXPE_TX_CMD_L3_IP6;
2237 }
2238 else {
2239 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
2240 return;
2241 }
2242
2244 /* L3 */
2245 if (csum_flags & M_CSUM_IPv4) {
2246 t->command |= MVXPE_TX_CMD_IP4_CHECKSUM;
2247 }
2248
2249 /* L4 */
2250 if ((csum_flags &
2251 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) == 0) {
2252 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
2253 }
2254 else if (csum_flags & M_CSUM_TCPv4) {
2255 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2256 t->command |= MVXPE_TX_CMD_L4_TCP;
2257 }
2258 else if (csum_flags & M_CSUM_UDPv4) {
2259 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2260 t->command |= MVXPE_TX_CMD_L4_UDP;
2261 }
2262 else if (csum_flags & M_CSUM_TCPv6) {
2263 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2264 t->command |= MVXPE_TX_CMD_L4_TCP;
2265 }
2266 else if (csum_flags & M_CSUM_UDPv6) {
2267 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2268 t->command |= MVXPE_TX_CMD_L4_UDP;
2269 }
2270
2271 t->l4ichk = 0;
2272 t->command |= MVXPE_TX_CMD_IP_HEADER_LEN(iphl >> 2);
2273 t->command |= MVXPE_TX_CMD_L3_OFFSET(ipoff);
2274 }
2275
2276 STATIC void
2277 mvxpe_tx_complete(struct mvxpe_softc *sc, uint32_t queues)
2278 {
2279 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2280 int q;
2281
2282 DPRINTSC(sc, 2, "tx completed.\n");
2283
2284 KASSERT_SC_MTX(sc);
2285
2286 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2287 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2288 continue;
2289 mvxpe_tx_lockq(sc, q);
2290 mvxpe_tx_queue_complete(sc, q);
2291 mvxpe_tx_unlockq(sc, q);
2292 }
2293 KASSERT(sc->sc_tx_pending >= 0);
2294 if (sc->sc_tx_pending == 0)
2295 ifp->if_timer = 0;
2296 }
2297
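/*
 * Reclaim transmitted descriptors on queue q (queue lock held): read the
 * transmitted buffer count from PTXS, unload and free the mbufs attached
 * to the first descriptor of each packet, advance tx_dma, and acknowledge
 * the released buffers through PTXSU.
 */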
2298 STATIC void
2299 mvxpe_tx_queue_complete(struct mvxpe_softc *sc, int q)
2300 {
2301 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2302 struct mvxpe_tx_desc *t;
2303 struct mbuf *m;
2304 uint32_t ptxs, ptxsu, ndesc;
2305 int i;
2306
2307 KASSERT_TX_MTX(sc, q);
2308
2309 ptxs = MVXPE_READ(sc, MVXPE_PTXS(q));
2310 ndesc = MVXPE_PTXS_GET_TBC(ptxs);
2311 if (ndesc == 0)
2312 return;
2313
2314 DPRINTSC(sc, 2,
2315 "tx complete queue %d, %d descriptors.\n", q, ndesc);
2316
2317 mvxpe_ring_sync_tx(sc, q, tx->tx_dma, ndesc,
2318 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2319
2320 for (i = 0; i < ndesc; i++) {
2321 int error = 0;
2322
2323 t = MVXPE_TX_DESC(sc, q, tx->tx_dma);
2324 if (t->flags & MVXPE_TX_F_ES) {
2325 DPRINTSC(sc, 1,
2326 "tx error queue %d desc %d\n",
2327 q, tx->tx_dma);
2328 switch (t->flags & MVXPE_TX_F_EC_MASK) {
2329 case MVXPE_TX_F_EC_LC:
2330 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_lc);
2331 break;
2332 case MVXPE_TX_F_EC_UR:
2333 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_ur);
2334 break;
2335 case MVXPE_TX_F_EC_RL:
2336 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_rl);
2337 break;
2338 default:
2339 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_oth);
2340 break;
2341 }
2342 error = 1;
2343 }
2344 m = MVXPE_TX_MBUF(sc, q, tx->tx_dma);
2345 if (m != NULL) {
2346 KASSERT((t->command & MVXPE_TX_CMD_F) != 0);
2347 MVXPE_TX_MBUF(sc, q, tx->tx_dma) = NULL;
2348 bus_dmamap_sync(sc->sc_dmat,
2349 MVXPE_TX_MAP(sc, q, tx->tx_dma), 0, m->m_pkthdr.len,
2350 BUS_DMASYNC_POSTWRITE);
2351 bus_dmamap_unload(sc->sc_dmat,
2352 MVXPE_TX_MAP(sc, q, tx->tx_dma));
2353 m_freem(m);
2354 sc->sc_tx_pending--;
2355 }
2356 else
2357 KASSERT((t->flags & MVXPE_TX_CMD_F) == 0);
2358 tx->tx_dma = tx_counter_adv(tx->tx_dma, 1);
2359 tx->tx_used--;
2360 if (error)
2361 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txqe[q]);
2362 else
2363 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txq[q]);
2364 }
2365 KASSERT(tx->tx_used >= 0);
2366 KASSERT(tx->tx_used <= tx->tx_queue_len);
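/*
 * Acknowledge the released buffers; as with NOWD, the PTXSU NORB
 * field takes at most 255 per write.
 */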
2367 while (ndesc > 255) {
2368 ptxsu = MVXPE_PTXSU_NORB(255);
2369 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2370 ndesc -= 255;
2371 }
2372 if (ndesc > 0) {
2373 ptxsu = MVXPE_PTXSU_NORB(ndesc);
2374 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2375 }
2376 DPRINTSC(sc, 2,
2377 "Tx complete q %d, tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
2378 q, tx->tx_cpu, tx->tx_dma, tx->tx_used);
2379 }
2380
2381 /*
2382 * Rx Subroutines
2383 */
2384 STATIC void
2385 mvxpe_rx(struct mvxpe_softc *sc, uint32_t queues)
2386 {
2387 int q, npkt;
2388
2389 KASSERT_SC_MTX(sc);
2390
2391 while ((npkt = mvxpe_rx_queue_select(sc, queues, &q))) {
2392 /* mutex is held by rx_queue_select */
2393 mvxpe_rx_queue(sc, q, npkt);
2394 mvxpe_rx_unlockq(sc, q);
2395 }
2396 }
2397
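/*
 * Process npkt received descriptors on queue q (queue lock held): check
 * the error and first/last status bits, turn the BM chunk into an mbuf,
 * set the Rx checksum flags and hand the packet to the network stack,
 * then acknowledge the processed descriptors through PRXSU.
 */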
2398 STATIC void
2399 mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt)
2400 {
2401 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2402 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2403 struct mvxpe_rx_desc *r;
2404 struct mvxpbm_chunk *chunk;
2405 struct mbuf *m;
2406 uint32_t prxsu;
2407 int error = 0;
2408 int i;
2409
2410 KASSERT_RX_MTX(sc, q);
2411
2412 mvxpe_ring_sync_rx(sc, q, rx->rx_dma, npkt,
2413 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2414
2415 for (i = 0; i < npkt; i++) {
2416 /* get descriptor and packet */
2417 chunk = MVXPE_RX_PKTBUF(sc, q, rx->rx_dma);
2418 MVXPE_RX_PKTBUF(sc, q, rx->rx_dma) = NULL;
2419 r = MVXPE_RX_DESC(sc, q, rx->rx_dma);
2420 mvxpbm_dmamap_sync(chunk, r->bytecnt, BUS_DMASYNC_POSTREAD);
2421
2422 /* check errors */
2423 if (r->status & MVXPE_RX_ES) {
2424 switch (r->status & MVXPE_RX_EC_MASK) {
2425 case MVXPE_RX_EC_CE:
2426 DPRINTIFNET(ifp, 1, "CRC error\n");
2427 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_ce);
2428 break;
2429 case MVXPE_RX_EC_OR:
2430 DPRINTIFNET(ifp, 1, "Rx FIFO overrun\n");
2431 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_or);
2432 break;
2433 case MVXPE_RX_EC_MF:
2434 DPRINTIFNET(ifp, 1, "Rx too large frame\n");
2435 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_mf);
2436 break;
2437 case MVXPE_RX_EC_RE:
2438 DPRINTIFNET(ifp, 1, "Rx resource error\n");
2439 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_re);
2440 break;
2441 }
2442 error = 1;
2443 goto rx_done;
2444 }
2445 if (!(r->status & MVXPE_RX_F) || !(r->status & MVXPE_RX_L)) {
2446 DPRINTIFNET(ifp, 1, "not support scatter buf\n");
2447 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_scat);
2448 error = 1;
2449 goto rx_done;
2450 }
2451
2452 if (chunk == NULL) {
2453 device_printf(sc->sc_dev,
2454 "got rx interrupt, but no chunk\n");
2455 error = 1;
2456 goto rx_done;
2457 }
2458
2459 /* extract packet buffer */
2460 if (mvxpbm_init_mbuf_hdr(chunk) != 0) {
2461 error = 1;
2462 goto rx_done;
2463 }
2464 m = chunk->m;
2465 m_set_rcvif(m, ifp);
2466 m->m_pkthdr.len = m->m_len = r->bytecnt - ETHER_CRC_LEN;
2467 m_adj(m, MVXPE_HWHEADER_SIZE); /* strip the HW header */
2468 mvxpe_rx_set_csumflag(ifp, r, m);
2469 if_percpuq_enqueue(ifp->if_percpuq, m);
2470 chunk = NULL; /* the BM chunk goes to networking stack now */
2471 rx_done:
2472 if (chunk) {
2473 /* rx error. just return the chunk to BM. */
2474 mvxpbm_free_chunk(chunk);
2475 }
2476 if (error)
2477 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxqe[q]);
2478 else
2479 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxq[q]);
2480 rx->rx_dma = rx_counter_adv(rx->rx_dma, 1);
2481 }
2482 /* DMA status update */
2483 DPRINTSC(sc, 2, "%d packets received from queue %d\n", npkt, q);
2484 while (npkt > 255) {
2485 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2486 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2487 npkt -= 255;
2488 }
2489 if (npkt > 0) {
2490 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(npkt);
2491 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2492 }
2493
2494 DPRINTSC(sc, 2,
2495 "PRXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQA(q)));
2496 DPRINTSC(sc, 2,
2497 "PRXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
2498 DPRINTSC(sc, 2,
2499 "PRXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXS(q)));
2500 DPRINTSC(sc, 2,
2501 "PRXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PRXDI(q)));
2502 DPRINTSC(sc, 2, "RQC: %#x\n", MVXPE_READ(sc, MVXPE_RQC));
2503 DPRINTIFNET(ifp, 2, "Rx: rx_cpu = %d, rx_dma = %d\n",
2504 rx->rx_cpu, rx->rx_dma);
2505 }
2506
2507 STATIC int
2508 mvxpe_rx_queue_select(struct mvxpe_softc *sc, uint32_t queues, int *queue)
2509 {
2510 uint32_t prxs, npkt;
2511 int q;
2512
2513 KASSERT_SC_MTX(sc);
2514 KASSERT(queue != NULL);
2515 DPRINTSC(sc, 2, "selecting rx queue\n");
2516
2517 for (q = MVXPE_QUEUE_SIZE - 1; q >= 0; q--) {
2518 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2519 continue;
2520
2521 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2522 npkt = MVXPE_PRXS_GET_ODC(prxs);
2523 if (npkt == 0)
2524 continue;
2525
2526 DPRINTSC(sc, 2,
2527 "queue %d selected: prxs=%#x, %u packet received.\n",
2528 q, prxs, npkt);
2529 *queue = q;
2530 mvxpe_rx_lockq(sc, q);
2531 return npkt;
2532 }
2533
2534 return 0;
2535 }
2536
2537 STATIC void
2538 mvxpe_rx_refill(struct mvxpe_softc *sc, uint32_t queues)
2539 {
2540 int q;
2541
2542 KASSERT_SC_MTX(sc);
2543
2544 /* XXX: check rx bit array */
2545 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2546 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2547 continue;
2548
2549 mvxpe_rx_lockq(sc, q);
2550 mvxpe_rx_queue_refill(sc, q);
2551 mvxpe_rx_unlockq(sc, q);
2552 }
2553 }
2554
2555 STATIC void
2556 mvxpe_rx_queue_refill(struct mvxpe_softc *sc, int q)
2557 {
2558 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2559 uint32_t prxs, prxsu, ndesc;
2560 int idx, refill = 0;
2561 int npkt;
2562
2563 KASSERT_RX_MTX(sc, q);
2564
2565 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2566 ndesc = MVXPE_PRXS_GET_NODC(prxs) + MVXPE_PRXS_GET_ODC(prxs);
2567 refill = rx->rx_queue_len - ndesc;
2568 if (refill <= 0)
2569 return;
2570 DPRINTPRXS(2, q);
2571 DPRINTSC(sc, 2, "%d buffers to refill.\n", refill);
2572
2573 idx = rx->rx_cpu;
2574 for (npkt = 0; npkt < refill; npkt++)
2575 if (mvxpe_rx_queue_add(sc, q) != 0)
2576 break;
2577 DPRINTSC(sc, 2, "queue %d, %d buffer refilled.\n", q, npkt);
2578 if (npkt == 0)
2579 return;
2580
2581 mvxpe_ring_sync_rx(sc, q, idx, npkt,
2582 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2583
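/*
 * Advertise the newly added descriptors; the PRXSU field takes
 * at most 255 descriptors per write.
 */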
2584 while (npkt > 255) {
2585 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(255);
2586 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2587 npkt -= 255;
2588 }
2589 if (npkt > 0) {
2590 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(npkt);
2591 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2592 }
2593 DPRINTPRXS(2, q);
2594 return;
2595 }
2596
2597 STATIC int
2598 mvxpe_rx_queue_add(struct mvxpe_softc *sc, int q)
2599 {
2600 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2601 struct mvxpe_rx_desc *r;
2602 struct mvxpbm_chunk *chunk = NULL;
2603
2604 KASSERT_RX_MTX(sc, q);
2605
2606 /* Allocate the packet buffer */
2607 chunk = mvxpbm_alloc(sc->sc_bm);
2608 if (chunk == NULL) {
2609 DPRINTSC(sc, 1, "BM chunk allocation failed.\n");
2610 return ENOBUFS;
2611 }
2612
2613 /* Add the packet buffer to the descriptor */
2614 KASSERT(MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) == NULL);
2615 MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) = chunk;
2616 mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
2617
2618 r = MVXPE_RX_DESC(sc, q, rx->rx_cpu);
2619 r->bufptr = chunk->buf_pa;
2620 DPRINTSC(sc, 9, "chunk added to index %d\n", rx->rx_cpu);
2621 rx->rx_cpu = rx_counter_adv(rx->rx_cpu, 1);
2622 return 0;
2623 }
2624
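/*
 * Map the Rx descriptor status bits to mbuf csum_flags: report IPv4
 * header and TCP/UDP checksum results, limited to the checksums
 * enabled on the interface (if_csum_flags_rx).
 */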
2625 STATIC void
2626 mvxpe_rx_set_csumflag(struct ifnet *ifp,
2627 struct mvxpe_rx_desc *r, struct mbuf *m0)
2628 {
2629 uint32_t csum_flags = 0;
2630
2631 if ((r->status & (MVXPE_RX_IP_HEADER_OK | MVXPE_RX_L3_IP)) == 0)
2632 return; /* not an IP packet */
2633
2634 /* L3 */
2635 if (r->status & MVXPE_RX_L3_IP) {
2636 csum_flags |= M_CSUM_IPv4 & ifp->if_csum_flags_rx;
2637 if ((r->status & MVXPE_RX_IP_HEADER_OK) == 0 &&
2638 (csum_flags & M_CSUM_IPv4) != 0) {
2639 csum_flags |= M_CSUM_IPv4_BAD;
2640 goto finish;
2641 }
2642 else if (r->status & MVXPE_RX_IPV4_FRAGMENT) {
2643 /*
2644 * r->l4chk has the partial checksum of each fragment,
2645 * but there is no way to use it in NetBSD.
2646 */
2647 return;
2648 }
2649 }
2650
2651 /* L4 */
2652 switch (r->status & MVXPE_RX_L4_MASK) {
2653 case MVXPE_RX_L4_TCP:
2654 if (r->status & MVXPE_RX_L3_IP)
2655 csum_flags |= M_CSUM_TCPv4 & ifp->if_csum_flags_rx;
2656 else
2657 csum_flags |= M_CSUM_TCPv6 & ifp->if_csum_flags_rx;
2658 break;
2659 case MVXPE_RX_L4_UDP:
2660 if (r->status & MVXPE_RX_L3_IP)
2661 csum_flags |= M_CSUM_UDPv4 & ifp->if_csum_flags_rx;
2662 else
2663 csum_flags |= M_CSUM_UDPv6 & ifp->if_csum_flags_rx;
2664 break;
2665 case MVXPE_RX_L4_OTH:
2666 default:
2667 break;
2668 }
2669 if ((r->status & MVXPE_RX_L4_CHECKSUM_OK) == 0 && (csum_flags &
2670 (M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6)) != 0)
2671 csum_flags |= M_CSUM_TCP_UDP_BAD;
2672 finish:
2673 m0->m_pkthdr.csum_flags = csum_flags;
2674 }
2675
2676 /*
2677 * MAC address filter
2678 */
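/*
 * CRC-8 with polynomial 0x07 (x^8 + x^2 + x + 1) over the MAC address;
 * the result is used as the index into the "other multicast" filter
 * table (dfomt) below.
 */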
2679 STATIC uint8_t
2680 mvxpe_crc8(const uint8_t *data, size_t size)
2681 {
2682 int bit;
2683 uint8_t byte;
2684 uint8_t crc = 0;
2685 const uint8_t poly = 0x07;
2686
2687 while (size--)
2688 for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
2689 crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
2690
2691 return crc;
2692 }
2693
2694 CTASSERT(MVXPE_NDFSMT == MVXPE_NDFOMT);
2695
2696 STATIC void
2697 mvxpe_filter_setup(struct mvxpe_softc *sc)
2698 {
2699 struct ethercom *ec = &sc->sc_ethercom;
2700 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2701 struct ether_multi *enm;
2702 struct ether_multistep step;
2703 uint32_t dfut[MVXPE_NDFUT], dfsmt[MVXPE_NDFSMT], dfomt[MVXPE_NDFOMT];
2704 uint32_t pxc;
2705 int i;
2706 const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};
2707
2708 KASSERT_SC_MTX(sc);
2709
2710 memset(dfut, 0, sizeof(dfut));
2711 memset(dfsmt, 0, sizeof(dfsmt));
2712 memset(dfomt, 0, sizeof(dfomt));
2713
2714 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
2715 goto allmulti;
2716 }
2717
2718 ETHER_LOCK(ec);
2719 ETHER_FIRST_MULTI(step, ec, enm);
2720 while (enm != NULL) {
2721 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2722 /* ranges are complex and somewhat rare */
2723 ETHER_UNLOCK(ec);
2724 goto allmulti;
2725 }
2726 /* chip handles some IPv4 multicast specially */
2727 if (memcmp(enm->enm_addrlo, special, 5) == 0) {
2728 i = enm->enm_addrlo[5];
2729 dfsmt[i>>2] |=
2730 MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2731 } else {
2732 i = mvxpe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
2733 dfomt[i>>2] |=
2734 MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2735 }
2736
2737 ETHER_NEXT_MULTI(step, enm);
2738 }
2739 ETHER_UNLOCK(ec);
2740 goto set;
2741
2742 allmulti:
2743 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
2744 for (i = 0; i < MVXPE_NDFSMT; i++) {
2745 dfsmt[i] = dfomt[i] =
2746 MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2747 MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2748 MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2749 MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2750 }
2751 }
2752
2753 set:
2754 pxc = MVXPE_READ(sc, MVXPE_PXC);
2755 pxc &= ~MVXPE_PXC_UPM;
2756 pxc |= MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP;
2757 if (ifp->if_flags & IFF_BROADCAST) {
2758 pxc &= ~(MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP);
2759 }
2760 if (ifp->if_flags & IFF_PROMISC) {
2761 pxc |= MVXPE_PXC_UPM;
2762 }
2763 MVXPE_WRITE(sc, MVXPE_PXC, pxc);
2764
2765 /* Set Destination Address Filter Unicast Table */
2766 if (ifp->if_flags & IFF_PROMISC) {
2767 /* pass all unicast addresses */
2768 for (i = 0; i < MVXPE_NDFUT; i++) {
2769 dfut[i] =
2770 MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2771 MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2772 MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2773 MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2774 }
2775 }
2776 else {
2777 i = sc->sc_enaddr[5] & 0xf; /* last nibble */
2778 dfut[i>>2] = MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2779 }
2780 MVXPE_WRITE_REGION(sc, MVXPE_DFUT(0), dfut, MVXPE_NDFUT);
2781
2782 /* Set Destination Address Filter Multicast Tables */
2783 MVXPE_WRITE_REGION(sc, MVXPE_DFSMT(0), dfsmt, MVXPE_NDFSMT);
2784 MVXPE_WRITE_REGION(sc, MVXPE_DFOMT(0), dfomt, MVXPE_NDFOMT);
2785 }
2786
2787 /*
2788 * sysctl(9)
2789 */
2790 SYSCTL_SETUP(sysctl_mvxpe, "sysctl mvxpe subtree setup")
2791 {
2792 int rc;
2793 const struct sysctlnode *node;
2794
2795 if ((rc = sysctl_createv(clog, 0, NULL, &node,
2796 0, CTLTYPE_NODE, "mvxpe",
2797 SYSCTL_DESCR("mvxpe interface controls"),
2798 NULL, 0, NULL, 0,
2799 CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
2800 goto err;
2801 }
2802
2803 mvxpe_root_num = node->sysctl_num;
2804 return;
2805
2806 err:
2807 aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc);
2808 }
2809
2810 STATIC int
2811 sysctl_read_mib(SYSCTLFN_ARGS)
2812 {
2813 struct mvxpe_sysctl_mib *arg;
2814 struct mvxpe_softc *sc;
2815 struct sysctlnode node;
2816 uint64_t val;
2817 int err;
2818
2819 node = *rnode;
2820 arg = (struct mvxpe_sysctl_mib *)rnode->sysctl_data;
2821 if (arg == NULL)
2822 return EINVAL;
2823
2824 sc = arg->sc;
2825 if (sc == NULL)
2826 return EINVAL;
2827 if (arg->index < 0 || arg->index >= __arraycount(mvxpe_mib_list))
2828 return EINVAL;
2829
2830 mvxpe_sc_lock(sc);
2831 val = arg->counter;
2832 mvxpe_sc_unlock(sc);
2833
2834 node.sysctl_data = &val;
2835 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2836 if (err)
2837 return err;
2838 if (newp)
2839 return EINVAL;
2840
2841 return 0;
2842 }
2843
2844
2845 STATIC int
2846 sysctl_clear_mib(SYSCTLFN_ARGS)
2847 {
2848 struct mvxpe_softc *sc;
2849 struct sysctlnode node;
2850 int val;
2851 int err;
2852
2853 node = *rnode;
2854 sc = (struct mvxpe_softc *)rnode->sysctl_data;
2855 if (sc == NULL)
2856 return EINVAL;
2857
2858 val = 0;
2859 node.sysctl_data = &val;
2860 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2861 if (err || newp == NULL)
2862 return err;
2863 if (val < 0 || val > 1)
2864 return EINVAL;
2865 if (val == 1) {
2866 mvxpe_sc_lock(sc);
2867 mvxpe_clear_mib(sc);
2868 mvxpe_sc_unlock(sc);
2869 }
2870
2871 return 0;
2872 }
2873
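/*
 * sysctl handler for hw.mvxpe.mvxpeN.{rx,tx}.queueM.length: report the
 * current ring length and, on a write, update it together with the
 * derived threshold registers (PRXDQTH / PTXDQS).
 */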
2874 STATIC int
2875 sysctl_set_queue_length(SYSCTLFN_ARGS)
2876 {
2877 struct mvxpe_sysctl_queue *arg;
2878 struct mvxpe_rx_ring *rx = NULL;
2879 struct mvxpe_tx_ring *tx = NULL;
2880 struct mvxpe_softc *sc;
2881 struct sysctlnode node;
2882 uint32_t reg;
2883 int val;
2884 int err;
2885
2886 node = *rnode;
2887
2888 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
2889 if (arg == NULL)
2890 return EINVAL;
2891 if (arg->queue < 0 || arg->queue >= MVXPE_QUEUE_SIZE)
2892 return EINVAL;
2893 if (arg->rxtx != MVXPE_SYSCTL_RX && arg->rxtx != MVXPE_SYSCTL_TX)
2894 return EINVAL;
2895
2896 sc = arg->sc;
2897 if (sc == NULL)
2898 return EINVAL;
2899
2900 /* read queue length */
2901 mvxpe_sc_lock(sc);
2902 switch (arg->rxtx) {
2903 case MVXPE_SYSCTL_RX:
2904 mvxpe_rx_lockq(sc, arg->queue);
2905 rx = MVXPE_RX_RING(sc, arg->queue);
2906 val = rx->rx_queue_len;
2907 mvxpe_rx_unlockq(sc, arg->queue);
2908 break;
2909 case MVXPE_SYSCTL_TX:
2910 mvxpe_tx_lockq(sc, arg->queue);
2911 tx = MVXPE_TX_RING(sc, arg->queue);
2912 val = tx->tx_queue_len;
2913 mvxpe_tx_unlockq(sc, arg->queue);
2914 break;
2915 }
2916
2917 node.sysctl_data = &val;
2918 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2919 if (err || newp == NULL) {
2920 mvxpe_sc_unlock(sc);
2921 return err;
2922 }
2923
2924 /* update queue length */
2925 if (val < 8 || val > MVXPE_RX_RING_CNT) {
2926 mvxpe_sc_unlock(sc);
2927 return EINVAL;
2928 }
2929 switch (arg->rxtx) {
2930 case MVXPE_SYSCTL_RX:
2931 mvxpe_rx_lockq(sc, arg->queue);
2932 rx->rx_queue_len = val;
2933 rx->rx_queue_th_received =
2934 rx->rx_queue_len / MVXPE_RXTH_RATIO;
2935 rx->rx_queue_th_free =
2936 rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
2937
2938 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
2939 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
2940 MVXPE_WRITE(sc, MVXPE_PRXDQTH(arg->queue), reg);
2941
2942 mvxpe_rx_unlockq(sc, arg->queue);
2943 break;
2944 case MVXPE_SYSCTL_TX:
2945 mvxpe_tx_lockq(sc, arg->queue);
2946 tx->tx_queue_len = val;
2947 tx->tx_queue_th_free =
2948 tx->tx_queue_len / MVXPE_TXTH_RATIO;
2949
2950 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
2951 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
2952 MVXPE_WRITE(sc, MVXPE_PTXDQS(arg->queue), reg);
2953
2954 mvxpe_tx_unlockq(sc, arg->queue);
2955 break;
2956 }
2957 mvxpe_sc_unlock(sc);
2958
2959 return 0;
2960 }
2961
2962 STATIC int
2963 sysctl_set_queue_rxthtime(SYSCTLFN_ARGS)
2964 {
2965 struct mvxpe_sysctl_queue *arg;
2966 struct mvxpe_rx_ring *rx = NULL;
2967 struct mvxpe_softc *sc;
2968 struct sysctlnode node;
2969 extern uint32_t mvTclk;
2970 uint32_t reg, time_mvtclk;
2971 int time_us;
2972 int err;
2973
2974 node = *rnode;
2975
2976 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
2977 if (arg == NULL)
2978 return EINVAL;
2979 if (arg->queue < 0 || arg->queue >= MVXPE_QUEUE_SIZE)
2980 return EINVAL;
2981 if (arg->rxtx != MVXPE_SYSCTL_RX)
2982 return EINVAL;
2983
2984 sc = arg->sc;
2985 if (sc == NULL)
2986 return EINVAL;
2987
2988 /* read the current threshold timer */
2989 mvxpe_sc_lock(sc);
2990 mvxpe_rx_lockq(sc, arg->queue);
2991 rx = MVXPE_RX_RING(sc, arg->queue);
2992 time_mvtclk = rx->rx_queue_th_time;
2993 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvTclk;
2994 node.sysctl_data = &time_us;
2995 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n",
2996 arg->queue, MVXPE_READ(sc, MVXPE_PRXITTH(arg->queue)));
2997 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2998 if (err || newp == NULL) {
2999 mvxpe_rx_unlockq(sc, arg->queue);
3000 mvxpe_sc_unlock(sc);
3001 return err;
3002 }
3003
3004 /* update the threshold timer (0 sec - 1 sec) */
3005 if (time_us < 0 || time_us > (1000 * 1000)) {
3006 mvxpe_rx_unlockq(sc, arg->queue);
3007 mvxpe_sc_unlock(sc);
3008 return EINVAL;
3009 }
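/* Convert the new value from microseconds back to TCLK cycles. */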
3010 time_mvtclk =
3011 (uint64_t)mvTclk * (uint64_t)time_us / (1000ULL * 1000ULL);
3012 rx->rx_queue_th_time = time_mvtclk;
3013 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
3014 MVXPE_WRITE(sc, MVXPE_PRXITTH(arg->queue), reg);
3015 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n", arg->queue, reg);
3016 mvxpe_rx_unlockq(sc, arg->queue);
3017 mvxpe_sc_unlock(sc);
3018
3019 return 0;
3020 }
3021
3022
3023 STATIC void
3024 sysctl_mvxpe_init(struct mvxpe_softc *sc)
3025 {
3026 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3027 const struct sysctlnode *node;
3028 int mvxpe_nodenum;
3029 int mvxpe_mibnum;
3030 int mvxpe_rxqueuenum;
3031 int mvxpe_txqueuenum;
3032 int q, i;
3033
3034 /* hw.mvxpe.mvxpe[unit] */
3035 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3036 0, CTLTYPE_NODE, ifp->if_xname,
3037 SYSCTL_DESCR("mvxpe per-controller controls"),
3038 NULL, 0, NULL, 0,
3039 CTL_HW, mvxpe_root_num, CTL_CREATE,
3040 CTL_EOL) != 0) {
3041 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3042 return;
3043 }
3044 mvxpe_nodenum = node->sysctl_num;
3045
3046 /* hw.mvxpe.mvxpe[unit].mib */
3047 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3048 0, CTLTYPE_NODE, "mib",
3049 SYSCTL_DESCR("mvxpe per-controller MIB counters"),
3050 NULL, 0, NULL, 0,
3051 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3052 CTL_EOL) != 0) {
3053 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3054 return;
3055 }
3056 mvxpe_mibnum = node->sysctl_num;
3057
3058 /* hw.mvxpe.mvxpe[unit].rx */
3059 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3060 0, CTLTYPE_NODE, "rx",
3061 SYSCTL_DESCR("Rx Queues"),
3062 NULL, 0, NULL, 0,
3063 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3064 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3065 return;
3066 }
3067 mvxpe_rxqueuenum = node->sysctl_num;
3068
3069 /* hw.mvxpe.mvxpe[unit].tx */
3070 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3071 0, CTLTYPE_NODE, "tx",
3072 SYSCTL_DESCR("Tx Queues"),
3073 NULL, 0, NULL, 0,
3074 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3075 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3076 return;
3077 }
3078 mvxpe_txqueuenum = node->sysctl_num;
3079
3080 #ifdef MVXPE_DEBUG
3081 /* hw.mvxpe.debug */
3082 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3083 CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
3084 SYSCTL_DESCR("mvxpe device driver debug control"),
3085 NULL, 0, &mvxpe_debug, 0,
3086 CTL_HW, mvxpe_root_num, CTL_CREATE, CTL_EOL) != 0) {
3087 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3088 return;
3089 }
3090 #endif
3091 /*
3092 * MIB access
3093 */
3094 /* hw.mvxpe.mvxpe[unit].mib.<mibs> */
3095 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3096 const char *name = mvxpe_mib_list[i].sysctl_name;
3097 const char *desc = mvxpe_mib_list[i].desc;
3098 struct mvxpe_sysctl_mib *mib_arg = &sc->sc_sysctl_mib[i];
3099
3100 mib_arg->sc = sc;
3101 mib_arg->index = i;
3102 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3103 CTLFLAG_READONLY, CTLTYPE_QUAD, name, desc,
3104 sysctl_read_mib, 0, (void *)mib_arg, 0,
3105 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_mibnum,
3106 CTL_CREATE, CTL_EOL) != 0) {
3107 aprint_normal_dev(sc->sc_dev,
3108 "couldn't create sysctl node\n");
3109 break;
3110 }
3111 }
3112
3113 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
3114 struct mvxpe_sysctl_queue *rxarg = &sc->sc_sysctl_rx_queue[q];
3115 struct mvxpe_sysctl_queue *txarg = &sc->sc_sysctl_tx_queue[q];
3116 #define MVXPE_SYSCTL_NAME(num) "queue" # num
3117 static const char *sysctl_queue_names[] = {
3118 MVXPE_SYSCTL_NAME(0), MVXPE_SYSCTL_NAME(1),
3119 MVXPE_SYSCTL_NAME(2), MVXPE_SYSCTL_NAME(3),
3120 MVXPE_SYSCTL_NAME(4), MVXPE_SYSCTL_NAME(5),
3121 MVXPE_SYSCTL_NAME(6), MVXPE_SYSCTL_NAME(7),
3122 };
3123 #undef MVXPE_SYSCTL_NAME
3124 #ifdef SYSCTL_INCLUDE_DESCR
3125 #define MVXPE_SYSCTL_DESCR(num) "configuration parameters for queue " # num
3126 static const char *sysctl_queue_descrs[] = {
3127 MVXPE_SYSCTL_DESCR(0), MVXPE_SYSCTL_DESCR(1),
3128 MVXPE_SYSCTL_DESCR(2), MVXPE_SYSCTL_DESCR(3),
3129 MVXPE_SYSCTL_DESCR(4), MVXPE_SYSCTL_DESCR(5),
3130 MVXPE_SYSCTL_DESCR(6), MVXPE_SYSCTL_DESCR(7),
3131 };
3132 #undef MVXPE_SYSCTL_DESCR
3133 #endif /* SYSCTL_INCLUDE_DESCR */
3134 int mvxpe_curnum;
3135
3136 rxarg->sc = txarg->sc = sc;
3137 rxarg->queue = txarg->queue = q;
3138 rxarg->rxtx = MVXPE_SYSCTL_RX;
3139 txarg->rxtx = MVXPE_SYSCTL_TX;
3140
3141 /* hw.mvxpe.mvxpe[unit].rx.[queue] */
3142 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3143 0, CTLTYPE_NODE,
3144 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3145 NULL, 0, NULL, 0,
3146 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3147 CTL_CREATE, CTL_EOL) != 0) {
3148 aprint_normal_dev(sc->sc_dev,
3149 "couldn't create sysctl node\n");
3150 break;
3151 }
3152 mvxpe_curnum = node->sysctl_num;
3153
3154 /* hw.mvxpe.mvxpe[unit].rx.[queue].length */
3155 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3156 CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3157 SYSCTL_DESCR("maximum length of the queue"),
3158 sysctl_set_queue_length, 0, (void *)rxarg, 0,
3159 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3160 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3161 aprint_normal_dev(sc->sc_dev,
3162 "couldn't create sysctl node\n");
3163 break;
3164 }
3165
3166 /* hw.mvxpe.mvxpe[unit].rx.[queue].threshold_timer_us */
3167 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3168 CTLFLAG_READWRITE, CTLTYPE_INT, "threshold_timer_us",
3169 SYSCTL_DESCR("interrupt coalescing threshold timer [us]"),
3170 sysctl_set_queue_rxthtime, 0, (void *)rxarg, 0,
3171 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3172 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3173 aprint_normal_dev(sc->sc_dev,
3174 "couldn't create sysctl node\n");
3175 break;
3176 }
3177
3178 /* hw.mvxpe.mvxpe[unit].tx.[queue] */
3179 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3180 0, CTLTYPE_NODE,
3181 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3182 NULL, 0, NULL, 0,
3183 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3184 CTL_CREATE, CTL_EOL) != 0) {
3185 aprint_normal_dev(sc->sc_dev,
3186 "couldn't create sysctl node\n");
3187 break;
3188 }
3189 mvxpe_curnum = node->sysctl_num;
3190
3191 /* hw.mvxpe.mvxpe[unit].tx.[queue].length */
3192 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3193 CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3194 SYSCTL_DESCR("maximum length of the queue"),
3195 sysctl_set_queue_length, 0, (void *)txarg, 0,
3196 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3197 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3198 aprint_normal_dev(sc->sc_dev,
3199 "couldn't create sysctl node\n");
3200 break;
3201 }
3202 }
3203
3204 /* hw.mvxpe.mvxpe[unit].clear_mib */
3205 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3206 CTLFLAG_READWRITE, CTLTYPE_INT, "clear_mib",
3207 SYSCTL_DESCR("mvxpe device driver debug control"),
3208 sysctl_clear_mib, 0, (void *)sc, 0,
3209 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3210 CTL_EOL) != 0) {
3211 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3212 return;
3213 }
3214
3215 }
3216
3217 /*
3218 * MIB
3219 */
3220 STATIC void
3221 mvxpe_clear_mib(struct mvxpe_softc *sc)
3222 {
3223 int i;
3224
3225 KASSERT_SC_MTX(sc);
3226
3227 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3228 if (mvxpe_mib_list[i].reg64)
3229 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum + 4));
3230 MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3231 sc->sc_sysctl_mib[i].counter = 0;
3232 }
3233 }
3234
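/*
 * Accumulate the MIB counters into the sysctl shadow copies.  The
 * hardware counters clear on read; 64-bit counters are read as two
 * 32-bit halves until bus_space_read_8() is available.
 */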
3235 STATIC void
3236 mvxpe_update_mib(struct mvxpe_softc *sc)
3237 {
3238 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3239 int i;
3240
3241 KASSERT_SC_MTX(sc);
3242
3243 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3244 uint32_t val_hi;
3245 uint32_t val_lo;
3246 uint64_t val;
3247
3248 if (mvxpe_mib_list[i].reg64) {
3249 /* XXX: implement bus_space_read_8() */
3250 val_lo = MVXPE_READ_MIB(sc,
3251 (mvxpe_mib_list[i].regnum + 4));
3252 val_hi = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3253 }
3254 else {
3255 val_lo = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3256 val_hi = 0;
3257 }
3258
3259 if ((val_lo | val_hi) == 0)
3260 continue;
3261
3262 val = ((uint64_t)val_hi << 32) | (uint64_t)val_lo;
3263 sc->sc_sysctl_mib[i].counter += val;
3264
3265 switch (mvxpe_mib_list[i].ext) {
3266 case MVXPE_MIBEXT_IF_OERRORS:
3267 if_statadd(ifp, if_oerrors, val);
3268 break;
3269 case MVXPE_MIBEXT_IF_IERRORS:
3270 if_statadd(ifp, if_ierrors, val);
3271 break;
3272 case MVXPE_MIBEXT_IF_COLLISIONS:
3273 if_statadd(ifp, if_collisions, val);
3274 break;
3275 default:
3276 break;
3277 }
3278
3279 }
3280 }
3281
3282 /*
3283 * for Debug
3284 */
3285 STATIC void
3286 mvxpe_dump_txdesc(struct mvxpe_tx_desc *desc, int idx)
3287 {
3288 #define DESC_PRINT(X) \
3289 if (X) \
3290 printf("txdesc[%d]." #X "=%#x\n", idx, X);
3291
3292 DESC_PRINT(desc->command);
3293 DESC_PRINT(desc->l4ichk);
3294 DESC_PRINT(desc->bytecnt);
3295 DESC_PRINT(desc->bufptr);
3296 DESC_PRINT(desc->flags);
3297 #undef DESC_PRINT
3298 }
3299
3300 STATIC void
3301 mvxpe_dump_rxdesc(struct mvxpe_rx_desc *desc, int idx)
3302 {
3303 #define DESC_PRINT(X) \
3304 if (X) \
3305 printf("rxdesc[%d]." #X "=%#x\n", idx, X);
3306
3307 DESC_PRINT(desc->status);
3308 DESC_PRINT(desc->bytecnt);
3309 DESC_PRINT(desc->bufptr);
3310 DESC_PRINT(desc->l4chk);
3311 #undef DESC_PRINT
3312 }
3313