1 /* $NetBSD: if_mvxpe.c,v 1.21 2019/01/22 03:42:27 msaitoh Exp $ */
2 /*
3 * Copyright (c) 2015 Internet Initiative Japan Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: if_mvxpe.c,v 1.21 2019/01/22 03:42:27 msaitoh Exp $");
29
30 #include "opt_multiprocessor.h"
31
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/callout.h>
35 #include <sys/device.h>
36 #include <sys/endian.h>
37 #include <sys/errno.h>
38 #include <sys/evcnt.h>
39 #include <sys/kernel.h>
40 #include <sys/kmem.h>
41 #include <sys/mutex.h>
42 #include <sys/sockio.h>
43 #include <sys/sysctl.h>
44 #include <sys/syslog.h>
45 #include <sys/rndsource.h>
46
47 #include <net/if.h>
48 #include <net/if_ether.h>
49 #include <net/if_media.h>
50 #include <net/bpf.h>
51
52 #include <netinet/in.h>
53 #include <netinet/in_systm.h>
54 #include <netinet/ip.h>
55
56 #include <dev/mii/mii.h>
57 #include <dev/mii/miivar.h>
58
59 #include <dev/marvell/marvellreg.h>
60 #include <dev/marvell/marvellvar.h>
61 #include <dev/marvell/mvxpbmvar.h>
62 #include <dev/marvell/if_mvxpereg.h>
63 #include <dev/marvell/if_mvxpevar.h>
64
65 #include "locators.h"
66
67 #if BYTE_ORDER == BIG_ENDIAN
68 #error "BIG ENDIAN not supported"
69 #endif
70
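/*
 * When MVXPE_DEBUG is defined, STATIC expands to nothing so the
 * driver-internal functions below get external linkage, presumably to
 * keep their symbols visible to the debugger and in backtraces; in
 * normal builds they are ordinary static functions.
 */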
71 #ifdef MVXPE_DEBUG
72 #define STATIC /* nothing */
73 #else
74 #define STATIC static
75 #endif
76
77 /* autoconf(9) */
78 STATIC int mvxpe_match(device_t, struct cfdata *, void *);
79 STATIC void mvxpe_attach(device_t, device_t, void *);
80 STATIC int mvxpe_evcnt_attach(struct mvxpe_softc *);
81 CFATTACH_DECL_NEW(mvxpe_mbus, sizeof(struct mvxpe_softc),
82 mvxpe_match, mvxpe_attach, NULL, NULL);
83 STATIC void mvxpe_sc_lock(struct mvxpe_softc *);
84 STATIC void mvxpe_sc_unlock(struct mvxpe_softc *);
85
86 /* MII */
87 STATIC int mvxpe_miibus_readreg(device_t, int, int, uint16_t *);
88 STATIC int mvxpe_miibus_writereg(device_t, int, int, uint16_t);
89 STATIC void mvxpe_miibus_statchg(struct ifnet *);
90
91 /* Address Decoding Window */
92 STATIC void mvxpe_wininit(struct mvxpe_softc *, enum marvell_tags *);
93
94 /* Device Register Initialization */
95 STATIC int mvxpe_initreg(struct ifnet *);
96
97 /* Descriptor Ring Control for each queue */
98 STATIC void *mvxpe_dma_memalloc(struct mvxpe_softc *, bus_dmamap_t *, size_t);
99 STATIC int mvxpe_ring_alloc_queue(struct mvxpe_softc *, int);
100 STATIC void mvxpe_ring_dealloc_queue(struct mvxpe_softc *, int);
101 STATIC void mvxpe_ring_init_queue(struct mvxpe_softc *, int);
102 STATIC void mvxpe_ring_flush_queue(struct mvxpe_softc *, int);
103 STATIC void mvxpe_ring_sync_rx(struct mvxpe_softc *, int, int, int, int);
104 STATIC void mvxpe_ring_sync_tx(struct mvxpe_softc *, int, int, int, int);
105
106 /* Rx/Tx Queue Control */
107 STATIC int mvxpe_rx_queue_init(struct ifnet *, int);
108 STATIC int mvxpe_tx_queue_init(struct ifnet *, int);
109 STATIC int mvxpe_rx_queue_enable(struct ifnet *, int);
110 STATIC int mvxpe_tx_queue_enable(struct ifnet *, int);
111 STATIC void mvxpe_rx_lockq(struct mvxpe_softc *, int);
112 STATIC void mvxpe_rx_unlockq(struct mvxpe_softc *, int);
113 STATIC void mvxpe_tx_lockq(struct mvxpe_softc *, int);
114 STATIC void mvxpe_tx_unlockq(struct mvxpe_softc *, int);
115
116 /* Interrupt Handlers */
117 STATIC void mvxpe_disable_intr(struct mvxpe_softc *);
118 STATIC void mvxpe_enable_intr(struct mvxpe_softc *);
119 STATIC int mvxpe_rxtxth_intr(void *);
120 STATIC int mvxpe_misc_intr(void *);
121 STATIC int mvxpe_rxtx_intr(void *);
122 STATIC void mvxpe_tick(void *);
123
124 /* struct ifnet and mii callbacks */
125 STATIC void mvxpe_start(struct ifnet *);
126 STATIC int mvxpe_ioctl(struct ifnet *, u_long, void *);
127 STATIC int mvxpe_init(struct ifnet *);
128 STATIC void mvxpe_stop(struct ifnet *, int);
129 STATIC void mvxpe_watchdog(struct ifnet *);
130 STATIC int mvxpe_ifflags_cb(struct ethercom *);
131 STATIC int mvxpe_mediachange(struct ifnet *);
132 STATIC void mvxpe_mediastatus(struct ifnet *, struct ifmediareq *);
133
134 /* Link State Notify */
135 STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc);
136 STATIC void mvxpe_linkup(struct mvxpe_softc *);
137 STATIC void mvxpe_linkdown(struct mvxpe_softc *);
138 STATIC void mvxpe_linkreset(struct mvxpe_softc *);
139
140 /* Tx Subroutines */
141 STATIC int mvxpe_tx_queue_select(struct mvxpe_softc *, struct mbuf *);
142 STATIC int mvxpe_tx_queue(struct mvxpe_softc *, struct mbuf *, int);
143 STATIC void mvxpe_tx_set_csumflag(struct ifnet *,
144 struct mvxpe_tx_desc *, struct mbuf *);
145 STATIC void mvxpe_tx_complete(struct mvxpe_softc *, uint32_t);
146 STATIC void mvxpe_tx_queue_complete(struct mvxpe_softc *, int);
147
148 /* Rx Subroutines */
149 STATIC void mvxpe_rx(struct mvxpe_softc *, uint32_t);
150 STATIC void mvxpe_rx_queue(struct mvxpe_softc *, int, int);
151 STATIC int mvxpe_rx_queue_select(struct mvxpe_softc *, uint32_t, int *);
152 STATIC void mvxpe_rx_refill(struct mvxpe_softc *, uint32_t);
153 STATIC void mvxpe_rx_queue_refill(struct mvxpe_softc *, int);
154 STATIC int mvxpe_rx_queue_add(struct mvxpe_softc *, int);
155 STATIC void mvxpe_rx_set_csumflag(struct ifnet *,
156 struct mvxpe_rx_desc *, struct mbuf *);
157
158 /* MAC address filter */
159 STATIC uint8_t mvxpe_crc8(const uint8_t *, size_t);
160 STATIC void mvxpe_filter_setup(struct mvxpe_softc *);
161
162 /* sysctl(9) */
163 STATIC int sysctl_read_mib(SYSCTLFN_PROTO);
164 STATIC int sysctl_clear_mib(SYSCTLFN_PROTO);
165 STATIC int sysctl_set_queue_length(SYSCTLFN_PROTO);
166 STATIC int sysctl_set_queue_rxthtime(SYSCTLFN_PROTO);
167 STATIC void sysctl_mvxpe_init(struct mvxpe_softc *);
168
169 /* MIB */
170 STATIC void mvxpe_clear_mib(struct mvxpe_softc *);
171 STATIC void mvxpe_update_mib(struct mvxpe_softc *);
172
173 /* for Debug */
174 STATIC void mvxpe_dump_txdesc(struct mvxpe_tx_desc *, int) __attribute__((__unused__));
175 STATIC void mvxpe_dump_rxdesc(struct mvxpe_rx_desc *, int) __attribute__((__unused__));
176
177 STATIC int mvxpe_root_num;
178 STATIC kmutex_t mii_mutex;
179 STATIC int mii_init = 0;
180 #ifdef MVXPE_DEBUG
181 STATIC int mvxpe_debug = MVXPE_DEBUG;
182 #endif
183
184 /*
185  * List of MIB registers and names
186 */
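/*
 * Each entry describes one hardware MIB counter: 'regnum' is the
 * counter's register offset, 'reg64' marks the (good octet) counters
 * wider than 32 bits, and a non-zero 'ext' apparently tags counters
 * that also feed the matching generic ifnet statistic (if_oerrors,
 * if_ierrors or if_collisions).
 */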
187 STATIC struct mvxpe_mib_def {
188 uint32_t regnum;
189 int reg64;
190 const char *sysctl_name;
191 const char *desc;
192 int ext;
193 #define MVXPE_MIBEXT_IF_OERRORS 1
194 #define MVXPE_MIBEXT_IF_IERRORS 2
195 #define MVXPE_MIBEXT_IF_COLLISIONS 3
196 } mvxpe_mib_list[] = {
197 {MVXPE_MIB_RX_GOOD_OCT, 1, "rx_good_oct",
198 "Good Octets Rx", 0},
199 {MVXPE_MIB_RX_BAD_OCT, 0, "rx_bad_oct",
200 "Bad Octets Rx", 0},
201 {MVXPE_MIB_TX_MAC_TRNS_ERR, 0, "tx_mac_err",
202 "MAC Transmit Error", MVXPE_MIBEXT_IF_OERRORS},
203 {MVXPE_MIB_RX_GOOD_FRAME, 0, "rx_good_frame",
204 "Good Frames Rx", 0},
205 {MVXPE_MIB_RX_BAD_FRAME, 0, "rx_bad_frame",
206 "Bad Frames Rx", 0},
207 {MVXPE_MIB_RX_BCAST_FRAME, 0, "rx_bcast_frame",
208 "Broadcast Frames Rx", 0},
209 {MVXPE_MIB_RX_MCAST_FRAME, 0, "rx_mcast_frame",
210 "Multicast Frames Rx", 0},
211 {MVXPE_MIB_RX_FRAME64_OCT, 0, "rx_frame_1_64",
212 "Frame Size 1 - 64", 0},
213 {MVXPE_MIB_RX_FRAME127_OCT, 0, "rx_frame_65_127",
214 "Frame Size 65 - 127", 0},
215 {MVXPE_MIB_RX_FRAME255_OCT, 0, "rx_frame_128_255",
216 "Frame Size 128 - 255", 0},
217 {MVXPE_MIB_RX_FRAME511_OCT, 0, "rx_frame_256_511",
218 "Frame Size 256 - 511"},
219 {MVXPE_MIB_RX_FRAME1023_OCT, 0, "rx_frame_512_1023",
220 "Frame Size 512 - 1023", 0},
221 	{MVXPE_MIB_RX_FRAMEMAX_OCT, 0, "rx_frame_1024_max",
222 "Frame Size 1024 - Max", 0},
223 {MVXPE_MIB_TX_GOOD_OCT, 1, "tx_good_oct",
224 "Good Octets Tx", 0},
225 {MVXPE_MIB_TX_GOOD_FRAME, 0, "tx_good_frame",
226 "Good Frames Tx", 0},
227 {MVXPE_MIB_TX_EXCES_COL, 0, "tx_exces_collision",
228 "Excessive Collision", MVXPE_MIBEXT_IF_OERRORS},
229 {MVXPE_MIB_TX_MCAST_FRAME, 0, "tx_mcast_frame",
230 "Multicast Frames Tx"},
231 {MVXPE_MIB_TX_BCAST_FRAME, 0, "tx_bcast_frame",
232 "Broadcast Frames Tx"},
233 {MVXPE_MIB_TX_MAC_CTL_ERR, 0, "tx_mac_err",
234 "Unknown MAC Control", 0},
235 {MVXPE_MIB_FC_SENT, 0, "fc_tx",
236 "Flow Control Tx", 0},
237 {MVXPE_MIB_FC_GOOD, 0, "fc_rx_good",
238 "Good Flow Control Rx", 0},
239 {MVXPE_MIB_FC_BAD, 0, "fc_rx_bad",
240 "Bad Flow Control Rx", 0},
241 {MVXPE_MIB_PKT_UNDERSIZE, 0, "pkt_undersize",
242 "Undersized Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
243 {MVXPE_MIB_PKT_FRAGMENT, 0, "pkt_fragment",
244 "Fragmented Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
245 {MVXPE_MIB_PKT_OVERSIZE, 0, "pkt_oversize",
246 "Oversized Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
247 {MVXPE_MIB_PKT_JABBER, 0, "pkt_jabber",
248 "Jabber Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
249 {MVXPE_MIB_MAC_RX_ERR, 0, "mac_rx_err",
250 "MAC Rx Errors", MVXPE_MIBEXT_IF_IERRORS},
251 {MVXPE_MIB_MAC_CRC_ERR, 0, "mac_crc_err",
252 "MAC CRC Errors", MVXPE_MIBEXT_IF_IERRORS},
253 {MVXPE_MIB_MAC_COL, 0, "mac_collision",
254 "MAC Collision", MVXPE_MIBEXT_IF_COLLISIONS},
255 {MVXPE_MIB_MAC_LATE_COL, 0, "mac_late_collision",
256 "MAC Late Collision", MVXPE_MIBEXT_IF_OERRORS},
257 };
258
259 /*
260 * autoconf(9)
261 */
262 /* ARGSUSED */
263 STATIC int
264 mvxpe_match(device_t parent, cfdata_t match, void *aux)
265 {
266 struct marvell_attach_args *mva = aux;
267 bus_size_t pv_off;
268 uint32_t pv;
269
270 if (strcmp(mva->mva_name, match->cf_name) != 0)
271 return 0;
272 if (mva->mva_offset == MVA_OFFSET_DEFAULT)
273 return 0;
274
275 /* check port version */
276 pv_off = mva->mva_offset + MVXPE_PV;
277 pv = bus_space_read_4(mva->mva_iot, mva->mva_ioh, pv_off);
278 if (MVXPE_PV_GET_VERSION(pv) < 0x10)
279 return 0; /* old version is not supported */
280
281 return 1;
282 }
283
284 /* ARGSUSED */
285 STATIC void
286 mvxpe_attach(device_t parent, device_t self, void *aux)
287 {
288 struct mvxpe_softc *sc = device_private(self);
289 struct mii_softc *mii;
290 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
291 struct marvell_attach_args *mva = aux;
292 prop_dictionary_t dict;
293 prop_data_t enaddrp = NULL;
294 uint32_t phyaddr, maddrh, maddrl;
295 uint8_t enaddr[ETHER_ADDR_LEN];
296 int q;
297
298 aprint_naive("\n");
299 aprint_normal(": Marvell ARMADA GbE Controller\n");
300 memset(sc, 0, sizeof(*sc));
301 sc->sc_dev = self;
302 sc->sc_port = mva->mva_unit;
303 sc->sc_iot = mva->mva_iot;
304 sc->sc_dmat = mva->mva_dmat;
305 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
306 callout_init(&sc->sc_tick_ch, 0);
307 callout_setfunc(&sc->sc_tick_ch, mvxpe_tick, sc);
308
309 /*
310 * BUS space
311 */
312 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
313 mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
314 aprint_error_dev(self, "Cannot map registers\n");
315 goto fail;
316 }
317 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
318 mva->mva_offset + MVXPE_PORTMIB_BASE, MVXPE_PORTMIB_SIZE,
319 &sc->sc_mibh)) {
320 aprint_error_dev(self,
321 "Cannot map destination address filter registers\n");
322 goto fail;
323 }
324 sc->sc_version = MVXPE_READ(sc, MVXPE_PV);
325 aprint_normal_dev(self, "Port Version %#x\n", sc->sc_version);
326
327 /*
328 * Buffer Manager(BM) subsystem.
329 */
330 sc->sc_bm = mvxpbm_device(mva);
331 if (sc->sc_bm == NULL) {
332 aprint_error_dev(self, "no Buffer Manager.\n");
333 goto fail;
334 }
335 aprint_normal_dev(self,
336 "Using Buffer Manager: %s\n", mvxpbm_xname(sc->sc_bm));
337 aprint_normal_dev(sc->sc_dev,
338 "%zu kbytes managed buffer, %zu bytes * %u entries allocated.\n",
339 mvxpbm_buf_size(sc->sc_bm) / 1024,
340 mvxpbm_chunk_size(sc->sc_bm), mvxpbm_chunk_count(sc->sc_bm));
341
342 /*
343 * make sure DMA engines are in reset state
344 */
345 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
346 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);
347
348 /*
349 * Address decoding window
350 */
351 mvxpe_wininit(sc, mva->mva_tags);
352
353 /*
354 * MAC address
355 */
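	/*
	 * The unicast address is held in two registers: MACAH carries
	 * octets 0-3 (most significant byte in the top bits) and MACAL
	 * carries octets 4-5; e.g. 00:11:22:33:44:55 is stored as
	 * MACAH = 0x00112233 and MACAL = 0x00004455, matching the packing
	 * and unpacking done below.
	 */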
356 dict = device_properties(self);
357 if (dict)
358 enaddrp = prop_dictionary_get(dict, "mac-address");
359 if (enaddrp) {
360 memcpy(enaddr, prop_data_data_nocopy(enaddrp), ETHER_ADDR_LEN);
361 maddrh = enaddr[0] << 24;
362 maddrh |= enaddr[1] << 16;
363 maddrh |= enaddr[2] << 8;
364 maddrh |= enaddr[3];
365 maddrl = enaddr[4] << 8;
366 maddrl |= enaddr[5];
367 MVXPE_WRITE(sc, MVXPE_MACAH, maddrh);
368 MVXPE_WRITE(sc, MVXPE_MACAL, maddrl);
369 }
370 else {
371 /*
372 		 * Even if enaddr is not found in the dictionary, the port may
373 		 * already have been initialized by an IPL program such as U-Boot.
374 */
375 maddrh = MVXPE_READ(sc, MVXPE_MACAH);
376 maddrl = MVXPE_READ(sc, MVXPE_MACAL);
377 if ((maddrh | maddrl) == 0) {
378 aprint_error_dev(self, "No Ethernet address\n");
379 return;
380 }
381 }
382 sc->sc_enaddr[0] = maddrh >> 24;
383 sc->sc_enaddr[1] = maddrh >> 16;
384 sc->sc_enaddr[2] = maddrh >> 8;
385 sc->sc_enaddr[3] = maddrh >> 0;
386 sc->sc_enaddr[4] = maddrl >> 8;
387 sc->sc_enaddr[5] = maddrl >> 0;
388 aprint_normal_dev(self, "Ethernet address %s\n",
389 ether_sprintf(sc->sc_enaddr));
390
391 /*
392 * Register interrupt handlers
393 * XXX: handle Ethernet unit intr. and Error intr.
394 */
395 mvxpe_disable_intr(sc);
396 marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpe_rxtxth_intr, sc);
397
398 /*
399 * MIB buffer allocation
400 */
401 sc->sc_sysctl_mib_size =
402 __arraycount(mvxpe_mib_list) * sizeof(struct mvxpe_sysctl_mib);
403 sc->sc_sysctl_mib = kmem_alloc(sc->sc_sysctl_mib_size, KM_NOSLEEP);
404 if (sc->sc_sysctl_mib == NULL)
405 goto fail;
406 memset(sc->sc_sysctl_mib, 0, sc->sc_sysctl_mib_size);
407
408 /*
409 * Device DMA Buffer allocation
410 */
411 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
412 if (mvxpe_ring_alloc_queue(sc, q) != 0)
413 goto fail;
414 mvxpe_ring_init_queue(sc, q);
415 }
416
417 /*
418 * We can support 802.1Q VLAN-sized frames and jumbo
419 * Ethernet frames.
420 */
421 sc->sc_ethercom.ec_capabilities |=
422 ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
423 ifp->if_softc = sc;
424 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
425 ifp->if_start = mvxpe_start;
426 ifp->if_ioctl = mvxpe_ioctl;
427 ifp->if_init = mvxpe_init;
428 ifp->if_stop = mvxpe_stop;
429 ifp->if_watchdog = mvxpe_watchdog;
430
431 /*
432 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
433 */
434 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx;
435 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx;
436 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx;
437 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx;
438 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
439 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
440 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx;
441 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Rx;
442 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
443 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
444
445 /*
446 * Initialize struct ifnet
447 */
448 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(MVXPE_TX_RING_CNT - 1, IFQ_MAXLEN));
449 IFQ_SET_READY(&ifp->if_snd);
450 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
451
452 /*
453 	 * Enable DMA engines and initialize device registers.
454 */
455 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
456 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
457 MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
458 mvxpe_sc_lock(sc); /* XXX */
459 mvxpe_filter_setup(sc);
460 mvxpe_sc_unlock(sc);
461 mvxpe_initreg(ifp);
462
463 /*
464 * Now MAC is working, setup MII.
465 */
466 if (mii_init == 0) {
467 /*
468 		 * The MII bus is shared by all MACs and all PHYs in the SoC,
469 		 * so serializing bus access should be safe.
470 */
471 mutex_init(&mii_mutex, MUTEX_DEFAULT, IPL_NET);
472 mii_init = 1;
473 }
474 sc->sc_mii.mii_ifp = ifp;
475 sc->sc_mii.mii_readreg = mvxpe_miibus_readreg;
476 sc->sc_mii.mii_writereg = mvxpe_miibus_writereg;
477 sc->sc_mii.mii_statchg = mvxpe_miibus_statchg;
478
479 sc->sc_ethercom.ec_mii = &sc->sc_mii;
480 ifmedia_init(&sc->sc_mii.mii_media, 0,
481 mvxpe_mediachange, mvxpe_mediastatus);
482 /*
483 	 * XXX: PHY addressing depends heavily on the board design.
484 	 * We assume the PHY address equals the MAC unit number here,
485 	 * but some boards may differ.
486 */
487 mii_attach(self, &sc->sc_mii, 0xffffffff,
488 MII_PHY_ANY, sc->sc_dev->dv_unit, 0);
489 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
490 if (mii == NULL) {
491 aprint_error_dev(self, "no PHY found!\n");
492 ifmedia_add(&sc->sc_mii.mii_media,
493 IFM_ETHER|IFM_MANUAL, 0, NULL);
494 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
495 } else {
496 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
497 phyaddr = MVXPE_PHYADDR_PHYAD(mii->mii_phy);
498 MVXPE_WRITE(sc, MVXPE_PHYADDR, phyaddr);
499 DPRINTSC(sc, 1, "PHYADDR: %#x\n", MVXPE_READ(sc, MVXPE_PHYADDR));
500 }
501
502 /*
503 * Call MI attach routines.
504 */
505 if_attach(ifp);
506 if_deferred_start_init(ifp, NULL);
507
508 ether_ifattach(ifp, sc->sc_enaddr);
509 ether_set_ifflags_cb(&sc->sc_ethercom, mvxpe_ifflags_cb);
510
511 sysctl_mvxpe_init(sc);
512 mvxpe_evcnt_attach(sc);
513 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
514 RND_TYPE_NET, RND_FLAG_DEFAULT);
515
516 return;
517
518 fail:
519 for (q = 0; q < MVXPE_QUEUE_SIZE; q++)
520 mvxpe_ring_dealloc_queue(sc, q);
521 if (sc->sc_sysctl_mib)
522 kmem_free(sc->sc_sysctl_mib, sc->sc_sysctl_mib_size);
523
524 return;
525 }
526
527 STATIC int
528 mvxpe_evcnt_attach(struct mvxpe_softc *sc)
529 {
530 #ifdef MVXPE_EVENT_COUNTERS
531 int q;
532
533 /* Master Interrupt Handler */
534 evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtxth, EVCNT_TYPE_INTR,
535 NULL, device_xname(sc->sc_dev), "RxTxTH Intr.");
536 evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtx, EVCNT_TYPE_INTR,
537 NULL, device_xname(sc->sc_dev), "RxTx Intr.");
538 evcnt_attach_dynamic(&sc->sc_ev.ev_i_misc, EVCNT_TYPE_INTR,
539 NULL, device_xname(sc->sc_dev), "MISC Intr.");
540
541 /* RXTXTH Interrupt */
542 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtxth_txerr, EVCNT_TYPE_INTR,
543 NULL, device_xname(sc->sc_dev), "RxTxTH Tx error summary");
544
545 /* MISC Interrupt */
546 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_phystatuschng, EVCNT_TYPE_INTR,
547 NULL, device_xname(sc->sc_dev), "MISC phy status changed");
548 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_linkchange, EVCNT_TYPE_INTR,
549 NULL, device_xname(sc->sc_dev), "MISC link status changed");
550 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_iae, EVCNT_TYPE_INTR,
551 NULL, device_xname(sc->sc_dev), "MISC internal address error");
552 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxoverrun, EVCNT_TYPE_INTR,
553 NULL, device_xname(sc->sc_dev), "MISC Rx FIFO overrun");
554 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxcrc, EVCNT_TYPE_INTR,
555 NULL, device_xname(sc->sc_dev), "MISC Rx CRC error");
556 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxlargepacket, EVCNT_TYPE_INTR,
557 NULL, device_xname(sc->sc_dev), "MISC Rx too large frame");
558 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txunderrun, EVCNT_TYPE_INTR,
559 NULL, device_xname(sc->sc_dev), "MISC Tx FIFO underrun");
560 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_prbserr, EVCNT_TYPE_INTR,
561 NULL, device_xname(sc->sc_dev), "MISC SERDES loopback test err");
562 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_srse, EVCNT_TYPE_INTR,
563 NULL, device_xname(sc->sc_dev), "MISC SERDES sync error");
564 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txreq, EVCNT_TYPE_INTR,
565 NULL, device_xname(sc->sc_dev), "MISC Tx resource erorr");
566
567 /* RxTx Interrupt */
568 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rreq, EVCNT_TYPE_INTR,
569 NULL, device_xname(sc->sc_dev), "RxTx Rx resource erorr");
570 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rpq, EVCNT_TYPE_INTR,
571 NULL, device_xname(sc->sc_dev), "RxTx Rx pakcet");
572 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_tbrq, EVCNT_TYPE_INTR,
573 NULL, device_xname(sc->sc_dev), "RxTx Tx complete");
574 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rxtxth, EVCNT_TYPE_INTR,
575 NULL, device_xname(sc->sc_dev), "RxTx RxTxTH summary");
576 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_txerr, EVCNT_TYPE_INTR,
577 NULL, device_xname(sc->sc_dev), "RxTx Tx error summary");
578 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_misc, EVCNT_TYPE_INTR,
579 NULL, device_xname(sc->sc_dev), "RxTx MISC summary");
580
581 /* Link */
582 evcnt_attach_dynamic(&sc->sc_ev.ev_link_up, EVCNT_TYPE_MISC,
583 NULL, device_xname(sc->sc_dev), "link up");
584 evcnt_attach_dynamic(&sc->sc_ev.ev_link_down, EVCNT_TYPE_MISC,
585 NULL, device_xname(sc->sc_dev), "link down");
586
587 /* Rx Descriptor */
588 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_ce, EVCNT_TYPE_MISC,
589 NULL, device_xname(sc->sc_dev), "Rx CRC error counter");
590 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_or, EVCNT_TYPE_MISC,
591 NULL, device_xname(sc->sc_dev), "Rx FIFO overrun counter");
592 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_mf, EVCNT_TYPE_MISC,
593 NULL, device_xname(sc->sc_dev), "Rx too large frame counter");
594 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_re, EVCNT_TYPE_MISC,
595 NULL, device_xname(sc->sc_dev), "Rx resource error counter");
596 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_scat, EVCNT_TYPE_MISC,
597 NULL, device_xname(sc->sc_dev), "Rx unexpected scatter bufs");
598
599 /* Tx Descriptor */
600 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_lc, EVCNT_TYPE_MISC,
601 NULL, device_xname(sc->sc_dev), "Tx late collision counter");
602 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_rl, EVCNT_TYPE_MISC,
603 NULL, device_xname(sc->sc_dev), "Tx excess. collision counter");
604 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_ur, EVCNT_TYPE_MISC,
605 NULL, device_xname(sc->sc_dev), "Tx FIFO underrun counter");
606 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_oth, EVCNT_TYPE_MISC,
607 NULL, device_xname(sc->sc_dev), "Tx unkonwn erorr counter");
608
609 /* Status Registers */
610 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pdfc, EVCNT_TYPE_MISC,
611 NULL, device_xname(sc->sc_dev), "Rx discard counter");
612 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pofc, EVCNT_TYPE_MISC,
613 NULL, device_xname(sc->sc_dev), "Rx overrun counter");
614 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txbadfcs, EVCNT_TYPE_MISC,
615 NULL, device_xname(sc->sc_dev), "Tx bad FCS counter");
616 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txdropped, EVCNT_TYPE_MISC,
617 NULL, device_xname(sc->sc_dev), "Tx dorpped counter");
618 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_lpic, EVCNT_TYPE_MISC,
619 NULL, device_xname(sc->sc_dev), "LP_IDLE counter");
620
621 /* Device Driver Errors */
622 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_wdogsoft, EVCNT_TYPE_MISC,
623 NULL, device_xname(sc->sc_dev), "watchdog timer expired");
624 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txerr, EVCNT_TYPE_MISC,
625 NULL, device_xname(sc->sc_dev), "Tx descriptor alloc failed");
626 #define MVXPE_QUEUE_DESC(q) "Rx success in queue " # q
627 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
628 static const char *rxq_desc[] = {
629 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
630 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
631 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
632 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
633 };
634 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxq[q], EVCNT_TYPE_MISC,
635 NULL, device_xname(sc->sc_dev), rxq_desc[q]);
636 }
637 #undef MVXPE_QUEUE_DESC
638 #define MVXPE_QUEUE_DESC(q) "Tx success in queue " # q
639 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
640 static const char *txq_desc[] = {
641 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
642 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
643 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
644 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
645 };
646 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txq[q], EVCNT_TYPE_MISC,
647 NULL, device_xname(sc->sc_dev), txq_desc[q]);
648 }
649 #undef MVXPE_QUEUE_DESC
650 #define MVXPE_QUEUE_DESC(q) "Rx error in queue " # q
651 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
652 static const char *rxqe_desc[] = {
653 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
654 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
655 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
656 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
657 };
658 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxqe[q], EVCNT_TYPE_MISC,
659 NULL, device_xname(sc->sc_dev), rxqe_desc[q]);
660 }
661 #undef MVXPE_QUEUE_DESC
662 #define MVXPE_QUEUE_DESC(q) "Tx error in queue " # q
663 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
664 static const char *txqe_desc[] = {
665 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
666 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
667 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
668 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
669 };
670 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txqe[q], EVCNT_TYPE_MISC,
671 NULL, device_xname(sc->sc_dev), txqe_desc[q]);
672 }
673 #undef MVXPE_QUEUE_DESC
674
675 #endif /* MVXPE_EVENT_COUNTERS */
676 return 0;
677 }
678
679 STATIC void
680 mvxpe_sc_lock(struct mvxpe_softc *sc)
681 {
682 mutex_enter(&sc->sc_mtx);
683 }
684
685 STATIC void
686 mvxpe_sc_unlock(struct mvxpe_softc *sc)
687 {
688 mutex_exit(&sc->sc_mtx);
689 }
690
691 /*
692 * MII
693 */
694 STATIC int
695 mvxpe_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
696 {
697 struct mvxpe_softc *sc = device_private(dev);
698 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
699 uint32_t smi;
700 int i, rv = 0;
701
702 mutex_enter(&mii_mutex);
703
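	/*
	 * Poll in 1us steps, up to roughly MVXPE_PHY_TIMEOUT microseconds,
	 * for any previous SMI transaction to complete before issuing the
	 * read.
	 */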
704 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
705 DELAY(1);
706 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
707 break;
708 }
709 if (i == MVXPE_PHY_TIMEOUT) {
710 aprint_error_ifnet(ifp, "SMI busy timeout\n");
711 rv = ETIMEDOUT;
712 goto out;
713 }
714
715 smi =
716 MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) | MVXPE_SMI_OPCODE_READ;
717 MVXPE_WRITE(sc, MVXPE_SMI, smi);
718
719 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
720 DELAY(1);
721 smi = MVXPE_READ(sc, MVXPE_SMI);
722 if (smi & MVXPE_SMI_READVALID) {
723 *val = smi & MVXPE_SMI_DATA_MASK;
724 break;
725 }
726 }
727 DPRINTDEV(dev, 9, "i=%d, timeout=%d\n", i, MVXPE_PHY_TIMEOUT);
728 if (i >= MVXPE_PHY_TIMEOUT)
729 rv = ETIMEDOUT;
730
731 out:
732 mutex_exit(&mii_mutex);
733
734 DPRINTDEV(dev, 9, "phy=%d, reg=%#x, val=%#hx\n", phy, reg, *val);
735
736 return rv;
737 }
738
739 STATIC int
740 mvxpe_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
741 {
742 struct mvxpe_softc *sc = device_private(dev);
743 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
744 uint32_t smi;
745 int i, rv = 0;
746
747 DPRINTDEV(dev, 9, "phy=%d reg=%#x val=%#hx\n", phy, reg, val);
748
749 mutex_enter(&mii_mutex);
750
751 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
752 DELAY(1);
753 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
754 break;
755 }
756 if (i == MVXPE_PHY_TIMEOUT) {
757 aprint_error_ifnet(ifp, "SMI busy timeout\n");
758 rv = ETIMEDOUT;
759 goto out;
760 }
761
762 smi = MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) |
763 MVXPE_SMI_OPCODE_WRITE | (val & MVXPE_SMI_DATA_MASK);
764 MVXPE_WRITE(sc, MVXPE_SMI, smi);
765
766 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
767 DELAY(1);
768 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
769 break;
770 }
771
772 if (i == MVXPE_PHY_TIMEOUT) {
773 aprint_error_ifnet(ifp, "phy write timed out\n");
774 rv = ETIMEDOUT;
775 }
776
777 out:
778 mutex_exit(&mii_mutex);
779
780 return rv;
781 }
782
783 STATIC void
784 mvxpe_miibus_statchg(struct ifnet *ifp)
785 {
786
787 /* nothing to do */
788 }
789
790 /*
791 * Address Decoding Window
792 */
793 STATIC void
794 mvxpe_wininit(struct mvxpe_softc *sc, enum marvell_tags *tags)
795 {
796 device_t pdev = device_parent(sc->sc_dev);
797 uint64_t base;
798 uint32_t en, ac, size;
799 int window, target, attr, rv, i;
800
801 /* First disable all address decode windows */
802 en = MVXPE_BARE_EN_MASK;
803 MVXPE_WRITE(sc, MVXPE_BARE, en);
804
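	/*
	 * Program one window per valid tag below.  Each configured window
	 * clears its bit in 'en', so the final write of 'en' to MVXPE_BARE
	 * re-enables exactly the windows that were set up (a set bit in
	 * BARE leaves the corresponding window disabled).
	 */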
805 ac = 0;
806 for (window = 0, i = 0;
807 tags[i] != MARVELL_TAG_UNDEFINED && window < MVXPE_NWINDOW; i++) {
808 rv = marvell_winparams_by_tag(pdev, tags[i],
809 &target, &attr, &base, &size);
810 if (rv != 0 || size == 0)
811 continue;
812
813 if (base > 0xffffffffULL) {
814 if (window >= MVXPE_NREMAP) {
815 aprint_error_dev(sc->sc_dev,
816 "can't remap window %d\n", window);
817 continue;
818 }
819 MVXPE_WRITE(sc, MVXPE_HA(window),
820 (base >> 32) & 0xffffffff);
821 }
822
823 MVXPE_WRITE(sc, MVXPE_BASEADDR(window),
824 MVXPE_BASEADDR_TARGET(target) |
825 MVXPE_BASEADDR_ATTR(attr) |
826 MVXPE_BASEADDR_BASE(base));
827 MVXPE_WRITE(sc, MVXPE_S(window), MVXPE_S_SIZE(size));
828
829 DPRINTSC(sc, 1, "Window %d Base 0x%016llx: Size 0x%08x\n",
830 window, base, size);
831
832 en &= ~(1 << window);
833 /* set full access (r/w) */
834 ac |= MVXPE_EPAP_EPAR(window, MVXPE_EPAP_AC_FA);
835 window++;
836 }
837 	/* allow access to the decode windows */
838 MVXPE_WRITE(sc, MVXPE_EPAP, ac);
839
840 MVXPE_WRITE(sc, MVXPE_BARE, en);
841 }
842
843 /*
844 * Device Register Initialization
845  * Reset device registers to the device driver's default values.
846  * The device is not enabled here.
847 */
848 STATIC int
849 mvxpe_initreg(struct ifnet *ifp)
850 {
851 struct mvxpe_softc *sc = ifp->if_softc;
852 int serdes = 0;
853 uint32_t reg;
854 int q, i;
855
856 DPRINTIFNET(ifp, 1, "initializing device register\n");
857
858 /* Init TX/RX Queue Registers */
859 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
860 mvxpe_rx_lockq(sc, q);
861 if (mvxpe_rx_queue_init(ifp, q) != 0) {
862 aprint_error_ifnet(ifp,
863 "initialization failed: cannot initialize queue\n");
864 mvxpe_rx_unlockq(sc, q);
865 return ENOBUFS;
866 }
867 mvxpe_rx_unlockq(sc, q);
868
869 mvxpe_tx_lockq(sc, q);
870 if (mvxpe_tx_queue_init(ifp, q) != 0) {
871 aprint_error_ifnet(ifp,
872 "initialization failed: cannot initialize queue\n");
873 mvxpe_tx_unlockq(sc, q);
874 return ENOBUFS;
875 }
876 mvxpe_tx_unlockq(sc, q);
877 }
878
879 /* Tx MTU Limit */
880 MVXPE_WRITE(sc, MVXPE_TXMTU, MVXPE_MTU);
881
882 	/* Check SGMII or SERDES (assume IPL/U-Boot initialized this) */
883 reg = MVXPE_READ(sc, MVXPE_PMACC0);
884 if ((reg & MVXPE_PMACC0_PORTTYPE) != 0)
885 serdes = 1;
886
887 /* Ethernet Unit Control */
888 reg = MVXPE_READ(sc, MVXPE_EUC);
889 reg |= MVXPE_EUC_POLLING;
890 MVXPE_WRITE(sc, MVXPE_EUC, reg);
891
892 /* Auto Negotiation */
893 reg = MVXPE_PANC_MUSTSET; /* must write 0x1 */
894 reg |= MVXPE_PANC_FORCELINKFAIL;/* force link state down */
895 reg |= MVXPE_PANC_ANSPEEDEN; /* interface speed negotiation */
896 reg |= MVXPE_PANC_ANDUPLEXEN; /* negotiate duplex mode */
897 if (serdes) {
898 reg |= MVXPE_PANC_INBANDANEN; /* In Band negotiation */
899 reg |= MVXPE_PANC_INBANDANBYPASSEN; /* bypass negotiation */
900 reg |= MVXPE_PANC_SETFULLDX; /* set full-duplex on failure */
901 }
902 MVXPE_WRITE(sc, MVXPE_PANC, reg);
903
904 /* EEE: Low Power Idle */
905 reg = MVXPE_LPIC0_LILIMIT(MVXPE_LPI_LI);
906 reg |= MVXPE_LPIC0_TSLIMIT(MVXPE_LPI_TS);
907 MVXPE_WRITE(sc, MVXPE_LPIC0, reg);
908
909 reg = MVXPE_LPIC1_TWLIMIT(MVXPE_LPI_TS);
910 MVXPE_WRITE(sc, MVXPE_LPIC1, reg);
911
912 reg = MVXPE_LPIC2_MUSTSET;
913 MVXPE_WRITE(sc, MVXPE_LPIC2, reg);
914
915 /* Port MAC Control set 0 */
916 reg = MVXPE_PMACC0_MUSTSET; /* must write 0x1 */
917 reg &= ~MVXPE_PMACC0_PORTEN; /* port is still disabled */
918 reg |= MVXPE_PMACC0_FRAMESIZELIMIT(MVXPE_MRU);
919 if (serdes)
920 reg |= MVXPE_PMACC0_PORTTYPE;
921 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
922
923 /* Port MAC Control set 1 is only used for loop-back test */
924
925 /* Port MAC Control set 2 */
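	/* Keep only the PCS/RGMII mode bits already configured (e.g. by
	 * firmware) and rebuild the rest from the mandatory defaults. */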
926 reg = MVXPE_READ(sc, MVXPE_PMACC2);
927 reg &= (MVXPE_PMACC2_PCSEN | MVXPE_PMACC2_RGMIIEN);
928 reg |= MVXPE_PMACC2_MUSTSET;
929 MVXPE_WRITE(sc, MVXPE_PMACC2, reg);
930
931 /* Port MAC Control set 3 is used for IPG tune */
932
933 /* Port MAC Control set 4 is not used */
934
935 /* Port Configuration */
936 /* Use queue 0 only */
937 reg = MVXPE_READ(sc, MVXPE_PXC);
938 reg &= ~(MVXPE_PXC_RXQ_MASK | MVXPE_PXC_RXQARP_MASK |
939 MVXPE_PXC_TCPQ_MASK | MVXPE_PXC_UDPQ_MASK | MVXPE_PXC_BPDUQ_MASK);
940 MVXPE_WRITE(sc, MVXPE_PXC, reg);
941
942 /* Port Configuration Extended: enable Tx CRC generation */
943 reg = MVXPE_READ(sc, MVXPE_PXCX);
944 reg &= ~MVXPE_PXCX_TXCRCDIS;
945 MVXPE_WRITE(sc, MVXPE_PXCX, reg);
946
947 	/* clear MIB counter registers (they clear on read) */
948 for (i = 0; i < __arraycount(mvxpe_mib_list); i++)
949 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum));
950
951 /* Set SDC register except IPGINT bits */
952 reg = MVXPE_SDC_RXBSZ_16_64BITWORDS;
953 reg |= MVXPE_SDC_TXBSZ_16_64BITWORDS;
954 reg |= MVXPE_SDC_BLMR;
955 reg |= MVXPE_SDC_BLMT;
956 MVXPE_WRITE(sc, MVXPE_SDC, reg);
957
958 return 0;
959 }
960
961 /*
962  * Descriptor Ring Controls for each queue
963 */
964 STATIC void *
965 mvxpe_dma_memalloc(struct mvxpe_softc *sc, bus_dmamap_t *map, size_t size)
966 {
967 bus_dma_segment_t segs;
968 void *kva = NULL;
969 int nsegs;
970
971 /*
972 * Allocate the descriptor queues.
973  * struct mvxpe_ring_data contains an array of descriptors per queue.
974 */
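	/*
	 * Standard bus_dma(9) sequence: allocate physical segments, map
	 * them into kernel virtual memory, then create and load a DMA map
	 * for the ring.  The fail1..fail3 labels unwind these steps in
	 * reverse order.
	 */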
975 if (bus_dmamem_alloc(sc->sc_dmat,
976 size, PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
977 aprint_error_dev(sc->sc_dev,
978 "can't alloc device memory (%zu bytes)\n", size);
979 return NULL;
980 }
981 if (bus_dmamem_map(sc->sc_dmat,
982 &segs, nsegs, size, &kva, BUS_DMA_NOWAIT)) {
983 aprint_error_dev(sc->sc_dev,
984 "can't map dma buffers (%zu bytes)\n", size);
985 goto fail1;
986 }
987
988 if (bus_dmamap_create(sc->sc_dmat,
989 size, 1, size, 0, BUS_DMA_NOWAIT, map)) {
990 aprint_error_dev(sc->sc_dev, "can't create dma map\n");
991 goto fail2;
992 }
993 if (bus_dmamap_load(sc->sc_dmat,
994 *map, kva, size, NULL, BUS_DMA_NOWAIT)) {
995 aprint_error_dev(sc->sc_dev, "can't load dma map\n");
996 goto fail3;
997 }
998 memset(kva, 0, size);
999 return kva;
1000
1001 fail3:
1002 bus_dmamap_destroy(sc->sc_dmat, *map);
1003 memset(map, 0, sizeof(*map));
1004 fail2:
1005 bus_dmamem_unmap(sc->sc_dmat, kva, size);
1006 fail1:
1007 bus_dmamem_free(sc->sc_dmat, &segs, nsegs);
1008 return NULL;
1009 }
1010
1011 STATIC int
1012 mvxpe_ring_alloc_queue(struct mvxpe_softc *sc, int q)
1013 {
1014 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1015 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1016
1017 /*
1018 	 * MVXPE_RX_RING_CNT and MVXPE_TX_RING_CNT are hard limits on the
1019 	 * queue length; the real queue length is limited by
1020 	 * sc->sc_rx_ring[q].rx_queue_len and sc->sc_tx_ring[q].tx_queue_len.
1021 	 *
1022 	 * Because reallocating a descriptor ring would require reprogramming
1023 	 * the DMA registers, we allocate enough descriptors for the hard
1024 	 * limit of the queue length up front.
1025 */
1026 rx->rx_descriptors =
1027 mvxpe_dma_memalloc(sc, &rx->rx_descriptors_map,
1028 (sizeof(struct mvxpe_rx_desc) * MVXPE_RX_RING_CNT));
1029 if (rx->rx_descriptors == NULL)
1030 goto fail;
1031
1032 tx->tx_descriptors =
1033 mvxpe_dma_memalloc(sc, &tx->tx_descriptors_map,
1034 (sizeof(struct mvxpe_tx_desc) * MVXPE_TX_RING_CNT));
1035 if (tx->tx_descriptors == NULL)
1036 goto fail;
1037
1038 return 0;
1039 fail:
1040 mvxpe_ring_dealloc_queue(sc, q);
1041 aprint_error_dev(sc->sc_dev, "DMA Ring buffer allocation failure.\n");
1042 return ENOMEM;
1043 }
1044
1045 STATIC void
1046 mvxpe_ring_dealloc_queue(struct mvxpe_softc *sc, int q)
1047 {
1048 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1049 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1050 bus_dma_segment_t *segs;
1051 bus_size_t size;
1052 void *kva;
1053 int nsegs;
1054
1055 /* Rx */
1056 kva = (void *)MVXPE_RX_RING_MEM_VA(sc, q);
1057 if (kva) {
1058 segs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_segs;
1059 nsegs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_nsegs;
1060 size = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_mapsize;
1061
1062 bus_dmamap_unload(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
1063 bus_dmamap_destroy(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
1064 bus_dmamem_unmap(sc->sc_dmat, kva, size);
1065 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
1066 }
1067
1068 /* Tx */
1069 kva = (void *)MVXPE_TX_RING_MEM_VA(sc, q);
1070 if (kva) {
1071 segs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_segs;
1072 nsegs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_nsegs;
1073 size = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_mapsize;
1074
1075 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
1076 bus_dmamap_destroy(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
1077 bus_dmamem_unmap(sc->sc_dmat, kva, size);
1078 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
1079 }
1080
1081 	/* Clear all dangling pointers */
1082 memset(rx, 0, sizeof(*rx));
1083 memset(tx, 0, sizeof(*tx));
1084 }
1085
1086 STATIC void
1087 mvxpe_ring_init_queue(struct mvxpe_softc *sc, int q)
1088 {
1089 struct mvxpe_rx_desc *rxd = MVXPE_RX_RING_MEM_VA(sc, q);
1090 struct mvxpe_tx_desc *txd = MVXPE_TX_RING_MEM_VA(sc, q);
1091 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1092 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1093 static const int rx_default_queue_len[] = {
1094 MVXPE_RX_QUEUE_LIMIT_0, MVXPE_RX_QUEUE_LIMIT_1,
1095 MVXPE_RX_QUEUE_LIMIT_2, MVXPE_RX_QUEUE_LIMIT_3,
1096 MVXPE_RX_QUEUE_LIMIT_4, MVXPE_RX_QUEUE_LIMIT_5,
1097 MVXPE_RX_QUEUE_LIMIT_6, MVXPE_RX_QUEUE_LIMIT_7,
1098 };
1099 static const int tx_default_queue_len[] = {
1100 MVXPE_TX_QUEUE_LIMIT_0, MVXPE_TX_QUEUE_LIMIT_1,
1101 MVXPE_TX_QUEUE_LIMIT_2, MVXPE_TX_QUEUE_LIMIT_3,
1102 MVXPE_TX_QUEUE_LIMIT_4, MVXPE_TX_QUEUE_LIMIT_5,
1103 MVXPE_TX_QUEUE_LIMIT_6, MVXPE_TX_QUEUE_LIMIT_7,
1104 };
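	/*
	 * mvTclk is the SoC core clock (TCLK) frequency in Hz, exported by
	 * the Marvell SoC support code; it is used below to convert the
	 * 0.5 ms Rx interrupt time threshold into clock ticks.
	 */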
1105 extern uint32_t mvTclk;
1106 int i;
1107
1108 /* Rx handle */
1109 for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
1110 MVXPE_RX_DESC(sc, q, i) = &rxd[i];
1111 MVXPE_RX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_rx_desc) * i;
1112 MVXPE_RX_PKTBUF(sc, q, i) = NULL;
1113 }
1114 mutex_init(&rx->rx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
1115 rx->rx_dma = rx->rx_cpu = 0;
1116 rx->rx_queue_len = rx_default_queue_len[q];
1117 if (rx->rx_queue_len > MVXPE_RX_RING_CNT)
1118 rx->rx_queue_len = MVXPE_RX_RING_CNT;
1119 rx->rx_queue_th_received = rx->rx_queue_len / MVXPE_RXTH_RATIO;
1120 rx->rx_queue_th_free = rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
1121 rx->rx_queue_th_time = (mvTclk / 1000) / 2; /* 0.5 [ms] */
1122
1123 /* Tx handle */
1124 for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
1125 MVXPE_TX_DESC(sc, q, i) = &txd[i];
1126 MVXPE_TX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_tx_desc) * i;
1127 MVXPE_TX_MBUF(sc, q, i) = NULL;
1128 		/* Tx handle needs a DMA map for bus_dmamap_load_mbuf() */
1129 if (bus_dmamap_create(sc->sc_dmat,
1130 mvxpbm_chunk_size(sc->sc_bm),
1131 MVXPE_TX_SEGLIMIT, mvxpbm_chunk_size(sc->sc_bm), 0,
1132 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
1133 &MVXPE_TX_MAP(sc, q, i))) {
1134 aprint_error_dev(sc->sc_dev,
1135 "can't create dma map (tx ring %d)\n", i);
1136 }
1137 }
1138 mutex_init(&tx->tx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
1139 tx->tx_dma = tx->tx_cpu = 0;
1140 tx->tx_queue_len = tx_default_queue_len[q];
1141 if (tx->tx_queue_len > MVXPE_TX_RING_CNT)
1142 tx->tx_queue_len = MVXPE_TX_RING_CNT;
1143 tx->tx_used = 0;
1144 tx->tx_queue_th_free = tx->tx_queue_len / MVXPE_TXTH_RATIO;
1145 }
1146
1147 STATIC void
1148 mvxpe_ring_flush_queue(struct mvxpe_softc *sc, int q)
1149 {
1150 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1151 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1152 struct mbuf *m;
1153 int i;
1154
1155 KASSERT_RX_MTX(sc, q);
1156 KASSERT_TX_MTX(sc, q);
1157
1158 /* Rx handle */
1159 for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
1160 if (MVXPE_RX_PKTBUF(sc, q, i) == NULL)
1161 continue;
1162 mvxpbm_free_chunk(MVXPE_RX_PKTBUF(sc, q, i));
1163 MVXPE_RX_PKTBUF(sc, q, i) = NULL;
1164 }
1165 rx->rx_dma = rx->rx_cpu = 0;
1166
1167 /* Tx handle */
1168 for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
1169 m = MVXPE_TX_MBUF(sc, q, i);
1170 if (m == NULL)
1171 continue;
1172 MVXPE_TX_MBUF(sc, q, i) = NULL;
1173 bus_dmamap_sync(sc->sc_dmat,
1174 MVXPE_TX_MAP(sc, q, i), 0, m->m_pkthdr.len,
1175 BUS_DMASYNC_POSTWRITE);
1176 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, i));
1177 m_freem(m);
1178 }
1179 tx->tx_dma = tx->tx_cpu = 0;
1180 tx->tx_used = 0;
1181 }
1182
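/*
 * Sync 'count' Rx descriptors starting at index 'idx', splitting the
 * bus_dmamap_sync() into two calls when the range wraps past the end of
 * the ring; e.g. idx = MVXPE_RX_RING_CNT - 2 with count = 4 syncs the
 * last two and the first two descriptors.
 */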
1183 STATIC void
1184 mvxpe_ring_sync_rx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
1185 {
1186 int wrap;
1187
1188 KASSERT_RX_MTX(sc, q);
1189 KASSERT(count > 0 && count <= MVXPE_RX_RING_CNT);
1190 KASSERT(idx >= 0 && idx < MVXPE_RX_RING_CNT);
1191
1192 wrap = (idx + count) - MVXPE_RX_RING_CNT;
1193 if (wrap > 0) {
1194 count -= wrap;
1195 KASSERT(count > 0);
1196 bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
1197 0, sizeof(struct mvxpe_rx_desc) * wrap, ops);
1198 }
1199 bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
1200 MVXPE_RX_DESC_OFF(sc, q, idx),
1201 sizeof(struct mvxpe_rx_desc) * count, ops);
1202 }
1203
1204 STATIC void
1205 mvxpe_ring_sync_tx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
1206 {
1207 int wrap = 0;
1208
1209 KASSERT_TX_MTX(sc, q);
1210 KASSERT(count > 0 && count <= MVXPE_TX_RING_CNT);
1211 KASSERT(idx >= 0 && idx < MVXPE_TX_RING_CNT);
1212
1213 wrap = (idx + count) - MVXPE_TX_RING_CNT;
1214 if (wrap > 0) {
1215 count -= wrap;
1216 bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
1217 0, sizeof(struct mvxpe_tx_desc) * wrap, ops);
1218 }
1219 bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
1220 MVXPE_TX_DESC_OFF(sc, q, idx),
1221 sizeof(struct mvxpe_tx_desc) * count, ops);
1222 }
1223
1224 /*
1225 * Rx/Tx Queue Control
1226 */
1227 STATIC int
1228 mvxpe_rx_queue_init(struct ifnet *ifp, int q)
1229 {
1230 struct mvxpe_softc *sc = ifp->if_softc;
1231 uint32_t reg;
1232
1233 KASSERT_RX_MTX(sc, q);
1234 KASSERT(MVXPE_RX_RING_MEM_PA(sc, q) != 0);
1235
1236 /* descriptor address */
1237 MVXPE_WRITE(sc, MVXPE_PRXDQA(q), MVXPE_RX_RING_MEM_PA(sc, q));
1238
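	/*
	 * The buffer size and packet offset fields appear to be programmed
	 * in units of 8 bytes, hence the >> 3 shifts below.
	 */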
1239 /* Rx buffer size and descriptor ring size */
1240 reg = MVXPE_PRXDQS_BUFFERSIZE(mvxpbm_chunk_size(sc->sc_bm) >> 3);
1241 reg |= MVXPE_PRXDQS_DESCRIPTORSQUEUESIZE(MVXPE_RX_RING_CNT);
1242 MVXPE_WRITE(sc, MVXPE_PRXDQS(q), reg);
1243 DPRINTIFNET(ifp, 1, "PRXDQS(%d): %#x\n",
1244 q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
1245
1246 /* Rx packet offset address */
1247 reg = MVXPE_PRXC_PACKETOFFSET(mvxpbm_packet_offset(sc->sc_bm) >> 3);
1248 MVXPE_WRITE(sc, MVXPE_PRXC(q), reg);
1249 DPRINTIFNET(ifp, 1, "PRXC(%d): %#x\n",
1250 q, MVXPE_READ(sc, MVXPE_PRXC(q)));
1251
1252 /* Rx DMA SNOOP */
1253 reg = MVXPE_PRXSNP_SNOOPNOOFBYTES(MVXPE_MRU);
1254 reg |= MVXPE_PRXSNP_L2DEPOSITNOOFBYTES(MVXPE_MRU);
1255 MVXPE_WRITE(sc, MVXPE_PRXSNP(q), reg);
1256
1257 /* if DMA is not working, register is not updated */
1258 KASSERT(MVXPE_READ(sc, MVXPE_PRXDQA(q)) == MVXPE_RX_RING_MEM_PA(sc, q));
1259 return 0;
1260 }
1261
1262 STATIC int
1263 mvxpe_tx_queue_init(struct ifnet *ifp, int q)
1264 {
1265 struct mvxpe_softc *sc = ifp->if_softc;
1266 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1267 uint32_t reg;
1268
1269 KASSERT_TX_MTX(sc, q);
1270 KASSERT(MVXPE_TX_RING_MEM_PA(sc, q) != 0);
1271
1272 /* descriptor address */
1273 MVXPE_WRITE(sc, MVXPE_PTXDQA(q), MVXPE_TX_RING_MEM_PA(sc, q));
1274
1275 /* Tx threshold, and descriptor ring size */
1276 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
1277 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
1278 MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
1279 DPRINTIFNET(ifp, 1, "PTXDQS(%d): %#x\n",
1280 q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));
1281
1282 /* if DMA is not working, register is not updated */
1283 KASSERT(MVXPE_READ(sc, MVXPE_PTXDQA(q)) == MVXPE_TX_RING_MEM_PA(sc, q));
1284 return 0;
1285 }
1286
1287 STATIC int
1288 mvxpe_rx_queue_enable(struct ifnet *ifp, int q)
1289 {
1290 struct mvxpe_softc *sc = ifp->if_softc;
1291 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1292 uint32_t reg;
1293
1294 KASSERT_RX_MTX(sc, q);
1295
1296 /* Set Rx interrupt threshold */
1297 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
1298 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
1299 MVXPE_WRITE(sc, MVXPE_PRXDQTH(q), reg);
1300
1301 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
1302 MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);
1303
1304 /* Unmask RXTX_TH Intr. */
1305 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1306 	reg |= MVXPE_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
1307 	reg |= MVXPE_PRXTXTI_RDTAQ(q); /* Rx Descriptor Alert */
1308 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1309
1310 /* Enable Rx queue */
1311 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
1312 reg |= MVXPE_RQC_ENQ(q);
1313 MVXPE_WRITE(sc, MVXPE_RQC, reg);
1314
1315 return 0;
1316 }
1317
1318 STATIC int
1319 mvxpe_tx_queue_enable(struct ifnet *ifp, int q)
1320 {
1321 struct mvxpe_softc *sc = ifp->if_softc;
1322 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1323 uint32_t reg;
1324
1325 KASSERT_TX_MTX(sc, q);
1326
1327 /* Set Tx interrupt threshold */
1328 reg = MVXPE_READ(sc, MVXPE_PTXDQS(q));
1329 reg &= ~MVXPE_PTXDQS_TBT_MASK; /* keep queue size */
1330 reg |= MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
1331 MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
1332
1333 /* Unmask RXTX_TH Intr. */
1334 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1335 reg |= MVXPE_PRXTXTI_TBTCQ(q); /* Tx Threshold cross */
1336 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1337
1338 /* Don't update MVXPE_TQC here, there is no packet yet. */
1339 return 0;
1340 }
1341
1342 STATIC void
1343 mvxpe_rx_lockq(struct mvxpe_softc *sc, int q)
1344 {
1345 KASSERT(q >= 0);
1346 KASSERT(q < MVXPE_QUEUE_SIZE);
1347 mutex_enter(&sc->sc_rx_ring[q].rx_ring_mtx);
1348 }
1349
1350 STATIC void
1351 mvxpe_rx_unlockq(struct mvxpe_softc *sc, int q)
1352 {
1353 KASSERT(q >= 0);
1354 KASSERT(q < MVXPE_QUEUE_SIZE);
1355 mutex_exit(&sc->sc_rx_ring[q].rx_ring_mtx);
1356 }
1357
1358 STATIC void
1359 mvxpe_tx_lockq(struct mvxpe_softc *sc, int q)
1360 {
1361 KASSERT(q >= 0);
1362 KASSERT(q < MVXPE_QUEUE_SIZE);
1363 mutex_enter(&sc->sc_tx_ring[q].tx_ring_mtx);
1364 }
1365
1366 STATIC void
1367 mvxpe_tx_unlockq(struct mvxpe_softc *sc, int q)
1368 {
1369 KASSERT(q >= 0);
1370 KASSERT(q < MVXPE_QUEUE_SIZE);
1371 mutex_exit(&sc->sc_tx_ring[q].tx_ring_mtx);
1372 }
1373
1374 /*
1375 * Interrupt Handlers
1376 */
1377 STATIC void
1378 mvxpe_disable_intr(struct mvxpe_softc *sc)
1379 {
1380 MVXPE_WRITE(sc, MVXPE_EUIM, 0);
1381 MVXPE_WRITE(sc, MVXPE_EUIC, 0);
1382 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, 0);
1383 MVXPE_WRITE(sc, MVXPE_PRXTXTIC, 0);
1384 MVXPE_WRITE(sc, MVXPE_PRXTXIM, 0);
1385 MVXPE_WRITE(sc, MVXPE_PRXTXIC, 0);
1386 MVXPE_WRITE(sc, MVXPE_PMIM, 0);
1387 MVXPE_WRITE(sc, MVXPE_PMIC, 0);
1388 MVXPE_WRITE(sc, MVXPE_PIE, 0);
1389 }
1390
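/*
 * Only the RXTX_TH interrupt is hooked up in mvxpe_attach(); the MISC
 * and plain RXTX causes are reported through its summary bits, so
 * unmasking them here lets mvxpe_rxtxth_intr() dispatch to
 * mvxpe_misc_intr() and mvxpe_rxtx_intr().
 */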
1391 STATIC void
1392 mvxpe_enable_intr(struct mvxpe_softc *sc)
1393 {
1394 uint32_t reg;
1395
1396 /* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
1397 reg = MVXPE_READ(sc, MVXPE_PMIM);
1398 reg |= MVXPE_PMI_PHYSTATUSCHNG;
1399 reg |= MVXPE_PMI_LINKCHANGE;
1400 reg |= MVXPE_PMI_IAE;
1401 reg |= MVXPE_PMI_RXOVERRUN;
1402 reg |= MVXPE_PMI_RXCRCERROR;
1403 reg |= MVXPE_PMI_RXLARGEPACKET;
1404 reg |= MVXPE_PMI_TXUNDRN;
1405 #if 0
1406 /*
1407 * The device may raise false interrupts for SERDES even if the device
1408 	 * is not configured to use a SERDES connection.
1409 */
1410 reg |= MVXPE_PMI_PRBSERROR;
1411 reg |= MVXPE_PMI_SRSE;
1412 #else
1413 reg &= ~MVXPE_PMI_PRBSERROR;
1414 reg &= ~MVXPE_PMI_SRSE;
1415 #endif
1416 reg |= MVXPE_PMI_TREQ_MASK;
1417 MVXPE_WRITE(sc, MVXPE_PMIM, reg);
1418
1419 /* Enable Summary Bit to check all interrupt cause. */
1420 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1421 reg |= MVXPE_PRXTXTI_PMISCICSUMMARY;
1422 reg |= MVXPE_PRXTXTI_PTXERRORSUMMARY;
1423 reg |= MVXPE_PRXTXTI_PRXTXICSUMMARY;
1424 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1425
1426 /* Enable All Queue Interrupt */
1427 reg = MVXPE_READ(sc, MVXPE_PIE);
1428 reg |= MVXPE_PIE_RXPKTINTRPTENB_MASK;
1429 reg |= MVXPE_PIE_TXPKTINTRPTENB_MASK;
1430 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1431 }
1432
1433 STATIC int
1434 mvxpe_rxtxth_intr(void *arg)
1435 {
1436 struct mvxpe_softc *sc = arg;
1437 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1438 uint32_t ic, queues, datum = 0;
1439
1440 DPRINTSC(sc, 2, "got RXTX_TH_Intr\n");
1441 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtxth);
1442
1443 mvxpe_sc_lock(sc);
1444 ic = MVXPE_READ(sc, MVXPE_PRXTXTIC);
1445 if (ic == 0) {
1446 mvxpe_sc_unlock(sc);
1447 return 0;
1448 }
1449 MVXPE_WRITE(sc, MVXPE_PRXTXTIC, ~ic);
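	/* fold the cause bits into 'datum' as entropy for rnd(4) below */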
1450 datum = datum ^ ic;
1451
1452 DPRINTIFNET(ifp, 2, "PRXTXTIC: %#x\n", ic);
1453
1454 	/* ack maintenance interrupts first */
1455 if (ic & MVXPE_PRXTXTI_PTXERRORSUMMARY) {
1456 DPRINTIFNET(ifp, 1, "PRXTXTIC: +PTXERRORSUMMARY\n");
1457 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtxth_txerr);
1458 }
1459 if ((ic & MVXPE_PRXTXTI_PMISCICSUMMARY)) {
1460 DPRINTIFNET(ifp, 2, "PTXTXTIC: +PMISCICSUMMARY\n");
1461 mvxpe_misc_intr(sc);
1462 }
1463 if (ic & MVXPE_PRXTXTI_PRXTXICSUMMARY) {
1464 DPRINTIFNET(ifp, 2, "PTXTXTIC: +PRXTXICSUMMARY\n");
1465 mvxpe_rxtx_intr(sc);
1466 }
1467 if (!(ifp->if_flags & IFF_RUNNING)) {
1468 mvxpe_sc_unlock(sc);
1469 return 1;
1470 }
1471
1472 /* RxTxTH interrupt */
1473 queues = MVXPE_PRXTXTI_GET_RBICTAPQ(ic);
1474 if (queues) {
1475 DPRINTIFNET(ifp, 2, "PRXTXTIC: +RXEOF\n");
1476 mvxpe_rx(sc, queues);
1477 }
1478 queues = MVXPE_PRXTXTI_GET_TBTCQ(ic);
1479 if (queues) {
1480 DPRINTIFNET(ifp, 2, "PRXTXTIC: +TBTCQ\n");
1481 mvxpe_tx_complete(sc, queues);
1482 }
1483 queues = MVXPE_PRXTXTI_GET_RDTAQ(ic);
1484 if (queues) {
1485 DPRINTIFNET(ifp, 2, "PRXTXTIC: +RDTAQ\n");
1486 mvxpe_rx_refill(sc, queues);
1487 }
1488 mvxpe_sc_unlock(sc);
1489
1490 if_schedule_deferred_start(ifp);
1491
1492 rnd_add_uint32(&sc->sc_rnd_source, datum);
1493
1494 return 1;
1495 }
1496
1497 STATIC int
1498 mvxpe_misc_intr(void *arg)
1499 {
1500 struct mvxpe_softc *sc = arg;
1501 #ifdef MVXPE_DEBUG
1502 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1503 #endif
1504 uint32_t ic;
1505 uint32_t datum = 0;
1506 int claimed = 0;
1507
1508 DPRINTSC(sc, 2, "got MISC_INTR\n");
1509 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_misc);
1510
1511 KASSERT_SC_MTX(sc);
1512
1513 for (;;) {
1514 ic = MVXPE_READ(sc, MVXPE_PMIC);
1515 ic &= MVXPE_READ(sc, MVXPE_PMIM);
1516 if (ic == 0)
1517 break;
1518 MVXPE_WRITE(sc, MVXPE_PMIC, ~ic);
1519 datum = datum ^ ic;
1520 claimed = 1;
1521
1522 DPRINTIFNET(ifp, 2, "PMIC=%#x\n", ic);
1523 if (ic & MVXPE_PMI_PHYSTATUSCHNG) {
1524 DPRINTIFNET(ifp, 2, "+PHYSTATUSCHNG\n");
1525 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_phystatuschng);
1526 }
1527 if (ic & MVXPE_PMI_LINKCHANGE) {
1528 DPRINTIFNET(ifp, 2, "+LINKCHANGE\n");
1529 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_linkchange);
1530 mvxpe_linkupdate(sc);
1531 }
1532 if (ic & MVXPE_PMI_IAE) {
1533 DPRINTIFNET(ifp, 2, "+IAE\n");
1534 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_iae);
1535 }
1536 if (ic & MVXPE_PMI_RXOVERRUN) {
1537 DPRINTIFNET(ifp, 2, "+RXOVERRUN\n");
1538 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxoverrun);
1539 }
1540 if (ic & MVXPE_PMI_RXCRCERROR) {
1541 DPRINTIFNET(ifp, 2, "+RXCRCERROR\n");
1542 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxcrc);
1543 }
1544 if (ic & MVXPE_PMI_RXLARGEPACKET) {
1545 DPRINTIFNET(ifp, 2, "+RXLARGEPACKET\n");
1546 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxlargepacket);
1547 }
1548 if (ic & MVXPE_PMI_TXUNDRN) {
1549 DPRINTIFNET(ifp, 2, "+TXUNDRN\n");
1550 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txunderrun);
1551 }
1552 if (ic & MVXPE_PMI_PRBSERROR) {
1553 DPRINTIFNET(ifp, 2, "+PRBSERROR\n");
1554 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_prbserr);
1555 }
1556 if (ic & MVXPE_PMI_TREQ_MASK) {
1557 DPRINTIFNET(ifp, 2, "+TREQ\n");
1558 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txreq);
1559 }
1560 }
1561 if (datum)
1562 rnd_add_uint32(&sc->sc_rnd_source, datum);
1563
1564 return claimed;
1565 }
1566
1567 STATIC int
1568 mvxpe_rxtx_intr(void *arg)
1569 {
1570 struct mvxpe_softc *sc = arg;
1571 #ifdef MVXPE_DEBUG
1572 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1573 #endif
1574 uint32_t datum = 0;
1575 uint32_t prxtxic;
1576 int claimed = 0;
1577
1578 DPRINTSC(sc, 2, "got RXTX_Intr\n");
1579 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtx);
1580
1581 KASSERT_SC_MTX(sc);
1582
1583 for (;;) {
1584 prxtxic = MVXPE_READ(sc, MVXPE_PRXTXIC);
1585 prxtxic &= MVXPE_READ(sc, MVXPE_PRXTXIM);
1586 if (prxtxic == 0)
1587 break;
1588 MVXPE_WRITE(sc, MVXPE_PRXTXIC, ~prxtxic);
1589 datum = datum ^ prxtxic;
1590 claimed = 1;
1591
1592 DPRINTSC(sc, 2, "PRXTXIC: %#x\n", prxtxic);
1593
1594 if (prxtxic & MVXPE_PRXTXI_RREQ_MASK) {
1595 DPRINTIFNET(ifp, 1, "Rx Resource Error.\n");
1596 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rreq);
1597 }
1598 if (prxtxic & MVXPE_PRXTXI_RPQ_MASK) {
1599 DPRINTIFNET(ifp, 1, "Rx Packet in Queue.\n");
1600 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rpq);
1601 }
1602 if (prxtxic & MVXPE_PRXTXI_TBRQ_MASK) {
1603 DPRINTIFNET(ifp, 1, "Tx Buffer Return.\n");
1604 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_tbrq);
1605 }
1606 if (prxtxic & MVXPE_PRXTXI_PRXTXTHICSUMMARY) {
1607 DPRINTIFNET(ifp, 1, "PRXTXTHIC Sumary\n");
1608 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rxtxth);
1609 }
1610 if (prxtxic & MVXPE_PRXTXI_PTXERRORSUMMARY) {
1611 DPRINTIFNET(ifp, 1, "PTXERROR Sumary\n");
1612 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_txerr);
1613 }
1614 if (prxtxic & MVXPE_PRXTXI_PMISCICSUMMARY) {
1615 DPRINTIFNET(ifp, 1, "PMISCIC Sumary\n");
1616 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_misc);
1617 }
1618 }
1619 if (datum)
1620 rnd_add_uint32(&sc->sc_rnd_source, datum);
1621
1622 return claimed;
1623 }
1624
1625 STATIC void
1626 mvxpe_tick(void *arg)
1627 {
1628 struct mvxpe_softc *sc = arg;
1629 struct mii_data *mii = &sc->sc_mii;
1630
1631 mvxpe_sc_lock(sc);
1632
1633 mii_tick(mii);
1634 mii_pollstat(&sc->sc_mii);
1635
1636 	/* read MIB registers (they clear on read) */
1637 mvxpe_update_mib(sc);
1638
1639 	/* read counter registers (they clear on read) */
1640 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pdfc,
1641 MVXPE_READ(sc, MVXPE_PDFC));
1642 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pofc,
1643 MVXPE_READ(sc, MVXPE_POFC));
1644 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txbadfcs,
1645 MVXPE_READ(sc, MVXPE_TXBADFCS));
1646 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txdropped,
1647 MVXPE_READ(sc, MVXPE_TXDROPPED));
1648 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_lpic,
1649 MVXPE_READ(sc, MVXPE_LPIC));
1650
1651 mvxpe_sc_unlock(sc);
1652
1653 callout_schedule(&sc->sc_tick_ch, hz);
1654 }
1655
1656
1657 /*
1658 * struct ifnet and mii callbacks
1659 */
1660 STATIC void
1661 mvxpe_start(struct ifnet *ifp)
1662 {
1663 struct mvxpe_softc *sc = ifp->if_softc;
1664 struct mbuf *m;
1665 int q;
1666
1667 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
1668 DPRINTIFNET(ifp, 1, "not running\n");
1669 return;
1670 }
1671
1672 mvxpe_sc_lock(sc);
1673 if (!MVXPE_IS_LINKUP(sc)) {
1674 /* If Link is DOWN, can't start TX */
1675 DPRINTIFNET(ifp, 1, "link fail\n");
1676 for (;;) {
1677 /*
1678 			 * Discard all stale packets; they may confuse DAD,
1679 			 * ARP or timer-based protocols.
1680 */
1681 IFQ_DEQUEUE(&ifp->if_snd, m);
1682 if (m == NULL)
1683 break;
1684 m_freem(m);
1685 }
1686 mvxpe_sc_unlock(sc);
1687 return;
1688 }
1689 for (;;) {
1690 /*
1691 		 * Don't use IFQ_POLL(): there is a locking problem between
1692 		 * IFQ_POLL() and IFQ_DEQUEUE() on an SMP-enabled networking
1693 		 * stack.
1694 */
1695 IFQ_DEQUEUE(&ifp->if_snd, m);
1696 if (m == NULL)
1697 break;
1698
1699 q = mvxpe_tx_queue_select(sc, m);
1700 if (q < 0)
1701 break;
1702 /* mutex is held in mvxpe_tx_queue_select() */
1703
1704 if (mvxpe_tx_queue(sc, m, q) != 0) {
1705 DPRINTIFNET(ifp, 1, "cannot add packet to tx ring\n");
1706 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txerr);
1707 mvxpe_tx_unlockq(sc, q);
1708 break;
1709 }
1710 mvxpe_tx_unlockq(sc, q);
1711 KASSERT(sc->sc_tx_ring[q].tx_used >= 0);
1712 KASSERT(sc->sc_tx_ring[q].tx_used <=
1713 sc->sc_tx_ring[q].tx_queue_len);
1714 DPRINTIFNET(ifp, 1, "a packet is added to tx ring\n");
1715 sc->sc_tx_pending++;
1716 ifp->if_opackets++;
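		/* arm the one-second if_watchdog timer for this transmission */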
1717 ifp->if_timer = 1;
1718 sc->sc_wdogsoft = 1;
1719 bpf_mtap(ifp, m, BPF_D_OUT);
1720 }
1721 mvxpe_sc_unlock(sc);
1722
1723 return;
1724 }
1725
1726 STATIC int
1727 mvxpe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1728 {
1729 struct mvxpe_softc *sc = ifp->if_softc;
1730 struct ifreq *ifr = data;
1731 int error = 0;
1732 int s;
1733
1734 switch (cmd) {
1735 case SIOCGIFMEDIA:
1736 case SIOCSIFMEDIA:
1737 DPRINTIFNET(ifp, 2, "mvxpe_ioctl MEDIA\n");
1738 s = splnet(); /* XXX: is there suitable mutex? */
1739 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1740 splx(s);
1741 break;
1742 default:
1743 DPRINTIFNET(ifp, 2, "mvxpe_ioctl ETHER\n");
1744 error = ether_ioctl(ifp, cmd, data);
1745 if (error == ENETRESET) {
1746 if (ifp->if_flags & IFF_RUNNING) {
1747 mvxpe_sc_lock(sc);
1748 mvxpe_filter_setup(sc);
1749 mvxpe_sc_unlock(sc);
1750 }
1751 error = 0;
1752 }
1753 break;
1754 }
1755
1756 return error;
1757 }
1758
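/*
 * Bring the interface up: take the RX/TX DMA engines out of reset,
 * enable the MAC port, enable and refill every queue, unmask the
 * interrupts and start the periodic tick callout.
 */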
1759 STATIC int
1760 mvxpe_init(struct ifnet *ifp)
1761 {
1762 struct mvxpe_softc *sc = ifp->if_softc;
1763 struct mii_data *mii = &sc->sc_mii;
1764 uint32_t reg;
1765 int q;
1766
1767 mvxpe_sc_lock(sc);
1768
1769 /* Start DMA Engine */
1770 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
1771 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
1772 MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
1773
1774 /* Enable port */
1775 reg = MVXPE_READ(sc, MVXPE_PMACC0);
1776 reg |= MVXPE_PMACC0_PORTEN;
1777 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1778
1779 /* Link up */
1780 mvxpe_linkup(sc);
1781
1782 /* Enable all queues and the interrupts of each queue */
1783 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1784 mvxpe_rx_lockq(sc, q);
1785 mvxpe_rx_queue_enable(ifp, q);
1786 mvxpe_rx_queue_refill(sc, q);
1787 mvxpe_rx_unlockq(sc, q);
1788
1789 mvxpe_tx_lockq(sc, q);
1790 mvxpe_tx_queue_enable(ifp, q);
1791 mvxpe_tx_unlockq(sc, q);
1792 }
1793
1794 /* Enable interrupt */
1795 mvxpe_enable_intr(sc);
1796
1797 /* Start the tick callout (counter/MIB polling) */
1798 callout_schedule(&sc->sc_tick_ch, hz);
1799
1800 /* Media check */
1801 mii_mediachg(mii);
1802
1803 ifp->if_flags |= IFF_RUNNING;
1804 ifp->if_flags &= ~IFF_OACTIVE;
1805
1806 mvxpe_sc_unlock(sc);
1807 return 0;
1808 }
1809
1810 /* ARGSUSED */
1811 STATIC void
1812 mvxpe_stop(struct ifnet *ifp, int disable)
1813 {
1814 struct mvxpe_softc *sc = ifp->if_softc;
1815 uint32_t reg;
1816 int q, cnt;
1817
1818 DPRINTIFNET(ifp, 1, "stop device dma and interrupts.\n");
1819
1820 mvxpe_sc_lock(sc);
1821
1822 callout_stop(&sc->sc_tick_ch);
1823
1824 /* Link down */
1825 mvxpe_linkdown(sc);
1826
1827 /* Disable Rx interrupt */
1828 reg = MVXPE_READ(sc, MVXPE_PIE);
1829 reg &= ~MVXPE_PIE_RXPKTINTRPTENB_MASK;
1830 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1831
1832 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1833 reg &= ~MVXPE_PRXTXTI_RBICTAPQ_MASK;
1834 reg &= ~MVXPE_PRXTXTI_RDTAQ_MASK;
1835 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1836
1837 /* Wait for all Rx activity to terminate. */
1838 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
1839 reg = MVXPE_RQC_DIS(reg);
1840 MVXPE_WRITE(sc, MVXPE_RQC, reg);
1841 cnt = 0;
1842 do {
1843 if (cnt >= RX_DISABLE_TIMEOUT) {
1844 aprint_error_ifnet(ifp,
1845 "timeout waiting for RX to stop. rqc 0x%x\n", reg);
1846 break;
1847 }
1848 cnt++;
1849 reg = MVXPE_READ(sc, MVXPE_RQC);
1850 } while (reg & MVXPE_RQC_EN_MASK);
1851
1852 /* Wait for all Tx activity to terminate. */
1853 reg = MVXPE_READ(sc, MVXPE_PIE);
1854 reg &= ~MVXPE_PIE_TXPKTINTRPTENB_MASK;
1855 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1856
1857 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1858 reg &= ~MVXPE_PRXTXTI_TBTCQ_MASK;
1859 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1860
1861 reg = MVXPE_READ(sc, MVXPE_TQC) & MVXPE_TQC_EN_MASK;
1862 reg = MVXPE_TQC_DIS(reg);
1863 MVXPE_WRITE(sc, MVXPE_TQC, reg);
1864 cnt = 0;
1865 do {
1866 if (cnt >= TX_DISABLE_TIMEOUT) {
1867 aprint_error_ifnet(ifp,
1868 "timeout waiting for TX to stop. tqc 0x%x\n", reg);
1869 break;
1870 }
1871 cnt++;
1872 reg = MVXPE_READ(sc, MVXPE_TQC);
1873 } while (reg & MVXPE_TQC_EN_MASK);
1874
1875 /* Wait until the Tx FIFO is empty */
1876 cnt = 0;
1877 do {
1878 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
1879 aprint_error_ifnet(ifp,
1880 "timeout waiting for TX FIFO to drain. ps0 0x%x\n", reg);
1881 break;
1882 }
1883 cnt++;
1884 reg = MVXPE_READ(sc, MVXPE_PS0);
1885 } while (!(reg & MVXPE_PS0_TXFIFOEMP) && (reg & MVXPE_PS0_TXINPROG));
1886
1887 /* Reset the MAC Port Enable bit */
1888 reg = MVXPE_READ(sc, MVXPE_PMACC0);
1889 reg &= ~MVXPE_PMACC0_PORTEN;
1890 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1891
1892 /* Disable each queue */
1893 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1894 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1895
1896 mvxpe_rx_lockq(sc, q);
1897 mvxpe_tx_lockq(sc, q);
1898
1899 /* Disable Rx packet buffer refill request */
1900 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
1901 reg |= MVXPE_PRXDQTH_NODT(0);
1902 MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);
1903
1904 if (disable) {
1905 /*
1906 * Hold Reset state of DMA Engine
1907 * (must write 0x0 to restart it)
1908 */
1909 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
1910 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);
1911 mvxpe_ring_flush_queue(sc, q);
1912 }
1913
1914 mvxpe_tx_unlockq(sc, q);
1915 mvxpe_rx_unlockq(sc, q);
1916 }
1917
1918 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1919
1920 mvxpe_sc_unlock(sc);
1921 }
1922
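/*
 * Two-stage watchdog: mvxpe_start() arms a short (1 tick) timer with
 * sc_wdogsoft set.  On the first expiry only the TX queue is re-kicked
 * (MVXPE_TQC_ENQ) to recover from the CPU/DMA race described below and
 * the timer is re-armed; if that does not help, a full link reset and
 * reinitialization is performed.
 */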
1923 STATIC void
1924 mvxpe_watchdog(struct ifnet *ifp)
1925 {
1926 struct mvxpe_softc *sc = ifp->if_softc;
1927 int q;
1928
1929 mvxpe_sc_lock(sc);
1930
1931 /*
1932 * Reclaim first as there is a possibility of losing Tx completion
1933 * interrupts.
1934 */
1935 mvxpe_tx_complete(sc, 0xff);
1936 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1937 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1938
1939 if (tx->tx_dma != tx->tx_cpu) {
1940 if (sc->sc_wdogsoft) {
1941 /*
1942 * There is a race condition between the CPU and the DMA
1943 * engine. When the DMA engine encounters the end of the
1944 * queue, it clears the MVXPE_TQC_ENQ bit.
1945 * XXX: how about enhanced mode?
1946 */
1947 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
1948 ifp->if_timer = 5;
1949 sc->sc_wdogsoft = 0;
1950 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_wdogsoft);
1951 } else {
1952 aprint_error_ifnet(ifp, "watchdog timeout\n");
1953 ifp->if_oerrors++;
1954 mvxpe_linkreset(sc);
1955 mvxpe_sc_unlock(sc);
1956
1957 /* trigger reinitialize sequence */
1958 mvxpe_stop(ifp, 1);
1959 mvxpe_init(ifp);
1960
1961 mvxpe_sc_lock(sc);
1962 }
1963 }
1964 }
1965 mvxpe_sc_unlock(sc);
1966 }
1967
1968 STATIC int
1969 mvxpe_ifflags_cb(struct ethercom *ec)
1970 {
1971 struct ifnet *ifp = &ec->ec_if;
1972 struct mvxpe_softc *sc = ifp->if_softc;
1973 int change = ifp->if_flags ^ sc->sc_if_flags;
1974
1975 mvxpe_sc_lock(sc);
1976
1977 if (change != 0)
1978 sc->sc_if_flags = ifp->if_flags;
1979
1980 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
1981 mvxpe_sc_unlock(sc);
1982 return ENETRESET;
1983 }
1984
1985 if ((change & IFF_PROMISC) != 0)
1986 mvxpe_filter_setup(sc);
1987
1988 if ((change & IFF_UP) != 0)
1989 mvxpe_linkreset(sc);
1990
1991 mvxpe_sc_unlock(sc);
1992 return 0;
1993 }
1994
1995 STATIC int
1996 mvxpe_mediachange(struct ifnet *ifp)
1997 {
1998 return ether_mediachange(ifp);
1999 }
2000
2001 STATIC void
2002 mvxpe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2003 {
2004 ether_mediastatus(ifp, ifmr);
2005 }
2006
2007 /*
2008 * Link State Notify
2009 */
2010 STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc)
2011 {
2012 int linkup; /* bool */
2013
2014 KASSERT_SC_MTX(sc);
2015
2016 /* tell miibus */
2017 mii_pollstat(&sc->sc_mii);
2018
2019 /* syslog */
2020 linkup = MVXPE_IS_LINKUP(sc);
2021 if (sc->sc_linkstate == linkup)
2022 return;
2023
2024 #ifdef DEBUG
2025 log(LOG_DEBUG,
2026 "%s: link %s\n", device_xname(sc->sc_dev), linkup ? "up" : "down");
2027 #endif
2028 if (linkup)
2029 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up);
2030 else
2031 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_down);
2032
2033 sc->sc_linkstate = linkup;
2034 }
2035
2036 STATIC void
2037 mvxpe_linkup(struct mvxpe_softc *sc)
2038 {
2039 uint32_t reg;
2040
2041 KASSERT_SC_MTX(sc);
2042
2043 /* set EEE parameters */
2044 reg = MVXPE_READ(sc, MVXPE_LPIC1);
2045 if (sc->sc_cf.cf_lpi)
2046 reg |= MVXPE_LPIC1_LPIRE;
2047 else
2048 reg &= ~MVXPE_LPIC1_LPIRE;
2049 MVXPE_WRITE(sc, MVXPE_LPIC1, reg);
2050
2051 /* set auto-negotiation parameters */
2052 reg = MVXPE_READ(sc, MVXPE_PANC);
2053 if (sc->sc_cf.cf_fc) {
2054 /* flow control negotiation */
2055 reg |= MVXPE_PANC_PAUSEADV;
2056 reg |= MVXPE_PANC_ANFCEN;
2057 }
2058 else {
2059 reg &= ~MVXPE_PANC_PAUSEADV;
2060 reg &= ~MVXPE_PANC_ANFCEN;
2061 }
2062 reg &= ~MVXPE_PANC_FORCELINKFAIL;
2063 reg &= ~MVXPE_PANC_FORCELINKPASS;
2064 MVXPE_WRITE(sc, MVXPE_PANC, reg);
2065
2066 mii_mediachg(&sc->sc_mii);
2067 }
2068
2069 STATIC void
2070 mvxpe_linkdown(struct mvxpe_softc *sc)
2071 {
2072 struct mii_softc *mii;
2073 uint32_t reg;
2074
2075 KASSERT_SC_MTX(sc);
2076 return;	/* XXX: the code below is unreachable */
2077
2078 reg = MVXPE_READ(sc, MVXPE_PANC);
2079 reg |= MVXPE_PANC_FORCELINKFAIL;
2080 reg &= ~MVXPE_PANC_FORCELINKPASS;
2081 MVXPE_WRITE(sc, MVXPE_PANC, reg);
2082
2083 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2084 if (mii)
2085 mii_phy_down(mii);
2086 }
2087
2088 STATIC void
2089 mvxpe_linkreset(struct mvxpe_softc *sc)
2090 {
2091 struct mii_softc *mii;
2092
2093 KASSERT_SC_MTX(sc);
2094
2095 /* force reset PHY first */
2096 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2097 if (mii)
2098 mii_phy_reset(mii);
2099
2100 /* reinit MAC and PHY */
2101 mvxpe_linkdown(sc);
2102 if ((sc->sc_if_flags & IFF_UP) != 0)
2103 mvxpe_linkup(sc);
2104 }
2105
2106 /*
2107 * Tx Subroutines
2108 */
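/*
 * Note: mvxpe_tx_queue_select() returns with the selected queue's TX
 * lock held; the caller (mvxpe_start) releases it with
 * mvxpe_tx_unlockq() once mvxpe_tx_queue() has been called.
 */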
2109 STATIC int
2110 mvxpe_tx_queue_select(struct mvxpe_softc *sc, struct mbuf *m)
2111 {
2112 int q = 0;
2113
2114 /* XXX: get attribute from ALTQ framework? */
2115 mvxpe_tx_lockq(sc, q);
2116 return 0;
2117 }
2118
2119 STATIC int
2120 mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q)
2121 {
2122 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2123 bus_dma_segment_t *txsegs;
2124 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2125 struct mvxpe_tx_desc *t = NULL;
2126 uint32_t ptxsu;
2127 int txnsegs;
2128 int start, used;
2129 int i;
2130
2131 KASSERT_TX_MTX(sc, q);
2132 KASSERT(tx->tx_used >= 0);
2133 KASSERT(tx->tx_used <= tx->tx_queue_len);
2134
2135 /* load mbuf using dmamap of 1st descriptor */
2136 if (bus_dmamap_load_mbuf(sc->sc_dmat,
2137 MVXPE_TX_MAP(sc, q, tx->tx_cpu), m, BUS_DMA_NOWAIT) != 0) {
2138 m_freem(m);
2139 return ENOBUFS;
2140 }
2141 txsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_segs;
2142 txnsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_nsegs;
2143 if (txnsegs <= 0 || (txnsegs + tx->tx_used) > tx->tx_queue_len) {
2144 /* we don't have enough descriptors, or the mbuf is broken */
2145 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, tx->tx_cpu));
2146 m_freem(m);
2147 return ENOBUFS;
2148 }
2149 DPRINTSC(sc, 2, "send packet %p descriptor %d\n", m, tx->tx_cpu);
2150 KASSERT(MVXPE_TX_MBUF(sc, q, tx->tx_cpu) == NULL);
2151
2152 /* remember mbuf using 1st descriptor */
2153 MVXPE_TX_MBUF(sc, q, tx->tx_cpu) = m;
2154 bus_dmamap_sync(sc->sc_dmat,
2155 MVXPE_TX_MAP(sc, q, tx->tx_cpu), 0, m->m_pkthdr.len,
2156 BUS_DMASYNC_PREWRITE);
2157
2158 /* load to tx descriptors */
2159 start = tx->tx_cpu;
2160 used = 0;
2161 for (i = 0; i < txnsegs; i++) {
2162 if (__predict_false(txsegs[i].ds_len == 0))
2163 continue;
2164 t = MVXPE_TX_DESC(sc, q, tx->tx_cpu);
2165 t->command = 0;
2166 t->l4ichk = 0;
2167 t->flags = 0;
2168 if (i == 0) {
2169 /* 1st descriptor */
2170 t->command |= MVXPE_TX_CMD_W_PACKET_OFFSET(0);
2171 t->command |= MVXPE_TX_CMD_PADDING;
2172 t->command |= MVXPE_TX_CMD_F;
2173 mvxpe_tx_set_csumflag(ifp, t, m);
2174 }
2175 t->bufptr = txsegs[i].ds_addr;
2176 t->bytecnt = txsegs[i].ds_len;
2177 tx->tx_cpu = tx_counter_adv(tx->tx_cpu, 1);
2178 tx->tx_used++;
2179 used++;
2180 }
2181 /* t is last descriptor here */
2182 KASSERT(t != NULL);
2183 t->command |= MVXPE_TX_CMD_L;
2184
2185 DPRINTSC(sc, 2, "queue %d, %d descriptors used\n", q, used);
2186 #ifdef MVXPE_DEBUG
2187 if (mvxpe_debug > 2)
2188 for (i = start; i <= tx->tx_cpu; i++) {
2189 t = MVXPE_TX_DESC(sc, q, i);
2190 mvxpe_dump_txdesc(t, i);
2191 }
2192 #endif
2193 mvxpe_ring_sync_tx(sc, q, start, used,
2194 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2195
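/*
 * Tell the hardware how many new descriptors were written.  The
 * update is done in chunks of at most 255, which appears to be the
 * limit of the PTXSU NOWD field.
 */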
2196 while (used > 255) {
2197 ptxsu = MVXPE_PTXSU_NOWD(255);
2198 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2199 used -= 255;
2200 }
2201 if (used > 0) {
2202 ptxsu = MVXPE_PTXSU_NOWD(used);
2203 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2204 }
2205 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
2206
2207 DPRINTSC(sc, 2,
2208 "PTXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQA(q)));
2209 DPRINTSC(sc, 2,
2210 "PTXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));
2211 DPRINTSC(sc, 2,
2212 "PTXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXS(q)));
2213 DPRINTSC(sc, 2,
2214 "PTXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PTXDI(q)));
2215 DPRINTSC(sc, 2, "TQC: %#x\n", MVXPE_READ(sc, MVXPE_TQC));
2216 DPRINTIFNET(ifp, 2,
2217 "Tx: tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
2218 tx->tx_cpu, tx->tx_dma, tx->tx_used);
2219 return 0;
2220 }
2221
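/*
 * Translate the mbuf's M_CSUM_* offload requests into per-descriptor
 * command bits (L3/L4 protocol, IP header length and L3 offset) so the
 * controller generates the requested checksums.
 */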
2222 STATIC void
2223 mvxpe_tx_set_csumflag(struct ifnet *ifp,
2224 struct mvxpe_tx_desc *t, struct mbuf *m)
2225 {
2226 struct ether_header *eh;
2227 int csum_flags;
2228 uint32_t iphl = 0, ipoff = 0;
2229
2230
2231 csum_flags = ifp->if_csum_flags_tx & m->m_pkthdr.csum_flags;
2232
2233 eh = mtod(m, struct ether_header *);
2234 switch (htons(eh->ether_type)) {
2235 case ETHERTYPE_IP:
2236 case ETHERTYPE_IPV6:
2237 ipoff = ETHER_HDR_LEN;
2238 break;
2239 case ETHERTYPE_VLAN:
2240 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2241 break;
2242 }
2243
2244 if (csum_flags & (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2245 iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2246 t->command |= MVXPE_TX_CMD_L3_IP4;
2247 }
2248 else if (csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2249 iphl = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2250 t->command |= MVXPE_TX_CMD_L3_IP6;
2251 }
2252 else {
2253 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
2254 return;
2255 }
2256
2257
2258 /* L3 */
2259 if (csum_flags & M_CSUM_IPv4) {
2260 t->command |= MVXPE_TX_CMD_IP4_CHECKSUM;
2261 }
2262
2263 /* L4 */
2264 if ((csum_flags &
2265 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)) == 0) {
2266 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
2267 }
2268 else if (csum_flags & M_CSUM_TCPv4) {
2269 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2270 t->command |= MVXPE_TX_CMD_L4_TCP;
2271 }
2272 else if (csum_flags & M_CSUM_UDPv4) {
2273 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2274 t->command |= MVXPE_TX_CMD_L4_UDP;
2275 }
2276 else if (csum_flags & M_CSUM_TCPv6) {
2277 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2278 t->command |= MVXPE_TX_CMD_L4_TCP;
2279 }
2280 else if (csum_flags & M_CSUM_UDPv6) {
2281 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2282 t->command |= MVXPE_TX_CMD_L4_UDP;
2283 }
2284
2285 t->l4ichk = 0;
2286 t->command |= MVXPE_TX_CMD_IP_HEADER_LEN(iphl >> 2);
2287 t->command |= MVXPE_TX_CMD_L3_OFFSET(ipoff);
2288 }
2289
2290 STATIC void
2291 mvxpe_tx_complete(struct mvxpe_softc *sc, uint32_t queues)
2292 {
2293 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2294 int q;
2295
2296 DPRINTSC(sc, 2, "tx completed.\n");
2297
2298 KASSERT_SC_MTX(sc);
2299
2300 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2301 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2302 continue;
2303 mvxpe_tx_lockq(sc, q);
2304 mvxpe_tx_queue_complete(sc, q);
2305 mvxpe_tx_unlockq(sc, q);
2306 }
2307 KASSERT(sc->sc_tx_pending >= 0);
2308 if (sc->sc_tx_pending == 0)
2309 ifp->if_timer = 0;
2310 }
2311
2312 STATIC void
2313 mvxpe_tx_queue_complete(struct mvxpe_softc *sc, int q)
2314 {
2315 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2316 struct mvxpe_tx_desc *t;
2317 struct mbuf *m;
2318 uint32_t ptxs, ptxsu, ndesc;
2319 int i;
2320
2321 KASSERT_TX_MTX(sc, q);
2322
2323 ptxs = MVXPE_READ(sc, MVXPE_PTXS(q));
2324 ndesc = MVXPE_PTXS_GET_TBC(ptxs);
2325 if (ndesc == 0)
2326 return;
2327
2328 DPRINTSC(sc, 2,
2329 "tx complete queue %d, %d descriptors.\n", q, ndesc);
2330
2331 mvxpe_ring_sync_tx(sc, q, tx->tx_dma, ndesc,
2332 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2333
2334 for (i = 0; i < ndesc; i++) {
2335 int error = 0;
2336
2337 t = MVXPE_TX_DESC(sc, q, tx->tx_dma);
2338 if (t->flags & MVXPE_TX_F_ES) {
2339 DPRINTSC(sc, 1,
2340 "tx error queue %d desc %d\n",
2341 q, tx->tx_dma);
2342 switch (t->flags & MVXPE_TX_F_EC_MASK) {
2343 case MVXPE_TX_F_EC_LC:
2344 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_lc);
2345 break;
2346 case MVXPE_TX_F_EC_UR:
2347 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_ur);
2348 break;
2349 case MVXPE_TX_F_EC_RL:
2350 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_rl);
2351 break;
2352 default:
2353 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_oth);
2354 break;
2355 }
2356 error = 1;
2357 }
2358 m = MVXPE_TX_MBUF(sc, q, tx->tx_dma);
2359 if (m != NULL) {
2360 KASSERT((t->command & MVXPE_TX_CMD_F) != 0);
2361 MVXPE_TX_MBUF(sc, q, tx->tx_dma) = NULL;
2362 bus_dmamap_sync(sc->sc_dmat,
2363 MVXPE_TX_MAP(sc, q, tx->tx_dma), 0, m->m_pkthdr.len,
2364 BUS_DMASYNC_POSTWRITE);
2365 bus_dmamap_unload(sc->sc_dmat,
2366 MVXPE_TX_MAP(sc, q, tx->tx_dma));
2367 m_freem(m);
2368 sc->sc_tx_pending--;
2369 }
2370 else
2371 KASSERT((t->flags & MVXPE_TX_CMD_F) == 0);
2372 tx->tx_dma = tx_counter_adv(tx->tx_dma, 1);
2373 tx->tx_used--;
2374 if (error)
2375 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txqe[q]);
2376 else
2377 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txq[q]);
2378 }
2379 KASSERT(tx->tx_used >= 0);
2380 KASSERT(tx->tx_used <= tx->tx_queue_len);
2381 while (ndesc > 255) {
2382 ptxsu = MVXPE_PTXSU_NORB(255);
2383 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2384 ndesc -= 255;
2385 }
2386 if (ndesc > 0) {
2387 ptxsu = MVXPE_PTXSU_NORB(ndesc);
2388 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2389 }
2390 DPRINTSC(sc, 2,
2391 "Tx complete q %d, tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
2392 q, tx->tx_cpu, tx->tx_dma, tx->tx_used);
2393 }
2394
2395 /*
2396 * Rx Subroutines
2397 */
2398 STATIC void
2399 mvxpe_rx(struct mvxpe_softc *sc, uint32_t queues)
2400 {
2401 int q, npkt;
2402
2403 KASSERT_SC_MTX(sc);
2404
2405 while ((npkt = mvxpe_rx_queue_select(sc, queues, &q))) {
2406 /* mutex is held by rx_queue_select */
2407 mvxpe_rx_queue(sc, q, npkt);
2408 mvxpe_rx_unlockq(sc, q);
2409 }
2410 }
2411
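/*
 * Drain up to npkt received frames from RX queue q.  Each descriptor
 * points at a buffer-manager (mvxpbm) chunk; on success the chunk is
 * wrapped in an mbuf header, the Marvell hardware header is stripped
 * and the packet is passed to the network stack, otherwise the chunk
 * is returned to the pool.
 */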
2412 STATIC void
2413 mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt)
2414 {
2415 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2416 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2417 struct mvxpe_rx_desc *r;
2418 struct mvxpbm_chunk *chunk;
2419 struct mbuf *m;
2420 uint32_t prxsu;
2421 int error = 0;
2422 int i;
2423
2424 KASSERT_RX_MTX(sc, q);
2425
2426 mvxpe_ring_sync_rx(sc, q, rx->rx_dma, npkt,
2427 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2428
2429 for (i = 0; i < npkt; i++) {
2430 /* get descriptor and packet */
2431 chunk = MVXPE_RX_PKTBUF(sc, q, rx->rx_dma);
2432 MVXPE_RX_PKTBUF(sc, q, rx->rx_dma) = NULL;
2433 r = MVXPE_RX_DESC(sc, q, rx->rx_dma);
2434 mvxpbm_dmamap_sync(chunk, r->bytecnt, BUS_DMASYNC_POSTREAD);
2435
2436 /* check errors */
2437 if (r->status & MVXPE_RX_ES) {
2438 switch (r->status & MVXPE_RX_EC_MASK) {
2439 case MVXPE_RX_EC_CE:
2440 DPRINTIFNET(ifp, 1, "CRC error\n");
2441 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_ce);
2442 break;
2443 case MVXPE_RX_EC_OR:
2444 DPRINTIFNET(ifp, 1, "Rx FIFO overrun\n");
2445 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_or);
2446 break;
2447 case MVXPE_RX_EC_MF:
2448 DPRINTIFNET(ifp, 1, "Rx frame too large\n");
2449 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_mf);
2450 break;
2451 case MVXPE_RX_EC_RE:
2452 DPRINTIFNET(ifp, 1, "Rx resource error\n");
2453 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_re);
2454 break;
2455 }
2456 error = 1;
2457 goto rx_done;
2458 }
2459 if (!(r->status & MVXPE_RX_F) || !(r->status & MVXPE_RX_L)) {
2460 DPRINTIFNET(ifp, 1, "scatter buffers not supported\n");
2461 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_scat);
2462 error = 1;
2463 goto rx_done;
2464 }
2465
2466 if (chunk == NULL) {
2467 device_printf(sc->sc_dev,
2468 "got rx interrupt, but no chunk\n");
2469 error = 1;
2470 goto rx_done;
2471 }
2472
2473 /* extract packet buffer */
2474 if (mvxpbm_init_mbuf_hdr(chunk) != 0) {
2475 error = 1;
2476 goto rx_done;
2477 }
2478 m = chunk->m;
2479 m_set_rcvif(m, ifp);
2480 m->m_pkthdr.len = m->m_len = r->bytecnt - ETHER_CRC_LEN;
2481 m_adj(m, MVXPE_HWHEADER_SIZE); /* strip MH */
2482 mvxpe_rx_set_csumflag(ifp, r, m);
2483 if_percpuq_enqueue(ifp->if_percpuq, m);
2484 chunk = NULL; /* the BM chunk goes to networking stack now */
2485 rx_done:
2486 if (chunk) {
2487 /* rx error. just return the chunk to BM. */
2488 mvxpbm_free_chunk(chunk);
2489 }
2490 if (error)
2491 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxqe[q]);
2492 else
2493 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxq[q]);
2494 rx->rx_dma = rx_counter_adv(rx->rx_dma, 1);
2495 }
2496 /* DMA status update */
2497 DPRINTSC(sc, 2, "%d packets received from queue %d\n", npkt, q);
2498 while (npkt > 255) {
2499 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2500 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2501 npkt -= 255;
2502 }
2503 if (npkt > 0) {
2504 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(npkt);
2505 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2506 }
2507
2508 DPRINTSC(sc, 2,
2509 "PRXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQA(q)));
2510 DPRINTSC(sc, 2,
2511 "PRXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
2512 DPRINTSC(sc, 2,
2513 "PRXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXS(q)));
2514 DPRINTSC(sc, 2,
2515 "PRXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PRXDI(q)));
2516 DPRINTSC(sc, 2, "RQC: %#x\n", MVXPE_READ(sc, MVXPE_RQC));
2517 DPRINTIFNET(ifp, 2, "Rx: rx_cpu = %d, rx_dma = %d\n",
2518 rx->rx_cpu, rx->rx_dma);
2519 }
2520
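/*
 * Pick the next RX queue to service.  Queues are scanned from the
 * highest index down, so higher-numbered queues are serviced first.
 * Returns the number of pending packets and, like the TX variant,
 * returns with the queue lock held.
 */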
2521 STATIC int
2522 mvxpe_rx_queue_select(struct mvxpe_softc *sc, uint32_t queues, int *queue)
2523 {
2524 uint32_t prxs, npkt;
2525 int q;
2526
2527 KASSERT_SC_MTX(sc);
2528 KASSERT(queue != NULL);
2529 DPRINTSC(sc, 2, "selecting rx queue\n");
2530
2531 for (q = MVXPE_QUEUE_SIZE - 1; q >= 0; q--) {
2532 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2533 continue;
2534
2535 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2536 npkt = MVXPE_PRXS_GET_ODC(prxs);
2537 if (npkt == 0)
2538 continue;
2539
2540 DPRINTSC(sc, 2,
2541 "queue %d selected: prxs=%#x, %u packets received.\n",
2542 q, prxs, npkt);
2543 *queue = q;
2544 mvxpe_rx_lockq(sc, q);
2545 return npkt;
2546 }
2547
2548 return 0;
2549 }
2550
2551 STATIC void
2552 mvxpe_rx_refill(struct mvxpe_softc *sc, uint32_t queues)
2553 {
2554 int q;
2555
2556 KASSERT_SC_MTX(sc);
2557
2558 /* XXX: check rx bit array */
2559 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2560 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2561 continue;
2562
2563 mvxpe_rx_lockq(sc, q);
2564 mvxpe_rx_queue_refill(sc, q);
2565 mvxpe_rx_unlockq(sc, q);
2566 }
2567 }
2568
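/*
 * Top up RX queue q with fresh buffer-manager chunks.  The number of
 * buffers to add is the configured queue length minus the descriptors
 * still owned by the hardware or pending processing (NODC + ODC).
 */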
2569 STATIC void
2570 mvxpe_rx_queue_refill(struct mvxpe_softc *sc, int q)
2571 {
2572 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2573 uint32_t prxs, prxsu, ndesc;
2574 int idx, refill = 0;
2575 int npkt;
2576
2577 KASSERT_RX_MTX(sc, q);
2578
2579 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2580 ndesc = MVXPE_PRXS_GET_NODC(prxs) + MVXPE_PRXS_GET_ODC(prxs);
2581 refill = rx->rx_queue_len - ndesc;
2582 if (refill <= 0)
2583 return;
2584 DPRINTPRXS(2, q);
2585 DPRINTSC(sc, 2, "%d buffers to refill.\n", refill);
2586
2587 idx = rx->rx_cpu;
2588 for (npkt = 0; npkt < refill; npkt++)
2589 if (mvxpe_rx_queue_add(sc, q) != 0)
2590 break;
2591 DPRINTSC(sc, 2, "queue %d, %d buffer refilled.\n", q, npkt);
2592 if (npkt == 0)
2593 return;
2594
2595 mvxpe_ring_sync_rx(sc, q, idx, npkt,
2596 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2597
2598 while (npkt > 255) {
2599 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(255);
2600 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2601 npkt -= 255;
2602 }
2603 if (npkt > 0) {
2604 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(npkt);
2605 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2606 }
2607 DPRINTPRXS(2, q);
2608 return;
2609 }
2610
2611 STATIC int
2612 mvxpe_rx_queue_add(struct mvxpe_softc *sc, int q)
2613 {
2614 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2615 struct mvxpe_rx_desc *r;
2616 struct mvxpbm_chunk *chunk = NULL;
2617
2618 KASSERT_RX_MTX(sc, q);
2619
2620 /* Allocate the packet buffer */
2621 chunk = mvxpbm_alloc(sc->sc_bm);
2622 if (chunk == NULL) {
2623 DPRINTSC(sc, 1, "BM chunk allocation failed.\n");
2624 return ENOBUFS;
2625 }
2626
2627 /* Add the packet buffer to the descriptor */
2628 KASSERT(MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) == NULL);
2629 MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) = chunk;
2630 mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
2631
2632 r = MVXPE_RX_DESC(sc, q, rx->rx_cpu);
2633 r->bufptr = chunk->buf_pa;
2634 DPRINTSC(sc, 9, "chunk added to index %d\n", rx->rx_cpu);
2635 rx->rx_cpu = rx_counter_adv(rx->rx_cpu, 1);
2636 return 0;
2637 }
2638
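/*
 * Convert the RX descriptor status bits into mbuf checksum flags:
 * IPv4 header and TCP/UDP payload checksums verified by the chip are
 * marked good or bad, honoring the checksum capabilities currently
 * enabled on the interface.
 */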
2639 STATIC void
2640 mvxpe_rx_set_csumflag(struct ifnet *ifp,
2641 struct mvxpe_rx_desc *r, struct mbuf *m0)
2642 {
2643 uint32_t csum_flags = 0;
2644
2645 if ((r->status & (MVXPE_RX_IP_HEADER_OK|MVXPE_RX_L3_IP)) == 0)
2646 return; /* not an IP packet */
2647
2648 /* L3 */
2649 if (r->status & MVXPE_RX_L3_IP) {
2650 csum_flags |= M_CSUM_IPv4 & ifp->if_csum_flags_rx;
2651 if ((r->status & MVXPE_RX_IP_HEADER_OK) == 0 &&
2652 (csum_flags & M_CSUM_IPv4) != 0) {
2653 csum_flags |= M_CSUM_IPv4_BAD;
2654 goto finish;
2655 }
2656 else if (r->status & MVXPE_RX_IPV4_FRAGMENT) {
2657 /*
2658 * r->l4chk has the partial checksum of each fragment,
2659 * but there is no way to use it in NetBSD.
2660 */
2661 return;
2662 }
2663 }
2664
2665 /* L4 */
2666 switch (r->status & MVXPE_RX_L4_MASK) {
2667 case MVXPE_RX_L4_TCP:
2668 if (r->status & MVXPE_RX_L3_IP)
2669 csum_flags |= M_CSUM_TCPv4 & ifp->if_csum_flags_rx;
2670 else
2671 csum_flags |= M_CSUM_TCPv6 & ifp->if_csum_flags_rx;
2672 break;
2673 case MVXPE_RX_L4_UDP:
2674 if (r->status & MVXPE_RX_L3_IP)
2675 csum_flags |= M_CSUM_UDPv4 & ifp->if_csum_flags_rx;
2676 else
2677 csum_flags |= M_CSUM_UDPv6 & ifp->if_csum_flags_rx;
2678 break;
2679 case MVXPE_RX_L4_OTH:
2680 default:
2681 break;
2682 }
2683 if ((r->status & MVXPE_RX_L4_CHECKSUM_OK) == 0 && (csum_flags &
2684 (M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6)) != 0)
2685 csum_flags |= M_CSUM_TCP_UDP_BAD;
2686 finish:
2687 m0->m_pkthdr.csum_flags = csum_flags;
2688 }
2689
2690 /*
2691 * MAC address filter
2692 */
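/*
 * CRC-8 (polynomial x^8 + x^2 + x + 1, i.e. 0x07) over the multicast
 * address; the result is used as the index into the "other multicast"
 * (DFOMT) filter table.
 */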
2693 STATIC uint8_t
2694 mvxpe_crc8(const uint8_t *data, size_t size)
2695 {
2696 int bit;
2697 uint8_t byte;
2698 uint8_t crc = 0;
2699 const uint8_t poly = 0x07;
2700
2701 while (size--)
2702 for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
2703 crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
2704
2705 return crc;
2706 }
2707
2708 CTASSERT(MVXPE_NDFSMT == MVXPE_NDFOMT);
2709
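/*
 * The controller has three destination-address filter tables: DFUT for
 * unicast (indexed by the last nibble of our MAC address), DFSMT for the
 * "special" 01:00:5e:00:00:xx IPv4 multicast range (indexed by the last
 * octet) and DFOMT for all other multicast addresses (indexed by
 * mvxpe_crc8() of the address).  Each table entry carries a target RX
 * queue and a pass bit.
 */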
2710 STATIC void
2711 mvxpe_filter_setup(struct mvxpe_softc *sc)
2712 {
2713 struct ethercom *ec = &sc->sc_ethercom;
2714 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2715 struct ether_multi *enm;
2716 struct ether_multistep step;
2717 uint32_t dfut[MVXPE_NDFUT], dfsmt[MVXPE_NDFSMT], dfomt[MVXPE_NDFOMT];
2718 uint32_t pxc;
2719 int i;
2720 const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};
2721
2722 KASSERT_SC_MTX(sc);
2723
2724 memset(dfut, 0, sizeof(dfut));
2725 memset(dfsmt, 0, sizeof(dfsmt));
2726 memset(dfomt, 0, sizeof(dfomt));
2727
2728 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
2729 goto allmulti;
2730 }
2731
2732 ETHER_FIRST_MULTI(step, ec, enm);
2733 while (enm != NULL) {
2734 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2735 /* ranges are complex and somewhat rare */
2736 goto allmulti;
2737 }
2738 /* chip handles some IPv4 multicast specially */
2739 if (memcmp(enm->enm_addrlo, special, 5) == 0) {
2740 i = enm->enm_addrlo[5];
2741 dfsmt[i>>2] |=
2742 MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2743 } else {
2744 i = mvxpe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
2745 dfomt[i>>2] |=
2746 MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2747 }
2748
2749 ETHER_NEXT_MULTI(step, enm);
2750 }
2751 goto set;
2752
2753 allmulti:
2754 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
2755 for (i = 0; i < MVXPE_NDFSMT; i++) {
2756 dfsmt[i] = dfomt[i] =
2757 MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2758 MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2759 MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2760 MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2761 }
2762 }
2763
2764 set:
2765 pxc = MVXPE_READ(sc, MVXPE_PXC);
2766 pxc &= ~MVXPE_PXC_UPM;
2767 pxc |= MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP;
2768 if (ifp->if_flags & IFF_BROADCAST) {
2769 pxc &= ~(MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP);
2770 }
2771 if (ifp->if_flags & IFF_PROMISC) {
2772 pxc |= MVXPE_PXC_UPM;
2773 }
2774 MVXPE_WRITE(sc, MVXPE_PXC, pxc);
2775
2776 /* Set Destination Address Filter Unicast Table */
2777 if (ifp->if_flags & IFF_PROMISC) {
2778 /* pass all unicast addresses */
2779 for (i = 0; i < MVXPE_NDFUT; i++) {
2780 dfut[i] =
2781 MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2782 MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2783 MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2784 MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2785 }
2786 }
2787 else {
2788 i = sc->sc_enaddr[5] & 0xf; /* last nibble */
2789 dfut[i>>2] = MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2790 }
2791 MVXPE_WRITE_REGION(sc, MVXPE_DFUT(0), dfut, MVXPE_NDFUT);
2792
2793 /* Set Destination Address Filter Multicast Tables */
2794 MVXPE_WRITE_REGION(sc, MVXPE_DFSMT(0), dfsmt, MVXPE_NDFSMT);
2795 MVXPE_WRITE_REGION(sc, MVXPE_DFOMT(0), dfomt, MVXPE_NDFOMT);
2796 }
2797
2798 /*
2799 * sysctl(9)
2800 */
2801 SYSCTL_SETUP(sysctl_mvxpe, "sysctl mvxpe subtree setup")
2802 {
2803 int rc;
2804 const struct sysctlnode *node;
2805
2806 if ((rc = sysctl_createv(clog, 0, NULL, &node,
2807 0, CTLTYPE_NODE, "mvxpe",
2808 SYSCTL_DESCR("mvxpe interface controls"),
2809 NULL, 0, NULL, 0,
2810 CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
2811 goto err;
2812 }
2813
2814 mvxpe_root_num = node->sysctl_num;
2815 return;
2816
2817 err:
2818 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
2819 }
2820
2821 STATIC int
2822 sysctl_read_mib(SYSCTLFN_ARGS)
2823 {
2824 struct mvxpe_sysctl_mib *arg;
2825 struct mvxpe_softc *sc;
2826 struct sysctlnode node;
2827 uint64_t val;
2828 int err;
2829
2830 node = *rnode;
2831 arg = (struct mvxpe_sysctl_mib *)rnode->sysctl_data;
2832 if (arg == NULL)
2833 return EINVAL;
2834
2835 sc = arg->sc;
2836 if (sc == NULL)
2837 return EINVAL;
2838 if (arg->index < 0 || arg->index >= __arraycount(mvxpe_mib_list))
2839 return EINVAL;
2840
2841 mvxpe_sc_lock(sc);
2842 val = arg->counter;
2843 mvxpe_sc_unlock(sc);
2844
2845 node.sysctl_data = &val;
2846 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2847 if (err)
2848 return err;
2849 if (newp)
2850 return EINVAL;
2851
2852 return 0;
2853 }
2854
2855
2856 STATIC int
2857 sysctl_clear_mib(SYSCTLFN_ARGS)
2858 {
2859 struct mvxpe_softc *sc;
2860 struct sysctlnode node;
2861 int val;
2862 int err;
2863
2864 node = *rnode;
2865 sc = (struct mvxpe_softc *)rnode->sysctl_data;
2866 if (sc == NULL)
2867 return EINVAL;
2868
2869 val = 0;
2870 node.sysctl_data = &val;
2871 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2872 if (err || newp == NULL)
2873 return err;
2874 if (val < 0 || val > 1)
2875 return EINVAL;
2876 if (val == 1) {
2877 mvxpe_sc_lock(sc);
2878 mvxpe_clear_mib(sc);
2879 mvxpe_sc_unlock(sc);
2880 }
2881
2882 return 0;
2883 }
2884
2885 STATIC int
2886 sysctl_set_queue_length(SYSCTLFN_ARGS)
2887 {
2888 struct mvxpe_sysctl_queue *arg;
2889 struct mvxpe_rx_ring *rx = NULL;
2890 struct mvxpe_tx_ring *tx = NULL;
2891 struct mvxpe_softc *sc;
2892 struct sysctlnode node;
2893 uint32_t reg;
2894 int val;
2895 int err;
2896
2897 node = *rnode;
2898
2899 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
2900 if (arg == NULL)
2901 return EINVAL;
2902 if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT)
2903 return EINVAL;
2904 if (arg->rxtx != MVXPE_SYSCTL_RX && arg->rxtx != MVXPE_SYSCTL_TX)
2905 return EINVAL;
2906
2907 sc = arg->sc;
2908 if (sc == NULL)
2909 return EINVAL;
2910
2911 /* read queue length */
2912 mvxpe_sc_lock(sc);
2913 switch (arg->rxtx) {
2914 case MVXPE_SYSCTL_RX:
2915 mvxpe_rx_lockq(sc, arg->queue);
2916 rx = MVXPE_RX_RING(sc, arg->queue);
2917 val = rx->rx_queue_len;
2918 mvxpe_rx_unlockq(sc, arg->queue);
2919 break;
2920 case MVXPE_SYSCTL_TX:
2921 mvxpe_tx_lockq(sc, arg->queue);
2922 tx = MVXPE_TX_RING(sc, arg->queue);
2923 val = tx->tx_queue_len;
2924 mvxpe_tx_unlockq(sc, arg->queue);
2925 break;
2926 }
2927
2928 node.sysctl_data = &val;
2929 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2930 if (err || newp == NULL) {
2931 mvxpe_sc_unlock(sc);
2932 return err;
2933 }
2934
2935 /* update queue length */
2936 if (val < 8 || val > MVXPE_RX_RING_CNT) {
2937 mvxpe_sc_unlock(sc);
2938 return EINVAL;
2939 }
2940 switch (arg->rxtx) {
2941 case MVXPE_SYSCTL_RX:
2942 mvxpe_rx_lockq(sc, arg->queue);
2943 rx->rx_queue_len = val;
2944 rx->rx_queue_th_received =
2945 rx->rx_queue_len / MVXPE_RXTH_RATIO;
2946 rx->rx_queue_th_free =
2947 rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
2948
2949 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
2950 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
2951 MVXPE_WRITE(sc, MVXPE_PRXDQTH(arg->queue), reg);
2952
2953 mvxpe_rx_unlockq(sc, arg->queue);
2954 break;
2955 case MVXPE_SYSCTL_TX:
2956 mvxpe_tx_lockq(sc, arg->queue);
2957 tx->tx_queue_len = val;
2958 tx->tx_queue_th_free =
2959 tx->tx_queue_len / MVXPE_TXTH_RATIO;
2960
2961 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
2962 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
2963 MVXPE_WRITE(sc, MVXPE_PTXDQS(arg->queue), reg);
2964
2965 mvxpe_tx_unlockq(sc, arg->queue);
2966 break;
2967 }
2968 mvxpe_sc_unlock(sc);
2969
2970 return 0;
2971 }
2972
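/*
 * hw.mvxpe.mvxpe[unit].rx.[queue].threshold_timer_us: RX interrupt
 * coalescing timer.  The hardware register is programmed in TCLK cycles,
 * so the value is converted to and from microseconds using the
 * platform's mvTclk frequency.  For example (assuming unit 0, queue 0):
 * "sysctl -w hw.mvxpe.mvxpe0.rx.queue0.threshold_timer_us=1000" would
 * set a 1 ms coalescing timer.
 */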
2973 STATIC int
2974 sysctl_set_queue_rxthtime(SYSCTLFN_ARGS)
2975 {
2976 struct mvxpe_sysctl_queue *arg;
2977 struct mvxpe_rx_ring *rx = NULL;
2978 struct mvxpe_softc *sc;
2979 struct sysctlnode node;
2980 extern uint32_t mvTclk;
2981 uint32_t reg, time_mvtclk;
2982 int time_us;
2983 int err;
2984
2985 node = *rnode;
2986
2987 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
2988 if (arg == NULL)
2989 return EINVAL;
2990 if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT)
2991 return EINVAL;
2992 if (arg->rxtx != MVXPE_SYSCTL_RX)
2993 return EINVAL;
2994
2995 sc = arg->sc;
2996 if (sc == NULL)
2997 return EINVAL;
2998
2999 /* read the current interrupt threshold timer */
3000 mvxpe_sc_lock(sc);
3001 mvxpe_rx_lockq(sc, arg->queue);
3002 rx = MVXPE_RX_RING(sc, arg->queue);
3003 time_mvtclk = rx->rx_queue_th_time;
3004 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvTclk;
3005 node.sysctl_data = &time_us;
3006 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n",
3007 arg->queue, MVXPE_READ(sc, MVXPE_PRXITTH(arg->queue)));
3008 err = sysctl_lookup(SYSCTLFN_CALL(&node));
3009 if (err || newp == NULL) {
3010 mvxpe_rx_unlockq(sc, arg->queue);
3011 mvxpe_sc_unlock(sc);
3012 return err;
3013 }
3014
3015 /* update the threshold timer (0 - 1 sec) */
3016 if (time_us < 0 || time_us > (1000 * 1000)) {
3017 mvxpe_rx_unlockq(sc, arg->queue);
3018 mvxpe_sc_unlock(sc);
3019 return EINVAL;
3020 }
3021 time_mvtclk =
3022 (uint64_t)mvTclk * (uint64_t)time_us / (1000ULL * 1000ULL);
3023 rx->rx_queue_th_time = time_mvtclk;
3024 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
3025 MVXPE_WRITE(sc, MVXPE_PRXITTH(arg->queue), reg);
3026 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n", arg->queue, reg);
3027 mvxpe_rx_unlockq(sc, arg->queue);
3028 mvxpe_sc_unlock(sc);
3029
3030 return 0;
3031 }
3032
3033
3034 STATIC void
3035 sysctl_mvxpe_init(struct mvxpe_softc *sc)
3036 {
3037 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3038 const struct sysctlnode *node;
3039 int mvxpe_nodenum;
3040 int mvxpe_mibnum;
3041 int mvxpe_rxqueuenum;
3042 int mvxpe_txqueuenum;
3043 int q, i;
3044
3045 /* hw.mvxpe.mvxpe[unit] */
3046 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3047 0, CTLTYPE_NODE, ifp->if_xname,
3048 SYSCTL_DESCR("mvxpe per-controller controls"),
3049 NULL, 0, NULL, 0,
3050 CTL_HW, mvxpe_root_num, CTL_CREATE,
3051 CTL_EOL) != 0) {
3052 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3053 return;
3054 }
3055 mvxpe_nodenum = node->sysctl_num;
3056
3057 /* hw.mvxpe.mvxpe[unit].mib */
3058 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3059 0, CTLTYPE_NODE, "mib",
3060 SYSCTL_DESCR("mvxpe per-controller MIB counters"),
3061 NULL, 0, NULL, 0,
3062 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3063 CTL_EOL) != 0) {
3064 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3065 return;
3066 }
3067 mvxpe_mibnum = node->sysctl_num;
3068
3069 /* hw.mvxpe.mvxpe[unit].rx */
3070 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3071 0, CTLTYPE_NODE, "rx",
3072 SYSCTL_DESCR("Rx Queues"),
3073 NULL, 0, NULL, 0,
3074 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3075 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3076 return;
3077 }
3078 mvxpe_rxqueuenum = node->sysctl_num;
3079
3080 /* hw.mvxpe.mvxpe[unit].tx */
3081 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3082 0, CTLTYPE_NODE, "tx",
3083 SYSCTL_DESCR("Tx Queues"),
3084 NULL, 0, NULL, 0,
3085 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3086 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3087 return;
3088 }
3089 mvxpe_txqueuenum = node->sysctl_num;
3090
3091 #ifdef MVXPE_DEBUG
3092 /* hw.mvxpe.debug */
3093 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3094 CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
3095 SYSCTL_DESCR("mvxpe device driver debug control"),
3096 NULL, 0, &mvxpe_debug, 0,
3097 CTL_HW, mvxpe_root_num, CTL_CREATE, CTL_EOL) != 0) {
3098 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3099 return;
3100 }
3101 #endif
3102 /*
3103 * MIB access
3104 */
3105 /* hw.mvxpe.mvxpe[unit].mib.<mibs> */
3106 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3107 const char *name = mvxpe_mib_list[i].sysctl_name;
3108 const char *desc = mvxpe_mib_list[i].desc;
3109 struct mvxpe_sysctl_mib *mib_arg = &sc->sc_sysctl_mib[i];
3110
3111 mib_arg->sc = sc;
3112 mib_arg->index = i;
3113 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3114 CTLFLAG_READONLY, CTLTYPE_QUAD, name, desc,
3115 sysctl_read_mib, 0, (void *)mib_arg, 0,
3116 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_mibnum,
3117 CTL_CREATE, CTL_EOL) != 0) {
3118 aprint_normal_dev(sc->sc_dev,
3119 "couldn't create sysctl node\n");
3120 break;
3121 }
3122 }
3123
3124 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
3125 struct mvxpe_sysctl_queue *rxarg = &sc->sc_sysctl_rx_queue[q];
3126 struct mvxpe_sysctl_queue *txarg = &sc->sc_sysctl_tx_queue[q];
3127 #define MVXPE_SYSCTL_NAME(num) "queue" # num
3128 static const char *sysctl_queue_names[] = {
3129 MVXPE_SYSCTL_NAME(0), MVXPE_SYSCTL_NAME(1),
3130 MVXPE_SYSCTL_NAME(2), MVXPE_SYSCTL_NAME(3),
3131 MVXPE_SYSCTL_NAME(4), MVXPE_SYSCTL_NAME(5),
3132 MVXPE_SYSCTL_NAME(6), MVXPE_SYSCTL_NAME(7),
3133 };
3134 #undef MVXPE_SYSCTL_NAME
3135 #ifdef SYSCTL_INCLUDE_DESCR
3136 #define MVXPE_SYSCTL_DESCR(num) "configuration parameters for queue " # num
3137 static const char *sysctl_queue_descrs[] = {
3138 MVXPE_SYSCTL_DESCR(0), MVXPE_SYSCTL_DESCR(1),
3139 MVXPE_SYSCTL_DESCR(2), MVXPE_SYSCTL_DESCR(3),
3140 MVXPE_SYSCTL_DESCR(4), MVXPE_SYSCTL_DESCR(5),
3141 MVXPE_SYSCTL_DESCR(6), MVXPE_SYSCTL_DESCR(7),
3142 };
3143 #undef MVXPE_SYSCTL_DESCR
3144 #endif /* SYSCTL_INCLUDE_DESCR */
3145 int mvxpe_curnum;
3146
3147 rxarg->sc = txarg->sc = sc;
3148 rxarg->queue = txarg->queue = q;
3149 rxarg->rxtx = MVXPE_SYSCTL_RX;
3150 txarg->rxtx = MVXPE_SYSCTL_TX;
3151
3152 /* hw.mvxpe.mvxpe[unit].rx.[queue] */
3153 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3154 0, CTLTYPE_NODE,
3155 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3156 NULL, 0, NULL, 0,
3157 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3158 CTL_CREATE, CTL_EOL) != 0) {
3159 aprint_normal_dev(sc->sc_dev,
3160 "couldn't create sysctl node\n");
3161 break;
3162 }
3163 mvxpe_curnum = node->sysctl_num;
3164
3165 /* hw.mvxpe.mvxpe[unit].rx.[queue].length */
3166 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3167 CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3168 SYSCTL_DESCR("maximum length of the queue"),
3169 sysctl_set_queue_length, 0, (void *)rxarg, 0,
3170 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3171 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3172 aprint_normal_dev(sc->sc_dev,
3173 "couldn't create sysctl node\n");
3174 break;
3175 }
3176
3177 /* hw.mvxpe.mvxpe[unit].rx.[queue].threshold_timer_us */
3178 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3179 CTLFLAG_READWRITE, CTLTYPE_INT, "threshold_timer_us",
3180 SYSCTL_DESCR("interrupt coalescing threshold timer [us]"),
3181 sysctl_set_queue_rxthtime, 0, (void *)rxarg, 0,
3182 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3183 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3184 aprint_normal_dev(sc->sc_dev,
3185 "couldn't create sysctl node\n");
3186 break;
3187 }
3188
3189 /* hw.mvxpe.mvxpe[unit].tx.[queue] */
3190 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3191 0, CTLTYPE_NODE,
3192 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3193 NULL, 0, NULL, 0,
3194 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3195 CTL_CREATE, CTL_EOL) != 0) {
3196 aprint_normal_dev(sc->sc_dev,
3197 "couldn't create sysctl node\n");
3198 break;
3199 }
3200 mvxpe_curnum = node->sysctl_num;
3201
3202 /* hw.mvxpe.mvxpe[unit].tx.[queue].length */
3203 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3204 CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3205 SYSCTL_DESCR("maximum length of the queue"),
3206 sysctl_set_queue_length, 0, (void *)txarg, 0,
3207 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3208 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3209 aprint_normal_dev(sc->sc_dev,
3210 "couldn't create sysctl node\n");
3211 break;
3212 }
3213 }
3214
3215 /* hw.mvxpe.mvxpe[unit].clear_mib */
3216 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3217 CTLFLAG_READWRITE, CTLTYPE_INT, "clear_mib",
3218 SYSCTL_DESCR("write 1 to clear all MIB counters"),
3219 sysctl_clear_mib, 0, (void *)sc, 0,
3220 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3221 CTL_EOL) != 0) {
3222 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3223 return;
3224 }
3225
3226 }
3227
3228 /*
3229 * MIB
3230 */
3231 STATIC void
3232 mvxpe_clear_mib(struct mvxpe_softc *sc)
3233 {
3234 int i;
3235
3236 KASSERT_SC_MTX(sc);
3237
3238 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3239 if (mvxpe_mib_list[i].reg64)
3240 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum + 4));
3241 MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3242 sc->sc_sysctl_mib[i].counter = 0;
3243 }
3244 }
3245
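/*
 * The hardware MIB counters are cleared on read, so accumulate them into
 * the 64-bit software counters exported via sysctl and fold the error
 * and collision counts into the ifnet statistics.
 */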
3246 STATIC void
3247 mvxpe_update_mib(struct mvxpe_softc *sc)
3248 {
3249 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3250 int i;
3251
3252 KASSERT_SC_MTX(sc);
3253
3254 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3255 uint32_t val_hi;
3256 uint32_t val_lo;
3257 uint64_t val;
3258
3259 if (mvxpe_mib_list[i].reg64) {
3260 /* XXX: implement bus_space_read_8() */
3261 val_lo = MVXPE_READ_MIB(sc,
3262 (mvxpe_mib_list[i].regnum + 4));
3263 val_hi = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3264 }
3265 else {
3266 val_lo = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3267 val_hi = 0;
3268 }
3269
3270 if ((val_lo | val_hi) == 0)
3271 continue;
3272
3273 val = ((uint64_t)val_hi << 32) | (uint64_t)val_lo;
3274 sc->sc_sysctl_mib[i].counter += val;
3275
3276 switch (mvxpe_mib_list[i].ext) {
3277 case MVXPE_MIBEXT_IF_OERRORS:
3278 ifp->if_oerrors += val;
3279 break;
3280 case MVXPE_MIBEXT_IF_IERRORS:
3281 ifp->if_ierrors += val;
3282 break;
3283 case MVXPE_MIBEXT_IF_COLLISIONS:
3284 ifp->if_collisions += val;
3285 break;
3286 default:
3287 break;
3288 }
3289
3290 }
3291 }
3292
3293 /*
3294 * for Debug
3295 */
3296 STATIC void
3297 mvxpe_dump_txdesc(struct mvxpe_tx_desc *desc, int idx)
3298 {
3299 #define DESC_PRINT(X) \
3300 if (X) \
3301 printf("txdesc[%d]." #X "=%#x\n", idx, X);
3302
3303 DESC_PRINT(desc->command);
3304 DESC_PRINT(desc->l4ichk);
3305 DESC_PRINT(desc->bytecnt);
3306 DESC_PRINT(desc->bufptr);
3307 DESC_PRINT(desc->flags);
3308 #undef DESC_PRINT
3309 }
3310
3311 STATIC void
3312 mvxpe_dump_rxdesc(struct mvxpe_rx_desc *desc, int idx)
3313 {
3314 #define DESC_PRINT(X) \
3315 if (X) \
3316 printf("rxdesc[%d]." #X "=%#x\n", idx, X);
3317
3318 DESC_PRINT(desc->status);
3319 DESC_PRINT(desc->bytecnt);
3320 DESC_PRINT(desc->bufptr);
3321 DESC_PRINT(desc->l4chk);
3322 #undef DESC_PRINT
3323 }
3324