1 /* $NetBSD: if_mvxpe.c,v 1.13 2016/06/10 13:27:14 ozaki-r Exp $ */
2 /*
3 * Copyright (c) 2015 Internet Initiative Japan Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: if_mvxpe.c,v 1.13 2016/06/10 13:27:14 ozaki-r Exp $");
29
30 #include "opt_multiprocessor.h"
31
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/callout.h>
35 #include <sys/device.h>
36 #include <sys/endian.h>
37 #include <sys/errno.h>
38 #include <sys/evcnt.h>
39 #include <sys/kernel.h>
40 #include <sys/kmem.h>
41 #include <sys/mutex.h>
42 #include <sys/sockio.h>
43 #include <sys/sysctl.h>
44 #include <sys/syslog.h>
45 #include <sys/rndsource.h>
46
47 #include <net/if.h>
48 #include <net/if_ether.h>
49 #include <net/if_media.h>
50 #include <net/bpf.h>
51
52 #include <netinet/in.h>
53 #include <netinet/in_systm.h>
54 #include <netinet/ip.h>
55
56 #include <dev/mii/mii.h>
57 #include <dev/mii/miivar.h>
58
59 #include <dev/marvell/marvellreg.h>
60 #include <dev/marvell/marvellvar.h>
61 #include <dev/marvell/mvxpbmvar.h>
62 #include <dev/marvell/if_mvxpereg.h>
63 #include <dev/marvell/if_mvxpevar.h>
64
65 #include "locators.h"
66
67 #if BYTE_ORDER == BIG_ENDIAN
68 #error "BIG ENDIAN not supported"
69 #endif
70
71 #ifdef MVXPE_DEBUG
72 #define STATIC /* nothing */
73 #else
74 #define STATIC static
75 #endif
76
77 /* autoconf(9) */
78 STATIC int mvxpe_match(device_t, struct cfdata *, void *);
79 STATIC void mvxpe_attach(device_t, device_t, void *);
80 STATIC int mvxpe_evcnt_attach(struct mvxpe_softc *);
81 CFATTACH_DECL_NEW(mvxpe_mbus, sizeof(struct mvxpe_softc),
82 mvxpe_match, mvxpe_attach, NULL, NULL);
83 STATIC void mvxpe_sc_lock(struct mvxpe_softc *);
84 STATIC void mvxpe_sc_unlock(struct mvxpe_softc *);
85
86 /* MII */
87 STATIC int mvxpe_miibus_readreg(device_t, int, int);
88 STATIC void mvxpe_miibus_writereg(device_t, int, int, int);
89 STATIC void mvxpe_miibus_statchg(struct ifnet *);
90
91 /* Address Decoding Window */
92 STATIC void mvxpe_wininit(struct mvxpe_softc *, enum marvell_tags *);
93
94 /* Device Register Initialization */
95 STATIC int mvxpe_initreg(struct ifnet *);
96
97 /* Descriptor Ring Control for each queue */
98 STATIC void *mvxpe_dma_memalloc(struct mvxpe_softc *, bus_dmamap_t *, size_t);
99 STATIC int mvxpe_ring_alloc_queue(struct mvxpe_softc *, int);
100 STATIC void mvxpe_ring_dealloc_queue(struct mvxpe_softc *, int);
101 STATIC void mvxpe_ring_init_queue(struct mvxpe_softc *, int);
102 STATIC void mvxpe_ring_flush_queue(struct mvxpe_softc *, int);
103 STATIC void mvxpe_ring_sync_rx(struct mvxpe_softc *, int, int, int, int);
104 STATIC void mvxpe_ring_sync_tx(struct mvxpe_softc *, int, int, int, int);
105
106 /* Rx/Tx Queue Control */
107 STATIC int mvxpe_rx_queue_init(struct ifnet *, int);
108 STATIC int mvxpe_tx_queue_init(struct ifnet *, int);
109 STATIC int mvxpe_rx_queue_enable(struct ifnet *, int);
110 STATIC int mvxpe_tx_queue_enable(struct ifnet *, int);
111 STATIC void mvxpe_rx_lockq(struct mvxpe_softc *, int);
112 STATIC void mvxpe_rx_unlockq(struct mvxpe_softc *, int);
113 STATIC void mvxpe_tx_lockq(struct mvxpe_softc *, int);
114 STATIC void mvxpe_tx_unlockq(struct mvxpe_softc *, int);
115
116 /* Interrupt Handlers */
117 STATIC void mvxpe_disable_intr(struct mvxpe_softc *);
118 STATIC void mvxpe_enable_intr(struct mvxpe_softc *);
119 STATIC int mvxpe_rxtxth_intr(void *);
120 STATIC int mvxpe_misc_intr(void *);
121 STATIC int mvxpe_rxtx_intr(void *);
122 STATIC void mvxpe_tick(void *);
123
124 /* struct ifnet and mii callbacks */
125 STATIC void mvxpe_start(struct ifnet *);
126 STATIC int mvxpe_ioctl(struct ifnet *, u_long, void *);
127 STATIC int mvxpe_init(struct ifnet *);
128 STATIC void mvxpe_stop(struct ifnet *, int);
129 STATIC void mvxpe_watchdog(struct ifnet *);
130 STATIC int mvxpe_ifflags_cb(struct ethercom *);
131 STATIC int mvxpe_mediachange(struct ifnet *);
132 STATIC void mvxpe_mediastatus(struct ifnet *, struct ifmediareq *);
133
134 /* Link State Notify */
135 STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc);
136 STATIC void mvxpe_linkup(struct mvxpe_softc *);
137 STATIC void mvxpe_linkdown(struct mvxpe_softc *);
138 STATIC void mvxpe_linkreset(struct mvxpe_softc *);
139
140 /* Tx Subroutines */
141 STATIC int mvxpe_tx_queue_select(struct mvxpe_softc *, struct mbuf *);
142 STATIC int mvxpe_tx_queue(struct mvxpe_softc *, struct mbuf *, int);
143 STATIC void mvxpe_tx_set_csumflag(struct ifnet *,
144 struct mvxpe_tx_desc *, struct mbuf *);
145 STATIC void mvxpe_tx_complete(struct mvxpe_softc *, uint32_t);
146 STATIC void mvxpe_tx_queue_complete(struct mvxpe_softc *, int);
147
148 /* Rx Subroutines */
149 STATIC void mvxpe_rx(struct mvxpe_softc *, uint32_t);
150 STATIC void mvxpe_rx_queue(struct mvxpe_softc *, int, int);
151 STATIC int mvxpe_rx_queue_select(struct mvxpe_softc *, uint32_t, int *);
152 STATIC void mvxpe_rx_refill(struct mvxpe_softc *, uint32_t);
153 STATIC void mvxpe_rx_queue_refill(struct mvxpe_softc *, int);
154 STATIC int mvxpe_rx_queue_add(struct mvxpe_softc *, int);
155 STATIC void mvxpe_rx_set_csumflag(struct ifnet *,
156 struct mvxpe_rx_desc *, struct mbuf *);
157
158 /* MAC address filter */
159 STATIC uint8_t mvxpe_crc8(const uint8_t *, size_t);
160 STATIC void mvxpe_filter_setup(struct mvxpe_softc *);
161
162 /* sysctl(9) */
163 STATIC int sysctl_read_mib(SYSCTLFN_PROTO);
164 STATIC int sysctl_clear_mib(SYSCTLFN_PROTO);
165 STATIC int sysctl_set_queue_length(SYSCTLFN_PROTO);
166 STATIC int sysctl_set_queue_rxthtime(SYSCTLFN_PROTO);
167 STATIC void sysctl_mvxpe_init(struct mvxpe_softc *);
168
169 /* MIB */
170 STATIC void mvxpe_clear_mib(struct mvxpe_softc *);
171 STATIC void mvxpe_update_mib(struct mvxpe_softc *);
172
173 /* for Debug */
174 STATIC void mvxpe_dump_txdesc(struct mvxpe_tx_desc *, int) __attribute__((__unused__));
175 STATIC void mvxpe_dump_rxdesc(struct mvxpe_rx_desc *, int) __attribute__((__unused__));
176
177 STATIC int mvxpe_root_num;
178 STATIC kmutex_t mii_mutex;
179 STATIC int mii_init = 0;
180 #ifdef MVXPE_DEBUG
181 STATIC int mvxpe_debug = MVXPE_DEBUG;
182 #endif
183
184 /*
185 * List of MIB registers and names
186 */
187 STATIC struct mvxpe_mib_def {
188 uint32_t regnum;
189 int reg64;
190 const char *sysctl_name;
191 const char *desc;
192 int ext;
193 #define MVXPE_MIBEXT_IF_OERRORS 1
194 #define MVXPE_MIBEXT_IF_IERRORS 2
195 #define MVXPE_MIBEXT_IF_COLLISIONS 3
196 } mvxpe_mib_list[] = {
197 {MVXPE_MIB_RX_GOOD_OCT, 1, "rx_good_oct",
198 "Good Octets Rx", 0},
199 {MVXPE_MIB_RX_BAD_OCT, 0, "rx_bad_oct",
200 "Bad Octets Rx", 0},
201 {MVXPE_MIB_TX_MAC_TRNS_ERR, 0, "tx_mac_err",
202 "MAC Transmit Error", MVXPE_MIBEXT_IF_OERRORS},
203 {MVXPE_MIB_RX_GOOD_FRAME, 0, "rx_good_frame",
204 "Good Frames Rx", 0},
205 {MVXPE_MIB_RX_BAD_FRAME, 0, "rx_bad_frame",
206 "Bad Frames Rx", 0},
207 {MVXPE_MIB_RX_BCAST_FRAME, 0, "rx_bcast_frame",
208 "Broadcast Frames Rx", 0},
209 {MVXPE_MIB_RX_MCAST_FRAME, 0, "rx_mcast_frame",
210 "Multicast Frames Rx", 0},
211 {MVXPE_MIB_RX_FRAME64_OCT, 0, "rx_frame_1_64",
212 "Frame Size 1 - 64", 0},
213 {MVXPE_MIB_RX_FRAME127_OCT, 0, "rx_frame_65_127",
214 "Frame Size 65 - 127", 0},
215 {MVXPE_MIB_RX_FRAME255_OCT, 0, "rx_frame_128_255",
216 "Frame Size 128 - 255", 0},
217 {MVXPE_MIB_RX_FRAME511_OCT, 0, "rx_frame_256_511",
218 "Frame Size 256 - 511"},
219 {MVXPE_MIB_RX_FRAME1023_OCT, 0, "rx_frame_512_1023",
220 "Frame Size 512 - 1023", 0},
221 {MVXPE_MIB_RX_FRAMEMAX_OCT, 0, "rx_fame_1024_max",
222 "Frame Size 1024 - Max", 0},
223 {MVXPE_MIB_TX_GOOD_OCT, 1, "tx_good_oct",
224 "Good Octets Tx", 0},
225 {MVXPE_MIB_TX_GOOD_FRAME, 0, "tx_good_frame",
226 "Good Frames Tx", 0},
227 {MVXPE_MIB_TX_EXCES_COL, 0, "tx_exces_collision",
228 "Excessive Collision", MVXPE_MIBEXT_IF_OERRORS},
229 {MVXPE_MIB_TX_MCAST_FRAME, 0, "tx_mcast_frame",
230 "Multicast Frames Tx"},
231 {MVXPE_MIB_TX_BCAST_FRAME, 0, "tx_bcast_frame",
232 "Broadcast Frames Tx"},
233 {MVXPE_MIB_TX_MAC_CTL_ERR, 0, "tx_mac_err",
234 "Unknown MAC Control", 0},
235 {MVXPE_MIB_FC_SENT, 0, "fc_tx",
236 "Flow Control Tx", 0},
237 {MVXPE_MIB_FC_GOOD, 0, "fc_rx_good",
238 "Good Flow Control Rx", 0},
239 {MVXPE_MIB_FC_BAD, 0, "fc_rx_bad",
240 "Bad Flow Control Rx", 0},
241 {MVXPE_MIB_PKT_UNDERSIZE, 0, "pkt_undersize",
242 "Undersized Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
243 {MVXPE_MIB_PKT_FRAGMENT, 0, "pkt_fragment",
244 "Fragmented Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
245 {MVXPE_MIB_PKT_OVERSIZE, 0, "pkt_oversize",
246 "Oversized Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
247 {MVXPE_MIB_PKT_JABBER, 0, "pkt_jabber",
248 "Jabber Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
249 {MVXPE_MIB_MAC_RX_ERR, 0, "mac_rx_err",
250 "MAC Rx Errors", MVXPE_MIBEXT_IF_IERRORS},
251 {MVXPE_MIB_MAC_CRC_ERR, 0, "mac_crc_err",
252 "MAC CRC Errors", MVXPE_MIBEXT_IF_IERRORS},
253 {MVXPE_MIB_MAC_COL, 0, "mac_collision",
254 "MAC Collision", MVXPE_MIBEXT_IF_COLLISIONS},
255 {MVXPE_MIB_MAC_LATE_COL, 0, "mac_late_collision",
256 "MAC Late Collision", MVXPE_MIBEXT_IF_OERRORS},
257 };
258
259 /*
260 * autoconf(9)
261 */
262 /* ARGSUSED */
263 STATIC int
264 mvxpe_match(device_t parent, cfdata_t match, void *aux)
265 {
266 struct marvell_attach_args *mva = aux;
267 bus_size_t pv_off;
268 uint32_t pv;
269
270 if (strcmp(mva->mva_name, match->cf_name) != 0)
271 return 0;
272 if (mva->mva_offset == MVA_OFFSET_DEFAULT)
273 return 0;
274
275 /* check port version */
276 pv_off = mva->mva_offset + MVXPE_PV;
277 pv = bus_space_read_4(mva->mva_iot, mva->mva_ioh, pv_off);
278 if (MVXPE_PV_GET_VERSION(pv) < 0x10)
279 return 0; /* old version is not supported */
280
281 return 1;
282 }
283
284 /* ARGSUSED */
285 STATIC void
286 mvxpe_attach(device_t parent, device_t self, void *aux)
287 {
288 struct mvxpe_softc *sc = device_private(self);
289 struct mii_softc *mii;
290 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
291 struct marvell_attach_args *mva = aux;
292 prop_dictionary_t dict;
293 prop_data_t enaddrp = NULL;
294 uint32_t phyaddr, maddrh, maddrl;
295 uint8_t enaddr[ETHER_ADDR_LEN];
296 int q;
297
298 aprint_naive("\n");
299 aprint_normal(": Marvell ARMADA GbE Controller\n");
300 memset(sc, 0, sizeof(*sc));
301 sc->sc_dev = self;
302 sc->sc_port = mva->mva_unit;
303 sc->sc_iot = mva->mva_iot;
304 sc->sc_dmat = mva->mva_dmat;
305 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
306 callout_init(&sc->sc_tick_ch, 0);
307 callout_setfunc(&sc->sc_tick_ch, mvxpe_tick, sc);
308
309 /*
310 * BUS space
311 */
312 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
313 mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
314 aprint_error_dev(self, "Cannot map registers\n");
315 goto fail;
316 }
317 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
318 mva->mva_offset + MVXPE_PORTMIB_BASE, MVXPE_PORTMIB_SIZE,
319 &sc->sc_mibh)) {
320 aprint_error_dev(self,
321 "Cannot map destination address filter registers\n");
322 goto fail;
323 }
324 sc->sc_version = MVXPE_READ(sc, MVXPE_PV);
325 aprint_normal_dev(self, "Port Version %#x\n", sc->sc_version);
326
327 /*
328 * Buffer Manager(BM) subsystem.
329 */
330 sc->sc_bm = mvxpbm_device(mva);
331 if (sc->sc_bm == NULL) {
332 aprint_error_dev(self, "no Buffer Manager.\n");
333 goto fail;
334 }
335 aprint_normal_dev(self,
336 "Using Buffer Manager: %s\n", mvxpbm_xname(sc->sc_bm));
337 aprint_normal_dev(sc->sc_dev,
338 "%zu kbytes managed buffer, %zu bytes * %u entries allocated.\n",
339 mvxpbm_buf_size(sc->sc_bm) / 1024,
340 mvxpbm_chunk_size(sc->sc_bm), mvxpbm_chunk_count(sc->sc_bm));
341
342 /*
343 * make sure DMA engines are in reset state
344 */
345 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
346 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);
347
348 /*
349 * Address decoding window
350 */
351 mvxpe_wininit(sc, mva->mva_tags);
352
353 /*
354 * MAC address
355 */
356 dict = device_properties(self);
357 if (dict)
358 enaddrp = prop_dictionary_get(dict, "mac-address");
359 if (enaddrp) {
360 memcpy(enaddr, prop_data_data_nocopy(enaddrp), ETHER_ADDR_LEN);
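/* MACAH takes the first four octets of the address, MACAL the remaining two. */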
361 maddrh = enaddr[0] << 24;
362 maddrh |= enaddr[1] << 16;
363 maddrh |= enaddr[2] << 8;
364 maddrh |= enaddr[3];
365 maddrl = enaddr[4] << 8;
366 maddrl |= enaddr[5];
367 MVXPE_WRITE(sc, MVXPE_MACAH, maddrh);
368 MVXPE_WRITE(sc, MVXPE_MACAL, maddrl);
369 }
370 else {
371 /*
372 * even if enaddr is not found in the dictionary,
373 * the port may already be initialized by an IPL program such as U-BOOT.
374 */
375 maddrh = MVXPE_READ(sc, MVXPE_MACAH);
376 maddrl = MVXPE_READ(sc, MVXPE_MACAL);
377 if ((maddrh | maddrl) == 0) {
378 aprint_error_dev(self, "No Ethernet address\n");
379 return;
380 }
381 }
382 sc->sc_enaddr[0] = maddrh >> 24;
383 sc->sc_enaddr[1] = maddrh >> 16;
384 sc->sc_enaddr[2] = maddrh >> 8;
385 sc->sc_enaddr[3] = maddrh >> 0;
386 sc->sc_enaddr[4] = maddrl >> 8;
387 sc->sc_enaddr[5] = maddrl >> 0;
388 aprint_normal_dev(self, "Ethernet address %s\n",
389 ether_sprintf(sc->sc_enaddr));
390
391 /*
392 * Register interrupt handlers
393 * XXX: handle Ethernet unit intr. and Error intr.
394 */
395 mvxpe_disable_intr(sc);
396 marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpe_rxtxth_intr, sc);
397
398 /*
399 * MIB buffer allocation
400 */
401 sc->sc_sysctl_mib_size =
402 __arraycount(mvxpe_mib_list) * sizeof(struct mvxpe_sysctl_mib);
403 sc->sc_sysctl_mib = kmem_alloc(sc->sc_sysctl_mib_size, KM_NOSLEEP);
404 if (sc->sc_sysctl_mib == NULL)
405 goto fail;
406 memset(sc->sc_sysctl_mib, 0, sc->sc_sysctl_mib_size);
407
408 /*
409 * Device DMA Buffer allocation
410 */
411 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
412 if (mvxpe_ring_alloc_queue(sc, q) != 0)
413 goto fail;
414 mvxpe_ring_init_queue(sc, q);
415 }
416
417 /*
418 * We can support 802.1Q VLAN-sized frames and jumbo
419 * Ethernet frames.
420 */
421 sc->sc_ethercom.ec_capabilities |=
422 ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
423 ifp->if_softc = sc;
424 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
425 ifp->if_start = mvxpe_start;
426 ifp->if_ioctl = mvxpe_ioctl;
427 ifp->if_init = mvxpe_init;
428 ifp->if_stop = mvxpe_stop;
429 ifp->if_watchdog = mvxpe_watchdog;
430
431 /*
432 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
433 */
434 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx;
435 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx;
436 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx;
437 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx;
438 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
439 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
440 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx;
441 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Rx;
442 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
443 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
444
445 /*
446 * Initialize struct ifnet
447 */
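/* Size the send queue for nearly a full Tx ring, but never below IFQ_MAXLEN. */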
448 IFQ_SET_MAXLEN(&ifp->if_snd, max(MVXPE_TX_RING_CNT - 1, IFQ_MAXLEN));
449 IFQ_SET_READY(&ifp->if_snd);
450 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
451
452 /*
453 * Enable DMA engines and initialize device registers.
454 */
455 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
456 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
457 MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
458 mvxpe_sc_lock(sc); /* XXX */
459 mvxpe_filter_setup(sc);
460 mvxpe_sc_unlock(sc);
461 mvxpe_initreg(ifp);
462
463 /*
464 * Now MAC is working, setup MII.
465 */
466 if (mii_init == 0) {
467 /*
468 * MII bus is shared by all MACs and all PHYs in the SoC.
469 * Serializing the bus access should be safe.
470 */
471 mutex_init(&mii_mutex, MUTEX_DEFAULT, IPL_NET);
472 mii_init = 1;
473 }
474 sc->sc_mii.mii_ifp = ifp;
475 sc->sc_mii.mii_readreg = mvxpe_miibus_readreg;
476 sc->sc_mii.mii_writereg = mvxpe_miibus_writereg;
477 sc->sc_mii.mii_statchg = mvxpe_miibus_statchg;
478
479 sc->sc_ethercom.ec_mii = &sc->sc_mii;
480 ifmedia_init(&sc->sc_mii.mii_media, 0,
481 mvxpe_mediachange, mvxpe_mediastatus);
482 /*
483 * XXX: PHY addressing highly depends on the board design.
484 * We assume phyaddress == MAC unit number here,
485 * but some boards may differ.
486 */
487 mii_attach(self, &sc->sc_mii, 0xffffffff,
488 MII_PHY_ANY, sc->sc_dev->dv_unit, 0);
489 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
490 if (mii == NULL) {
491 aprint_error_dev(self, "no PHY found!\n");
492 ifmedia_add(&sc->sc_mii.mii_media,
493 IFM_ETHER|IFM_MANUAL, 0, NULL);
494 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
495 } else {
496 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
497 phyaddr = MVXPE_PHYADDR_PHYAD(mii->mii_phy);
498 MVXPE_WRITE(sc, MVXPE_PHYADDR, phyaddr);
499 DPRINTSC(sc, 1, "PHYADDR: %#x\n", MVXPE_READ(sc, MVXPE_PHYADDR));
500 }
501
502 /*
503 * Call MI attach routines.
504 */
505 if_attach(ifp);
506
507 ether_ifattach(ifp, sc->sc_enaddr);
508 ether_set_ifflags_cb(&sc->sc_ethercom, mvxpe_ifflags_cb);
509
510 sysctl_mvxpe_init(sc);
511 mvxpe_evcnt_attach(sc);
512 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
513 RND_TYPE_NET, RND_FLAG_DEFAULT);
514
515 return;
516
517 fail:
518 for (q = 0; q < MVXPE_QUEUE_SIZE; q++)
519 mvxpe_ring_dealloc_queue(sc, q);
520 if (sc->sc_sysctl_mib)
521 kmem_free(sc->sc_sysctl_mib, sc->sc_sysctl_mib_size);
522
523 return;
524 }
525
526 STATIC int
527 mvxpe_evcnt_attach(struct mvxpe_softc *sc)
528 {
529 #ifdef MVXPE_EVENT_COUNTERS
530 int q;
531
532 /* Master Interrupt Handler */
533 evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtxth, EVCNT_TYPE_INTR,
534 NULL, device_xname(sc->sc_dev), "RxTxTH Intr.");
535 evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtx, EVCNT_TYPE_INTR,
536 NULL, device_xname(sc->sc_dev), "RxTx Intr.");
537 evcnt_attach_dynamic(&sc->sc_ev.ev_i_misc, EVCNT_TYPE_INTR,
538 NULL, device_xname(sc->sc_dev), "MISC Intr.");
539
540 /* RXTXTH Interrupt */
541 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtxth_txerr, EVCNT_TYPE_INTR,
542 NULL, device_xname(sc->sc_dev), "RxTxTH Tx error summary");
543
544 /* MISC Interrupt */
545 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_phystatuschng, EVCNT_TYPE_INTR,
546 NULL, device_xname(sc->sc_dev), "MISC phy status changed");
547 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_linkchange, EVCNT_TYPE_INTR,
548 NULL, device_xname(sc->sc_dev), "MISC link status changed");
549 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_iae, EVCNT_TYPE_INTR,
550 NULL, device_xname(sc->sc_dev), "MISC internal address error");
551 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxoverrun, EVCNT_TYPE_INTR,
552 NULL, device_xname(sc->sc_dev), "MISC Rx FIFO overrun");
553 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxcrc, EVCNT_TYPE_INTR,
554 NULL, device_xname(sc->sc_dev), "MISC Rx CRC error");
555 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxlargepacket, EVCNT_TYPE_INTR,
556 NULL, device_xname(sc->sc_dev), "MISC Rx too large frame");
557 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txunderrun, EVCNT_TYPE_INTR,
558 NULL, device_xname(sc->sc_dev), "MISC Tx FIFO underrun");
559 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_prbserr, EVCNT_TYPE_INTR,
560 NULL, device_xname(sc->sc_dev), "MISC SERDES loopback test err");
561 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_srse, EVCNT_TYPE_INTR,
562 NULL, device_xname(sc->sc_dev), "MISC SERDES sync error");
563 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txreq, EVCNT_TYPE_INTR,
564 NULL, device_xname(sc->sc_dev), "MISC Tx resource error");
565
566 /* RxTx Interrupt */
567 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rreq, EVCNT_TYPE_INTR,
568 NULL, device_xname(sc->sc_dev), "RxTx Rx resource error");
569 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rpq, EVCNT_TYPE_INTR,
570 NULL, device_xname(sc->sc_dev), "RxTx Rx packet");
571 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_tbrq, EVCNT_TYPE_INTR,
572 NULL, device_xname(sc->sc_dev), "RxTx Tx complete");
573 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rxtxth, EVCNT_TYPE_INTR,
574 NULL, device_xname(sc->sc_dev), "RxTx RxTxTH summary");
575 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_txerr, EVCNT_TYPE_INTR,
576 NULL, device_xname(sc->sc_dev), "RxTx Tx error summary");
577 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_misc, EVCNT_TYPE_INTR,
578 NULL, device_xname(sc->sc_dev), "RxTx MISC summary");
579
580 /* Link */
581 evcnt_attach_dynamic(&sc->sc_ev.ev_link_up, EVCNT_TYPE_MISC,
582 NULL, device_xname(sc->sc_dev), "link up");
583 evcnt_attach_dynamic(&sc->sc_ev.ev_link_down, EVCNT_TYPE_MISC,
584 NULL, device_xname(sc->sc_dev), "link down");
585
586 /* Rx Descriptor */
587 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_ce, EVCNT_TYPE_MISC,
588 NULL, device_xname(sc->sc_dev), "Rx CRC error counter");
589 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_or, EVCNT_TYPE_MISC,
590 NULL, device_xname(sc->sc_dev), "Rx FIFO overrun counter");
591 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_mf, EVCNT_TYPE_MISC,
592 NULL, device_xname(sc->sc_dev), "Rx too large frame counter");
593 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_re, EVCNT_TYPE_MISC,
594 NULL, device_xname(sc->sc_dev), "Rx resource error counter");
595 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_scat, EVCNT_TYPE_MISC,
596 NULL, device_xname(sc->sc_dev), "Rx unexpected scatter bufs");
597
598 /* Tx Descriptor */
599 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_lc, EVCNT_TYPE_MISC,
600 NULL, device_xname(sc->sc_dev), "Tx late collision counter");
601 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_rl, EVCNT_TYPE_MISC,
602 NULL, device_xname(sc->sc_dev), "Tx excess. collision counter");
603 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_ur, EVCNT_TYPE_MISC,
604 NULL, device_xname(sc->sc_dev), "Tx FIFO underrun counter");
605 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_oth, EVCNT_TYPE_MISC,
606 NULL, device_xname(sc->sc_dev), "Tx unknown error counter");
607
608 /* Status Registers */
609 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pdfc, EVCNT_TYPE_MISC,
610 NULL, device_xname(sc->sc_dev), "Rx discard counter");
611 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pofc, EVCNT_TYPE_MISC,
612 NULL, device_xname(sc->sc_dev), "Rx overrun counter");
613 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txbadfcs, EVCNT_TYPE_MISC,
614 NULL, device_xname(sc->sc_dev), "Tx bad FCS counter");
615 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txdropped, EVCNT_TYPE_MISC,
616 NULL, device_xname(sc->sc_dev), "Tx dropped counter");
617 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_lpic, EVCNT_TYPE_MISC,
618 NULL, device_xname(sc->sc_dev), "LP_IDLE counter");
619
620 /* Device Driver Errors */
621 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_wdogsoft, EVCNT_TYPE_MISC,
622 NULL, device_xname(sc->sc_dev), "watchdog timer expired");
623 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txerr, EVCNT_TYPE_MISC,
624 NULL, device_xname(sc->sc_dev), "Tx descriptor alloc failed");
625 #define MVXPE_QUEUE_DESC(q) "Rx success in queue " # q
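/*
 * evcnt(9) keeps a reference to the name string rather than copying it,
 * so the per-queue names below must live in static storage.
 */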
626 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
627 static const char *rxq_desc[] = {
628 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
629 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
630 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
631 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
632 };
633 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxq[q], EVCNT_TYPE_MISC,
634 NULL, device_xname(sc->sc_dev), rxq_desc[q]);
635 }
636 #undef MVXPE_QUEUE_DESC
637 #define MVXPE_QUEUE_DESC(q) "Tx success in queue " # q
638 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
639 static const char *txq_desc[] = {
640 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
641 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
642 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
643 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
644 };
645 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txq[q], EVCNT_TYPE_MISC,
646 NULL, device_xname(sc->sc_dev), txq_desc[q]);
647 }
648 #undef MVXPE_QUEUE_DESC
649 #define MVXPE_QUEUE_DESC(q) "Rx error in queue " # q
650 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
651 static const char *rxqe_desc[] = {
652 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
653 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
654 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
655 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
656 };
657 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxqe[q], EVCNT_TYPE_MISC,
658 NULL, device_xname(sc->sc_dev), rxqe_desc[q]);
659 }
660 #undef MVXPE_QUEUE_DESC
661 #define MVXPE_QUEUE_DESC(q) "Tx error in queue " # q
662 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
663 static const char *txqe_desc[] = {
664 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
665 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
666 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
667 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
668 };
669 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txqe[q], EVCNT_TYPE_MISC,
670 NULL, device_xname(sc->sc_dev), txqe_desc[q]);
671 }
672 #undef MVXPE_QUEUE_DESC
673
674 #endif /* MVXPE_EVENT_COUNTERS */
675 return 0;
676 }
677
678 STATIC void
679 mvxpe_sc_lock(struct mvxpe_softc *sc)
680 {
681 mutex_enter(&sc->sc_mtx);
682 }
683
684 STATIC void
685 mvxpe_sc_unlock(struct mvxpe_softc *sc)
686 {
687 mutex_exit(&sc->sc_mtx);
688 }
689
690 /*
691 * MII
692 */
693 STATIC int
694 mvxpe_miibus_readreg(device_t dev, int phy, int reg)
695 {
696 struct mvxpe_softc *sc = device_private(dev);
697 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
698 uint32_t smi, val;
699 int i;
700
701 mutex_enter(&mii_mutex);
702
703 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
704 DELAY(1);
705 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
706 break;
707 }
708 if (i == MVXPE_PHY_TIMEOUT) {
709 aprint_error_ifnet(ifp, "SMI busy timeout\n");
710 mutex_exit(&mii_mutex);
711 return -1;
712 }
713
714 smi =
715 MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) | MVXPE_SMI_OPCODE_READ;
716 MVXPE_WRITE(sc, MVXPE_SMI, smi);
717
718 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
719 DELAY(1);
720 smi = MVXPE_READ(sc, MVXPE_SMI);
721 if (smi & MVXPE_SMI_READVALID)
722 break;
723 }
724
725 mutex_exit(&mii_mutex);
726
727 DPRINTDEV(dev, 9, "i=%d, timeout=%d\n", i, MVXPE_PHY_TIMEOUT);
728
729 val = smi & MVXPE_SMI_DATA_MASK;
730
731 DPRINTDEV(dev, 9, "phy=%d, reg=%#x, val=%#x\n", phy, reg, val);
732
733 return val;
734 }
735
736 STATIC void
737 mvxpe_miibus_writereg(device_t dev, int phy, int reg, int val)
738 {
739 struct mvxpe_softc *sc = device_private(dev);
740 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
741 uint32_t smi;
742 int i;
743
744 DPRINTDEV(dev, 9, "phy=%d reg=%#x val=%#x\n", phy, reg, val);
745
746 mutex_enter(&mii_mutex);
747
748 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
749 DELAY(1);
750 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
751 break;
752 }
753 if (i == MVXPE_PHY_TIMEOUT) {
754 aprint_error_ifnet(ifp, "SMI busy timeout\n");
755 mutex_exit(&mii_mutex);
756 return;
757 }
758
759 smi = MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) |
760 MVXPE_SMI_OPCODE_WRITE | (val & MVXPE_SMI_DATA_MASK);
761 MVXPE_WRITE(sc, MVXPE_SMI, smi);
762
763 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
764 DELAY(1);
765 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
766 break;
767 }
768
769 mutex_exit(&mii_mutex);
770
771 if (i == MVXPE_PHY_TIMEOUT)
772 aprint_error_ifnet(ifp, "phy write timed out\n");
773 }
774
775 STATIC void
776 mvxpe_miibus_statchg(struct ifnet *ifp)
777 {
778
779 /* nothing to do */
780 }
781
782 /*
783 * Address Decoding Window
784 */
785 STATIC void
786 mvxpe_wininit(struct mvxpe_softc *sc, enum marvell_tags *tags)
787 {
788 device_t pdev = device_parent(sc->sc_dev);
789 uint64_t base;
790 uint32_t en, ac, size;
791 int window, target, attr, rv, i;
792
793 /* First disable all address decode windows */
794 en = MVXPE_BARE_EN_MASK;
795 MVXPE_WRITE(sc, MVXPE_BARE, en);
796
797 ac = 0;
798 for (window = 0, i = 0;
799 tags[i] != MARVELL_TAG_UNDEFINED && window < MVXPE_NWINDOW; i++) {
800 rv = marvell_winparams_by_tag(pdev, tags[i],
801 &target, &attr, &base, &size);
802 if (rv != 0 || size == 0)
803 continue;
804
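/*
 * A base address above 4GB needs one of the high address remap
 * registers; only the first MVXPE_NREMAP windows have one.
 */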
805 if (base > 0xffffffffULL) {
806 if (window >= MVXPE_NREMAP) {
807 aprint_error_dev(sc->sc_dev,
808 "can't remap window %d\n", window);
809 continue;
810 }
811 MVXPE_WRITE(sc, MVXPE_HA(window),
812 (base >> 32) & 0xffffffff);
813 }
814
815 MVXPE_WRITE(sc, MVXPE_BASEADDR(window),
816 MVXPE_BASEADDR_TARGET(target) |
817 MVXPE_BASEADDR_ATTR(attr) |
818 MVXPE_BASEADDR_BASE(base));
819 MVXPE_WRITE(sc, MVXPE_S(window), MVXPE_S_SIZE(size));
820
821 DPRINTSC(sc, 1, "Window %d Base 0x%016llx: Size 0x%08x\n",
822 window, base, size);
823
824 en &= ~(1 << window);
825 /* set full access (r/w) */
826 ac |= MVXPE_EPAP_EPAR(window, MVXPE_EPAP_AC_FA);
827 window++;
828 }
829 /* allow access to the decode windows */
830 MVXPE_WRITE(sc, MVXPE_EPAP, ac);
831
832 MVXPE_WRITE(sc, MVXPE_BARE, en);
833 }
834
835 /*
836 * Device Register Initialization
837 * reset device registers to the device driver's default values.
838 * the device is not enabled here.
839 */
840 STATIC int
841 mvxpe_initreg(struct ifnet *ifp)
842 {
843 struct mvxpe_softc *sc = ifp->if_softc;
844 int serdes = 0;
845 uint32_t reg;
846 int q, i;
847
848 DPRINTIFNET(ifp, 1, "initializing device register\n");
849
850 /* Init TX/RX Queue Registers */
851 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
852 mvxpe_rx_lockq(sc, q);
853 if (mvxpe_rx_queue_init(ifp, q) != 0) {
854 aprint_error_ifnet(ifp,
855 "initialization failed: cannot initialize queue\n");
856 mvxpe_rx_unlockq(sc, q);
857 return ENOBUFS;
858 }
859 mvxpe_rx_unlockq(sc, q);
860
861 mvxpe_tx_lockq(sc, q);
862 if (mvxpe_tx_queue_init(ifp, q) != 0) {
863 aprint_error_ifnet(ifp,
864 "initialization failed: cannot initialize queue\n");
865 mvxpe_tx_unlockq(sc, q);
866 return ENOBUFS;
867 }
868 mvxpe_tx_unlockq(sc, q);
869 }
870
871 /* Tx MTU Limit */
872 MVXPE_WRITE(sc, MVXPE_TXMTU, MVXPE_MTU);
873
874 /* Check SGMII or SERDES (assume IPL/U-BOOT initialized this) */
875 reg = MVXPE_READ(sc, MVXPE_PMACC0);
876 if ((reg & MVXPE_PMACC0_PORTTYPE) != 0)
877 serdes = 1;
878
879 /* Ethernet Unit Control */
880 reg = MVXPE_READ(sc, MVXPE_EUC);
881 reg |= MVXPE_EUC_POLLING;
882 MVXPE_WRITE(sc, MVXPE_EUC, reg);
883
884 /* Auto Negotiation */
885 reg = MVXPE_PANC_MUSTSET; /* must write 0x1 */
886 reg |= MVXPE_PANC_FORCELINKFAIL;/* force link state down */
887 reg |= MVXPE_PANC_ANSPEEDEN; /* interface speed negotiation */
888 reg |= MVXPE_PANC_ANDUPLEXEN; /* negotiate duplex mode */
889 if (serdes) {
890 reg |= MVXPE_PANC_INBANDANEN; /* In Band negotiation */
891 reg |= MVXPE_PANC_INBANDANBYPASSEN; /* bypass negotiation */
892 reg |= MVXPE_PANC_SETFULLDX; /* set full-duplex on failure */
893 }
894 MVXPE_WRITE(sc, MVXPE_PANC, reg);
895
896 /* EEE: Low Power Idle */
897 reg = MVXPE_LPIC0_LILIMIT(MVXPE_LPI_LI);
898 reg |= MVXPE_LPIC0_TSLIMIT(MVXPE_LPI_TS);
899 MVXPE_WRITE(sc, MVXPE_LPIC0, reg);
900
901 reg = MVXPE_LPIC1_TWLIMIT(MVXPE_LPI_TS);
902 MVXPE_WRITE(sc, MVXPE_LPIC1, reg);
903
904 reg = MVXPE_LPIC2_MUSTSET;
905 MVXPE_WRITE(sc, MVXPE_LPIC2, reg);
906
907 /* Port MAC Control set 0 */
908 reg = MVXPE_PMACC0_MUSTSET; /* must write 0x1 */
909 reg &= ~MVXPE_PMACC0_PORTEN; /* port is still disabled */
910 reg |= MVXPE_PMACC0_FRAMESIZELIMIT(MVXPE_MRU);
911 if (serdes)
912 reg |= MVXPE_PMACC0_PORTTYPE;
913 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
914
915 /* Port MAC Control set 1 is only used for loop-back test */
916
917 /* Port MAC Control set 2 */
918 reg = MVXPE_READ(sc, MVXPE_PMACC2);
919 reg &= (MVXPE_PMACC2_PCSEN | MVXPE_PMACC2_RGMIIEN);
920 reg |= MVXPE_PMACC2_MUSTSET;
921 MVXPE_WRITE(sc, MVXPE_PMACC2, reg);
922
923 /* Port MAC Control set 3 is used for IPG tune */
924
925 /* Port MAC Control set 4 is not used */
926
927 /* Port Configuration */
928 /* Use queue 0 only */
929 reg = MVXPE_READ(sc, MVXPE_PXC);
930 reg &= ~(MVXPE_PXC_RXQ_MASK | MVXPE_PXC_RXQARP_MASK |
931 MVXPE_PXC_TCPQ_MASK | MVXPE_PXC_UDPQ_MASK | MVXPE_PXC_BPDUQ_MASK);
932 MVXPE_WRITE(sc, MVXPE_PXC, reg);
933
934 /* Port Configuration Extended: enable Tx CRC generation */
935 reg = MVXPE_READ(sc, MVXPE_PXCX);
936 reg &= ~MVXPE_PXCX_TXCRCDIS;
937 MVXPE_WRITE(sc, MVXPE_PXCX, reg);
938
939 /* clear MIB counter registers (cleared by read) */
940 for (i = 0; i < __arraycount(mvxpe_mib_list); i++)
941 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum));
942
943 /* Set SDC register except IPGINT bits */
944 reg = MVXPE_SDC_RXBSZ_16_64BITWORDS;
945 reg |= MVXPE_SDC_TXBSZ_16_64BITWORDS;
946 reg |= MVXPE_SDC_BLMR;
947 reg |= MVXPE_SDC_BLMT;
948 MVXPE_WRITE(sc, MVXPE_SDC, reg);
949
950 return 0;
951 }
952
953 /*
954 * Descriptor Ring Controls for each queue
955 */
956 STATIC void *
957 mvxpe_dma_memalloc(struct mvxpe_softc *sc, bus_dmamap_t *map, size_t size)
958 {
959 bus_dma_segment_t segs;
960 void *kva = NULL;
961 int nsegs;
962
963 /*
964 * Allocate the descriptor queues.
965 * struct mvxpe_ring_data contains an array of descriptors per queue.
966 */
967 if (bus_dmamem_alloc(sc->sc_dmat,
968 size, PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
969 aprint_error_dev(sc->sc_dev,
970 "can't alloc device memory (%zu bytes)\n", size);
971 return NULL;
972 }
973 if (bus_dmamem_map(sc->sc_dmat,
974 &segs, nsegs, size, &kva, BUS_DMA_NOWAIT)) {
975 aprint_error_dev(sc->sc_dev,
976 "can't map dma buffers (%zu bytes)\n", size);
977 goto fail1;
978 }
979
980 if (bus_dmamap_create(sc->sc_dmat,
981 size, 1, size, 0, BUS_DMA_NOWAIT, map)) {
982 aprint_error_dev(sc->sc_dev, "can't create dma map\n");
983 goto fail2;
984 }
985 if (bus_dmamap_load(sc->sc_dmat,
986 *map, kva, size, NULL, BUS_DMA_NOWAIT)) {
987 aprint_error_dev(sc->sc_dev, "can't load dma map\n");
988 goto fail3;
989 }
990 memset(kva, 0, size);
991 return kva;
992
993 fail3:
994 bus_dmamap_destroy(sc->sc_dmat, *map);
995 memset(map, 0, sizeof(*map));
996 fail2:
997 bus_dmamem_unmap(sc->sc_dmat, kva, size);
998 fail1:
999 bus_dmamem_free(sc->sc_dmat, &segs, nsegs);
1000 return NULL;
1001 }
1002
1003 STATIC int
1004 mvxpe_ring_alloc_queue(struct mvxpe_softc *sc, int q)
1005 {
1006 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1007 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1008
1009 /*
1010 * MVXPE_RX_RING_CNT and MVXPE_TX_RING_CNT are hard limits on the
1011 * queue length. The real queue length is limited by
1012 * sc->sc_rx_ring[q].rx_queue_len and sc->sc_tx_ring[q].tx_queue_len.
1013 *
1014 * Because descriptor ring reallocation needs reprogramming of
1015 * DMA registers, we allocate enough descriptors for the hard limit
1016 * of the queue length.
1017 */
1018 rx->rx_descriptors =
1019 mvxpe_dma_memalloc(sc, &rx->rx_descriptors_map,
1020 (sizeof(struct mvxpe_rx_desc) * MVXPE_RX_RING_CNT));
1021 if (rx->rx_descriptors == NULL)
1022 goto fail;
1023
1024 tx->tx_descriptors =
1025 mvxpe_dma_memalloc(sc, &tx->tx_descriptors_map,
1026 (sizeof(struct mvxpe_tx_desc) * MVXPE_TX_RING_CNT));
1027 if (tx->tx_descriptors == NULL)
1028 goto fail;
1029
1030 return 0;
1031 fail:
1032 mvxpe_ring_dealloc_queue(sc, q);
1033 aprint_error_dev(sc->sc_dev, "DMA Ring buffer allocation failure.\n");
1034 return ENOMEM;
1035 }
1036
1037 STATIC void
1038 mvxpe_ring_dealloc_queue(struct mvxpe_softc *sc, int q)
1039 {
1040 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1041 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1042 bus_dma_segment_t *segs;
1043 bus_size_t size;
1044 void *kva;
1045 int nsegs;
1046
1047 /* Rx */
1048 kva = (void *)MVXPE_RX_RING_MEM_VA(sc, q);
1049 if (kva) {
1050 segs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_segs;
1051 nsegs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_nsegs;
1052 size = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_mapsize;
1053
1054 bus_dmamap_unload(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
1055 bus_dmamap_destroy(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
1056 bus_dmamem_unmap(sc->sc_dmat, kva, size);
1057 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
1058 }
1059
1060 /* Tx */
1061 kva = (void *)MVXPE_TX_RING_MEM_VA(sc, q);
1062 if (kva) {
1063 segs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_segs;
1064 nsegs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_nsegs;
1065 size = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_mapsize;
1066
1067 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
1068 bus_dmamap_destroy(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
1069 bus_dmamem_unmap(sc->sc_dmat, kva, size);
1070 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
1071 }
1072
1073 /* Clear all dangling pointers */
1074 memset(rx, 0, sizeof(*rx));
1075 memset(tx, 0, sizeof(*tx));
1076 }
1077
1078 STATIC void
1079 mvxpe_ring_init_queue(struct mvxpe_softc *sc, int q)
1080 {
1081 struct mvxpe_rx_desc *rxd = MVXPE_RX_RING_MEM_VA(sc, q);
1082 struct mvxpe_tx_desc *txd = MVXPE_TX_RING_MEM_VA(sc, q);
1083 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1084 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1085 static const int rx_default_queue_len[] = {
1086 MVXPE_RX_QUEUE_LIMIT_0, MVXPE_RX_QUEUE_LIMIT_1,
1087 MVXPE_RX_QUEUE_LIMIT_2, MVXPE_RX_QUEUE_LIMIT_3,
1088 MVXPE_RX_QUEUE_LIMIT_4, MVXPE_RX_QUEUE_LIMIT_5,
1089 MVXPE_RX_QUEUE_LIMIT_6, MVXPE_RX_QUEUE_LIMIT_7,
1090 };
1091 static const int tx_default_queue_len[] = {
1092 MVXPE_TX_QUEUE_LIMIT_0, MVXPE_TX_QUEUE_LIMIT_1,
1093 MVXPE_TX_QUEUE_LIMIT_2, MVXPE_TX_QUEUE_LIMIT_3,
1094 MVXPE_TX_QUEUE_LIMIT_4, MVXPE_TX_QUEUE_LIMIT_5,
1095 MVXPE_TX_QUEUE_LIMIT_6, MVXPE_TX_QUEUE_LIMIT_7,
1096 };
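/*
 * mvTclk is the SoC core clock (TCLK) frequency in Hz; it is used
 * below to convert the 0.5 ms Rx interrupt threshold into clock ticks.
 */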
1097 extern uint32_t mvTclk;
1098 int i;
1099
1100 /* Rx handle */
1101 for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
1102 MVXPE_RX_DESC(sc, q, i) = &rxd[i];
1103 MVXPE_RX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_rx_desc) * i;
1104 MVXPE_RX_PKTBUF(sc, q, i) = NULL;
1105 }
1106 mutex_init(&rx->rx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
1107 rx->rx_dma = rx->rx_cpu = 0;
1108 rx->rx_queue_len = rx_default_queue_len[q];
1109 if (rx->rx_queue_len > MVXPE_RX_RING_CNT)
1110 rx->rx_queue_len = MVXPE_RX_RING_CNT;
1111 rx->rx_queue_th_received = rx->rx_queue_len / MVXPE_RXTH_RATIO;
1112 rx->rx_queue_th_free = rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
1113 rx->rx_queue_th_time = (mvTclk / 1000) / 2; /* 0.5 [ms] */
1114
1115 /* Tx handle */
1116 for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
1117 MVXPE_TX_DESC(sc, q, i) = &txd[i];
1118 MVXPE_TX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_tx_desc) * i;
1119 MVXPE_TX_MBUF(sc, q, i) = NULL;
1120 /* Tx handle needs a DMA map for bus_dmamap_load_mbuf() */
1121 if (bus_dmamap_create(sc->sc_dmat,
1122 mvxpbm_chunk_size(sc->sc_bm),
1123 MVXPE_TX_SEGLIMIT, mvxpbm_chunk_size(sc->sc_bm), 0,
1124 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
1125 &MVXPE_TX_MAP(sc, q, i))) {
1126 aprint_error_dev(sc->sc_dev,
1127 "can't create dma map (tx ring %d)\n", i);
1128 }
1129 }
1130 mutex_init(&tx->tx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
1131 tx->tx_dma = tx->tx_cpu = 0;
1132 tx->tx_queue_len = tx_default_queue_len[q];
1133 if (tx->tx_queue_len > MVXPE_TX_RING_CNT)
1134 tx->tx_queue_len = MVXPE_TX_RING_CNT;
1135 tx->tx_used = 0;
1136 tx->tx_queue_th_free = tx->tx_queue_len / MVXPE_TXTH_RATIO;
1137 }
1138
1139 STATIC void
1140 mvxpe_ring_flush_queue(struct mvxpe_softc *sc, int q)
1141 {
1142 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1143 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1144 int i;
1145
1146 KASSERT_RX_MTX(sc, q);
1147 KASSERT_TX_MTX(sc, q);
1148
1149 /* Rx handle */
1150 for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
1151 if (MVXPE_RX_PKTBUF(sc, q, i) == NULL)
1152 continue;
1153 mvxpbm_free_chunk(MVXPE_RX_PKTBUF(sc, q, i));
1154 MVXPE_RX_PKTBUF(sc, q, i) = NULL;
1155 }
1156 rx->rx_dma = rx->rx_cpu = 0;
1157
1158 /* Tx handle */
1159 for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
1160 if (MVXPE_TX_MBUF(sc, q, i) == NULL)
1161 continue;
1162 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, i));
1163 m_freem(MVXPE_TX_MBUF(sc, q, i));
1164 MVXPE_TX_MBUF(sc, q, i) = NULL;
1165 }
1166 tx->tx_dma = tx->tx_cpu = 0;
1167 tx->tx_used = 0;
1168 }
1169
1170 STATIC void
1171 mvxpe_ring_sync_rx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
1172 {
1173 int wrap;
1174
1175 KASSERT_RX_MTX(sc, q);
1176 KASSERT(count > 0 && count <= MVXPE_RX_RING_CNT);
1177 KASSERT(idx >= 0 && idx < MVXPE_RX_RING_CNT);
1178
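/*
 * If idx + count runs past the end of the ring, sync the wrapped
 * part (descriptors 0 .. wrap - 1) first, then the rest from idx.
 */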
1179 wrap = (idx + count) - MVXPE_RX_RING_CNT;
1180 if (wrap > 0) {
1181 count -= wrap;
1182 KASSERT(count > 0);
1183 bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
1184 0, sizeof(struct mvxpe_rx_desc) * wrap, ops);
1185 }
1186 bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
1187 MVXPE_RX_DESC_OFF(sc, q, idx),
1188 sizeof(struct mvxpe_rx_desc) * count, ops);
1189 }
1190
1191 STATIC void
1192 mvxpe_ring_sync_tx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
1193 {
1194 int wrap = 0;
1195
1196 KASSERT_TX_MTX(sc, q);
1197 KASSERT(count > 0 && count <= MVXPE_TX_RING_CNT);
1198 KASSERT(idx >= 0 && idx < MVXPE_TX_RING_CNT);
1199
1200 wrap = (idx + count) - MVXPE_TX_RING_CNT;
1201 if (wrap > 0) {
1202 count -= wrap;
1203 bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
1204 0, sizeof(struct mvxpe_tx_desc) * wrap, ops);
1205 }
1206 bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
1207 MVXPE_TX_DESC_OFF(sc, q, idx),
1208 sizeof(struct mvxpe_tx_desc) * count, ops);
1209 }
1210
1211 /*
1212 * Rx/Tx Queue Control
1213 */
1214 STATIC int
1215 mvxpe_rx_queue_init(struct ifnet *ifp, int q)
1216 {
1217 struct mvxpe_softc *sc = ifp->if_softc;
1218 uint32_t reg;
1219
1220 KASSERT_RX_MTX(sc, q);
1221 KASSERT(MVXPE_RX_RING_MEM_PA(sc, q) != 0);
1222
1223 /* descriptor address */
1224 MVXPE_WRITE(sc, MVXPE_PRXDQA(q), MVXPE_RX_RING_MEM_PA(sc, q));
1225
1226 /* Rx buffer size and descriptor ring size */
1227 reg = MVXPE_PRXDQS_BUFFERSIZE(mvxpbm_chunk_size(sc->sc_bm) >> 3);
1228 reg |= MVXPE_PRXDQS_DESCRIPTORSQUEUESIZE(MVXPE_RX_RING_CNT);
1229 MVXPE_WRITE(sc, MVXPE_PRXDQS(q), reg);
1230 DPRINTIFNET(ifp, 1, "PRXDQS(%d): %#x\n",
1231 q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
1232
1233 /* Rx packet offset address */
1234 reg = MVXPE_PRXC_PACKETOFFSET(mvxpbm_packet_offset(sc->sc_bm) >> 3);
1235 MVXPE_WRITE(sc, MVXPE_PRXC(q), reg);
1236 DPRINTIFNET(ifp, 1, "PRXC(%d): %#x\n",
1237 q, MVXPE_READ(sc, MVXPE_PRXC(q)));
1238
1239 /* Rx DMA SNOOP */
1240 reg = MVXPE_PRXSNP_SNOOPNOOFBYTES(MVXPE_MRU);
1241 reg |= MVXPE_PRXSNP_L2DEPOSITNOOFBYTES(MVXPE_MRU);
1242 MVXPE_WRITE(sc, MVXPE_PRXSNP(q), reg);
1243
1244 /* if DMA is not working, the register is not updated */
1245 KASSERT(MVXPE_READ(sc, MVXPE_PRXDQA(q)) == MVXPE_RX_RING_MEM_PA(sc, q));
1246 return 0;
1247 }
1248
1249 STATIC int
1250 mvxpe_tx_queue_init(struct ifnet *ifp, int q)
1251 {
1252 struct mvxpe_softc *sc = ifp->if_softc;
1253 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1254 uint32_t reg;
1255
1256 KASSERT_TX_MTX(sc, q);
1257 KASSERT(MVXPE_TX_RING_MEM_PA(sc, q) != 0);
1258
1259 /* descriptor address */
1260 MVXPE_WRITE(sc, MVXPE_PTXDQA(q), MVXPE_TX_RING_MEM_PA(sc, q));
1261
1262 /* Tx threshold, and descriptor ring size */
1263 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
1264 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
1265 MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
1266 DPRINTIFNET(ifp, 1, "PTXDQS(%d): %#x\n",
1267 q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));
1268
1269 /* if DMA is not working, the register is not updated */
1270 KASSERT(MVXPE_READ(sc, MVXPE_PTXDQA(q)) == MVXPE_TX_RING_MEM_PA(sc, q));
1271 return 0;
1272 }
1273
1274 STATIC int
1275 mvxpe_rx_queue_enable(struct ifnet *ifp, int q)
1276 {
1277 struct mvxpe_softc *sc = ifp->if_softc;
1278 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1279 uint32_t reg;
1280
1281 KASSERT_RX_MTX(sc, q);
1282
1283 /* Set Rx interrupt threshold */
1284 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
1285 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
1286 MVXPE_WRITE(sc, MVXPE_PRXDQTH(q), reg);
1287
1288 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
1289 MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);
1290
1291 /* Unmask RXTX_TH Intr. */
1292 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1293 reg |= MVXPE_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
1294 reg |= MVXPE_PRXTXTI_RDTAQ(q); /* Rx Descriptor Alert */
1295 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1296
1297 /* Enable Rx queue */
1298 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
1299 reg |= MVXPE_RQC_ENQ(q);
1300 MVXPE_WRITE(sc, MVXPE_RQC, reg);
1301
1302 return 0;
1303 }
1304
1305 STATIC int
1306 mvxpe_tx_queue_enable(struct ifnet *ifp, int q)
1307 {
1308 struct mvxpe_softc *sc = ifp->if_softc;
1309 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1310 uint32_t reg;
1311
1312 KASSERT_TX_MTX(sc, q);
1313
1314 /* Set Tx interrupt threshold */
1315 reg = MVXPE_READ(sc, MVXPE_PTXDQS(q));
1316 reg &= ~MVXPE_PTXDQS_TBT_MASK; /* keep queue size */
1317 reg |= MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
1318 MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
1319
1320 /* Unmask RXTX_TH Intr. */
1321 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1322 reg |= MVXPE_PRXTXTI_TBTCQ(q); /* Tx Threshold cross */
1323 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1324
1325 /* Don't update MVXPE_TQC here, there are no packets yet. */
1326 return 0;
1327 }
1328
1329 STATIC void
1330 mvxpe_rx_lockq(struct mvxpe_softc *sc, int q)
1331 {
1332 KASSERT(q >= 0);
1333 KASSERT(q < MVXPE_QUEUE_SIZE);
1334 mutex_enter(&sc->sc_rx_ring[q].rx_ring_mtx);
1335 }
1336
1337 STATIC void
1338 mvxpe_rx_unlockq(struct mvxpe_softc *sc, int q)
1339 {
1340 KASSERT(q >= 0);
1341 KASSERT(q < MVXPE_QUEUE_SIZE);
1342 mutex_exit(&sc->sc_rx_ring[q].rx_ring_mtx);
1343 }
1344
1345 STATIC void
1346 mvxpe_tx_lockq(struct mvxpe_softc *sc, int q)
1347 {
1348 KASSERT(q >= 0);
1349 KASSERT(q < MVXPE_QUEUE_SIZE);
1350 mutex_enter(&sc->sc_tx_ring[q].tx_ring_mtx);
1351 }
1352
1353 STATIC void
1354 mvxpe_tx_unlockq(struct mvxpe_softc *sc, int q)
1355 {
1356 KASSERT(q >= 0);
1357 KASSERT(q < MVXPE_QUEUE_SIZE);
1358 mutex_exit(&sc->sc_tx_ring[q].tx_ring_mtx);
1359 }
1360
1361 /*
1362 * Interrupt Handlers
1363 */
1364 STATIC void
1365 mvxpe_disable_intr(struct mvxpe_softc *sc)
1366 {
1367 MVXPE_WRITE(sc, MVXPE_EUIM, 0);
1368 MVXPE_WRITE(sc, MVXPE_EUIC, 0);
1369 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, 0);
1370 MVXPE_WRITE(sc, MVXPE_PRXTXTIC, 0);
1371 MVXPE_WRITE(sc, MVXPE_PRXTXIM, 0);
1372 MVXPE_WRITE(sc, MVXPE_PRXTXIC, 0);
1373 MVXPE_WRITE(sc, MVXPE_PMIM, 0);
1374 MVXPE_WRITE(sc, MVXPE_PMIC, 0);
1375 MVXPE_WRITE(sc, MVXPE_PIE, 0);
1376 }
1377
1378 STATIC void
1379 mvxpe_enable_intr(struct mvxpe_softc *sc)
1380 {
1381 uint32_t reg;
1382
1383 /* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
1384 reg = MVXPE_READ(sc, MVXPE_PMIM);
1385 reg |= MVXPE_PMI_PHYSTATUSCHNG;
1386 reg |= MVXPE_PMI_LINKCHANGE;
1387 reg |= MVXPE_PMI_IAE;
1388 reg |= MVXPE_PMI_RXOVERRUN;
1389 reg |= MVXPE_PMI_RXCRCERROR;
1390 reg |= MVXPE_PMI_RXLARGEPACKET;
1391 reg |= MVXPE_PMI_TXUNDRN;
1392 #if 0
1393 /*
1394 * The device may raise false interrupts for SERDES even if the device
1395 * is not configured to use SERDES connection.
1396 */
1397 reg |= MVXPE_PMI_PRBSERROR;
1398 reg |= MVXPE_PMI_SRSE;
1399 #else
1400 reg &= ~MVXPE_PMI_PRBSERROR;
1401 reg &= ~MVXPE_PMI_SRSE;
1402 #endif
1403 reg |= MVXPE_PMI_TREQ_MASK;
1404 MVXPE_WRITE(sc, MVXPE_PMIM, reg);
1405
1406 /* Enable Summary Bit to check all interrupt causes. */
1407 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1408 reg |= MVXPE_PRXTXTI_PMISCICSUMMARY;
1409 reg |= MVXPE_PRXTXTI_PTXERRORSUMMARY;
1410 reg |= MVXPE_PRXTXTI_PRXTXICSUMMARY;
1411 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1412
1413 /* Enable all queue interrupts */
1414 reg = MVXPE_READ(sc, MVXPE_PIE);
1415 reg |= MVXPE_PIE_RXPKTINTRPTENB_MASK;
1416 reg |= MVXPE_PIE_TXPKTINTRPTENB_MASK;
1417 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1418 }
1419
1420 STATIC int
1421 mvxpe_rxtxth_intr(void *arg)
1422 {
1423 struct mvxpe_softc *sc = arg;
1424 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1425 uint32_t ic, queues, datum = 0;
1426
1427 DPRINTSC(sc, 2, "got RXTX_TH_Intr\n");
1428 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtxth);
1429
1430 mvxpe_sc_lock(sc);
1431 ic = MVXPE_READ(sc, MVXPE_PRXTXTIC);
1432 if (ic == 0) {
1433 mvxpe_sc_unlock(sc);
1434 return 0;
1435 }
1436 MVXPE_WRITE(sc, MVXPE_PRXTXTIC, ~ic);
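/* Collect the cause bits; they are fed to rnd_add_uint32() below as an entropy sample. */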
1437 datum = datum ^ ic;
1438
1439 DPRINTIFNET(ifp, 2, "PRXTXTIC: %#x\n", ic);
1440
1441 /* ack maintenance interrupts first */
1442 if (ic & MVXPE_PRXTXTI_PTXERRORSUMMARY) {
1443 DPRINTIFNET(ifp, 1, "PRXTXTIC: +PTXERRORSUMMARY\n");
1444 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtxth_txerr);
1445 }
1446 if ((ic & MVXPE_PRXTXTI_PMISCICSUMMARY)) {
1447 DPRINTIFNET(ifp, 2, "PRXTXTIC: +PMISCICSUMMARY\n");
1448 mvxpe_misc_intr(sc);
1449 }
1450 if (ic & MVXPE_PRXTXTI_PRXTXICSUMMARY) {
1451 DPRINTIFNET(ifp, 2, "PRXTXTIC: +PRXTXICSUMMARY\n");
1452 mvxpe_rxtx_intr(sc);
1453 }
1454 if (!(ifp->if_flags & IFF_RUNNING)) {
1455 mvxpe_sc_unlock(sc);
1456 return 1;
1457 }
1458
1459 /* RxTxTH interrupt */
1460 queues = MVXPE_PRXTXTI_GET_RBICTAPQ(ic);
1461 if (queues) {
1462 DPRINTIFNET(ifp, 2, "PRXTXTIC: +RXEOF\n");
1463 mvxpe_rx(sc, queues);
1464 }
1465 queues = MVXPE_PRXTXTI_GET_TBTCQ(ic);
1466 if (queues) {
1467 DPRINTIFNET(ifp, 2, "PRXTXTIC: +TBTCQ\n");
1468 mvxpe_tx_complete(sc, queues);
1469 }
1470 queues = MVXPE_PRXTXTI_GET_RDTAQ(ic);
1471 if (queues) {
1472 DPRINTIFNET(ifp, 2, "PRXTXTIC: +RDTAQ\n");
1473 mvxpe_rx_refill(sc, queues);
1474 }
1475 mvxpe_sc_unlock(sc);
1476
1477 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1478 mvxpe_start(ifp);
1479
1480 rnd_add_uint32(&sc->sc_rnd_source, datum);
1481
1482 return 1;
1483 }
1484
1485 STATIC int
1486 mvxpe_misc_intr(void *arg)
1487 {
1488 struct mvxpe_softc *sc = arg;
1489 #ifdef MVXPE_DEBUG
1490 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1491 #endif
1492 uint32_t ic;
1493 uint32_t datum = 0;
1494 int claimed = 0;
1495
1496 DPRINTSC(sc, 2, "got MISC_INTR\n");
1497 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_misc);
1498
1499 KASSERT_SC_MTX(sc);
1500
1501 for (;;) {
1502 ic = MVXPE_READ(sc, MVXPE_PMIC);
1503 ic &= MVXPE_READ(sc, MVXPE_PMIM);
1504 if (ic == 0)
1505 break;
1506 MVXPE_WRITE(sc, MVXPE_PMIC, ~ic);
1507 datum = datum ^ ic;
1508 claimed = 1;
1509
1510 DPRINTIFNET(ifp, 2, "PMIC=%#x\n", ic);
1511 if (ic & MVXPE_PMI_PHYSTATUSCHNG) {
1512 DPRINTIFNET(ifp, 2, "+PHYSTATUSCHNG\n");
1513 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_phystatuschng);
1514 }
1515 if (ic & MVXPE_PMI_LINKCHANGE) {
1516 DPRINTIFNET(ifp, 2, "+LINKCHANGE\n");
1517 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_linkchange);
1518 mvxpe_linkupdate(sc);
1519 }
1520 if (ic & MVXPE_PMI_IAE) {
1521 DPRINTIFNET(ifp, 2, "+IAE\n");
1522 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_iae);
1523 }
1524 if (ic & MVXPE_PMI_RXOVERRUN) {
1525 DPRINTIFNET(ifp, 2, "+RXOVERRUN\n");
1526 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxoverrun);
1527 }
1528 if (ic & MVXPE_PMI_RXCRCERROR) {
1529 DPRINTIFNET(ifp, 2, "+RXCRCERROR\n");
1530 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxcrc);
1531 }
1532 if (ic & MVXPE_PMI_RXLARGEPACKET) {
1533 DPRINTIFNET(ifp, 2, "+RXLARGEPACKET\n");
1534 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxlargepacket);
1535 }
1536 if (ic & MVXPE_PMI_TXUNDRN) {
1537 DPRINTIFNET(ifp, 2, "+TXUNDRN\n");
1538 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txunderrun);
1539 }
1540 if (ic & MVXPE_PMI_PRBSERROR) {
1541 DPRINTIFNET(ifp, 2, "+PRBSERROR\n");
1542 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_prbserr);
1543 }
1544 if (ic & MVXPE_PMI_TREQ_MASK) {
1545 DPRINTIFNET(ifp, 2, "+TREQ\n");
1546 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txreq);
1547 }
1548 }
1549 if (datum)
1550 rnd_add_uint32(&sc->sc_rnd_source, datum);
1551
1552 return claimed;
1553 }
1554
1555 STATIC int
1556 mvxpe_rxtx_intr(void *arg)
1557 {
1558 struct mvxpe_softc *sc = arg;
1559 #ifdef MVXPE_DEBUG
1560 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1561 #endif
1562 uint32_t datum = 0;
1563 uint32_t prxtxic;
1564 int claimed = 0;
1565
1566 DPRINTSC(sc, 2, "got RXTX_Intr\n");
1567 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtx);
1568
1569 KASSERT_SC_MTX(sc);
1570
1571 for (;;) {
1572 prxtxic = MVXPE_READ(sc, MVXPE_PRXTXIC);
1573 prxtxic &= MVXPE_READ(sc, MVXPE_PRXTXIM);
1574 if (prxtxic == 0)
1575 break;
1576 MVXPE_WRITE(sc, MVXPE_PRXTXIC, ~prxtxic);
1577 datum = datum ^ prxtxic;
1578 claimed = 1;
1579
1580 DPRINTSC(sc, 2, "PRXTXIC: %#x\n", prxtxic);
1581
1582 if (prxtxic & MVXPE_PRXTXI_RREQ_MASK) {
1583 DPRINTIFNET(ifp, 1, "Rx Resource Error.\n");
1584 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rreq);
1585 }
1586 if (prxtxic & MVXPE_PRXTXI_RPQ_MASK) {
1587 DPRINTIFNET(ifp, 1, "Rx Packet in Queue.\n");
1588 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rpq);
1589 }
1590 if (prxtxic & MVXPE_PRXTXI_TBRQ_MASK) {
1591 DPRINTIFNET(ifp, 1, "Tx Buffer Return.\n");
1592 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_tbrq);
1593 }
1594 if (prxtxic & MVXPE_PRXTXI_PRXTXTHICSUMMARY) {
1595 DPRINTIFNET(ifp, 1, "PRXTXTHIC Summary\n");
1596 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rxtxth);
1597 }
1598 if (prxtxic & MVXPE_PRXTXI_PTXERRORSUMMARY) {
1599 DPRINTIFNET(ifp, 1, "PTXERROR Summary\n");
1600 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_txerr);
1601 }
1602 if (prxtxic & MVXPE_PRXTXI_PMISCICSUMMARY) {
1603 DPRINTIFNET(ifp, 1, "PMISCIC Summary\n");
1604 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_misc);
1605 }
1606 }
1607 if (datum)
1608 rnd_add_uint32(&sc->sc_rnd_source, datum);
1609
1610 return claimed;
1611 }
1612
1613 STATIC void
1614 mvxpe_tick(void *arg)
1615 {
1616 struct mvxpe_softc *sc = arg;
1617 struct mii_data *mii = &sc->sc_mii;
1618
1619 mvxpe_sc_lock(sc);
1620
1621 mii_tick(mii);
1622 mii_pollstat(&sc->sc_mii);
1623
1624 /* read MIB registers (cleared by read) */
1625 mvxpe_update_mib(sc);
1626
1627 /* read counter registers (cleared by read) */
1628 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pdfc,
1629 MVXPE_READ(sc, MVXPE_PDFC));
1630 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pofc,
1631 MVXPE_READ(sc, MVXPE_POFC));
1632 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txbadfcs,
1633 MVXPE_READ(sc, MVXPE_TXBADFCS));
1634 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txdropped,
1635 MVXPE_READ(sc, MVXPE_TXDROPPED));
1636 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_lpic,
1637 MVXPE_READ(sc, MVXPE_LPIC));
1638
1639 mvxpe_sc_unlock(sc);
1640
1641 callout_schedule(&sc->sc_tick_ch, hz);
1642 }
1643
1644
1645 /*
1646 * struct ifnet and mii callbacks
1647 */
1648 STATIC void
1649 mvxpe_start(struct ifnet *ifp)
1650 {
1651 struct mvxpe_softc *sc = ifp->if_softc;
1652 struct mbuf *m;
1653 int q;
1654
1655 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
1656 DPRINTIFNET(ifp, 1, "not running\n");
1657 return;
1658 }
1659
1660 mvxpe_sc_lock(sc);
1661 if (!MVXPE_IS_LINKUP(sc)) {
1662 /* If Link is DOWN, can't start TX */
1663 DPRINTIFNET(ifp, 1, "link fail\n");
1664 for (;;) {
1665 /*
1666 * discard all stale packets;
1667 * they may confuse DAD, ARP, or timer-based protocols.
1668 */
1669 IFQ_DEQUEUE(&ifp->if_snd, m);
1670 if (m == NULL)
1671 break;
1672 m_freem(m);
1673 }
1674 mvxpe_sc_unlock(sc);
1675 return;
1676 }
1677 for (;;) {
1678 /*
1679 * don't use IFQ_POLL().
1680 * there is a locking problem between IFQ_POLL and IFQ_DEQUEUE
1681 * on an SMP-enabled networking stack.
1682 */
1683 IFQ_DEQUEUE(&ifp->if_snd, m);
1684 if (m == NULL)
1685 break;
1686
1687 q = mvxpe_tx_queue_select(sc, m);
1688 if (q < 0)
1689 break;
1690 /* mutex is held in mvxpe_tx_queue_select() */
1691
1692 if (mvxpe_tx_queue(sc, m, q) != 0) {
1693 DPRINTIFNET(ifp, 1, "cannot add packet to tx ring\n");
1694 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txerr);
1695 mvxpe_tx_unlockq(sc, q);
1696 break;
1697 }
1698 mvxpe_tx_unlockq(sc, q);
1699 KASSERT(sc->sc_tx_ring[q].tx_used >= 0);
1700 KASSERT(sc->sc_tx_ring[q].tx_used <=
1701 sc->sc_tx_ring[q].tx_queue_len);
1702 DPRINTIFNET(ifp, 1, "a packet is added to tx ring\n");
1703 sc->sc_tx_pending++;
1704 ifp->if_opackets++;
1705 ifp->if_timer = 1;
1706 sc->sc_wdogsoft = 1;
1707 bpf_mtap(ifp, m);
1708 }
1709 mvxpe_sc_unlock(sc);
1710
1711 return;
1712 }
1713
1714 STATIC int
1715 mvxpe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1716 {
1717 struct mvxpe_softc *sc = ifp->if_softc;
1718 struct ifreq *ifr = data;
1719 int error = 0;
1720 int s;
1721
1722 switch (cmd) {
1723 case SIOCGIFMEDIA:
1724 case SIOCSIFMEDIA:
1725 DPRINTIFNET(ifp, 2, "mvxpe_ioctl MEDIA\n");
1726 s = splnet(); /* XXX: is there suitable mutex? */
1727 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1728 splx(s);
1729 break;
1730 default:
1731 DPRINTIFNET(ifp, 2, "mvxpe_ioctl ETHER\n");
1732 error = ether_ioctl(ifp, cmd, data);
1733 if (error == ENETRESET) {
1734 if (ifp->if_flags & IFF_RUNNING) {
1735 mvxpe_sc_lock(sc);
1736 mvxpe_filter_setup(sc);
1737 mvxpe_sc_unlock(sc);
1738 }
1739 error = 0;
1740 }
1741 break;
1742 }
1743
1744 return error;
1745 }
1746
1747 STATIC int
1748 mvxpe_init(struct ifnet *ifp)
1749 {
1750 struct mvxpe_softc *sc = ifp->if_softc;
1751 struct mii_data *mii = &sc->sc_mii;
1752 uint32_t reg;
1753 int q;
1754
1755 mvxpe_sc_lock(sc);
1756
1757 /* Start DMA Engine */
1758 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
1759 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
1760 MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
1761
1762 /* Enable port */
1763 reg = MVXPE_READ(sc, MVXPE_PMACC0);
1764 reg |= MVXPE_PMACC0_PORTEN;
1765 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1766
1767 /* Link up */
1768 mvxpe_linkup(sc);
1769
1770 /* Enable all queues and the interrupts of each queue */
1771 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1772 mvxpe_rx_lockq(sc, q);
1773 mvxpe_rx_queue_enable(ifp, q);
1774 mvxpe_rx_queue_refill(sc, q);
1775 mvxpe_rx_unlockq(sc, q);
1776
1777 mvxpe_tx_lockq(sc, q);
1778 mvxpe_tx_queue_enable(ifp, q);
1779 mvxpe_tx_unlockq(sc, q);
1780 }
1781
1782 /* Enable interrupt */
1783 mvxpe_enable_intr(sc);
1784
1785 /* Start the tick callout */
1786 callout_schedule(&sc->sc_tick_ch, hz);
1787
1788 /* Media check */
1789 mii_mediachg(mii);
1790
1791 ifp->if_flags |= IFF_RUNNING;
1792 ifp->if_flags &= ~IFF_OACTIVE;
1793
1794 mvxpe_sc_unlock(sc);
1795 return 0;
1796 }
1797
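/*
 * Bring the interface down: stop the tick callout, force the link
 * down, mask the Rx/Tx interrupts, disable the receive and transmit
 * queues (polling until the hardware reports them stopped), wait for
 * the Tx FIFO to drain, and clear the MAC port enable bit.  When
 * 'disable' is set, the DMA engines are additionally held in reset
 * and the descriptor rings are flushed.
 */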
1798 /* ARGSUSED */
1799 STATIC void
1800 mvxpe_stop(struct ifnet *ifp, int disable)
1801 {
1802 struct mvxpe_softc *sc = ifp->if_softc;
1803 uint32_t reg;
1804 int q, cnt;
1805
1806 DPRINTIFNET(ifp, 1, "stop device dma and interrupts.\n");
1807
1808 mvxpe_sc_lock(sc);
1809
1810 callout_stop(&sc->sc_tick_ch);
1811
1812 /* Link down */
1813 mvxpe_linkdown(sc);
1814
1815 /* Disable Rx interrupt */
1816 reg = MVXPE_READ(sc, MVXPE_PIE);
1817 reg &= ~MVXPE_PIE_RXPKTINTRPTENB_MASK;
1818 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1819
1820 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1821 reg &= ~MVXPE_PRXTXTI_RBICTAPQ_MASK;
1822 reg &= ~MVXPE_PRXTXTI_RDTAQ_MASK;
1823 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1824
1825 /* Wait for all Rx activity to terminate. */
1826 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
1827 reg = MVXPE_RQC_DIS(reg);
1828 MVXPE_WRITE(sc, MVXPE_RQC, reg);
1829 cnt = 0;
1830 do {
1831 if (cnt >= RX_DISABLE_TIMEOUT) {
1832 aprint_error_ifnet(ifp,
1833 "timeout waiting for RX to stop. rqc 0x%x\n", reg);
1834 break;
1835 }
1836 cnt++;
1837 reg = MVXPE_READ(sc, MVXPE_RQC);
1838 } while (reg & MVXPE_RQC_EN_MASK);
1839
1840 /* Wait for all Tx activity to terminate. */
1841 reg = MVXPE_READ(sc, MVXPE_PIE);
1842 reg &= ~MVXPE_PIE_TXPKTINTRPTENB_MASK;
1843 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1844
1845 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1846 reg &= ~MVXPE_PRXTXTI_TBTCQ_MASK;
1847 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1848
1849 reg = MVXPE_READ(sc, MVXPE_TQC) & MVXPE_TQC_EN_MASK;
1850 reg = MVXPE_TQC_DIS(reg);
1851 MVXPE_WRITE(sc, MVXPE_TQC, reg);
1852 cnt = 0;
1853 do {
1854 if (cnt >= TX_DISABLE_TIMEOUT) {
1855 aprint_error_ifnet(ifp,
1856 "timeout waiting for TX to stop. tqc 0x%x\n", reg);
1857 break;
1858 }
1859 cnt++;
1860 reg = MVXPE_READ(sc, MVXPE_TQC);
1861 } while (reg & MVXPE_TQC_EN_MASK);
1862
1863 /* Wait until the Tx FIFO is empty */
1864 cnt = 0;
1865 do {
1866 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
1867 aprint_error_ifnet(ifp,
1868 "timeout waiting for TX FIFO to drain. ps0 0x%x\n", reg);
1869 break;
1870 }
1871 cnt++;
1872 reg = MVXPE_READ(sc, MVXPE_PS0);
1873 } while (!(reg & MVXPE_PS0_TXFIFOEMP) && (reg & MVXPE_PS0_TXINPROG));
1874
1875 /* Reset the MAC Port Enable bit */
1876 reg = MVXPE_READ(sc, MVXPE_PMACC0);
1877 reg &= ~MVXPE_PMACC0_PORTEN;
1878 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1879
1880 /* Disable each queue */
1881 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1882 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1883
1884 mvxpe_rx_lockq(sc, q);
1885 mvxpe_tx_lockq(sc, q);
1886
1887 /* Disable Rx packet buffer refill request */
1888 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
1889 reg |= MVXPE_PRXDQTH_NODT(0);
1890 MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);
1891
1892 if (disable) {
1893 /*
1894 * Hold Reset state of DMA Engine
1895 * (must write 0x0 to restart it)
1896 */
1897 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
1898 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);
1899 mvxpe_ring_flush_queue(sc, q);
1900 }
1901
1902 mvxpe_tx_unlockq(sc, q);
1903 mvxpe_rx_unlockq(sc, q);
1904 }
1905
1906 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1907
1908 mvxpe_sc_unlock(sc);
1909 }
1910
1911 STATIC void
1912 mvxpe_watchdog(struct ifnet *ifp)
1913 {
1914 struct mvxpe_softc *sc = ifp->if_softc;
1915 int q;
1916
1917 mvxpe_sc_lock(sc);
1918
1919 /*
1920 * Reclaim first as there is a possibility of losing Tx completion
1921 * interrupts.
1922 */
1923 mvxpe_tx_complete(sc, 0xff);
1924 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1925 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1926
1927 if (tx->tx_dma != tx->tx_cpu) {
1928 if (sc->sc_wdogsoft) {
1929 /*
1930 * There is a race condition between the CPU and the DMA
1931 * engine: when the DMA engine reaches the end of the queue,
1932 * it clears the MVXPE_TQC_ENQ bit.
1933 * XXX: how about enhanced mode?
1934 */
1935 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
1936 ifp->if_timer = 5;
1937 sc->sc_wdogsoft = 0;
1938 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_wdogsoft);
1939 } else {
1940 aprint_error_ifnet(ifp, "watchdog timeout\n");
1941 ifp->if_oerrors++;
1942 mvxpe_linkreset(sc);
1943 mvxpe_sc_unlock(sc);
1944
1945 /* trigger the reinitialization sequence */
1946 mvxpe_stop(ifp, 1);
1947 mvxpe_init(ifp);
1948
1949 mvxpe_sc_lock(sc);
1950 }
1951 }
1952 }
1953 mvxpe_sc_unlock(sc);
1954 }
1955
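/*
 * if_flags change callback.  Any change beyond IFF_CANTCHANGE|IFF_DEBUG
 * makes the caller reinitialize the interface (ENETRESET); otherwise a
 * PROMISC toggle reprograms the address filter and an UP toggle resets
 * the link.
 */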
1956 STATIC int
1957 mvxpe_ifflags_cb(struct ethercom *ec)
1958 {
1959 struct ifnet *ifp = &ec->ec_if;
1960 struct mvxpe_softc *sc = ifp->if_softc;
1961 int change = ifp->if_flags ^ sc->sc_if_flags;
1962
1963 mvxpe_sc_lock(sc);
1964
1965 if (change != 0)
1966 sc->sc_if_flags = ifp->if_flags;
1967
1968 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
1969 mvxpe_sc_unlock(sc);
1970 return ENETRESET;
1971 }
1972
1973 if ((change & IFF_PROMISC) != 0)
1974 mvxpe_filter_setup(sc);
1975
1976 if ((change & IFF_UP) != 0)
1977 mvxpe_linkreset(sc);
1978
1979 mvxpe_sc_unlock(sc);
1980 return 0;
1981 }
1982
1983 STATIC int
1984 mvxpe_mediachange(struct ifnet *ifp)
1985 {
1986 return ether_mediachange(ifp);
1987 }
1988
1989 STATIC void
1990 mvxpe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1991 {
1992 ether_mediastatus(ifp, ifmr);
1993 }
1994
1995 /*
1996 * Link State Notify
1997 */
1998 STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc)
1999 {
2000 int linkup; /* bool */
2001
2002 KASSERT_SC_MTX(sc);
2003
2004 /* tell miibus */
2005 mii_pollstat(&sc->sc_mii);
2006
2007 /* syslog */
2008 linkup = MVXPE_IS_LINKUP(sc);
2009 if (sc->sc_linkstate == linkup)
2010 return;
2011
2012 #ifdef DEBUG
2013 log(LOG_DEBUG,
2014 "%s: link %s\n", device_xname(sc->sc_dev), linkup ? "up" : "down");
2015 #endif
2016 if (linkup)
2017 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up);
2018 else
2019 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_down);
2020
2021 sc->sc_linkstate = linkup;
2022 }
2023
2024 STATIC void
2025 mvxpe_linkup(struct mvxpe_softc *sc)
2026 {
2027 uint32_t reg;
2028
2029 KASSERT_SC_MTX(sc);
2030
2031 /* set EEE parameters */
2032 reg = MVXPE_READ(sc, MVXPE_LPIC1);
2033 if (sc->sc_cf.cf_lpi)
2034 reg |= MVXPE_LPIC1_LPIRE;
2035 else
2036 reg &= ~MVXPE_LPIC1_LPIRE;
2037 MVXPE_WRITE(sc, MVXPE_LPIC1, reg);
2038
2039 /* set auto-negotiation parameters */
2040 reg = MVXPE_READ(sc, MVXPE_PANC);
2041 if (sc->sc_cf.cf_fc) {
2042 /* flow control negotiation */
2043 reg |= MVXPE_PANC_PAUSEADV;
2044 reg |= MVXPE_PANC_ANFCEN;
2045 }
2046 else {
2047 reg &= ~MVXPE_PANC_PAUSEADV;
2048 reg &= ~MVXPE_PANC_ANFCEN;
2049 }
2050 reg &= ~MVXPE_PANC_FORCELINKFAIL;
2051 reg &= ~MVXPE_PANC_FORCELINKPASS;
2052 MVXPE_WRITE(sc, MVXPE_PANC, reg);
2053
2054 mii_mediachg(&sc->sc_mii);
2055 }
2056
2057 STATIC void
2058 mvxpe_linkdown(struct mvxpe_softc *sc)
2059 {
2060 struct mii_softc *mii;
2061 uint32_t reg;
2062
2063 KASSERT_SC_MTX(sc);
2064 return;
2065
2066 reg = MVXPE_READ(sc, MVXPE_PANC);
2067 reg |= MVXPE_PANC_FORCELINKFAIL;
2068 reg &= ~MVXPE_PANC_FORCELINKPASS;
2069 MVXPE_WRITE(sc, MVXPE_PANC, reg);
2070
2071 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2072 if (mii)
2073 mii_phy_down(mii);
2074 }
2075
2076 STATIC void
2077 mvxpe_linkreset(struct mvxpe_softc *sc)
2078 {
2079 struct mii_softc *mii;
2080
2081 KASSERT_SC_MTX(sc);
2082
2083 /* force reset PHY first */
2084 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2085 if (mii)
2086 mii_phy_reset(mii);
2087
2088 /* reinit MAC and PHY */
2089 mvxpe_linkdown(sc);
2090 if ((sc->sc_if_flags & IFF_UP) != 0)
2091 mvxpe_linkup(sc);
2092 }
2093
2094 /*
2095 * Tx Subroutines
2096 */
2097 STATIC int
2098 mvxpe_tx_queue_select(struct mvxpe_softc *sc, struct mbuf *m)
2099 {
2100 int q = 0;
2101
2102 /* XXX: get attribute from ALTQ framework? */
2103 mvxpe_tx_lockq(sc, q);
2104 return 0;
2105 }
2106
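/*
 * Enqueue one packet on Tx queue 'q'.  The mbuf chain is loaded with
 * the DMA map that belongs to its first descriptor; the first
 * descriptor carries the F(irst) flag and the checksum-offload command
 * bits, the last carries the L(ast) flag, and the mbuf pointer is kept
 * at the first descriptor slot so that completion can unload and free
 * it later.
 */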
2107 STATIC int
2108 mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q)
2109 {
2110 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2111 bus_dma_segment_t *txsegs;
2112 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2113 struct mvxpe_tx_desc *t = NULL;
2114 uint32_t ptxsu;
2115 int txnsegs;
2116 int start, used;
2117 int i;
2118
2119 KASSERT_TX_MTX(sc, q);
2120 KASSERT(tx->tx_used >= 0);
2121 KASSERT(tx->tx_used <= tx->tx_queue_len);
2122
2123 /* load mbuf using dmamap of 1st descriptor */
2124 if (bus_dmamap_load_mbuf(sc->sc_dmat,
2125 MVXPE_TX_MAP(sc, q, tx->tx_cpu), m, BUS_DMA_NOWAIT) != 0) {
2126 m_freem(m);
2127 return ENOBUFS;
2128 }
2129 txsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_segs;
2130 txnsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_nsegs;
2131 if (txnsegs <= 0 || (txnsegs + tx->tx_used) > tx->tx_queue_len) {
2132 /* we don't have enough descriptors, or the mbuf is broken */
2133 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, tx->tx_cpu));
2134 m_freem(m);
2135 return ENOBUFS;
2136 }
2137 DPRINTSC(sc, 2, "send packet %p descriptor %d\n", m, tx->tx_cpu);
2138 KASSERT(MVXPE_TX_MBUF(sc, q, tx->tx_cpu) == NULL);
2139
2140 /* remember mbuf using 1st descriptor */
2141 MVXPE_TX_MBUF(sc, q, tx->tx_cpu) = m;
2142 bus_dmamap_sync(sc->sc_dmat,
2143 MVXPE_TX_MAP(sc, q, tx->tx_cpu), 0, m->m_pkthdr.len,
2144 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2145
2146 /* load to tx descriptors */
2147 start = tx->tx_cpu;
2148 used = 0;
2149 for (i = 0; i < txnsegs; i++) {
2150 if (__predict_false(txsegs[i].ds_len == 0))
2151 continue;
2152 t = MVXPE_TX_DESC(sc, q, tx->tx_cpu);
2153 t->command = 0;
2154 t->l4ichk = 0;
2155 t->flags = 0;
2156 if (i == 0) {
2157 /* 1st descriptor */
2158 t->command |= MVXPE_TX_CMD_W_PACKET_OFFSET(0);
2159 t->command |= MVXPE_TX_CMD_PADDING;
2160 t->command |= MVXPE_TX_CMD_F;
2161 mvxpe_tx_set_csumflag(ifp, t, m);
2162 }
2163 t->bufptr = txsegs[i].ds_addr;
2164 t->bytecnt = txsegs[i].ds_len;
2165 tx->tx_cpu = tx_counter_adv(tx->tx_cpu, 1);
2166 tx->tx_used++;
2167 used++;
2168 }
2169 /* t is last descriptor here */
2170 KASSERT(t != NULL);
2171 t->command |= MVXPE_TX_CMD_L;
2172
2173 DPRINTSC(sc, 2, "queue %d, %d descriptors used\n", q, used);
2174 #ifdef MVXPE_DEBUG
2175 if (mvxpe_debug > 2)
2176 for (i = start; i <= tx->tx_cpu; i++) {
2177 t = MVXPE_TX_DESC(sc, q, i);
2178 mvxpe_dump_txdesc(t, i);
2179 }
2180 #endif
2181 mvxpe_ring_sync_tx(sc, q, start, used,
2182 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2183
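	/*
	 * Tell the hardware how many new descriptors were written.
	 * Counts larger than 255 are reported in multiple PTXSU
	 * writes, then the queue is (re)enabled via TQC.
	 */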
2184 while (used > 255) {
2185 ptxsu = MVXPE_PTXSU_NOWD(255);
2186 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2187 used -= 255;
2188 }
2189 if (used > 0) {
2190 ptxsu = MVXPE_PTXSU_NOWD(used);
2191 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2192 }
2193 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
2194
2195 DPRINTSC(sc, 2,
2196 "PTXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQA(q)));
2197 DPRINTSC(sc, 2,
2198 "PTXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));
2199 DPRINTSC(sc, 2,
2200 "PTXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXS(q)));
2201 DPRINTSC(sc, 2,
2202 "PTXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PTXDI(q)));
2203 DPRINTSC(sc, 2, "TQC: %#x\n", MVXPE_READ(sc, MVXPE_TQC));
2204 DPRINTIFNET(ifp, 2,
2205 "Tx: tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
2206 tx->tx_cpu, tx->tx_dma, tx->tx_used);
2207 return 0;
2208 }
2209
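/*
 * Translate the mbuf's checksum-offload flags into descriptor command
 * bits: the L3 protocol (IPv4/IPv6), optional IPv4 header checksum
 * generation, the L4 protocol (TCP/UDP) with checksum generation for
 * non-fragmented packets, plus the IP header length and L3 offset
 * fields.
 */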
2210 STATIC void
2211 mvxpe_tx_set_csumflag(struct ifnet *ifp,
2212 struct mvxpe_tx_desc *t, struct mbuf *m)
2213 {
2214 struct ether_header *eh;
2215 int csum_flags;
2216 uint32_t iphl = 0, ipoff = 0;
2217
2218
2219 csum_flags = ifp->if_csum_flags_tx & m->m_pkthdr.csum_flags;
2220
2221 eh = mtod(m, struct ether_header *);
2222 switch (htons(eh->ether_type)) {
2223 case ETHERTYPE_IP:
2224 case ETHERTYPE_IPV6:
2225 ipoff = ETHER_HDR_LEN;
2226 break;
2227 case ETHERTYPE_VLAN:
2228 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2229 break;
2230 }
2231
2232 if (csum_flags & (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2233 iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2234 t->command |= MVXPE_TX_CMD_L3_IP4;
2235 }
2236 else if (csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2237 iphl = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data);
2238 t->command |= MVXPE_TX_CMD_L3_IP6;
2239 }
2240 else {
2241 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
2242 return;
2243 }
2244
2245
2246 /* L3 */
2247 if (csum_flags & M_CSUM_IPv4) {
2248 t->command |= MVXPE_TX_CMD_IP4_CHECKSUM;
2249 }
2250
2251 /* L4 */
2252 if ((csum_flags &
2253 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)) == 0) {
2254 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
2255 }
2256 else if (csum_flags & M_CSUM_TCPv4) {
2257 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2258 t->command |= MVXPE_TX_CMD_L4_TCP;
2259 }
2260 else if (csum_flags & M_CSUM_UDPv4) {
2261 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2262 t->command |= MVXPE_TX_CMD_L4_UDP;
2263 }
2264 else if (csum_flags & M_CSUM_TCPv6) {
2265 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2266 t->command |= MVXPE_TX_CMD_L4_TCP;
2267 }
2268 else if (csum_flags & M_CSUM_UDPv6) {
2269 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2270 t->command |= MVXPE_TX_CMD_L4_UDP;
2271 }
2272
2273 t->l4ichk = 0;
2274 t->command |= MVXPE_TX_CMD_IP_HEADER_LEN(iphl >> 2);
2275 t->command |= MVXPE_TX_CMD_L3_OFFSET(ipoff);
2276 }
2277
2278 STATIC void
2279 mvxpe_tx_complete(struct mvxpe_softc *sc, uint32_t queues)
2280 {
2281 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2282 int q;
2283
2284 DPRINTSC(sc, 2, "tx completed.\n");
2285
2286 KASSERT_SC_MTX(sc);
2287
2288 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2289 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2290 continue;
2291 mvxpe_tx_lockq(sc, q);
2292 mvxpe_tx_queue_complete(sc, q);
2293 mvxpe_tx_unlockq(sc, q);
2294 }
2295 KASSERT(sc->sc_tx_pending >= 0);
2296 if (sc->sc_tx_pending == 0)
2297 ifp->if_timer = 0;
2298 }
2299
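/*
 * Reclaim transmitted descriptors on queue 'q': read the transmitted
 * buffer count from PTXS, walk the ring from tx_dma, record any error
 * statistics, unload and free the mbuf stored at the first descriptor
 * of each packet, and acknowledge the processed descriptors through
 * PTXSU (at most 255 per write).
 */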
2300 STATIC void
2301 mvxpe_tx_queue_complete(struct mvxpe_softc *sc, int q)
2302 {
2303 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2304 struct mvxpe_tx_desc *t;
2305 uint32_t ptxs, ptxsu, ndesc;
2306 int i;
2307
2308 KASSERT_TX_MTX(sc, q);
2309
2310 ptxs = MVXPE_READ(sc, MVXPE_PTXS(q));
2311 ndesc = MVXPE_PTXS_GET_TBC(ptxs);
2312 if (ndesc == 0)
2313 return;
2314
2315 DPRINTSC(sc, 2,
2316 "tx complete queue %d, %d descriptors.\n", q, ndesc);
2317
2318 mvxpe_ring_sync_tx(sc, q, tx->tx_dma, ndesc,
2319 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2320
2321 for (i = 0; i < ndesc; i++) {
2322 int error = 0;
2323
2324 t = MVXPE_TX_DESC(sc, q, tx->tx_dma);
2325 if (t->flags & MVXPE_TX_F_ES) {
2326 DPRINTSC(sc, 1,
2327 "tx error queue %d desc %d\n",
2328 q, tx->tx_dma);
2329 switch (t->flags & MVXPE_TX_F_EC_MASK) {
2330 case MVXPE_TX_F_EC_LC:
2331 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_lc);
2332 break;
2333 case MVXPE_TX_F_EC_UR:
2334 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_ur);
2335 break;
2336 case MVXPE_TX_F_EC_RL:
2337 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_rl);
2338 break;
2339 default:
2340 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_oth);
2341 break;
2342 }
2343 error = 1;
2344 }
2345 if (MVXPE_TX_MBUF(sc, q, tx->tx_dma) != NULL) {
2346 KASSERT((t->command & MVXPE_TX_CMD_F) != 0);
2347 bus_dmamap_unload(sc->sc_dmat,
2348 MVXPE_TX_MAP(sc, q, tx->tx_dma));
2349 m_freem(MVXPE_TX_MBUF(sc, q, tx->tx_dma));
2350 MVXPE_TX_MBUF(sc, q, tx->tx_dma) = NULL;
2351 sc->sc_tx_pending--;
2352 }
2353 else
2354 KASSERT((t->flags & MVXPE_TX_CMD_F) == 0);
2355 tx->tx_dma = tx_counter_adv(tx->tx_dma, 1);
2356 tx->tx_used--;
2357 if (error)
2358 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txqe[q]);
2359 else
2360 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txq[q]);
2361 }
2362 KASSERT(tx->tx_used >= 0);
2363 KASSERT(tx->tx_used <= tx->tx_queue_len);
2364 while (ndesc > 255) {
2365 ptxsu = MVXPE_PTXSU_NORB(255);
2366 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2367 ndesc -= 255;
2368 }
2369 if (ndesc > 0) {
2370 ptxsu = MVXPE_PTXSU_NORB(ndesc);
2371 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2372 }
2373 DPRINTSC(sc, 2,
2374 "Tx complete q %d, tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
2375 q, tx->tx_cpu, tx->tx_dma, tx->tx_used);
2376 }
2377
2378 /*
2379 * Rx Subroutines
2380 */
2381 STATIC void
2382 mvxpe_rx(struct mvxpe_softc *sc, uint32_t queues)
2383 {
2384 int q, npkt;
2385
2386 KASSERT_SC_MTX(sc);
2387
2388 while ((npkt = mvxpe_rx_queue_select(sc, queues, &q)) != 0) {
2389 /* mutex is held by rx_queue_select */
2390 mvxpe_rx_queue(sc, q, npkt);
2391 mvxpe_rx_unlockq(sc, q);
2392 }
2393 }
2394
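/*
 * Process 'npkt' received frames on queue 'q': for each descriptor,
 * check the error and first/last flags, turn the buffer manager chunk
 * into an mbuf, strip the hardware header and CRC, set the received
 * checksum flags, and pass the packet up the stack.  Processed
 * descriptors are acknowledged through PRXSU in chunks of at most 255.
 */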
2395 STATIC void
2396 mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt)
2397 {
2398 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2399 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2400 struct mvxpe_rx_desc *r;
2401 struct mvxpbm_chunk *chunk;
2402 struct mbuf *m;
2403 uint32_t prxsu;
2404 int error = 0;
2405 int i;
2406
2407 KASSERT_RX_MTX(sc, q);
2408
2409 mvxpe_ring_sync_rx(sc, q, rx->rx_dma, npkt,
2410 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2411
2412 for (i = 0; i < npkt; i++) {
2413 /* get descriptor and packet */
2414 chunk = MVXPE_RX_PKTBUF(sc, q, rx->rx_dma);
2415 MVXPE_RX_PKTBUF(sc, q, rx->rx_dma) = NULL;
2416 r = MVXPE_RX_DESC(sc, q, rx->rx_dma);
2417 mvxpbm_dmamap_sync(chunk, r->bytecnt, BUS_DMASYNC_POSTREAD);
2418
2419 /* check errors */
2420 if (r->status & MVXPE_RX_ES) {
2421 switch (r->status & MVXPE_RX_EC_MASK) {
2422 case MVXPE_RX_EC_CE:
2423 DPRINTIFNET(ifp, 1, "CRC error\n");
2424 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_ce);
2425 break;
2426 case MVXPE_RX_EC_OR:
2427 DPRINTIFNET(ifp, 1, "Rx FIFO overrun\n");
2428 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_or);
2429 break;
2430 case MVXPE_RX_EC_MF:
2431 DPRINTIFNET(ifp, 1, "Rx too large frame\n");
2432 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_mf);
2433 break;
2434 case MVXPE_RX_EC_RE:
2435 DPRINTIFNET(ifp, 1, "Rx resource error\n");
2436 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_re);
2437 break;
2438 }
2439 error = 1;
2440 goto rx_done;
2441 }
2442 if (!(r->status & MVXPE_RX_F) || !(r->status & MVXPE_RX_L)) {
2443 DPRINTIFNET(ifp, 1, "scatter buffers are not supported\n");
2444 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_scat);
2445 error = 1;
2446 goto rx_done;
2447 }
2448
2449 if (chunk == NULL) {
2450 device_printf(sc->sc_dev,
2451 "got rx interrupt, but no chunk\n");
2452 error = 1;
2453 goto rx_done;
2454 }
2455
2456 /* extract packet buffer */
2457 if (mvxpbm_init_mbuf_hdr(chunk) != 0) {
2458 error = 1;
2459 goto rx_done;
2460 }
2461 m = chunk->m;
2462 m_set_rcvif(m, ifp);
2463 m->m_pkthdr.len = m->m_len = r->bytecnt - ETHER_CRC_LEN;
2464 m_adj(m, MVXPE_HWHEADER_SIZE); /* strip the HW header */
2465 mvxpe_rx_set_csumflag(ifp, r, m);
2466 ifp->if_ipackets++;
2467 bpf_mtap(ifp, m);
2468 if_percpuq_enqueue(ifp->if_percpuq, m);
2469 chunk = NULL; /* the BM chunk now belongs to the networking stack */
2470 rx_done:
2471 if (chunk) {
2472 /* rx error. just return the chunk to BM. */
2473 mvxpbm_free_chunk(chunk);
2474 }
2475 if (error)
2476 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxqe[q]);
2477 else
2478 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxq[q]);
2479 rx->rx_dma = rx_counter_adv(rx->rx_dma, 1);
2480 }
2481 /* DMA status update */
2482 DPRINTSC(sc, 2, "%d packets received from queue %d\n", npkt, q);
2483 while (npkt > 255) {
2484 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2485 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2486 npkt -= 255;
2487 }
2488 if (npkt > 0) {
2489 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(npkt);
2490 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2491 }
2492
2493 DPRINTSC(sc, 2,
2494 "PRXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQA(q)));
2495 DPRINTSC(sc, 2,
2496 "PRXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
2497 DPRINTSC(sc, 2,
2498 "PRXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXS(q)));
2499 DPRINTSC(sc, 2,
2500 "PRXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PRXDI(q)));
2501 DPRINTSC(sc, 2, "RQC: %#x\n", MVXPE_READ(sc, MVXPE_RQC));
2502 DPRINTIFNET(ifp, 2, "Rx: rx_cpu = %d, rx_dma = %d\n",
2503 rx->rx_cpu, rx->rx_dma);
2504 }
2505
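/*
 * Pick the next Rx queue to service: scan the busy queues from the
 * highest-numbered one down and return the number of occupied
 * descriptors in the first non-empty queue found.  The selected
 * queue's mutex is held on return.
 */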
2506 STATIC int
2507 mvxpe_rx_queue_select(struct mvxpe_softc *sc, uint32_t queues, int *queue)
2508 {
2509 uint32_t prxs, npkt;
2510 int q;
2511
2512 KASSERT_SC_MTX(sc);
2513 KASSERT(queue != NULL);
2514 DPRINTSC(sc, 2, "selecting rx queue\n");
2515
2516 for (q = MVXPE_QUEUE_SIZE - 1; q >= 0; q--) {
2517 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2518 continue;
2519
2520 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2521 npkt = MVXPE_PRXS_GET_ODC(prxs);
2522 if (npkt == 0)
2523 continue;
2524
2525 DPRINTSC(sc, 2,
2526 "queue %d selected: prxs=%#x, %u packets received.\n",
2527 q, prxs, npkt);
2528 *queue = q;
2529 mvxpe_rx_lockq(sc, q);
2530 return npkt;
2531 }
2532
2533 return 0;
2534 }
2535
2536 STATIC void
2537 mvxpe_rx_refill(struct mvxpe_softc *sc, uint32_t queues)
2538 {
2539 int q;
2540
2541 KASSERT_SC_MTX(sc);
2542
2543 /* XXX: check rx bit array */
2544 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2545 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2546 continue;
2547
2548 mvxpe_rx_lockq(sc, q);
2549 mvxpe_rx_queue_refill(sc, q);
2550 mvxpe_rx_unlockq(sc, q);
2551 }
2552 }
2553
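/*
 * Top up Rx queue 'q' with buffer manager chunks: the number of free
 * descriptors is derived from the PRXS occupied/non-occupied counts
 * and the configured ring length, and the newly added descriptors are
 * reported to the hardware through PRXSU in chunks of at most 255.
 */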
2554 STATIC void
2555 mvxpe_rx_queue_refill(struct mvxpe_softc *sc, int q)
2556 {
2557 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2558 uint32_t prxs, prxsu, ndesc;
2559 int idx, refill = 0;
2560 int npkt;
2561
2562 KASSERT_RX_MTX(sc, q);
2563
2564 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2565 ndesc = MVXPE_PRXS_GET_NODC(prxs) + MVXPE_PRXS_GET_ODC(prxs);
2566 refill = rx->rx_queue_len - ndesc;
2567 if (refill <= 0)
2568 return;
2569 DPRINTPRXS(2, q);
2570 DPRINTSC(sc, 2, "%d buffers to refill.\n", refill);
2571
2572 idx = rx->rx_cpu;
2573 for (npkt = 0; npkt < refill; npkt++)
2574 if (mvxpe_rx_queue_add(sc, q) != 0)
2575 break;
2576 DPRINTSC(sc, 2, "queue %d, %d buffer refilled.\n", q, npkt);
2577 if (npkt == 0)
2578 return;
2579
2580 mvxpe_ring_sync_rx(sc, q, idx, npkt,
2581 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2582
2583 while (npkt > 255) {
2584 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(255);
2585 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2586 npkt -= 255;
2587 }
2588 if (npkt > 0) {
2589 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(npkt);
2590 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2591 }
2592 DPRINTPRXS(2, q);
2593 return;
2594 }
2595
2596 STATIC int
2597 mvxpe_rx_queue_add(struct mvxpe_softc *sc, int q)
2598 {
2599 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2600 struct mvxpe_rx_desc *r;
2601 struct mvxpbm_chunk *chunk = NULL;
2602
2603 KASSERT_RX_MTX(sc, q);
2604
2605 /* Allocate the packet buffer */
2606 chunk = mvxpbm_alloc(sc->sc_bm);
2607 if (chunk == NULL) {
2608 DPRINTSC(sc, 1, "BM chunk allocation failed.\n");
2609 return ENOBUFS;
2610 }
2611
2612 /* Add the packet buffer to the descriptor */
2613 KASSERT(MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) == NULL);
2614 MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) = chunk;
2615 mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
2616
2617 r = MVXPE_RX_DESC(sc, q, rx->rx_cpu);
2618 r->bufptr = chunk->buf_pa;
2619 DPRINTSC(sc, 9, "chunk added to index %d\n", rx->rx_cpu);
2620 rx->rx_cpu = rx_counter_adv(rx->rx_cpu, 1);
2621 return 0;
2622 }
2623
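/*
 * Map the Rx descriptor status bits to mbuf checksum flags: flag a bad
 * IPv4 header checksum, leave IPv4 fragments unverified, and mark the
 * TCP/UDP checksum bad when the hardware reports an L4 checksum error.
 */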
2624 STATIC void
2625 mvxpe_rx_set_csumflag(struct ifnet *ifp,
2626 struct mvxpe_rx_desc *r, struct mbuf *m0)
2627 {
2628 uint32_t csum_flags = 0;
2629
2630 if ((r->status & (MVXPE_RX_IP_HEADER_OK|MVXPE_RX_L3_IP)) == 0)
2631 return; /* not an IP packet */
2632
2633 /* L3 */
2634 if (r->status & MVXPE_RX_L3_IP) {
2635 csum_flags |= M_CSUM_IPv4 & ifp->if_csum_flags_rx;
2636 if ((r->status & MVXPE_RX_IP_HEADER_OK) == 0 &&
2637 (csum_flags & M_CSUM_IPv4) != 0) {
2638 csum_flags |= M_CSUM_IPv4_BAD;
2639 goto finish;
2640 }
2641 else if (r->status & MVXPE_RX_IPV4_FRAGMENT) {
2642 /*
2643 * r->l4chk has the partial checksum of each fragment,
2644 * but there is no way to use it in NetBSD.
2645 */
2646 return;
2647 }
2648 }
2649
2650 /* L4 */
2651 switch (r->status & MVXPE_RX_L4_MASK) {
2652 case MVXPE_RX_L4_TCP:
2653 if (r->status & MVXPE_RX_L3_IP)
2654 csum_flags |= M_CSUM_TCPv4 & ifp->if_csum_flags_rx;
2655 else
2656 csum_flags |= M_CSUM_TCPv6 & ifp->if_csum_flags_rx;
2657 break;
2658 case MVXPE_RX_L4_UDP:
2659 if (r->status & MVXPE_RX_L3_IP)
2660 csum_flags |= M_CSUM_UDPv4 & ifp->if_csum_flags_rx;
2661 else
2662 csum_flags |= M_CSUM_UDPv6 & ifp->if_csum_flags_rx;
2663 break;
2664 case MVXPE_RX_L4_OTH:
2665 default:
2666 break;
2667 }
2668 if ((r->status & MVXPE_RX_L4_CHECKSUM_OK) == 0 && (csum_flags &
2669 (M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6)) != 0)
2670 csum_flags |= M_CSUM_TCP_UDP_BAD;
2671 finish:
2672 m0->m_pkthdr.csum_flags = csum_flags;
2673 }
2674
2675 /*
2676 * MAC address filter
2677 */
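/*
 * CRC-8 (polynomial 0x07) over an Ethernet address; mvxpe_filter_setup()
 * uses the result to index the "other multicast" filter table, four
 * byte-wide entries per 32-bit word.
 */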
2678 STATIC uint8_t
2679 mvxpe_crc8(const uint8_t *data, size_t size)
2680 {
2681 int bit;
2682 uint8_t byte;
2683 uint8_t crc = 0;
2684 const uint8_t poly = 0x07;
2685
2686 while (size--)
2687 for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
2688 crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
2689
2690 return crc;
2691 }
2692
2693 CTASSERT(MVXPE_NDFSMT == MVXPE_NDFOMT);
2694
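/*
 * Program the destination address filter tables: DFUT passes the
 * unicast station address (or everything in promiscuous mode), DFSMT
 * handles the "special" IPv4 multicast range 01:00:5e:00:00:xx, and
 * DFOMT handles all other multicast addresses hashed with
 * mvxpe_crc8().  Ranges and ALLMULTI/PROMISC fall back to accepting
 * all multicast.
 */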
2695 STATIC void
2696 mvxpe_filter_setup(struct mvxpe_softc *sc)
2697 {
2698 struct ethercom *ec = &sc->sc_ethercom;
2699 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2700 struct ether_multi *enm;
2701 struct ether_multistep step;
2702 uint32_t dfut[MVXPE_NDFUT], dfsmt[MVXPE_NDFSMT], dfomt[MVXPE_NDFOMT];
2703 uint32_t pxc;
2704 int i;
2705 const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};
2706
2707 KASSERT_SC_MTX(sc);
2708
2709 memset(dfut, 0, sizeof(dfut));
2710 memset(dfsmt, 0, sizeof(dfsmt));
2711 memset(dfomt, 0, sizeof(dfomt));
2712
2713 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
2714 goto allmulti;
2715 }
2716
2717 ETHER_FIRST_MULTI(step, ec, enm);
2718 while (enm != NULL) {
2719 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2720 /* ranges are complex and somewhat rare */
2721 goto allmulti;
2722 }
2723 /* chip handles some IPv4 multicast specially */
2724 if (memcmp(enm->enm_addrlo, special, 5) == 0) {
2725 i = enm->enm_addrlo[5];
2726 dfsmt[i>>2] |=
2727 MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2728 } else {
2729 i = mvxpe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
2730 dfomt[i>>2] |=
2731 MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2732 }
2733
2734 ETHER_NEXT_MULTI(step, enm);
2735 }
2736 goto set;
2737
2738 allmulti:
2739 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
2740 for (i = 0; i < MVXPE_NDFSMT; i++) {
2741 dfsmt[i] = dfomt[i] =
2742 MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2743 MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2744 MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2745 MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2746 }
2747 }
2748
2749 set:
2750 pxc = MVXPE_READ(sc, MVXPE_PXC);
2751 pxc &= ~MVXPE_PXC_UPM;
2752 pxc |= MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP;
2753 if (ifp->if_flags & IFF_BROADCAST) {
2754 pxc &= ~(MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP);
2755 }
2756 if (ifp->if_flags & IFF_PROMISC) {
2757 pxc |= MVXPE_PXC_UPM;
2758 }
2759 MVXPE_WRITE(sc, MVXPE_PXC, pxc);
2760
2761 /* Set Destination Address Filter Unicast Table */
2762 if (ifp->if_flags & IFF_PROMISC) {
2763 /* pass all unicast addresses */
2764 for (i = 0; i < MVXPE_NDFUT; i++) {
2765 dfut[i] =
2766 MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2767 MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2768 MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2769 MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2770 }
2771 }
2772 else {
2773 i = sc->sc_enaddr[5] & 0xf; /* last nibble */
2774 dfut[i>>2] = MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2775 }
2776 MVXPE_WRITE_REGION(sc, MVXPE_DFUT(0), dfut, MVXPE_NDFUT);
2777
2778 /* Set Destination Address Filter Multicast Tables */
2779 MVXPE_WRITE_REGION(sc, MVXPE_DFSMT(0), dfsmt, MVXPE_NDFSMT);
2780 MVXPE_WRITE_REGION(sc, MVXPE_DFOMT(0), dfomt, MVXPE_NDFOMT);
2781 }
2782
2783 /*
2784 * sysctl(9)
2785 */
2786 SYSCTL_SETUP(sysctl_mvxpe, "sysctl mvxpe subtree setup")
2787 {
2788 int rc;
2789 const struct sysctlnode *node;
2790
2791 if ((rc = sysctl_createv(clog, 0, NULL, &node,
2792 0, CTLTYPE_NODE, "mvxpe",
2793 SYSCTL_DESCR("mvxpe interface controls"),
2794 NULL, 0, NULL, 0,
2795 CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
2796 goto err;
2797 }
2798
2799 mvxpe_root_num = node->sysctl_num;
2800 return;
2801
2802 err:
2803 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
2804 }
2805
2806 STATIC int
2807 sysctl_read_mib(SYSCTLFN_ARGS)
2808 {
2809 struct mvxpe_sysctl_mib *arg;
2810 struct mvxpe_softc *sc;
2811 struct sysctlnode node;
2812 uint64_t val;
2813 int err;
2814
2815 node = *rnode;
2816 arg = (struct mvxpe_sysctl_mib *)rnode->sysctl_data;
2817 if (arg == NULL)
2818 return EINVAL;
2819
2820 sc = arg->sc;
2821 if (sc == NULL)
2822 return EINVAL;
2823 if (arg->index < 0 || arg->index >= __arraycount(mvxpe_mib_list))
2824 return EINVAL;
2825
2826 mvxpe_sc_lock(sc);
2827 val = arg->counter;
2828 mvxpe_sc_unlock(sc);
2829
2830 node.sysctl_data = &val;
2831 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2832 if (err)
2833 return err;
2834 if (newp)
2835 return EINVAL;
2836
2837 return 0;
2838 }
2839
2840
2841 STATIC int
2842 sysctl_clear_mib(SYSCTLFN_ARGS)
2843 {
2844 struct mvxpe_softc *sc;
2845 struct sysctlnode node;
2846 int val;
2847 int err;
2848
2849 node = *rnode;
2850 sc = (struct mvxpe_softc *)rnode->sysctl_data;
2851 if (sc == NULL)
2852 return EINVAL;
2853
2854 val = 0;
2855 node.sysctl_data = &val;
2856 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2857 if (err || newp == NULL)
2858 return err;
2859 if (val < 0 || val > 1)
2860 return EINVAL;
2861 if (val == 1) {
2862 mvxpe_sc_lock(sc);
2863 mvxpe_clear_mib(sc);
2864 mvxpe_sc_unlock(sc);
2865 }
2866
2867 return 0;
2868 }
2869
2870 STATIC int
2871 sysctl_set_queue_length(SYSCTLFN_ARGS)
2872 {
2873 struct mvxpe_sysctl_queue *arg;
2874 struct mvxpe_rx_ring *rx = NULL;
2875 struct mvxpe_tx_ring *tx = NULL;
2876 struct mvxpe_softc *sc;
2877 struct sysctlnode node;
2878 uint32_t reg;
2879 int val;
2880 int err;
2881
2882 node = *rnode;
2883
2884 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
2885 if (arg == NULL)
2886 return EINVAL;
2887 if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT)
2888 return EINVAL;
2889 if (arg->rxtx != MVXPE_SYSCTL_RX && arg->rxtx != MVXPE_SYSCTL_TX)
2890 return EINVAL;
2891
2892 sc = arg->sc;
2893 if (sc == NULL)
2894 return EINVAL;
2895
2896 /* read queue length */
2897 mvxpe_sc_lock(sc);
2898 switch (arg->rxtx) {
2899 case MVXPE_SYSCTL_RX:
2900 mvxpe_rx_lockq(sc, arg->queue);
2901 rx = MVXPE_RX_RING(sc, arg->queue);
2902 val = rx->rx_queue_len;
2903 mvxpe_rx_unlockq(sc, arg->queue);
2904 break;
2905 case MVXPE_SYSCTL_TX:
2906 mvxpe_tx_lockq(sc, arg->queue);
2907 tx = MVXPE_TX_RING(sc, arg->queue);
2908 val = tx->tx_queue_len;
2909 mvxpe_tx_unlockq(sc, arg->queue);
2910 break;
2911 }
2912
2913 node.sysctl_data = &val;
2914 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2915 if (err || newp == NULL) {
2916 mvxpe_sc_unlock(sc);
2917 return err;
2918 }
2919
2920 /* update queue length */
2921 if (val < 8 || val > MVXPE_RX_RING_CNT) {
2922 mvxpe_sc_unlock(sc);
2923 return EINVAL;
2924 }
2925 switch (arg->rxtx) {
2926 case MVXPE_SYSCTL_RX:
2927 mvxpe_rx_lockq(sc, arg->queue);
2928 rx->rx_queue_len = val;
2929 rx->rx_queue_th_received =
2930 rx->rx_queue_len / MVXPE_RXTH_RATIO;
2931 rx->rx_queue_th_free =
2932 rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
2933
2934 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
2935 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
2936 MVXPE_WRITE(sc, MVXPE_PRXDQTH(arg->queue), reg);
2937
2938 mvxpe_rx_unlockq(sc, arg->queue);
2939 break;
2940 case MVXPE_SYSCTL_TX:
2941 mvxpe_tx_lockq(sc, arg->queue);
2942 tx->tx_queue_len = val;
2943 tx->tx_queue_th_free =
2944 tx->tx_queue_len / MVXPE_TXTH_RATIO;
2945
2946 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
2947 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
2948 MVXPE_WRITE(sc, MVXPE_PTXDQS(arg->queue), reg);
2949
2950 mvxpe_tx_unlockq(sc, arg->queue);
2951 break;
2952 }
2953 mvxpe_sc_unlock(sc);
2954
2955 return 0;
2956 }
2957
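/*
 * The Rx interrupt threshold timer is programmed in TCLK cycles; this
 * handler exposes it in microseconds, converting with the platform
 * mvTclk frequency.
 */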
2958 STATIC int
2959 sysctl_set_queue_rxthtime(SYSCTLFN_ARGS)
2960 {
2961 struct mvxpe_sysctl_queue *arg;
2962 struct mvxpe_rx_ring *rx = NULL;
2963 struct mvxpe_softc *sc;
2964 struct sysctlnode node;
2965 extern uint32_t mvTclk;
2966 uint32_t reg, time_mvtclk;
2967 int time_us;
2968 int err;
2969
2970 node = *rnode;
2971
2972 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
2973 if (arg == NULL)
2974 return EINVAL;
2975 if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT)
2976 return EINVAL;
2977 if (arg->rxtx != MVXPE_SYSCTL_RX)
2978 return EINVAL;
2979
2980 sc = arg->sc;
2981 if (sc == NULL)
2982 return EINVAL;
2983
2984 /* read the current threshold timer */
2985 mvxpe_sc_lock(sc);
2986 mvxpe_rx_lockq(sc, arg->queue);
2987 rx = MVXPE_RX_RING(sc, arg->queue);
2988 time_mvtclk = rx->rx_queue_th_time;
2989 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvTclk;
2990 node.sysctl_data = &time_us;
2991 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n",
2992 arg->queue, MVXPE_READ(sc, MVXPE_PRXITTH(arg->queue)));
2993 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2994 if (err || newp == NULL) {
2995 mvxpe_rx_unlockq(sc, arg->queue);
2996 mvxpe_sc_unlock(sc);
2997 return err;
2998 }
2999
3000 /* update the threshold timer (range: 0 - 1 [sec]) */
3001 if (time_us < 0 || time_us > (1000 * 1000)) {
3002 mvxpe_rx_unlockq(sc, arg->queue);
3003 mvxpe_sc_unlock(sc);
3004 return EINVAL;
3005 }
3006 time_mvtclk =
3007 (uint64_t)mvTclk * (uint64_t)time_us / (1000ULL * 1000ULL);
3008 rx->rx_queue_th_time = time_mvtclk;
3009 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
3010 MVXPE_WRITE(sc, MVXPE_PRXITTH(arg->queue), reg);
3011 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n", arg->queue, reg);
3012 mvxpe_rx_unlockq(sc, arg->queue);
3013 mvxpe_sc_unlock(sc);
3014
3015 return 0;
3016 }
3017
3018
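/*
 * Per-controller sysctl tree created here:
 *
 *	hw.mvxpe.mvxpe[unit].mib.<counter>	(read-only)
 *	hw.mvxpe.mvxpe[unit].rx.queueN.length
 *	hw.mvxpe.mvxpe[unit].rx.queueN.threshold_timer_us
 *	hw.mvxpe.mvxpe[unit].tx.queueN.length
 *	hw.mvxpe.mvxpe[unit].clear_mib
 */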
3019 STATIC void
3020 sysctl_mvxpe_init(struct mvxpe_softc *sc)
3021 {
3022 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3023 const struct sysctlnode *node;
3024 int mvxpe_nodenum;
3025 int mvxpe_mibnum;
3026 int mvxpe_rxqueuenum;
3027 int mvxpe_txqueuenum;
3028 int q, i;
3029
3030 /* hw.mvxpe.mvxpe[unit] */
3031 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3032 0, CTLTYPE_NODE, ifp->if_xname,
3033 SYSCTL_DESCR("mvxpe per-controller controls"),
3034 NULL, 0, NULL, 0,
3035 CTL_HW, mvxpe_root_num, CTL_CREATE,
3036 CTL_EOL) != 0) {
3037 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3038 return;
3039 }
3040 mvxpe_nodenum = node->sysctl_num;
3041
3042 /* hw.mvxpe.mvxpe[unit].mib */
3043 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3044 0, CTLTYPE_NODE, "mib",
3045 SYSCTL_DESCR("mvxpe per-controller MIB counters"),
3046 NULL, 0, NULL, 0,
3047 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3048 CTL_EOL) != 0) {
3049 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3050 return;
3051 }
3052 mvxpe_mibnum = node->sysctl_num;
3053
3054 /* hw.mvxpe.mvxpe[unit].rx */
3055 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3056 0, CTLTYPE_NODE, "rx",
3057 SYSCTL_DESCR("Rx Queues"),
3058 NULL, 0, NULL, 0,
3059 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3060 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3061 return;
3062 }
3063 mvxpe_rxqueuenum = node->sysctl_num;
3064
3065 /* hw.mvxpe.mvxpe[unit].tx */
3066 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3067 0, CTLTYPE_NODE, "tx",
3068 SYSCTL_DESCR("Tx Queues"),
3069 NULL, 0, NULL, 0,
3070 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3071 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3072 return;
3073 }
3074 mvxpe_txqueuenum = node->sysctl_num;
3075
3076 #ifdef MVXPE_DEBUG
3077 /* hw.mvxpe.debug */
3078 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3079 CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
3080 SYSCTL_DESCR("mvxpe device driver debug control"),
3081 NULL, 0, &mvxpe_debug, 0,
3082 CTL_HW, mvxpe_root_num, CTL_CREATE, CTL_EOL) != 0) {
3083 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3084 return;
3085 }
3086 #endif
3087 /*
3088 * MIB access
3089 */
3090 /* hw.mvxpe.mvxpe[unit].mib.<mibs> */
3091 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3092 const char *name = mvxpe_mib_list[i].sysctl_name;
3093 const char *desc = mvxpe_mib_list[i].desc;
3094 struct mvxpe_sysctl_mib *mib_arg = &sc->sc_sysctl_mib[i];
3095
3096 mib_arg->sc = sc;
3097 mib_arg->index = i;
3098 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3099 CTLFLAG_READONLY, CTLTYPE_QUAD, name, desc,
3100 sysctl_read_mib, 0, (void *)mib_arg, 0,
3101 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_mibnum,
3102 CTL_CREATE, CTL_EOL) != 0) {
3103 aprint_normal_dev(sc->sc_dev,
3104 "couldn't create sysctl node\n");
3105 break;
3106 }
3107 }
3108
3109 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
3110 struct mvxpe_sysctl_queue *rxarg = &sc->sc_sysctl_rx_queue[q];
3111 struct mvxpe_sysctl_queue *txarg = &sc->sc_sysctl_tx_queue[q];
3112 #define MVXPE_SYSCTL_NAME(num) "queue" # num
3113 static const char *sysctl_queue_names[] = {
3114 MVXPE_SYSCTL_NAME(0), MVXPE_SYSCTL_NAME(1),
3115 MVXPE_SYSCTL_NAME(2), MVXPE_SYSCTL_NAME(3),
3116 MVXPE_SYSCTL_NAME(4), MVXPE_SYSCTL_NAME(5),
3117 MVXPE_SYSCTL_NAME(6), MVXPE_SYSCTL_NAME(7),
3118 };
3119 #undef MVXPE_SYSCTL_NAME
3120 #ifdef SYSCTL_INCLUDE_DESCR
3121 #define MVXPE_SYSCTL_DESCR(num) "configuration parameters for queue " # num
3122 static const char *sysctl_queue_descrs[] = {
3123 MVXPE_SYSCTL_DESCR(0), MVXPE_SYSCTL_DESCR(1),
3124 MVXPE_SYSCTL_DESCR(2), MVXPE_SYSCTL_DESCR(3),
3125 MVXPE_SYSCTL_DESCR(4), MVXPE_SYSCTL_DESCR(5),
3126 MVXPE_SYSCTL_DESCR(6), MVXPE_SYSCTL_DESCR(7),
3127 };
3128 #undef MVXPE_SYSCTL_DESCR
3129 #endif /* SYSCTL_INCLUDE_DESCR */
3130 int mvxpe_curnum;
3131
3132 rxarg->sc = txarg->sc = sc;
3133 rxarg->queue = txarg->queue = q;
3134 rxarg->rxtx = MVXPE_SYSCTL_RX;
3135 txarg->rxtx = MVXPE_SYSCTL_TX;
3136
3137 /* hw.mvxpe.mvxpe[unit].rx.[queue] */
3138 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3139 0, CTLTYPE_NODE,
3140 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3141 NULL, 0, NULL, 0,
3142 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3143 CTL_CREATE, CTL_EOL) != 0) {
3144 aprint_normal_dev(sc->sc_dev,
3145 "couldn't create sysctl node\n");
3146 break;
3147 }
3148 mvxpe_curnum = node->sysctl_num;
3149
3150 /* hw.mvxpe.mvxpe[unit].rx.[queue].length */
3151 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3152 CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3153 SYSCTL_DESCR("maximum length of the queue"),
3154 sysctl_set_queue_length, 0, (void *)rxarg, 0,
3155 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3156 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3157 aprint_normal_dev(sc->sc_dev,
3158 "couldn't create sysctl node\n");
3159 break;
3160 }
3161
3162 /* hw.mvxpe.mvxpe[unit].rx.[queue].threshold_timer_us */
3163 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3164 CTLFLAG_READWRITE, CTLTYPE_INT, "threshold_timer_us",
3165 SYSCTL_DESCR("interrupt coalescing threshold timer [us]"),
3166 sysctl_set_queue_rxthtime, 0, (void *)rxarg, 0,
3167 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3168 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3169 aprint_normal_dev(sc->sc_dev,
3170 "couldn't create sysctl node\n");
3171 break;
3172 }
3173
3174 /* hw.mvxpe.mvxpe[unit].tx.[queue] */
3175 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3176 0, CTLTYPE_NODE,
3177 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3178 NULL, 0, NULL, 0,
3179 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3180 CTL_CREATE, CTL_EOL) != 0) {
3181 aprint_normal_dev(sc->sc_dev,
3182 "couldn't create sysctl node\n");
3183 break;
3184 }
3185 mvxpe_curnum = node->sysctl_num;
3186
3187 /* hw.mvxpe.mvxpe[unit].tx.[queue].length */
3188 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3189 CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3190 SYSCTL_DESCR("maximum length of the queue"),
3191 sysctl_set_queue_length, 0, (void *)txarg, 0,
3192 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3193 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3194 aprint_normal_dev(sc->sc_dev,
3195 "couldn't create sysctl node\n");
3196 break;
3197 }
3198 }
3199
3200 /* hw.mvxpe.mvxpe[unit].clear_mib */
3201 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3202 CTLFLAG_READWRITE, CTLTYPE_INT, "clear_mib",
3203 SYSCTL_DESCR("Clear all MIB counters"),
3204 sysctl_clear_mib, 0, (void *)sc, 0,
3205 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3206 CTL_EOL) != 0) {
3207 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3208 return;
3209 }
3210
3211 }
3212
3213 /*
3214 * MIB
3215 */
3216 STATIC void
3217 mvxpe_clear_mib(struct mvxpe_softc *sc)
3218 {
3219 int i;
3220
3221 KASSERT_SC_MTX(sc);
3222
3223 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3224 if (mvxpe_mib_list[i].reg64)
3225 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum + 4));
3226 MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3227 sc->sc_sysctl_mib[i].counter = 0;
3228 }
3229 }
3230
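/*
 * The MIB registers are clear-on-read: accumulate every non-zero value
 * into the corresponding sc_sysctl_mib counter (64-bit counters are
 * read as two 32-bit halves) and fold the error and collision counters
 * into the ifnet statistics.
 */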
3231 STATIC void
3232 mvxpe_update_mib(struct mvxpe_softc *sc)
3233 {
3234 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3235 int i;
3236
3237 KASSERT_SC_MTX(sc);
3238
3239 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3240 uint32_t val_hi;
3241 uint32_t val_lo;
3242 uint64_t val;
3243
3244 if (mvxpe_mib_list[i].reg64) {
3245 /* XXX: implement bus_space_read_8() */
3246 val_lo = MVXPE_READ_MIB(sc,
3247 (mvxpe_mib_list[i].regnum + 4));
3248 val_hi = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3249 }
3250 else {
3251 val_lo = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3252 val_hi = 0;
3253 }
3254
3255 if ((val_lo | val_hi) == 0)
3256 continue;
3257
3258 val = ((uint64_t)val_hi << 32) | (uint64_t)val_lo;
3259 sc->sc_sysctl_mib[i].counter += val;
3260
3261 switch (mvxpe_mib_list[i].ext) {
3262 case MVXPE_MIBEXT_IF_OERRORS:
3263 ifp->if_oerrors += val;
3264 break;
3265 case MVXPE_MIBEXT_IF_IERRORS:
3266 ifp->if_ierrors += val;
3267 break;
3268 case MVXPE_MIBEXT_IF_COLLISIONS:
3269 ifp->if_collisions += val;
3270 break;
3271 default:
3272 break;
3273 }
3274
3275 }
3276 }
3277
3278 /*
3279 * for Debug
3280 */
3281 STATIC void
3282 mvxpe_dump_txdesc(struct mvxpe_tx_desc *desc, int idx)
3283 {
3284 #define DESC_PRINT(X) \
3285 if (X) \
3286 printf("txdesc[%d]." #X "=%#x\n", idx, X);
3287
3288 DESC_PRINT(desc->command);
3289 DESC_PRINT(desc->l4ichk);
3290 DESC_PRINT(desc->bytecnt);
3291 DESC_PRINT(desc->bufptr);
3292 DESC_PRINT(desc->flags);
3293 #undef DESC_PRINT
3294 }
3295
3296 STATIC void
3297 mvxpe_dump_rxdesc(struct mvxpe_rx_desc *desc, int idx)
3298 {
3299 #define DESC_PRINT(X) \
3300 if (X) \
3301 printf("rxdesc[%d]." #X "=%#x\n", idx, X);
3302
3303 DESC_PRINT(desc->status);
3304 DESC_PRINT(desc->bytecnt);
3305 DESC_PRINT(desc->bufptr);
3306 DESC_PRINT(desc->l4chk);
3307 #undef DESC_PRINT
3308 }
3309