/*	$NetBSD: if_mvxpe.c,v 1.5 2016/02/13 05:21:11 hikaru Exp $	*/
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mvxpe.c,v 1.5 2016/02/13 05:21:11 hikaru Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>
#include <dev/marvell/mvxpbmvar.h>
#include <dev/marvell/if_mvxpereg.h>
#include <dev/marvell/if_mvxpevar.h>

#include "locators.h"

#if BYTE_ORDER == BIG_ENDIAN
#error "BIG ENDIAN not supported"
#endif

#ifdef MVXPE_DEBUG
#define STATIC /* nothing */
#else
#define STATIC static
#endif

/* autoconf(9) */
STATIC int mvxpe_match(device_t, struct cfdata *, void *);
STATIC void mvxpe_attach(device_t, device_t, void *);
STATIC int mvxpe_evcnt_attach(struct mvxpe_softc *);
CFATTACH_DECL_NEW(mvxpe_mbus, sizeof(struct mvxpe_softc),
    mvxpe_match, mvxpe_attach, NULL, NULL);
STATIC void mvxpe_sc_lock(struct mvxpe_softc *);
STATIC void mvxpe_sc_unlock(struct mvxpe_softc *);

/* MII */
STATIC int mvxpe_miibus_readreg(device_t, int, int);
STATIC void mvxpe_miibus_writereg(device_t, int, int, int);
STATIC void mvxpe_miibus_statchg(struct ifnet *);

/* Address Decoding Window */
STATIC void mvxpe_wininit(struct mvxpe_softc *, enum marvell_tags *);

/* Device Register Initialization */
STATIC int mvxpe_initreg(struct ifnet *);

/* Descriptor Ring Control for each of queues */
STATIC void *mvxpe_dma_memalloc(struct mvxpe_softc *, bus_dmamap_t *, size_t);
STATIC int mvxpe_ring_alloc_queue(struct mvxpe_softc *, int);
STATIC void mvxpe_ring_dealloc_queue(struct mvxpe_softc *, int);
STATIC void mvxpe_ring_init_queue(struct mvxpe_softc *, int);
STATIC void mvxpe_ring_flush_queue(struct mvxpe_softc *, int);
STATIC void mvxpe_ring_sync_rx(struct mvxpe_softc *, int, int, int, int);
STATIC void mvxpe_ring_sync_tx(struct mvxpe_softc *, int, int, int, int);

/* Rx/Tx Queue Control */
STATIC int mvxpe_rx_queue_init(struct ifnet *, int);
STATIC int mvxpe_tx_queue_init(struct ifnet *, int);
STATIC int mvxpe_rx_queue_enable(struct ifnet *, int);
STATIC int mvxpe_tx_queue_enable(struct ifnet *, int);
STATIC void mvxpe_rx_lockq(struct mvxpe_softc *, int);
STATIC void mvxpe_rx_unlockq(struct mvxpe_softc *, int);
STATIC void mvxpe_tx_lockq(struct mvxpe_softc *, int);
STATIC void mvxpe_tx_unlockq(struct mvxpe_softc *, int);

/* Interrupt Handlers */
STATIC void mvxpe_disable_intr(struct mvxpe_softc *);
STATIC void mvxpe_enable_intr(struct mvxpe_softc *);
STATIC int mvxpe_rxtxth_intr(void *);
STATIC int mvxpe_misc_intr(void *);
STATIC int mvxpe_rxtx_intr(void *);
STATIC void mvxpe_tick(void *);

/* struct ifnet and mii callbacks */
STATIC void mvxpe_start(struct ifnet *);
STATIC int mvxpe_ioctl(struct ifnet *, u_long, void *);
STATIC int mvxpe_init(struct ifnet *);
STATIC void mvxpe_stop(struct ifnet *, int);
STATIC void mvxpe_watchdog(struct ifnet *);
STATIC int mvxpe_ifflags_cb(struct ethercom *);
STATIC int mvxpe_mediachange(struct ifnet *);
STATIC void mvxpe_mediastatus(struct ifnet *, struct ifmediareq *);

/* Link State Notify */
STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc);
STATIC void mvxpe_linkup(struct mvxpe_softc *);
STATIC void mvxpe_linkdown(struct mvxpe_softc *);
STATIC void mvxpe_linkreset(struct mvxpe_softc *);

/* Tx Subroutines */
STATIC int mvxpe_tx_queue_select(struct mvxpe_softc *, struct mbuf *);
STATIC int mvxpe_tx_queue(struct mvxpe_softc *, struct mbuf *, int);
STATIC void mvxpe_tx_set_csumflag(struct ifnet *,
    struct mvxpe_tx_desc *, struct mbuf *);
STATIC void mvxpe_tx_complete(struct mvxpe_softc *, uint32_t);
STATIC void mvxpe_tx_queue_complete(struct mvxpe_softc *, int);

/* Rx Subroutines */
STATIC void mvxpe_rx(struct mvxpe_softc *, uint32_t);
STATIC void mvxpe_rx_queue(struct mvxpe_softc *, int, int);
STATIC int mvxpe_rx_queue_select(struct mvxpe_softc *, uint32_t, int *);
STATIC void mvxpe_rx_refill(struct mvxpe_softc *, uint32_t);
STATIC void mvxpe_rx_queue_refill(struct mvxpe_softc *, int);
STATIC int mvxpe_rx_queue_add(struct mvxpe_softc *, int);
STATIC void mvxpe_rx_set_csumflag(struct ifnet *,
    struct mvxpe_rx_desc *, struct mbuf *);

/* MAC address filter */
STATIC uint8_t mvxpe_crc8(const uint8_t *, size_t);
STATIC void mvxpe_filter_setup(struct mvxpe_softc *);

/* sysctl(9) */
STATIC int sysctl_read_mib(SYSCTLFN_PROTO);
STATIC int sysctl_clear_mib(SYSCTLFN_PROTO);
STATIC int sysctl_set_queue_length(SYSCTLFN_PROTO);
STATIC int sysctl_set_queue_rxthtime(SYSCTLFN_PROTO);
STATIC void sysctl_mvxpe_init(struct mvxpe_softc *);

/* MIB */
STATIC void mvxpe_clear_mib(struct mvxpe_softc *);
STATIC void mvxpe_update_mib(struct mvxpe_softc *);

/* for Debug */
STATIC void mvxpe_dump_txdesc(struct mvxpe_tx_desc *, int) __attribute__((__unused__));
STATIC void mvxpe_dump_rxdesc(struct mvxpe_rx_desc *, int) __attribute__((__unused__));

STATIC int mvxpe_root_num;
STATIC kmutex_t mii_mutex;
STATIC int mii_init = 0;
#ifdef MVXPE_DEBUG
STATIC int mvxpe_debug = MVXPE_DEBUG;
#endif

/*
 * List of MIB registers and names
 */
STATIC struct mvxpe_mib_def {
	uint32_t regnum;
	int reg64;
	const char *sysctl_name;
	const char *desc;
} mvxpe_mib_list[] = {
	{MVXPE_MIB_RX_GOOD_OCT, 1, "rx_good_oct",
	    "Good Octets Rx"},
	{MVXPE_MIB_RX_BAD_OCT, 0, "rx_bad_oct",
	    "Bad Octets Rx"},
	{MVXPE_MIB_RX_MAC_TRNS_ERR, 0, "rx_mac_err",
	    "MAC Transmit Error"},
	{MVXPE_MIB_RX_GOOD_FRAME, 0, "rx_good_frame",
	    "Good Frames Rx"},
	{MVXPE_MIB_RX_BAD_FRAME, 0, "rx_bad_frame",
	    "Bad Frames Rx"},
	{MVXPE_MIB_RX_BCAST_FRAME, 0, "rx_bcast_frame",
	    "Broadcast Frames Rx"},
	{MVXPE_MIB_RX_MCAST_FRAME, 0, "rx_mcast_frame",
	    "Multicast Frames Rx"},
	{MVXPE_MIB_RX_FRAME64_OCT, 0, "rx_frame_1_64",
	    "Frame Size 1 - 64"},
	{MVXPE_MIB_RX_FRAME127_OCT, 0, "rx_frame_65_127",
	    "Frame Size 65 - 127"},
	{MVXPE_MIB_RX_FRAME255_OCT, 0, "rx_frame_128_255",
	    "Frame Size 128 - 255"},
	{MVXPE_MIB_RX_FRAME511_OCT, 0, "rx_frame_256_511",
	    "Frame Size 256 - 511"},
	{MVXPE_MIB_RX_FRAME1023_OCT, 0, "rx_frame_512_1023",
	    "Frame Size 512 - 1023"},
	{MVXPE_MIB_RX_FRAMEMAX_OCT, 0, "rx_frame_1024_max",
	    "Frame Size 1024 - Max"},
	{MVXPE_MIB_TX_GOOD_OCT, 1, "tx_good_oct",
	    "Good Octets Tx"},
	{MVXPE_MIB_TX_GOOD_FRAME, 0, "tx_good_frame",
	    "Good Frames Tx"},
	{MVXPE_MIB_TX_EXCES_COL, 0, "tx_exces_collision",
	    "Excessive Collision"},
	{MVXPE_MIB_TX_MCAST_FRAME, 0, "tx_mcast_frame",
	    "Multicast Frames Tx"},
	{MVXPE_MIB_TX_BCAST_FRAME, 0, "tx_bcast_frame",
	    "Broadcast Frames Tx"},
	{MVXPE_MIB_TX_MAC_CTL_ERR, 0, "tx_mac_err",
	    "Unknown MAC Control"},
	{MVXPE_MIB_FC_SENT, 0, "fc_tx",
	    "Flow Control Tx"},
	{MVXPE_MIB_FC_GOOD, 0, "fc_rx_good",
	    "Good Flow Control Rx"},
	{MVXPE_MIB_FC_BAD, 0, "fc_rx_bad",
	    "Bad Flow Control Rx"},
	{MVXPE_MIB_PKT_UNDERSIZE, 0, "pkt_undersize",
	    "Undersized Packets Rx"},
	{MVXPE_MIB_PKT_FRAGMENT, 0, "pkt_fragment",
	    "Fragmented Packets Rx"},
	{MVXPE_MIB_PKT_OVERSIZE, 0, "pkt_oversize",
	    "Oversized Packets Rx"},
	{MVXPE_MIB_PKT_JABBER, 0, "pkt_jabber",
	    "Jabber Packets Rx"},
	{MVXPE_MIB_MAC_RX_ERR, 0, "mac_rx_err",
	    "MAC Rx Errors"},
	{MVXPE_MIB_MAC_CRC_ERR, 0, "mac_crc_err",
	    "MAC CRC Errors"},
	{MVXPE_MIB_MAC_COL, 0, "mac_collision",
	    "MAC Collision"},
	{MVXPE_MIB_MAC_LATE_COL, 0, "mac_late_collision",
	    "MAC Late Collision"},
};
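
/*
 * A note on how this table is consumed (a sketch, based on the code
 * below): entries with reg64 set name 64-bit counters that span two
 * 32-bit registers; the rest are plain 32-bit counters.  The hardware
 * counters are clear-on-read (see the warm-up reads in mvxpe_initreg()
 * and the periodic reads in mvxpe_tick()), so mvxpe_update_mib()
 * accumulates them into sc->sc_sysctl_mib and the sysctl handlers
 * serve only that software copy.
 */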

/*
 * autoconf(9)
 */
/* ARGSUSED */
STATIC int
mvxpe_match(device_t parent, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	bus_size_t pv_off;
	uint32_t pv;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
		return 0;

	/* check port version */
	pv_off = mva->mva_offset + MVXPE_PV;
	pv = bus_space_read_4(mva->mva_iot, mva->mva_ioh, pv_off);
	if (MVXPE_PV_GET_VERSION(pv) < 0x10)
		return 0; /* old versions are not supported */

	return 1;
}

/* ARGSUSED */
STATIC void
mvxpe_attach(device_t parent, device_t self, void *aux)
{
	struct mvxpe_softc *sc = device_private(self);
	struct mii_softc *mii;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct marvell_attach_args *mva = aux;
	prop_dictionary_t dict;
	prop_data_t enaddrp = NULL;
	uint32_t phyaddr, maddrh, maddrl;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int q;

	aprint_naive("\n");
	aprint_normal(": Marvell ARMADA GbE Controller\n");
	memset(sc, 0, sizeof(*sc));
	sc->sc_dev = self;
	sc->sc_port = mva->mva_unit;
	sc->sc_iot = mva->mva_iot;
	sc->sc_dmat = mva->mva_dmat;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, mvxpe_tick, sc);

	/*
	 * BUS space
	 */
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		goto fail;
	}
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    mva->mva_offset + MVXPE_PORTMIB_BASE, MVXPE_PORTMIB_SIZE,
	    &sc->sc_mibh)) {
		aprint_error_dev(self,
		    "Cannot map destination address filter registers\n");
		goto fail;
	}
	sc->sc_version = MVXPE_READ(sc, MVXPE_PV);
	aprint_normal_dev(self, "Port Version %#x\n", sc->sc_version);

	/*
	 * Buffer Manager (BM) subsystem.
	 */
	sc->sc_bm = mvxpbm_device(mva);
	if (sc->sc_bm == NULL) {
		aprint_error_dev(self, "no Buffer Manager.\n");
		goto fail;
	}
	aprint_normal_dev(self,
	    "Using Buffer Manager: %s\n", mvxpbm_xname(sc->sc_bm));
	aprint_normal_dev(sc->sc_dev,
	    "%zu kbytes managed buffer, %zu bytes * %u entries allocated.\n",
	    mvxpbm_buf_size(sc->sc_bm) / 1024,
	    mvxpbm_chunk_size(sc->sc_bm), mvxpbm_chunk_count(sc->sc_bm));

	/*
	 * Make sure the DMA engines are in reset state.
	 */
	MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
	MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);

	/*
	 * Address decoding window
	 */
	mvxpe_wininit(sc, mva->mva_tags);

	/*
	 * MAC address
	 */
	dict = device_properties(self);
	if (dict)
		enaddrp = prop_dictionary_get(dict, "mac-address");
	if (enaddrp) {
		memcpy(enaddr, prop_data_data_nocopy(enaddrp), ETHER_ADDR_LEN);
		maddrh = enaddr[0] << 24;
		maddrh |= enaddr[1] << 16;
		maddrh |= enaddr[2] << 8;
		maddrh |= enaddr[3];
		maddrl = enaddr[4] << 8;
		maddrl |= enaddr[5];
		MVXPE_WRITE(sc, MVXPE_MACAH, maddrh);
		MVXPE_WRITE(sc, MVXPE_MACAL, maddrl);
	} else {
		/*
		 * Even if enaddr is not found in the dictionary,
		 * the port may already have been initialized by an IPL
		 * program such as U-Boot.
		 */
		maddrh = MVXPE_READ(sc, MVXPE_MACAH);
		maddrl = MVXPE_READ(sc, MVXPE_MACAL);
		if ((maddrh | maddrl) == 0) {
			aprint_error_dev(self, "No Ethernet address\n");
			return;
		}
	}
	sc->sc_enaddr[0] = maddrh >> 24;
	sc->sc_enaddr[1] = maddrh >> 16;
	sc->sc_enaddr[2] = maddrh >> 8;
	sc->sc_enaddr[3] = maddrh >> 0;
	sc->sc_enaddr[4] = maddrl >> 8;
	sc->sc_enaddr[5] = maddrl >> 0;
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));
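
	/*
	 * For illustration: as the shifts above show, MACAH holds the
	 * first four octets of the address and MACAL the last two, so
	 * 00:11:22:33:44:55 is stored as MACAH = 0x00112233 and
	 * MACAL = 0x00004455.  The sc_enaddr decoding just above is
	 * the exact inverse of the packing done in the if-branch.
	 */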

	/*
	 * Register interrupt handlers
	 * XXX: handle Ethernet unit intr. and Error intr.
	 */
	mvxpe_disable_intr(sc);
	marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpe_rxtxth_intr, sc);

	/*
	 * MIB buffer allocation
	 */
	sc->sc_sysctl_mib_size =
	    __arraycount(mvxpe_mib_list) * sizeof(struct mvxpe_sysctl_mib);
	sc->sc_sysctl_mib = kmem_alloc(sc->sc_sysctl_mib_size, KM_NOSLEEP);
	if (sc->sc_sysctl_mib == NULL)
		goto fail;
	memset(sc->sc_sysctl_mib, 0, sc->sc_sysctl_mib_size);

	/*
	 * Device DMA Buffer allocation
	 */
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		if (mvxpe_ring_alloc_queue(sc, q) != 0)
			goto fail;
		mvxpe_ring_init_queue(sc, q);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = mvxpe_start;
	ifp->if_ioctl = mvxpe_ioctl;
	ifp->if_init = mvxpe_init;
	ifp->if_stop = mvxpe_stop;
	ifp->if_watchdog = mvxpe_watchdog;

	/*
	 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
	 */
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;

	/*
	 * Initialize struct ifnet
	 */
	IFQ_SET_MAXLEN(&ifp->if_snd, max(MVXPE_TX_RING_CNT - 1, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));

	/*
	 * Enable DMA engines and initialize device registers.
	 */
	MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
	MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
	MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
	mvxpe_sc_lock(sc); /* XXX */
	mvxpe_filter_setup(sc);
	mvxpe_sc_unlock(sc);
	mvxpe_initreg(ifp);

	/*
	 * Now that the MAC is working, set up MII.
	 */
	if (mii_init == 0) {
		/*
		 * The MII bus is shared by all MACs and all PHYs in the SoC.
		 * Serializing the bus access should be safe.
		 */
		mutex_init(&mii_mutex, MUTEX_DEFAULT, IPL_NET);
		mii_init = 1;
	}
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mvxpe_miibus_readreg;
	sc->sc_mii.mii_writereg = mvxpe_miibus_writereg;
	sc->sc_mii.mii_statchg = mvxpe_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    mvxpe_mediachange, mvxpe_mediastatus);
	/*
	 * XXX: PHY addressing highly depends on the board design.
	 * We assume PHY address == MAC unit number here,
	 * but some boards may differ.
	 */
	mii_attach(self, &sc->sc_mii, 0xffffffff,
	    MII_PHY_ANY, sc->sc_dev->dv_unit, 0);
	mii = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (mii == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		phyaddr = MVXPE_PHYADDR_PHYAD(mii->mii_phy);
		MVXPE_WRITE(sc, MVXPE_PHYADDR, phyaddr);
		DPRINTSC(sc, 1, "PHYADDR: %#x\n", MVXPE_READ(sc, MVXPE_PHYADDR));
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);

	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, mvxpe_ifflags_cb);

	sysctl_mvxpe_init(sc);
	mvxpe_evcnt_attach(sc);
	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	return;

fail:
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++)
		mvxpe_ring_dealloc_queue(sc, q);
	if (sc->sc_sysctl_mib)
		kmem_free(sc->sc_sysctl_mib, sc->sc_sysctl_mib_size);

	return;
}

STATIC int
mvxpe_evcnt_attach(struct mvxpe_softc *sc)
{
#ifdef MVXPE_EVENT_COUNTERS
	int q;

	/* Master Interrupt Handler */
	evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtxth, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTxTH Intr.");
	evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtx, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Intr.");
	evcnt_attach_dynamic(&sc->sc_ev.ev_i_misc, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Intr.");

	/* RXTXTH Interrupt */
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtxth_txerr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTxTH Tx error summary");

	/* MISC Interrupt */
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_phystatuschng, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC phy status changed");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_linkchange, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC link status changed");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_iae, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC internal address error");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxoverrun, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Rx FIFO overrun");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxcrc, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Rx CRC error");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxlargepacket, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Rx too large frame");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txunderrun, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Tx FIFO underrun");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_prbserr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC SERDES loopback test err");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_srse, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC SERDES sync error");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txreq, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Tx resource error");

	/* RxTx Interrupt */
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rreq, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Rx resource error");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rpq, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Rx packet");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_tbrq, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Tx complete");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rxtxth, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx RxTxTH summary");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_txerr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Tx error summary");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_misc, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx MISC summary");

	/* Link */
	evcnt_attach_dynamic(&sc->sc_ev.ev_link_up, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "link up");
	evcnt_attach_dynamic(&sc->sc_ev.ev_link_down, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "link down");

	/* Rx Descriptor */
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_ce, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx CRC error counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_or, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx FIFO overrun counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_mf, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx too large frame counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_re, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx resource error counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_scat, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx unexpected scatter bufs");

	/* Tx Descriptor */
	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_lc, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx late collision counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_rl, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx excess. collision counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_ur, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx FIFO underrun counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_oth, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx unknown error counter");

	/* Status Registers */
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pdfc, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx discard counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pofc, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx overrun counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txbadfcs, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx bad FCS counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txdropped, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx dropped counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_lpic, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "LP_IDLE counter");

	/* Device Driver Errors */
	evcnt_attach_dynamic(&sc->sc_ev.ev_drv_wdogsoft, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "watchdog timer expired");
	evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txerr, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx descriptor alloc failed");
#define MVXPE_QUEUE_DESC(q) "Rx success in queue " # q
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		static const char *rxq_desc[] = {
			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
		};
		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxq[q], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), rxq_desc[q]);
	}
#undef MVXPE_QUEUE_DESC
#define MVXPE_QUEUE_DESC(q) "Tx success in queue " # q
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		static const char *txq_desc[] = {
			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
		};
		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txq[q], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), txq_desc[q]);
	}
#undef MVXPE_QUEUE_DESC
#define MVXPE_QUEUE_DESC(q) "Rx error in queue " # q
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		static const char *rxqe_desc[] = {
			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
		};
		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxqe[q], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), rxqe_desc[q]);
	}
#undef MVXPE_QUEUE_DESC
#define MVXPE_QUEUE_DESC(q) "Tx error in queue " # q
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		static const char *txqe_desc[] = {
			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
		};
		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txqe[q], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), txqe_desc[q]);
	}
#undef MVXPE_QUEUE_DESC

#endif /* MVXPE_EVENT_COUNTERS */
	return 0;
}

STATIC void
mvxpe_sc_lock(struct mvxpe_softc *sc)
{
	mutex_enter(&sc->sc_mtx);
}

STATIC void
mvxpe_sc_unlock(struct mvxpe_softc *sc)
{
	mutex_exit(&sc->sc_mtx);
}

/*
 * MII
 */
STATIC int
mvxpe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mvxpe_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi, val;
	int i;

	mutex_enter(&mii_mutex);

	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
			break;
	}
	if (i == MVXPE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&mii_mutex);
		return -1;
	}

	smi =
	    MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) | MVXPE_SMI_OPCODE_READ;
	MVXPE_WRITE(sc, MVXPE_SMI, smi);

	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
		DELAY(1);
		smi = MVXPE_READ(sc, MVXPE_SMI);
		if (smi & MVXPE_SMI_READVALID)
			break;
	}

	mutex_exit(&mii_mutex);

	DPRINTDEV(dev, 9, "i=%d, timeout=%d\n", i, MVXPE_PHY_TIMEOUT);

	val = smi & MVXPE_SMI_DATA_MASK;

	DPRINTDEV(dev, 9, "phy=%d, reg=%#x, val=%#x\n", phy, reg, val);

	return val;
}

STATIC void
mvxpe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct mvxpe_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi;
	int i;

	DPRINTDEV(dev, 9, "phy=%d reg=%#x val=%#x\n", phy, reg, val);

	mutex_enter(&mii_mutex);

	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
			break;
	}
	if (i == MVXPE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&mii_mutex);
		return;
	}

	smi = MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) |
	    MVXPE_SMI_OPCODE_WRITE | (val & MVXPE_SMI_DATA_MASK);
	MVXPE_WRITE(sc, MVXPE_SMI, smi);

	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
			break;
	}

	mutex_exit(&mii_mutex);

	if (i == MVXPE_PHY_TIMEOUT)
		aprint_error_ifnet(ifp, "phy write timed out\n");
}

STATIC void
mvxpe_miibus_statchg(struct ifnet *ifp)
{

	/* nothing to do */
}

/*
 * Address Decoding Window
 */
STATIC void
mvxpe_wininit(struct mvxpe_softc *sc, enum marvell_tags *tags)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t en, ac, size;
	int window, target, attr, rv, i;

	/* First disable all address decode windows */
	en = MVXPE_BARE_EN_MASK;
	MVXPE_WRITE(sc, MVXPE_BARE, en);

	ac = 0;
	for (window = 0, i = 0;
	    tags[i] != MARVELL_TAG_UNDEFINED && window < MVXPE_NWINDOW; i++) {
		rv = marvell_winparams_by_tag(pdev, tags[i],
		    &target, &attr, &base, &size);
		if (rv != 0 || size == 0)
			continue;

		if (base > 0xffffffffULL) {
			if (window >= MVXPE_NREMAP) {
				aprint_error_dev(sc->sc_dev,
				    "can't remap window %d\n", window);
				continue;
			}
			MVXPE_WRITE(sc, MVXPE_HA(window),
			    (base >> 32) & 0xffffffff);
		}

		MVXPE_WRITE(sc, MVXPE_BASEADDR(window),
		    MVXPE_BASEADDR_TARGET(target) |
		    MVXPE_BASEADDR_ATTR(attr) |
		    MVXPE_BASEADDR_BASE(base));
		MVXPE_WRITE(sc, MVXPE_S(window), MVXPE_S_SIZE(size));

		DPRINTSC(sc, 1, "Window %d Base 0x%016llx: Size 0x%08x\n",
		    window, base, size);

		en &= ~(1 << window);
		/* set full access (r/w) */
		ac |= MVXPE_EPAP_EPAR(window, MVXPE_EPAP_AC_FA);
		window++;
	}
	/* allow access to the decode windows */
	MVXPE_WRITE(sc, MVXPE_EPAP, ac);

	MVXPE_WRITE(sc, MVXPE_BARE, en);
}
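
/*
 * A worked example of the remap path above: only the first
 * MVXPE_NREMAP windows have a companion high-address register, so a
 * 64-bit base such as 0x1_0000_0000 is programmed as
 * MVXPE_HA(window) = 0x1 with the low 32 bits going into
 * MVXPE_BASEADDR(window).  Windows beyond MVXPE_NREMAP simply cannot
 * decode bases above 4GB and are skipped with an error message.
 */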

/*
 * Device Register Initialization
 * Reset device registers to the device driver's default values.
 * The device is not enabled here.
 */
STATIC int
mvxpe_initreg(struct ifnet *ifp)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	int serdes = 0;
	uint32_t reg;
	int q, i;

	DPRINTIFNET(ifp, 1, "initializing device register\n");

	/* Init TX/RX Queue Registers */
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		mvxpe_rx_lockq(sc, q);
		if (mvxpe_rx_queue_init(ifp, q) != 0) {
			aprint_error_ifnet(ifp,
			    "initialization failed: cannot initialize queue\n");
			mvxpe_rx_unlockq(sc, q);
			return ENOBUFS;
		}
		mvxpe_rx_unlockq(sc, q);

		mvxpe_tx_lockq(sc, q);
		if (mvxpe_tx_queue_init(ifp, q) != 0) {
			aprint_error_ifnet(ifp,
			    "initialization failed: cannot initialize queue\n");
			mvxpe_tx_unlockq(sc, q);
			return ENOBUFS;
		}
		mvxpe_tx_unlockq(sc, q);
	}

	/* Tx MTU Limit */
	MVXPE_WRITE(sc, MVXPE_TXMTU, MVXPE_MTU);

	/* Check SGMII or SERDES (assume IPL/U-Boot initializes this) */
	reg = MVXPE_READ(sc, MVXPE_PMACC0);
	if ((reg & MVXPE_PMACC0_PORTTYPE) != 0)
		serdes = 1;

	/* Ethernet Unit Control */
	reg = MVXPE_READ(sc, MVXPE_EUC);
	reg |= MVXPE_EUC_POLLING;
	MVXPE_WRITE(sc, MVXPE_EUC, reg);

	/* Auto Negotiation */
	reg = MVXPE_PANC_MUSTSET;	/* must write 0x1 */
	reg |= MVXPE_PANC_FORCELINKFAIL; /* force link state down */
	reg |= MVXPE_PANC_ANSPEEDEN;	/* interface speed negotiation */
	reg |= MVXPE_PANC_ANDUPLEXEN;	/* negotiate duplex mode */
	if (serdes) {
		reg |= MVXPE_PANC_INBANDANEN; /* In Band negotiation */
		reg |= MVXPE_PANC_INBANDANBYPASSEN; /* bypass negotiation */
		reg |= MVXPE_PANC_SETFULLDX; /* set full-duplex on failure */
	}
	MVXPE_WRITE(sc, MVXPE_PANC, reg);

	/* EEE: Low Power Idle */
	reg = MVXPE_LPIC0_LILIMIT(MVXPE_LPI_LI);
	reg |= MVXPE_LPIC0_TSLIMIT(MVXPE_LPI_TS);
	MVXPE_WRITE(sc, MVXPE_LPIC0, reg);

	reg = MVXPE_LPIC1_TWLIMIT(MVXPE_LPI_TS);
	MVXPE_WRITE(sc, MVXPE_LPIC1, reg);

	reg = MVXPE_LPIC2_MUSTSET;
	MVXPE_WRITE(sc, MVXPE_LPIC2, reg);

	/* Port MAC Control set 0 */
	reg = MVXPE_PMACC0_MUSTSET;	/* must write 0x1 */
	reg &= ~MVXPE_PMACC0_PORTEN;	/* port is still disabled */
	reg |= MVXPE_PMACC0_FRAMESIZELIMIT(MVXPE_MRU);
	if (serdes)
		reg |= MVXPE_PMACC0_PORTTYPE;
	MVXPE_WRITE(sc, MVXPE_PMACC0, reg);

	/* Port MAC Control set 1 is only used for loop-back test */

	/* Port MAC Control set 2 */
	reg = MVXPE_READ(sc, MVXPE_PMACC2);
	reg &= (MVXPE_PMACC2_PCSEN | MVXPE_PMACC2_RGMIIEN);
	reg |= MVXPE_PMACC2_MUSTSET;
	MVXPE_WRITE(sc, MVXPE_PMACC2, reg);

	/* Port MAC Control set 3 is used for IPG tune */

	/* Port MAC Control set 4 is not used */

	/* Port Configuration Extended: enable Tx CRC generation */
	reg = MVXPE_READ(sc, MVXPE_PXCX);
	reg &= ~MVXPE_PXCX_TXCRCDIS;
	MVXPE_WRITE(sc, MVXPE_PXCX, reg);

	/* clear MIB counter registers (clear by read) */
	for (i = 0; i < __arraycount(mvxpe_mib_list); i++)
		MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum));

	/* Set SDC register except IPGINT bits */
	reg = MVXPE_SDC_RXBSZ_16_64BITWORDS;
	reg |= MVXPE_SDC_TXBSZ_16_64BITWORDS;
	reg |= MVXPE_SDC_BLMR;
	reg |= MVXPE_SDC_BLMT;
	MVXPE_WRITE(sc, MVXPE_SDC, reg);

	return 0;
}

/*
 * Descriptor Ring Controls for each of queues
 */
STATIC void *
mvxpe_dma_memalloc(struct mvxpe_softc *sc, bus_dmamap_t *map, size_t size)
{
	bus_dma_segment_t segs;
	void *kva = NULL;
	int nsegs;

	/*
	 * Allocate the descriptor queues.
	 * struct mvxpe_ring_data contains an array of descriptors per queue.
	 */
	if (bus_dmamem_alloc(sc->sc_dmat,
	    size, PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't alloc device memory (%zu bytes)\n", size);
		return NULL;
	}
	if (bus_dmamem_map(sc->sc_dmat,
	    &segs, nsegs, size, &kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't map dma buffers (%zu bytes)\n", size);
		goto fail1;
	}

	if (bus_dmamap_create(sc->sc_dmat,
	    size, 1, size, 0, BUS_DMA_NOWAIT, map)) {
		aprint_error_dev(sc->sc_dev, "can't create dma map\n");
		goto fail2;
	}
	if (bus_dmamap_load(sc->sc_dmat,
	    *map, kva, size, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't load dma map\n");
		goto fail3;
	}
	memset(kva, 0, size);
	return kva;

fail3:
	bus_dmamap_destroy(sc->sc_dmat, *map);
	memset(map, 0, sizeof(*map));
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, size);
fail1:
	bus_dmamem_free(sc->sc_dmat, &segs, nsegs);
	return NULL;
}
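
/*
 * The helper above follows the usual bus_dma(9) four-step chain:
 * bus_dmamem_alloc() reserves DMA-safe memory, bus_dmamem_map() gives
 * it a kernel virtual address, bus_dmamap_create() builds the map
 * object, and bus_dmamap_load() resolves the buffer into physical
 * segments for the device.  The fail1..fail3 labels unwind these
 * steps in exactly the reverse order, which is why they are laid out
 * bottom-up.
 */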

STATIC int
mvxpe_ring_alloc_queue(struct mvxpe_softc *sc, int q)
{
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);

	/*
	 * MVXPE_RX_RING_CNT and MVXPE_TX_RING_CNT are hard limits of
	 * the queue length.  The real queue length is limited by
	 * sc->sc_rx_ring[q].rx_queue_len and sc->sc_tx_ring[q].tx_queue_len.
	 *
	 * Because descriptor ring reallocation needs reprogramming of
	 * DMA registers, we allocate enough descriptors for the hard
	 * limit of the queue length.
	 */
	rx->rx_descriptors =
	    mvxpe_dma_memalloc(sc, &rx->rx_descriptors_map,
		(sizeof(struct mvxpe_rx_desc) * MVXPE_RX_RING_CNT));
	if (rx->rx_descriptors == NULL)
		goto fail;

	tx->tx_descriptors =
	    mvxpe_dma_memalloc(sc, &tx->tx_descriptors_map,
		(sizeof(struct mvxpe_tx_desc) * MVXPE_TX_RING_CNT));
	if (tx->tx_descriptors == NULL)
		goto fail;

	return 0;
fail:
	mvxpe_ring_dealloc_queue(sc, q);
	aprint_error_dev(sc->sc_dev, "DMA Ring buffer allocation failure.\n");
	return ENOMEM;
}

STATIC void
mvxpe_ring_dealloc_queue(struct mvxpe_softc *sc, int q)
{
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	bus_dma_segment_t *segs;
	bus_size_t size;
	void *kva;
	int nsegs;

	/* Rx */
	kva = (void *)MVXPE_RX_RING_MEM_VA(sc, q);
	if (kva) {
		segs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_segs;
		nsegs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_nsegs;
		size = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_mapsize;

		bus_dmamap_unload(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
		bus_dmamap_destroy(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
		bus_dmamem_unmap(sc->sc_dmat, kva, size);
		bus_dmamem_free(sc->sc_dmat, segs, nsegs);
	}

	/* Tx */
	kva = (void *)MVXPE_TX_RING_MEM_VA(sc, q);
	if (kva) {
		segs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_segs;
		nsegs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_nsegs;
		size = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_mapsize;

		bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
		bus_dmamap_destroy(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
		bus_dmamem_unmap(sc->sc_dmat, kva, size);
		bus_dmamem_free(sc->sc_dmat, segs, nsegs);
	}

	/* Clear all dangling pointers */
	memset(rx, 0, sizeof(*rx));
	memset(tx, 0, sizeof(*tx));
}

STATIC void
mvxpe_ring_init_queue(struct mvxpe_softc *sc, int q)
{
	struct mvxpe_rx_desc *rxd = MVXPE_RX_RING_MEM_VA(sc, q);
	struct mvxpe_tx_desc *txd = MVXPE_TX_RING_MEM_VA(sc, q);
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	static const int rx_default_queue_len[] = {
		MVXPE_RX_QUEUE_LIMIT_0, MVXPE_RX_QUEUE_LIMIT_1,
		MVXPE_RX_QUEUE_LIMIT_2, MVXPE_RX_QUEUE_LIMIT_3,
		MVXPE_RX_QUEUE_LIMIT_4, MVXPE_RX_QUEUE_LIMIT_5,
		MVXPE_RX_QUEUE_LIMIT_6, MVXPE_RX_QUEUE_LIMIT_7,
	};
	static const int tx_default_queue_len[] = {
		MVXPE_TX_QUEUE_LIMIT_0, MVXPE_TX_QUEUE_LIMIT_1,
		MVXPE_TX_QUEUE_LIMIT_2, MVXPE_TX_QUEUE_LIMIT_3,
		MVXPE_TX_QUEUE_LIMIT_4, MVXPE_TX_QUEUE_LIMIT_5,
		MVXPE_TX_QUEUE_LIMIT_6, MVXPE_TX_QUEUE_LIMIT_7,
	};
	extern uint32_t mvTclk;
	int i;

	/* Rx handle */
	for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
		MVXPE_RX_DESC(sc, q, i) = &rxd[i];
		MVXPE_RX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_rx_desc) * i;
		MVXPE_RX_PKTBUF(sc, q, i) = NULL;
	}
	mutex_init(&rx->rx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
	rx->rx_dma = rx->rx_cpu = 0;
	rx->rx_queue_len = rx_default_queue_len[q];
	if (rx->rx_queue_len > MVXPE_RX_RING_CNT)
		rx->rx_queue_len = MVXPE_RX_RING_CNT;
	rx->rx_queue_th_received = rx->rx_queue_len / MVXPE_RXTH_RATIO;
	rx->rx_queue_th_free = rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
	rx->rx_queue_th_time = (mvTclk / 1000) / 2; /* 0.5 [ms] */

	/* Tx handle */
	for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
		MVXPE_TX_DESC(sc, q, i) = &txd[i];
		MVXPE_TX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_tx_desc) * i;
		MVXPE_TX_MBUF(sc, q, i) = NULL;
		/* Tx handle needs DMA map for busdma_load_mbuf() */
		if (bus_dmamap_create(sc->sc_dmat,
		    mvxpbm_chunk_size(sc->sc_bm),
		    MVXPE_TX_SEGLIMIT, mvxpbm_chunk_size(sc->sc_bm), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &MVXPE_TX_MAP(sc, q, i))) {
			aprint_error_dev(sc->sc_dev,
			    "can't create dma map (tx ring %d)\n", i);
		}
	}
	mutex_init(&tx->tx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
	tx->tx_dma = tx->tx_cpu = 0;
	tx->tx_queue_len = tx_default_queue_len[q];
	if (tx->tx_queue_len > MVXPE_TX_RING_CNT)
		tx->tx_queue_len = MVXPE_TX_RING_CNT;
	tx->tx_used = 0;
	tx->tx_queue_th_free = tx->tx_queue_len / MVXPE_TXTH_RATIO;
}
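
/*
 * Illustrative numbers only (the actual ratio constants are defined
 * elsewhere, presumably in if_mvxpevar.h): with rx_queue_len = 256,
 * MVXPE_RXTH_RATIO = 8 and MVXPE_RXTH_REFILL_RATIO = 4, a queue would
 * interrupt after 32 received packets (rx_queue_th_received) or once
 * 64 descriptors want refilling (rx_queue_th_free), while
 * rx_queue_th_time arms a 0.5 ms coalescing timer expressed in TCLK
 * cycles (mvTclk / 2000).
 */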

STATIC void
mvxpe_ring_flush_queue(struct mvxpe_softc *sc, int q)
{
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	int i;

	KASSERT_RX_MTX(sc, q);
	KASSERT_TX_MTX(sc, q);

	/* Rx handle */
	for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
		if (MVXPE_RX_PKTBUF(sc, q, i) == NULL)
			continue;
		mvxpbm_free_chunk(MVXPE_RX_PKTBUF(sc, q, i));
		MVXPE_RX_PKTBUF(sc, q, i) = NULL;
	}
	rx->rx_dma = rx->rx_cpu = 0;

	/* Tx handle */
	for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
		if (MVXPE_TX_MBUF(sc, q, i) == NULL)
			continue;
		bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, i));
		m_freem(MVXPE_TX_MBUF(sc, q, i));
		MVXPE_TX_MBUF(sc, q, i) = NULL;
	}
	tx->tx_dma = tx->tx_cpu = 0;
	tx->tx_used = 0;
}

STATIC void
mvxpe_ring_sync_rx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
{
	int wrap;

	KASSERT_RX_MTX(sc, q);
	KASSERT(count > 0 && count <= MVXPE_RX_RING_CNT);
	KASSERT(idx >= 0 && idx < MVXPE_RX_RING_CNT);

	wrap = (idx + count) - MVXPE_RX_RING_CNT;
	if (wrap > 0) {
		count -= wrap;
		KASSERT(count > 0);
		bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
		    0, sizeof(struct mvxpe_rx_desc) * wrap, ops);
	}
	bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
	    MVXPE_RX_DESC_OFF(sc, q, idx),
	    sizeof(struct mvxpe_rx_desc) * count, ops);
}
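
/*
 * A worked example of the wrap handling above: with MVXPE_RX_RING_CNT
 * of 256, idx = 250 and count = 10 give wrap = 4, so the first
 * bus_dmamap_sync() covers descriptors 0..3 and the second covers
 * descriptors 250..255 (the remaining count of 6).  A range that
 * does not wrap (wrap <= 0) takes only the second sync.  The Tx
 * variant below applies the same arithmetic to the Tx ring.
 */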

STATIC void
mvxpe_ring_sync_tx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
{
	int wrap = 0;

	KASSERT_TX_MTX(sc, q);
	KASSERT(count > 0 && count <= MVXPE_TX_RING_CNT);
	KASSERT(idx >= 0 && idx < MVXPE_TX_RING_CNT);

	wrap = (idx + count) - MVXPE_TX_RING_CNT;
	if (wrap > 0) {
		count -= wrap;
		bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
		    0, sizeof(struct mvxpe_tx_desc) * wrap, ops);
	}
	bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
	    MVXPE_TX_DESC_OFF(sc, q, idx),
	    sizeof(struct mvxpe_tx_desc) * count, ops);
}

/*
 * Rx/Tx Queue Control
 */
STATIC int
mvxpe_rx_queue_init(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	uint32_t reg;

	KASSERT_RX_MTX(sc, q);
	KASSERT(MVXPE_RX_RING_MEM_PA(sc, q) != 0);

	/* descriptor address */
	MVXPE_WRITE(sc, MVXPE_PRXDQA(q), MVXPE_RX_RING_MEM_PA(sc, q));

	/* Rx buffer size and descriptor ring size */
	reg = MVXPE_PRXDQS_BUFFERSIZE(mvxpbm_chunk_size(sc->sc_bm) >> 3);
	reg |= MVXPE_PRXDQS_DESCRIPTORSQUEUESIZE(MVXPE_RX_RING_CNT);
	MVXPE_WRITE(sc, MVXPE_PRXDQS(q), reg);
	DPRINTIFNET(ifp, 1, "PRXDQS(%d): %#x\n",
	    q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));

	/* Rx packet offset address */
	reg = MVXPE_PRXC_PACKETOFFSET(mvxpbm_packet_offset(sc->sc_bm) >> 3);
	MVXPE_WRITE(sc, MVXPE_PRXC(q), reg);
	DPRINTIFNET(ifp, 1, "PRXC(%d): %#x\n",
	    q, MVXPE_READ(sc, MVXPE_PRXC(q)));

	/* Rx DMA SNOOP */
	reg = MVXPE_PRXSNP_SNOOPNOOFBYTES(MVXPE_MRU);
	reg |= MVXPE_PRXSNP_L2DEPOSITNOOFBYTES(MVXPE_MRU);
	MVXPE_WRITE(sc, MVXPE_PRXSNP(q), reg);

	/* if DMA is not working, the register is not updated */
	KASSERT(MVXPE_READ(sc, MVXPE_PRXDQA(q)) == MVXPE_RX_RING_MEM_PA(sc, q));
	return 0;
}

STATIC int
mvxpe_tx_queue_init(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	uint32_t reg;

	KASSERT_TX_MTX(sc, q);
	KASSERT(MVXPE_TX_RING_MEM_PA(sc, q) != 0);

	/* descriptor address */
	MVXPE_WRITE(sc, MVXPE_PTXDQA(q), MVXPE_TX_RING_MEM_PA(sc, q));

	/* Tx threshold, and descriptor ring size */
	reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
	reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
	MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
	DPRINTIFNET(ifp, 1, "PTXDQS(%d): %#x\n",
	    q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));

	/* if DMA is not working, the register is not updated */
	KASSERT(MVXPE_READ(sc, MVXPE_PTXDQA(q)) == MVXPE_TX_RING_MEM_PA(sc, q));
	return 0;
}

STATIC int
mvxpe_rx_queue_enable(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	uint32_t reg;

	KASSERT_RX_MTX(sc, q);

	/* Set Rx interrupt threshold */
	reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
	reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
	MVXPE_WRITE(sc, MVXPE_PRXDQTH(q), reg);

	reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
	MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);

	/* Unmask RXTX Intr. */
	reg = MVXPE_READ(sc, MVXPE_PRXTXIM);
	reg |= MVXPE_PRXTXI_RREQ(q); /* Rx resource error */
	MVXPE_WRITE(sc, MVXPE_PRXTXIM, reg);

	/* Unmask RXTX_TH Intr. */
	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
	reg |= MVXPE_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
	reg |= MVXPE_PRXTXTI_RDTAQ(q); /* Rx Descriptor Alert */
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);

	/* Enable Rx queue */
	reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
	reg |= MVXPE_RQC_ENQ(q);
	MVXPE_WRITE(sc, MVXPE_RQC, reg);

	return 0;
}

STATIC int
mvxpe_tx_queue_enable(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	uint32_t reg;

	KASSERT_TX_MTX(sc, q);

	/* Set Tx interrupt threshold */
	reg = MVXPE_READ(sc, MVXPE_PTXDQS(q));
	reg &= ~MVXPE_PTXDQS_TBT_MASK; /* keep queue size */
	reg |= MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
	MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);

	/* Unmask RXTX_TH Intr. */
	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
	reg |= MVXPE_PRXTXTI_TBTCQ(q); /* Tx Threshold cross */
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);

	/* Don't update MVXPE_TQC here, there is no packet yet. */
	return 0;
}

STATIC void
mvxpe_rx_lockq(struct mvxpe_softc *sc, int q)
{
	KASSERT(q >= 0);
	KASSERT(q < MVXPE_QUEUE_SIZE);
	mutex_enter(&sc->sc_rx_ring[q].rx_ring_mtx);
}

STATIC void
mvxpe_rx_unlockq(struct mvxpe_softc *sc, int q)
{
	KASSERT(q >= 0);
	KASSERT(q < MVXPE_QUEUE_SIZE);
	mutex_exit(&sc->sc_rx_ring[q].rx_ring_mtx);
}

STATIC void
mvxpe_tx_lockq(struct mvxpe_softc *sc, int q)
{
	KASSERT(q >= 0);
	KASSERT(q < MVXPE_QUEUE_SIZE);
	mutex_enter(&sc->sc_tx_ring[q].tx_ring_mtx);
}

STATIC void
mvxpe_tx_unlockq(struct mvxpe_softc *sc, int q)
{
	KASSERT(q >= 0);
	KASSERT(q < MVXPE_QUEUE_SIZE);
	mutex_exit(&sc->sc_tx_ring[q].tx_ring_mtx);
}

/*
 * Interrupt Handlers
 */
STATIC void
mvxpe_disable_intr(struct mvxpe_softc *sc)
{
	MVXPE_WRITE(sc, MVXPE_EUIM, 0);
	MVXPE_WRITE(sc, MVXPE_EUIC, 0);
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, 0);
	MVXPE_WRITE(sc, MVXPE_PRXTXTIC, 0);
	MVXPE_WRITE(sc, MVXPE_PRXTXIM, 0);
	MVXPE_WRITE(sc, MVXPE_PRXTXIC, 0);
	MVXPE_WRITE(sc, MVXPE_PMIM, 0);
	MVXPE_WRITE(sc, MVXPE_PMIC, 0);
	MVXPE_WRITE(sc, MVXPE_PIE, 0);
}

STATIC void
mvxpe_enable_intr(struct mvxpe_softc *sc)
{
	uint32_t reg;

	/* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
	reg = MVXPE_READ(sc, MVXPE_PMIM);
	reg |= MVXPE_PMI_PHYSTATUSCHNG;
	reg |= MVXPE_PMI_LINKCHANGE;
	reg |= MVXPE_PMI_IAE;
	reg |= MVXPE_PMI_RXOVERRUN;
	reg |= MVXPE_PMI_RXCRCERROR;
	reg |= MVXPE_PMI_RXLARGEPACKET;
	reg |= MVXPE_PMI_TXUNDRN;
	reg |= MVXPE_PMI_PRBSERROR;
	reg |= MVXPE_PMI_SRSE;
	reg |= MVXPE_PMI_TREQ_MASK;
	MVXPE_WRITE(sc, MVXPE_PMIM, reg);

	/* Enable RXTX Intr. (via RXTX_TH Summary bit) */
	reg = MVXPE_READ(sc, MVXPE_PRXTXIM);
	reg |= MVXPE_PRXTXI_RREQ_MASK; /* Rx resource error */
	MVXPE_WRITE(sc, MVXPE_PRXTXIM, reg);

	/* Enable Summary Bit to check all interrupt cause. */
	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
	reg |= MVXPE_PRXTXTI_PMISCICSUMMARY;
	reg |= MVXPE_PRXTXTI_PTXERRORSUMMARY;
	reg |= MVXPE_PRXTXTI_PRXTXICSUMMARY;
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);

	/* Enable All Queue Interrupt */
	reg = MVXPE_READ(sc, MVXPE_PIE);
	reg |= MVXPE_PIE_RXPKTINTRPTENB_MASK;
	reg |= MVXPE_PIE_TXPKTINTRPTENB_MASK;
	MVXPE_WRITE(sc, MVXPE_PIE, reg);
}
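
/*
 * How the pieces above fit together: per-event causes are gathered in
 * two intermediate cause registers, PMIC (MISC events) and PRXTXIC
 * (RxTx events), and each is surfaced in PRXTXTIC through its summary
 * bit (PMISCICSUMMARY / PRXTXICSUMMARY).  mvxpe_rxtxth_intr() is the
 * only handler registered with marvell_intr_establish() in attach; it
 * dispatches to mvxpe_misc_intr() and mvxpe_rxtx_intr() below when
 * the corresponding summary bit is set.
 */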

STATIC int
mvxpe_rxtxth_intr(void *arg)
{
	struct mvxpe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t ic, queues, datum = 0;

	DPRINTSC(sc, 2, "got RXTX_TH_Intr\n");
	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtxth);

	mvxpe_sc_lock(sc);
	ic = MVXPE_READ(sc, MVXPE_PRXTXTIC);
	if (ic == 0) {
		mvxpe_sc_unlock(sc);
		return 0;
	}
	MVXPE_WRITE(sc, MVXPE_PRXTXTIC, ~ic);
	datum = datum ^ ic;

	DPRINTIFNET(ifp, 2, "PRXTXTIC: %#x\n", ic);

	/* ack maintenance interrupts first */
	if (ic & MVXPE_PRXTXTI_PTXERRORSUMMARY) {
		DPRINTIFNET(ifp, 1, "PRXTXTIC: +PTXERRORSUMMARY\n");
		MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtxth_txerr);
	}
	if ((ic & MVXPE_PRXTXTI_PMISCICSUMMARY)) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +PMISCICSUMMARY\n");
		mvxpe_misc_intr(sc);
	}
	if (ic & MVXPE_PRXTXTI_PRXTXICSUMMARY) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +PRXTXICSUMMARY\n");
		mvxpe_rxtx_intr(sc);
	}
	if (!(ifp->if_flags & IFF_RUNNING)) {
		mvxpe_sc_unlock(sc);
		return 1;
	}

	/* RxTxTH interrupt */
	queues = MVXPE_PRXTXTI_GET_RBICTAPQ(ic);
	if (queues) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +RXEOF\n");
		mvxpe_rx(sc, queues);
	}
	queues = MVXPE_PRXTXTI_GET_TBTCQ(ic);
	if (queues) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +TBTCQ\n");
		mvxpe_tx_complete(sc, queues);
	}
	queues = MVXPE_PRXTXTI_GET_RDTAQ(ic);
	if (queues) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +RDTAQ\n");
		mvxpe_rx_refill(sc, queues);
	}
	mvxpe_sc_unlock(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mvxpe_start(ifp);

	rnd_add_uint32(&sc->sc_rnd_source, datum);

	return 1;
}

STATIC int
mvxpe_misc_intr(void *arg)
{
	struct mvxpe_softc *sc = arg;
#ifdef MVXPE_DEBUG
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#endif
	uint32_t ic;
	uint32_t datum = 0;
	int claimed = 0;

	DPRINTSC(sc, 2, "got MISC_INTR\n");
	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_misc);

	KASSERT_SC_MTX(sc);

	for (;;) {
		ic = MVXPE_READ(sc, MVXPE_PMIC);
		ic &= MVXPE_READ(sc, MVXPE_PMIM);
		if (ic == 0)
			break;
		MVXPE_WRITE(sc, MVXPE_PMIC, ~ic);
		datum = datum ^ ic;
		claimed = 1;

		DPRINTIFNET(ifp, 2, "PMIC=%#x\n", ic);
		if (ic & MVXPE_PMI_PHYSTATUSCHNG) {
			DPRINTIFNET(ifp, 2, "+PHYSTATUSCHNG\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_phystatuschng);
		}
		if (ic & MVXPE_PMI_LINKCHANGE) {
			DPRINTIFNET(ifp, 2, "+LINKCHANGE\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_linkchange);
			mvxpe_linkupdate(sc);
		}
		if (ic & MVXPE_PMI_IAE) {
			DPRINTIFNET(ifp, 2, "+IAE\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_iae);
		}
		if (ic & MVXPE_PMI_RXOVERRUN) {
			DPRINTIFNET(ifp, 2, "+RXOVERRUN\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxoverrun);
		}
		if (ic & MVXPE_PMI_RXCRCERROR) {
			DPRINTIFNET(ifp, 2, "+RXCRCERROR\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxcrc);
		}
		if (ic & MVXPE_PMI_RXLARGEPACKET) {
			DPRINTIFNET(ifp, 2, "+RXLARGEPACKET\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxlargepacket);
		}
		if (ic & MVXPE_PMI_TXUNDRN) {
			DPRINTIFNET(ifp, 2, "+TXUNDRN\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txunderrun);
		}
		if (ic & MVXPE_PMI_PRBSERROR) {
			DPRINTIFNET(ifp, 2, "+PRBSERROR\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_prbserr);
		}
		if (ic & MVXPE_PMI_TREQ_MASK) {
			DPRINTIFNET(ifp, 2, "+TREQ\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txreq);
		}
	}
	if (datum)
		rnd_add_uint32(&sc->sc_rnd_source, datum);

	return claimed;
}

STATIC int
mvxpe_rxtx_intr(void *arg)
{
	struct mvxpe_softc *sc = arg;
#ifdef MVXPE_DEBUG
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#endif
	uint32_t datum = 0;
	uint32_t prxtxic;
	int claimed = 0;

	DPRINTSC(sc, 2, "got RXTX_Intr\n");
	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtx);

	KASSERT_SC_MTX(sc);

	for (;;) {
		prxtxic = MVXPE_READ(sc, MVXPE_PRXTXIC);
		prxtxic &= MVXPE_READ(sc, MVXPE_PRXTXIM);
		if (prxtxic == 0)
			break;
		MVXPE_WRITE(sc, MVXPE_PRXTXIC, ~prxtxic);
		datum = datum ^ prxtxic;
		claimed = 1;

		DPRINTSC(sc, 2, "PRXTXIC: %#x\n", prxtxic);

		if (prxtxic & MVXPE_PRXTXI_RREQ_MASK) {
			DPRINTIFNET(ifp, 1, "Rx Resource Error.\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rreq);
		}
		if (prxtxic & MVXPE_PRXTXI_RPQ_MASK) {
			DPRINTIFNET(ifp, 1, "Rx Packet in Queue.\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rpq);
		}
		if (prxtxic & MVXPE_PRXTXI_TBRQ_MASK) {
			DPRINTIFNET(ifp, 1, "Tx Buffer Return.\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_tbrq);
		}
		if (prxtxic & MVXPE_PRXTXI_PRXTXTHICSUMMARY) {
			DPRINTIFNET(ifp, 1, "PRXTXTHIC Summary\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rxtxth);
		}
		if (prxtxic & MVXPE_PRXTXI_PTXERRORSUMMARY) {
			DPRINTIFNET(ifp, 1, "PTXERROR Summary\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_txerr);
		}
		if (prxtxic & MVXPE_PRXTXI_PMISCICSUMMARY) {
			DPRINTIFNET(ifp, 1, "PMISCIC Summary\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_misc);
		}
	}
	if (datum)
		rnd_add_uint32(&sc->sc_rnd_source, datum);

	return claimed;
}

STATIC void
mvxpe_tick(void *arg)
{
	struct mvxpe_softc *sc = arg;
	struct mii_data *mii = &sc->sc_mii;

	mvxpe_sc_lock(sc);

	mii_tick(mii);
	mii_pollstat(&sc->sc_mii);

	/* read MIB registers (clear by read) */
	mvxpe_update_mib(sc);

	/* read counter registers (clear by read) */
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pdfc,
	    MVXPE_READ(sc, MVXPE_PDFC));
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pofc,
	    MVXPE_READ(sc, MVXPE_POFC));
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txbadfcs,
	    MVXPE_READ(sc, MVXPE_TXBADFCS));
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txdropped,
	    MVXPE_READ(sc, MVXPE_TXDROPPED));
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_lpic,
	    MVXPE_READ(sc, MVXPE_LPIC));

	mvxpe_sc_unlock(sc);

	callout_schedule(&sc->sc_tick_ch, hz);
}

/*
 * struct ifnet and mii callbacks
 */
STATIC void
mvxpe_start(struct ifnet *ifp)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int q;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
		DPRINTIFNET(ifp, 1, "not running\n");
		return;
	}

	mvxpe_sc_lock(sc);
	if (!MVXPE_IS_LINKUP(sc)) {
		/* If the link is DOWN, we can't start TX */
		DPRINTIFNET(ifp, 1, "link fail\n");
		for (;;) {
			/*
			 * Discard all stale packets;
			 * they may confuse DAD, ARP, or timer-based
			 * protocols.
			 */
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
			m_freem(m);
		}
		mvxpe_sc_unlock(sc);
		return;
	}
	for (;;) {
		/*
		 * Don't use IFQ_POLL(); there is a locking problem
		 * between IFQ_POLL and IFQ_DEQUEUE on an SMP-enabled
		 * networking stack.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		q = mvxpe_tx_queue_select(sc, m);
		if (q < 0)
			break;
		/* mutex is held in mvxpe_tx_queue_select() */

		if (mvxpe_tx_queue(sc, m, q) != 0) {
			DPRINTIFNET(ifp, 1, "cannot add packet to tx ring\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txerr);
			mvxpe_tx_unlockq(sc, q);
			break;
		}
		mvxpe_tx_unlockq(sc, q);
		KASSERT(sc->sc_tx_ring[q].tx_used >= 0);
		KASSERT(sc->sc_tx_ring[q].tx_used <=
		    sc->sc_tx_ring[q].tx_queue_len);
		DPRINTIFNET(ifp, 1, "a packet is added to tx ring\n");
		sc->sc_tx_pending++;
		ifp->if_timer = 1;
		sc->sc_wdogsoft = 1;
		bpf_mtap(ifp, m);
	}
	mvxpe_sc_unlock(sc);

	return;
}
1702
1703 STATIC int
1704 mvxpe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1705 {
1706 struct mvxpe_softc *sc = ifp->if_softc;
1707 struct ifreq *ifr = data;
1708 int error = 0;
1709 int s;
1710
1711 switch (cmd) {
1712 case SIOCGIFMEDIA:
1713 case SIOCSIFMEDIA:
1714 DPRINTIFNET(ifp, 2, "mvxpe_ioctl MEDIA\n");
1715 s = splnet(); /* XXX: is there suitable mutex? */
1716 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1717 splx(s);
1718 break;
1719 default:
1720 DPRINTIFNET(ifp, 2, "mvxpe_ioctl ETHER\n");
1721 error = ether_ioctl(ifp, cmd, data);
1722 if (error == ENETRESET) {
1723 if (ifp->if_flags & IFF_RUNNING) {
1724 mvxpe_sc_lock(sc);
1725 mvxpe_filter_setup(sc);
1726 mvxpe_sc_unlock(sc);
1727 }
1728 error = 0;
1729 }
1730 break;
1731 }
1732
1733 return error;
1734 }
1735
1736 STATIC int
1737 mvxpe_init(struct ifnet *ifp)
1738 {
1739 struct mvxpe_softc *sc = ifp->if_softc;
1740 struct mii_data *mii = &sc->sc_mii;
1741 uint32_t reg;
1742 int q;
1743
1744 mvxpe_sc_lock(sc);
1745
1746 /* Start DMA Engine */
1747 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
1748 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
1749 MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
1750
1751 /* Enable port */
1752 reg = MVXPE_READ(sc, MVXPE_PMACC0);
1753 reg |= MVXPE_PMACC0_PORTEN;
1754 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1755
1756 /* Link up */
1757 mvxpe_linkup(sc);
1758
1759 /* Enable All Queue and interrupt of each Queue */
1760 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1761 mvxpe_rx_lockq(sc, q);
1762 mvxpe_rx_queue_enable(ifp, q);
1763 mvxpe_rx_queue_refill(sc, q);
1764 mvxpe_rx_unlockq(sc, q);
1765
1766 mvxpe_tx_lockq(sc, q);
1767 mvxpe_tx_queue_enable(ifp, q);
1768 mvxpe_tx_unlockq(sc, q);
1769 }
1770
1771 /* Enable interrupt */
1772 mvxpe_enable_intr(sc);
1773
1774 /* Schedule the tick callout */
1775 callout_schedule(&sc->sc_tick_ch, hz);
1776
1777 /* Media check */
1778 mii_mediachg(mii);
1779
1780 ifp->if_flags |= IFF_RUNNING;
1781 ifp->if_flags &= ~IFF_OACTIVE;
1782
1783 mvxpe_sc_unlock(sc);
1784 return 0;
1785 }
1786
1787 /* ARGSUSED */
1788 STATIC void
1789 mvxpe_stop(struct ifnet *ifp, int disable)
1790 {
1791 struct mvxpe_softc *sc = ifp->if_softc;
1792 uint32_t reg;
1793 int q, cnt;
1794
1795 DPRINTIFNET(ifp, 1, "stop device dma and interrupts.\n");
1796
1797 mvxpe_sc_lock(sc);
1798
1799 callout_stop(&sc->sc_tick_ch);
1800
1801 /* Link down */
1802 mvxpe_linkdown(sc);
1803
1804 /* Disable Rx interrupt */
1805 reg = MVXPE_READ(sc, MVXPE_PIE);
1806 reg &= ~MVXPE_PIE_RXPKTINTRPTENB_MASK;
1807 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1808
1809 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1810 reg &= ~MVXPE_PRXTXTI_RBICTAPQ_MASK;
1811 reg &= ~MVXPE_PRXTXTI_RDTAQ_MASK;
1812 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1813
1814 /* Wait for all Rx activity to terminate. */
1815 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
1816 reg = MVXPE_RQC_DIS(reg);
1817 MVXPE_WRITE(sc, MVXPE_RQC, reg);
1818 cnt = 0;
1819 do {
1820 if (cnt >= RX_DISABLE_TIMEOUT) {
1821 aprint_error_ifnet(ifp,
1822 "timed out waiting for RX to stop. rqc 0x%x\n", reg);
1823 break;
1824 }
1825 cnt++;
1826 reg = MVXPE_READ(sc, MVXPE_RQC);
1827 } while (reg & MVXPE_RQC_EN_MASK);
1828
1829 /* Wait for all Tx activity to terminate. */
1830 reg = MVXPE_READ(sc, MVXPE_PIE);
1831 reg &= ~MVXPE_PIE_TXPKTINTRPTENB_MASK;
1832 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1833
1834 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1835 reg &= ~MVXPE_PRXTXTI_TBTCQ_MASK;
1836 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1837
1838 reg = MVXPE_READ(sc, MVXPE_TQC) & MVXPE_TQC_EN_MASK;
1839 reg = MVXPE_TQC_DIS(reg);
1840 MVXPE_WRITE(sc, MVXPE_TQC, reg);
1841 cnt = 0;
1842 do {
1843 if (cnt >= TX_DISABLE_TIMEOUT) {
1844 aprint_error_ifnet(ifp,
1845 "timed out waiting for TX to stop. tqc 0x%x\n", reg);
1846 break;
1847 }
1848 cnt++;
1849 reg = MVXPE_READ(sc, MVXPE_TQC);
1850 } while (reg & MVXPE_TQC_EN_MASK);
1851
1852 /* Wait until the Tx FIFO is empty */
1853 cnt = 0;
1854 do {
1855 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
1856 aprint_error_ifnet(ifp,
1857 "timed out waiting for the TX FIFO to drain. ps0 0x%x\n", reg);
1858 break;
1859 }
1860 cnt++;
1861 reg = MVXPE_READ(sc, MVXPE_PS0);
1862 } while (!(reg & MVXPE_PS0_TXFIFOEMP) && (reg & MVXPE_PS0_TXINPROG));
1863
1864 /* Reset the MAC Port Enable bit */
1865 reg = MVXPE_READ(sc, MVXPE_PMACC0);
1866 reg &= ~MVXPE_PMACC0_PORTEN;
1867 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1868
1869 /* Disable each queue */
1870 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1871 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1872
1873 mvxpe_rx_lockq(sc, q);
1874 mvxpe_tx_lockq(sc, q);
1875
1876 /* Disable Rx packet buffer refill request */
1877 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
1878 reg |= MVXPE_PRXDQTH_NODT(0);
1879 MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);
1880
1881 if (disable) {
1882 /*
1883 * Hold Reset state of DMA Engine
1884 * (must write 0x0 to restart it)
1885 */
1886 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
1887 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);
1888 mvxpe_ring_flush_queue(sc, q);
1889 }
1890
1891 mvxpe_tx_unlockq(sc, q);
1892 mvxpe_rx_unlockq(sc, q);
1893 }
1894
1895 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1896
1897 mvxpe_sc_unlock(sc);
1898 }
1899
1900 STATIC void
1901 mvxpe_watchdog(struct ifnet *ifp)
1902 {
1903 struct mvxpe_softc *sc = ifp->if_softc;
1904 int q;
1905
1906 mvxpe_sc_lock(sc);
1907
1908 /*
1909 * Reclaim first as there is a possibility of losing Tx completion
1910 * interrupts.
1911 */
1912 mvxpe_tx_complete(sc, 0xff);
1913 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1914 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1915
1916 if (tx->tx_dma != tx->tx_cpu) {
1917 if (sc->sc_wdogsoft) {
1918 /*
1919  * There is a race condition between the CPU and the DMA
1920  * engine: when the DMA engine reaches the end of the
1921  * queue, it clears the MVXPE_TQC_ENQ bit.
1922  * XXX: how about enhanced mode?
1923  */
1924 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
1925 ifp->if_timer = 5;
1926 sc->sc_wdogsoft = 0;
1927 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_wdogsoft);
1928 } else {
1929 aprint_error_ifnet(ifp, "watchdog timeout\n");
1930 ifp->if_oerrors++;
1931 mvxpe_linkreset(sc);
1932 mvxpe_sc_unlock(sc);
1933
1934 /* trigger the reinitialization sequence */
1935 mvxpe_stop(ifp, 1);
1936 mvxpe_init(ifp);
1937
1938 mvxpe_sc_lock(sc);
1939 }
1940 }
1941 }
1942 mvxpe_sc_unlock(sc);
1943 }
1944
1945 STATIC int
1946 mvxpe_ifflags_cb(struct ethercom *ec)
1947 {
1948 struct ifnet *ifp = &ec->ec_if;
1949 struct mvxpe_softc *sc = ifp->if_softc;
1950 int change = ifp->if_flags ^ sc->sc_if_flags;
1951
1952 mvxpe_sc_lock(sc);
1953
1954 if (change != 0)
1955 sc->sc_if_flags = ifp->if_flags;
1956
1957 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
1958 mvxpe_sc_unlock(sc);
1959 return ENETRESET;
1960 }
1961
1962 if ((change & IFF_PROMISC) != 0)
1963 mvxpe_filter_setup(sc);
1964
1965 if ((change & IFF_UP) != 0)
1966 mvxpe_linkreset(sc);
1967
1968 mvxpe_sc_unlock(sc);
1969 return 0;
1970 }
1971
1972 STATIC int
1973 mvxpe_mediachange(struct ifnet *ifp)
1974 {
1975 return ether_mediachange(ifp);
1976 }
1977
1978 STATIC void
1979 mvxpe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1980 {
1981 ether_mediastatus(ifp, ifmr);
1982 }
1983
1984 /*
1985 * Link State Notify
1986 */
1987 STATIC void
mvxpe_linkupdate(struct mvxpe_softc *sc)
1988 {
1989 int linkup; /* bool */
1990
1991 KASSERT_SC_MTX(sc);
1992
1993 /* tell miibus */
1994 mii_pollstat(&sc->sc_mii);
1995
1996 /* syslog */
1997 linkup = MVXPE_IS_LINKUP(sc);
1998 if (sc->sc_linkstate == linkup)
1999 return;
2000
2001 #ifdef DEBUG
2002 log(LOG_DEBUG,
2003 "%s: link %s\n", device_xname(sc->sc_dev), linkup ? "up" : "down");
2004 #endif
2005 if (linkup)
2006 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up);
2007 else
2008 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_down);
2009
2010 sc->sc_linkstate = linkup;
2011 }
2012
2013 STATIC void
2014 mvxpe_linkup(struct mvxpe_softc *sc)
2015 {
2016 uint32_t reg;
2017
2018 KASSERT_SC_MTX(sc);
2019
2020 /* set EEE parameters */
2021 reg = MVXPE_READ(sc, MVXPE_LPIC1);
2022 if (sc->sc_cf.cf_lpi)
2023 reg |= MVXPE_LPIC1_LPIRE;
2024 else
2025 reg &= ~MVXPE_LPIC1_LPIRE;
2026 MVXPE_WRITE(sc, MVXPE_LPIC1, reg);
2027
2028 /* set auto-negotiation parameters */
2029 reg = MVXPE_READ(sc, MVXPE_PANC);
2030 if (sc->sc_cf.cf_fc) {
2031 /* flow control negotiation */
2032 reg |= MVXPE_PANC_PAUSEADV;
2033 reg |= MVXPE_PANC_ANFCEN;
2034 }
2035 else {
2036 reg &= ~MVXPE_PANC_PAUSEADV;
2037 reg &= ~MVXPE_PANC_ANFCEN;
2038 }
2039 reg &= ~MVXPE_PANC_FORCELINKFAIL;
2040 reg &= ~MVXPE_PANC_FORCELINKPASS;
2041 MVXPE_WRITE(sc, MVXPE_PANC, reg);
2042
2043 mii_mediachg(&sc->sc_mii);
2044 }
2045
2046 STATIC void
2047 mvxpe_linkdown(struct mvxpe_softc *sc)
2048 {
2049 struct mii_softc *mii;
2050 uint32_t reg;
2051
2052 KASSERT_SC_MTX(sc);
2054
2055 reg = MVXPE_READ(sc, MVXPE_PANC);
2056 reg |= MVXPE_PANC_FORCELINKFAIL;
2057 reg &= ~MVXPE_PANC_FORCELINKPASS;
2058 MVXPE_WRITE(sc, MVXPE_PANC, reg);
2059
2060 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2061 if (mii)
2062 mii_phy_down(mii);
2063 }
2064
2065 STATIC void
2066 mvxpe_linkreset(struct mvxpe_softc *sc)
2067 {
2068 struct mii_softc *mii;
2069
2070 KASSERT_SC_MTX(sc);
2071
2072 /* force reset PHY first */
2073 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2074 if (mii)
2075 mii_phy_reset(mii);
2076
2077 /* reinit MAC and PHY */
2078 mvxpe_linkdown(sc);
2079 if ((sc->sc_if_flags & IFF_UP) != 0)
2080 mvxpe_linkup(sc);
2081 }
2082
2083 /*
2084 * Tx Subroutines
2085 */
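/*
 * Select the Tx queue for an mbuf and return with that queue's lock
 * held; the caller must release it with mvxpe_tx_unlockq().  At
 * present every packet is assigned to queue 0.
 */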
2086 STATIC int
2087 mvxpe_tx_queue_select(struct mvxpe_softc *sc, struct mbuf *m)
2088 {
2089 int q = 0;
2090
2091 /* XXX: get attribute from ALTQ framework? */
2092 mvxpe_tx_lockq(sc, q);
2093 return q;
2094 }
2095
2096 STATIC int
2097 mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q)
2098 {
2099 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2100 bus_dma_segment_t *txsegs;
2101 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2102 struct mvxpe_tx_desc *t = NULL;
2103 uint32_t ptxsu;
2104 int txnsegs;
2105 int start, used;
2106 int i;
2107
2108 KASSERT_TX_MTX(sc, q);
2109 KASSERT(tx->tx_used >= 0);
2110 KASSERT(tx->tx_used <= tx->tx_queue_len);
2111
2112 /* load mbuf using dmamap of 1st descriptor */
2113 if (bus_dmamap_load_mbuf(sc->sc_dmat,
2114 MVXPE_TX_MAP(sc, q, tx->tx_cpu), m, BUS_DMA_NOWAIT) != 0) {
2115 m_freem(m);
2116 return ENOBUFS;
2117 }
2118 txsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_segs;
2119 txnsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_nsegs;
2120 if (txnsegs <= 0 || (txnsegs + tx->tx_used) > tx->tx_queue_len) {
2121 /* not enough descriptors, or the mbuf is broken */
2122 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, tx->tx_cpu));
2123 m_freem(m);
2124 return ENOBUFS;
2125 }
2126 DPRINTSC(sc, 2, "send packet %p descriptor %d\n", m, tx->tx_cpu);
2127 KASSERT(MVXPE_TX_MBUF(sc, q, tx->tx_cpu) == NULL);
2128
2129 /* remember mbuf using 1st descriptor */
2130 MVXPE_TX_MBUF(sc, q, tx->tx_cpu) = m;
2131 bus_dmamap_sync(sc->sc_dmat,
2132 MVXPE_TX_MAP(sc, q, tx->tx_cpu), 0, m->m_pkthdr.len,
2133 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2134
2135 /* load to tx descriptors */
2136 start = tx->tx_cpu;
2137 used = 0;
2138 for (i = 0; i < txnsegs; i++) {
2139 if (__predict_false(txsegs[i].ds_len == 0))
2140 continue;
2141 t = MVXPE_TX_DESC(sc, q, tx->tx_cpu);
2142 t->command = 0;
2143 t->l4ichk = 0;
2144 t->flags = 0;
2145 if (i == 0) {
2146 /* 1st descriptor */
2147 t->command |= MVXPE_TX_CMD_W_PACKET_OFFSET(0);
2148 t->command |= MVXPE_TX_CMD_PADDING;
2149 t->command |= MVXPE_TX_CMD_F;
2150 mvxpe_tx_set_csumflag(ifp, t, m);
2151 }
2152 t->bufptr = txsegs[i].ds_addr;
2153 t->bytecnt = txsegs[i].ds_len;
2154 tx->tx_cpu = tx_counter_adv(tx->tx_cpu, 1);
2155 tx->tx_used++;
2156 used++;
2157 }
2158 /* t is last descriptor here */
2159 KASSERT(t != NULL);
2160 t->command |= MVXPE_TX_CMD_L;
2161
2162 DPRINTSC(sc, 2, "queue %d, %d descriptors used\n", q, used);
2163 #ifdef MVXPE_DEBUG
2164 if (mvxpe_debug > 2)
2165 for (i = start; i != tx->tx_cpu; i = tx_counter_adv(i, 1)) {
2166 t = MVXPE_TX_DESC(sc, q, i);
2167 mvxpe_dump_txdesc(t, i);
2168 }
2169 #endif
2170 mvxpe_ring_sync_tx(sc, q, start, used,
2171 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2172
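	/*
	 * Report the number of newly written descriptors to the
	 * hardware.  The PTXSU "written descriptors" field appears to
	 * be limited to 255 per write, hence the chunked updates below.
	 */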
2173 while (used > 255) {
2174 ptxsu = MVXPE_PTXSU_NOWD(255);
2175 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2176 used -= 255;
2177 }
2178 if (used > 0) {
2179 ptxsu = MVXPE_PTXSU_NOWD(used);
2180 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2181 }
2182 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
2183
2184 DPRINTSC(sc, 2,
2185 "PTXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQA(q)));
2186 DPRINTSC(sc, 2,
2187 "PTXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));
2188 DPRINTSC(sc, 2,
2189 "PTXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXS(q)));
2190 DPRINTSC(sc, 2,
2191 "PTXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PTXDI(q)));
2192 DPRINTSC(sc, 2, "TQC: %#x\n", MVXPE_READ(sc, MVXPE_TQC));
2193 DPRINTIFNET(ifp, 2,
2194 "Tx: tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
2195 tx->tx_cpu, tx->tx_dma, tx->tx_used);
2196 return 0;
2197 }
2198
2199 STATIC void
2200 mvxpe_tx_set_csumflag(struct ifnet *ifp,
2201 struct mvxpe_tx_desc *t, struct mbuf *m)
2202 {
2203 struct ether_header *eh;
2204 int csum_flags;
2205 uint32_t iphl = 0, ipoff = 0;
2206
2208 csum_flags = ifp->if_csum_flags_tx & m->m_pkthdr.csum_flags;
2209
2210 eh = mtod(m, struct ether_header *);
2211 switch (ntohs(eh->ether_type)) {
2212 case ETHERTYPE_IP:
2213 case ETHERTYPE_IPV6:
2214 ipoff = ETHER_HDR_LEN;
2215 break;
2216 case ETHERTYPE_VLAN:
2217 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2218 break;
2219 }
2220
2221 if (csum_flags & (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2222 iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2223 t->command |= MVXPE_TX_CMD_L3_IP4;
2224 }
2225 else if (csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2226 iphl = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data);
2227 t->command |= MVXPE_TX_CMD_L3_IP6;
2228 }
2229 else {
2230 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
2231 return;
2232 }
2233
2235 /* L3 */
2236 if (csum_flags & M_CSUM_IPv4) {
2237 t->command |= MVXPE_TX_CMD_IP4_CHECKSUM;
2238 }
2239
2240 /* L4 */
2241 if ((csum_flags &
2242 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)) == 0) {
2243 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
2244 }
2245 else if (csum_flags & M_CSUM_TCPv4) {
2246 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2247 t->command |= MVXPE_TX_CMD_L4_TCP;
2248 }
2249 else if (csum_flags & M_CSUM_UDPv4) {
2250 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2251 t->command |= MVXPE_TX_CMD_L4_UDP;
2252 }
2253 else if (csum_flags & M_CSUM_TCPv6) {
2254 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2255 t->command |= MVXPE_TX_CMD_L4_TCP;
2256 }
2257 else if (csum_flags & M_CSUM_UDPv6) {
2258 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2259 t->command |= MVXPE_TX_CMD_L4_UDP;
2260 }
2261
2262 t->l4ichk = 0;
2263 t->command |= MVXPE_TX_CMD_IP_HEADER_LEN(iphl >> 2);
2264 t->command |= MVXPE_TX_CMD_L3_OFFSET(ipoff);
2265 }
2266
2267 STATIC void
2268 mvxpe_tx_complete(struct mvxpe_softc *sc, uint32_t queues)
2269 {
2270 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2271 int q;
2272
2273 DPRINTSC(sc, 2, "tx completed.\n");
2274
2275 KASSERT_SC_MTX(sc);
2276
2277 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2278 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2279 continue;
2280 mvxpe_tx_lockq(sc, q);
2281 mvxpe_tx_queue_complete(sc, q);
2282 mvxpe_tx_unlockq(sc, q);
2283 }
2284 KASSERT(sc->sc_tx_pending >= 0);
2285 if (sc->sc_tx_pending == 0)
2286 ifp->if_timer = 0;
2287 }
2288
2289 STATIC void
2290 mvxpe_tx_queue_complete(struct mvxpe_softc *sc, int q)
2291 {
2292 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2293 struct mvxpe_tx_desc *t;
2294 uint32_t ptxs, ptxsu, ndesc;
2295 int i;
2296
2297 KASSERT_TX_MTX(sc, q);
2298
2299 ptxs = MVXPE_READ(sc, MVXPE_PTXS(q));
2300 ndesc = MVXPE_PTXS_GET_TBC(ptxs);
2301 if (ndesc == 0)
2302 return;
2303
2304 DPRINTSC(sc, 2,
2305 "tx complete queue %d, %d descriptors.\n", q, ndesc);
2306
2307 mvxpe_ring_sync_tx(sc, q, tx->tx_dma, ndesc,
2308 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2309
2310 for (i = 0; i < ndesc; i++) {
2311 int error = 0;
2312
2313 t = MVXPE_TX_DESC(sc, q, tx->tx_dma);
2314 if (t->flags & MVXPE_TX_F_ES) {
2315 DPRINTSC(sc, 1,
2316 "tx error queue %d desc %d\n",
2317 q, tx->tx_dma);
2318 switch (t->flags & MVXPE_TX_F_EC_MASK) {
2319 case MVXPE_TX_F_EC_LC:
2320 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_lc);
2321 break;
2322 case MVXPE_TX_F_EC_UR:
2323 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_ur);
2324 break;
2325 case MVXPE_TX_F_EC_RL:
2326 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_rl);
2327 break;
2328 default:
2329 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_oth);
2330 break;
2331 }
2332 error = 1;
2333 }
2334 if (MVXPE_TX_MBUF(sc, q, tx->tx_dma) != NULL) {
2335 KASSERT((t->command & MVXPE_TX_CMD_F) != 0);
2336 bus_dmamap_unload(sc->sc_dmat,
2337 MVXPE_TX_MAP(sc, q, tx->tx_dma));
2338 m_freem(MVXPE_TX_MBUF(sc, q, tx->tx_dma));
2339 MVXPE_TX_MBUF(sc, q, tx->tx_dma) = NULL;
2340 sc->sc_tx_pending--;
2341 }
2342 else
2343 KASSERT((t->command & MVXPE_TX_CMD_F) == 0);
2344 tx->tx_dma = tx_counter_adv(tx->tx_dma, 1);
2345 tx->tx_used--;
2346 if (error)
2347 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txqe[q]);
2348 else
2349 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txq[q]);
2350 }
2351 KASSERT(tx->tx_used >= 0);
2352 KASSERT(tx->tx_used <= tx->tx_queue_len);
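	/* Likewise, PTXSU accepts at most 255 released buffers per write. */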
2353 while (ndesc > 255) {
2354 ptxsu = MVXPE_PTXSU_NORB(255);
2355 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2356 ndesc -= 255;
2357 }
2358 if (ndesc > 0) {
2359 ptxsu = MVXPE_PTXSU_NORB(ndesc);
2360 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2361 }
2362 DPRINTSC(sc, 2,
2363 "Tx complete q %d, tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
2364 q, tx->tx_cpu, tx->tx_dma, tx->tx_used);
2365 }
2366
2367 /*
2368 * Rx Subroutines
2369 */
2370 STATIC void
2371 mvxpe_rx(struct mvxpe_softc *sc, uint32_t queues)
2372 {
2373 int q, npkt;
2374
2375 KASSERT_SC_MTX(sc);
2376
2377 while ((npkt = mvxpe_rx_queue_select(sc, queues, &q)) != 0) {
2378 /* mutex is held by rx_queue_select */
2379 mvxpe_rx_queue(sc, q, npkt);
2380 mvxpe_rx_unlockq(sc, q);
2381 }
2382 }
2383
2384 STATIC void
2385 mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt)
2386 {
2387 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2388 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2389 struct mvxpe_rx_desc *r;
2390 struct mvxpbm_chunk *chunk;
2391 struct mbuf *m;
2392 uint32_t prxsu;
2393 int error = 0;
2394 int i;
2395
2396 KASSERT_RX_MTX(sc, q);
2397
2398 mvxpe_ring_sync_rx(sc, q, rx->rx_dma, npkt,
2399 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2400
2401 for (i = 0; i < npkt; i++) {
2402 /* get descriptor and packet */
2403 chunk = MVXPE_RX_PKTBUF(sc, q, rx->rx_dma);
2404 MVXPE_RX_PKTBUF(sc, q, rx->rx_dma) = NULL;
2405 r = MVXPE_RX_DESC(sc, q, rx->rx_dma);
2406 mvxpbm_dmamap_sync(chunk, r->bytecnt, BUS_DMASYNC_POSTREAD);
2407
2408 /* check errors */
2409 if (r->status & MVXPE_RX_ES) {
2410 switch (r->status & MVXPE_RX_EC_MASK) {
2411 case MVXPE_RX_EC_CE:
2412 DPRINTIFNET(ifp, 1, "CRC error\n");
2413 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_ce);
2414 break;
2415 case MVXPE_RX_EC_OR:
2416 DPRINTIFNET(ifp, 1, "Rx FIFO overrun\n");
2417 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_or);
2418 break;
2419 case MVXPE_RX_EC_MF:
2420 DPRINTIFNET(ifp, 1, "Rx frame too large\n");
2421 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_mf);
2422 break;
2423 case MVXPE_RX_EC_RE:
2424 DPRINTIFNET(ifp, 1, "Rx resource error\n");
2425 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_re);
2426 break;
2427 }
2428 error = 1;
2429 goto rx_done;
2430 }
2431 if (!(r->status & MVXPE_RX_F) || !(r->status & MVXPE_RX_L)) {
2432 DPRINTIFNET(ifp, 1, "scatter buffers not supported\n");
2433 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_scat);
2434 error = 1;
2435 goto rx_done;
2436 }
2437
2438 if (chunk == NULL) {
2439 device_printf(sc->sc_dev,
2440 "got rx interrupt, but no chunk\n");
2441 error = 1;
2442 goto rx_done;
2443 }
2444
2445 /* extract packet buffer */
2446 if (mvxpbm_init_mbuf_hdr(chunk) != 0) {
2447 error = 1;
2448 goto rx_done;
2449 }
2450 m = chunk->m;
2451 m->m_pkthdr.rcvif = ifp;
2452 m->m_pkthdr.len = m->m_len = r->bytecnt - ETHER_CRC_LEN;
2453 m_adj(m, MVXPE_HWHEADER_SIZE); /* strip the HW header */
2454 mvxpe_rx_set_csumflag(ifp, r, m);
2455 ifp->if_ipackets++;
2456 bpf_mtap(ifp, m);
2457 if_percpuq_enqueue(ifp->if_percpuq, m);
2458 chunk = NULL; /* the BM chunk now belongs to the networking stack */
2459 rx_done:
2460 if (chunk) {
2461 /* rx error. just return the chunk to BM. */
2462 mvxpbm_free_chunk(chunk);
2463 }
2464 if (error)
2465 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxqe[q]);
2466 else
2467 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxq[q]);
2468 rx->rx_dma = rx_counter_adv(rx->rx_dma, 1);
2469 }
2470 /* DMA status update */
2471 DPRINTSC(sc, 2, "%d packets received from queue %d\n", npkt, q);
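	/* As with PTXSU, at most 255 processed descriptors per write. */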
2472 while (npkt > 255) {
2473 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2474 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2475 npkt -= 255;
2476 }
2477 if (npkt > 0) {
2478 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(npkt);
2479 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2480 }
2481
2482 DPRINTSC(sc, 2,
2483 "PRXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQA(q)));
2484 DPRINTSC(sc, 2,
2485 "PRXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
2486 DPRINTSC(sc, 2,
2487 "PRXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXS(q)));
2488 DPRINTSC(sc, 2,
2489 "PRXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PRXDI(q)));
2490 DPRINTSC(sc, 2, "RQC: %#x\n", MVXPE_READ(sc, MVXPE_RQC));
2491 DPRINTIFNET(ifp, 2, "Rx: rx_cpu = %d, rx_dma = %d\n",
2492 rx->rx_cpu, rx->rx_dma);
2493 }
2494
2495 STATIC int
2496 mvxpe_rx_queue_select(struct mvxpe_softc *sc, uint32_t queues, int *queue)
2497 {
2498 uint32_t prxs, npkt;
2499 int q;
2500
2501 KASSERT_SC_MTX(sc);
2502 KASSERT(queue != NULL);
2503 DPRINTSC(sc, 2, "selecting rx queue\n");
2504
2505 for (q = MVXPE_QUEUE_SIZE - 1; q >= 0; q--) {
2506 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2507 continue;
2508
2509 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2510 npkt = MVXPE_PRXS_GET_ODC(prxs);
2511 if (npkt == 0)
2512 continue;
2513
2514 DPRINTSC(sc, 2,
2515 "queue %d selected: prxs=%#x, %u packets received.\n",
2516 q, prxs, npkt);
2517 *queue = q;
2518 mvxpe_rx_lockq(sc, q);
2519 return npkt;
2520 }
2521
2522 return 0;
2523 }
2524
2525 STATIC void
2526 mvxpe_rx_refill(struct mvxpe_softc *sc, uint32_t queues)
2527 {
2528 int q;
2529
2530 KASSERT_SC_MTX(sc);
2531
2532 /* XXX: check rx bit array */
2533 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2534 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2535 continue;
2536
2537 mvxpe_rx_lockq(sc, q);
2538 mvxpe_rx_queue_refill(sc, q);
2539 mvxpe_rx_unlockq(sc, q);
2540 }
2541 }
2542
2543 STATIC void
2544 mvxpe_rx_queue_refill(struct mvxpe_softc *sc, int q)
2545 {
2546 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2547 uint32_t prxs, prxsu, ndesc;
2548 int idx, refill = 0;
2549 int npkt;
2550
2551 KASSERT_RX_MTX(sc, q);
2552
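	/*
	 * NODC + ODC is the number of descriptors still posted to the
	 * hardware; refill whatever is missing up to rx_queue_len.
	 */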
2553 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2554 ndesc = MVXPE_PRXS_GET_NODC(prxs) + MVXPE_PRXS_GET_ODC(prxs);
2555 refill = rx->rx_queue_len - ndesc;
2556 if (refill <= 0)
2557 return;
2558 DPRINTPRXS(2, q);
2559 DPRINTSC(sc, 2, "%d buffers to refill.\n", refill);
2560
2561 idx = rx->rx_cpu;
2562 for (npkt = 0; npkt < refill; npkt++)
2563 if (mvxpe_rx_queue_add(sc, q) != 0)
2564 break;
2565 DPRINTSC(sc, 2, "queue %d, %d buffers refilled.\n", q, npkt);
2566 if (npkt == 0)
2567 return;
2568
2569 mvxpe_ring_sync_rx(sc, q, idx, npkt,
2570 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2571
2572 while (npkt > 255) {
2573 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(255);
2574 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2575 npkt -= 255;
2576 }
2577 if (npkt > 0) {
2578 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(npkt);
2579 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2580 }
2581 DPRINTPRXS(2, q);
2582 return;
2583 }
2584
2585 STATIC int
2586 mvxpe_rx_queue_add(struct mvxpe_softc *sc, int q)
2587 {
2588 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2589 struct mvxpe_rx_desc *r;
2590 struct mvxpbm_chunk *chunk = NULL;
2591
2592 KASSERT_RX_MTX(sc, q);
2593
2594 /* Allocate the packet buffer */
2595 chunk = mvxpbm_alloc(sc->sc_bm);
2596 if (chunk == NULL) {
2597 DPRINTSC(sc, 1, "BM chunk allocation failed.\n");
2598 return ENOBUFS;
2599 }
2600
2601 /* Add the packet buffer to the descriptor */
2602 KASSERT(MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) == NULL);
2603 MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) = chunk;
2604 mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
2605
2606 r = MVXPE_RX_DESC(sc, q, rx->rx_cpu);
2607 r->bufptr = chunk->buf_pa;
2608 DPRINTSC(sc, 9, "chunk added to index %d\n", rx->rx_cpu);
2609 rx->rx_cpu = rx_counter_adv(rx->rx_cpu, 1);
2610 return 0;
2611 }
2612
2613 STATIC void
2614 mvxpe_rx_set_csumflag(struct ifnet *ifp,
2615 struct mvxpe_rx_desc *r, struct mbuf *m0)
2616 {
2617 uint32_t csum_flags = 0;
2618
2619 if ((r->status & (MVXPE_RX_IP_HEADER_OK|MVXPE_RX_L3_IP)) == 0)
2620 return; /* not an IP packet */
2621
2622 /* L3 */
2623 if (r->status & MVXPE_RX_L3_IP) {
2624 csum_flags |= M_CSUM_IPv4;
2625 if ((r->status & MVXPE_RX_IP_HEADER_OK) == 0) {
2626 csum_flags |= M_CSUM_IPv4_BAD;
2627 goto finish;
2628 }
2629 else if (r->status & MVXPE_RX_IPV4_FRAGMENT) {
2630 /*
2631  * r->l4chk has the partial checksum of each fragment,
2632  * but there is no way to use it in NetBSD.
2633 */
2634 return;
2635 }
2636 }
2637
2638 /* L4 */
2639 switch (r->status & MVXPE_RX_L4_MASK) {
2640 case MVXPE_RX_L4_TCP:
2641 if (r->status & MVXPE_RX_L3_IP)
2642 csum_flags |= M_CSUM_TCPv4;
2643 else
2644 csum_flags |= M_CSUM_TCPv6;
2645 if ((r->status & MVXPE_RX_L4_CHECKSUM_OK) == 0)
2646 csum_flags |= M_CSUM_TCP_UDP_BAD;
2647 break;
2648 case MVXPE_RX_L4_UDP:
2649 if (r->status & MVXPE_RX_L3_IP)
2650 csum_flags |= M_CSUM_UDPv4;
2651 else
2652 csum_flags |= M_CSUM_UDPv6;
2653 if ((r->status & MVXPE_RX_L4_CHECKSUM_OK) == 0)
2654 csum_flags |= M_CSUM_TCP_UDP_BAD;
2655 break;
2656 case MVXPE_RX_L4_OTH:
2657 default:
2658 break;
2659 }
2660 finish:
2661 m0->m_pkthdr.csum_flags |= (csum_flags & ifp->if_csum_flags_rx);
2662 }
2663
2664 /*
2665 * MAC address filter
2666 */
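/*
 * CRC-8 with polynomial 0x07 (x^8 + x^2 + x + 1), MSB first, initial
 * value 0.  The result indexes the "other multicast" filter table.
 */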
2667 STATIC uint8_t
2668 mvxpe_crc8(const uint8_t *data, size_t size)
2669 {
2670 int bit;
2671 uint8_t byte;
2672 uint8_t crc = 0;
2673 const uint8_t poly = 0x07;
2674
2675 while (size--)
2676 for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
2677 crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
2678
2679 return crc;
2680 }
2681
2682 CTASSERT(MVXPE_NDFSMT == MVXPE_NDFOMT);
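/*
 * The assertion above allows the allmulti path in mvxpe_filter_setup()
 * to fill both multicast tables with a single loop bound.
 */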
2683
2684 STATIC void
2685 mvxpe_filter_setup(struct mvxpe_softc *sc)
2686 {
2687 struct ethercom *ec = &sc->sc_ethercom;
2688 struct ifnet *ifp= &sc->sc_ethercom.ec_if;
2689 struct ether_multi *enm;
2690 struct ether_multistep step;
2691 uint32_t dfut[MVXPE_NDFUT], dfsmt[MVXPE_NDFSMT], dfomt[MVXPE_NDFOMT];
2692 uint32_t pxc;
2693 int i;
2694 const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};
2695
2696 KASSERT_SC_MTX(sc);
2697
2698 memset(dfut, 0, sizeof(dfut));
2699 memset(dfsmt, 0, sizeof(dfsmt));
2700 memset(dfomt, 0, sizeof(dfomt));
2701
2702 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
2703 goto allmulti;
2704 }
2705
2706 ETHER_FIRST_MULTI(step, ec, enm);
2707 while (enm != NULL) {
2708 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2709 /* ranges are complex and somewhat rare */
2710 goto allmulti;
2711 }
2712 /* chip handles some IPv4 multicast specially */
2713 if (memcmp(enm->enm_addrlo, special, 5) == 0) {
2714 i = enm->enm_addrlo[5];
2715 dfsmt[i>>2] |=
2716 MVXPE_DF(i&3, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS);
2717 } else {
2718 i = mvxpe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
2719 dfomt[i>>2] |=
2720 MVXPE_DF(i&3, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS);
2721 }
2722
2723 ETHER_NEXT_MULTI(step, enm);
2724 }
2725 goto set;
2726
2727 allmulti:
2728 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
2729 for (i = 0; i < MVXPE_NDFSMT; i++) {
2730 dfsmt[i] = dfomt[i] =
2731 MVXPE_DF(0, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS) |
2732 MVXPE_DF(1, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS) |
2733 MVXPE_DF(2, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS) |
2734 MVXPE_DF(3, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS);
2735 }
2736 }
2737
2738 set:
2739 pxc = MVXPE_READ(sc, MVXPE_PXC);
2740 pxc &= ~MVXPE_PXC_UPM;
2741 pxc |= MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP;
2742 if (ifp->if_flags & IFF_BROADCAST) {
2743 pxc &= ~(MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP);
2744 }
2745 if (ifp->if_flags & IFF_PROMISC) {
2746 pxc |= MVXPE_PXC_UPM;
2747 }
2748 MVXPE_WRITE(sc, MVXPE_PXC, pxc);
2749
2750 /* Set Destination Address Filter Unicast Table */
2751 i = sc->sc_enaddr[5] & 0xf; /* last nibble */
2752 dfut[i>>2] = MVXPE_DF(i&3, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS);
2753 MVXPE_WRITE_REGION(sc, MVXPE_DFUT(0), dfut, MVXPE_NDFUT);
2754
2755 /* Set Destination Address Filter Multicast Tables */
2756 MVXPE_WRITE_REGION(sc, MVXPE_DFSMT(0), dfsmt, MVXPE_NDFSMT);
2757 MVXPE_WRITE_REGION(sc, MVXPE_DFOMT(0), dfomt, MVXPE_NDFOMT);
2758 }
2759
2760 /*
2761 * sysctl(9)
2762 */
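/*
 * Example (hypothetical unit 0):
 *	sysctl -w hw.mvxpe.mvxpe0.rx.queue0.length=128
 * adjusts the length of Rx queue 0; see sysctl_mvxpe_init() below for
 * the full node layout.
 */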
2763 SYSCTL_SETUP(sysctl_mvxpe, "sysctl mvxpe subtree setup")
2764 {
2765 int rc;
2766 const struct sysctlnode *node;
2767
2768 if ((rc = sysctl_createv(clog, 0, NULL, &node,
2769 0, CTLTYPE_NODE, "mvxpe",
2770 SYSCTL_DESCR("mvxpe interface controls"),
2771 NULL, 0, NULL, 0,
2772 CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
2773 goto err;
2774 }
2775
2776 mvxpe_root_num = node->sysctl_num;
2777 return;
2778
2779 err:
2780 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
2781 }
2782
2783 STATIC int
2784 sysctl_read_mib(SYSCTLFN_ARGS)
2785 {
2786 struct mvxpe_sysctl_mib *arg;
2787 struct mvxpe_softc *sc;
2788 struct sysctlnode node;
2789 uint64_t val;
2790 int err;
2791
2792 node = *rnode;
2793 arg = (struct mvxpe_sysctl_mib *)rnode->sysctl_data;
2794 if (arg == NULL)
2795 return EINVAL;
2796
2797 sc = arg->sc;
2798 if (sc == NULL)
2799 return EINVAL;
2800 if (arg->index < 0 || arg->index >= __arraycount(mvxpe_mib_list))
2801 return EINVAL;
2802
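	/* Return the soft counter accumulated by mvxpe_update_mib(). */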
2803 mvxpe_sc_lock(sc);
2804 val = arg->counter;
2805 mvxpe_sc_unlock(sc);
2806
2807 node.sysctl_data = &val;
2808 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2809 if (err)
2810 return err;
2811 if (newp)
2812 return EINVAL;
2813
2814 return 0;
2815 }
2816
2817
2818 STATIC int
2819 sysctl_clear_mib(SYSCTLFN_ARGS)
2820 {
2821 struct mvxpe_softc *sc;
2822 struct sysctlnode node;
2823 int val;
2824 int err;
2825
2826 node = *rnode;
2827 sc = (struct mvxpe_softc *)rnode->sysctl_data;
2828 if (sc == NULL)
2829 return EINVAL;
2830
2831 val = 0;
2832 node.sysctl_data = &val;
2833 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2834 if (err || newp == NULL)
2835 return err;
2836 if (val < 0 || val > 1)
2837 return EINVAL;
2838 if (val == 1) {
2839 mvxpe_sc_lock(sc);
2840 mvxpe_clear_mib(sc);
2841 mvxpe_sc_unlock(sc);
2842 }
2843
2844 return 0;
2845 }
2846
2847 STATIC int
2848 sysctl_set_queue_length(SYSCTLFN_ARGS)
2849 {
2850 struct mvxpe_sysctl_queue *arg;
2851 struct mvxpe_rx_ring *rx = NULL;
2852 struct mvxpe_tx_ring *tx = NULL;
2853 struct mvxpe_softc *sc;
2854 struct sysctlnode node;
2855 uint32_t reg;
2856 int val;
2857 int err;
2858
2859 node = *rnode;
2860
2861 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
2862 if (arg == NULL)
2863 return EINVAL;
2864 if (arg->queue < 0 || arg->queue >= MVXPE_QUEUE_SIZE)
2865 return EINVAL;
2866 if (arg->rxtx != MVXPE_SYSCTL_RX && arg->rxtx != MVXPE_SYSCTL_TX)
2867 return EINVAL;
2868
2869 sc = arg->sc;
2870 if (sc == NULL)
2871 return EINVAL;
2872
2873 /* read queue length */
2874 mvxpe_sc_lock(sc);
2875 switch (arg->rxtx) {
2876 case MVXPE_SYSCTL_RX:
2877 mvxpe_rx_lockq(sc, arg->queue);
2878 rx = MVXPE_RX_RING(sc, arg->queue);
2879 val = rx->rx_queue_len;
2880 mvxpe_rx_unlockq(sc, arg->queue);
2881 break;
2882 case MVXPE_SYSCTL_TX:
2883 mvxpe_tx_lockq(sc, arg->queue);
2884 tx = MVXPE_TX_RING(sc, arg->queue);
2885 val = tx->tx_queue_len;
2886 mvxpe_tx_unlockq(sc, arg->queue);
2887 break;
2888 }
2889
2890 node.sysctl_data = &val;
2891 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2892 if (err || newp == NULL) {
2893 mvxpe_sc_unlock(sc);
2894 return err;
2895 }
2896
2897 /* update queue length */
2898 if (val < 8 || val > MVXPE_RX_RING_CNT) {
2899 mvxpe_sc_unlock(sc);
2900 return EINVAL;
2901 }
2902 switch (arg->rxtx) {
2903 case MVXPE_SYSCTL_RX:
2904 mvxpe_rx_lockq(sc, arg->queue);
2905 rx->rx_queue_len = val;
2906 rx->rx_queue_th_received =
2907 rx->rx_queue_len / MVXPE_RXTH_RATIO;
2908 rx->rx_queue_th_free =
2909 rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
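		/*
		 * For example, assuming MVXPE_RXTH_RATIO is 8, a queue
		 * length of 256 raises the receive interrupt once 32
		 * packets are outstanding.
		 */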
2910
2911 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
2912 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
2913 MVXPE_WRITE(sc, MVXPE_PRXDQTH(arg->queue), reg);
2914
2915 mvxpe_rx_unlockq(sc, arg->queue);
2916 break;
2917 case MVXPE_SYSCTL_TX:
2918 mvxpe_tx_lockq(sc, arg->queue);
2919 tx->tx_queue_len = val;
2920 tx->tx_queue_th_free =
2921 tx->tx_queue_len / MVXPE_TXTH_RATIO;
2922
2923 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
2924 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
2925 MVXPE_WRITE(sc, MVXPE_PTXDQS(arg->queue), reg);
2926
2927 mvxpe_tx_unlockq(sc, arg->queue);
2928 break;
2929 }
2930 mvxpe_sc_unlock(sc);
2931
2932 return 0;
2933 }
2934
2935 STATIC int
2936 sysctl_set_queue_rxthtime(SYSCTLFN_ARGS)
2937 {
2938 struct mvxpe_sysctl_queue *arg;
2939 struct mvxpe_rx_ring *rx = NULL;
2940 struct mvxpe_softc *sc;
2941 struct sysctlnode node;
2942 extern uint32_t mvTclk;
2943 uint32_t reg, time_mvtclk;
2944 int time_us;
2945 int err;
2946
2947 node = *rnode;
2948
2949 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
2950 if (arg == NULL)
2951 return EINVAL;
2952 if (arg->queue < 0 || arg->queue >= MVXPE_QUEUE_SIZE)
2953 return EINVAL;
2954 if (arg->rxtx != MVXPE_SYSCTL_RX)
2955 return EINVAL;
2956
2957 sc = arg->sc;
2958 if (sc == NULL)
2959 return EINVAL;
2960
2961 /* read the current threshold time */
2962 mvxpe_sc_lock(sc);
2963 mvxpe_rx_lockq(sc, arg->queue);
2964 rx = MVXPE_RX_RING(sc, arg->queue);
2965 time_mvtclk = rx->rx_queue_th_time;
2966 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvTclk;
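	/*
	 * For example, assuming mvTclk is 250000000 (250 MHz), a
	 * threshold of 2500 tclk ticks reads back here as 10 us.
	 */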
2967 node.sysctl_data = &time_us;
2968 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n",
2969 arg->queue, MVXPE_READ(sc, MVXPE_PRXITTH(arg->queue)));
2970 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2971 if (err || newp == NULL) {
2972 mvxpe_rx_unlockq(sc, arg->queue);
2973 mvxpe_sc_unlock(sc);
2974 return err;
2975 }
2976
2977 /* update the threshold time (0 - 1 sec) */
2978 if (time_us < 0 || time_us > (1000 * 1000)) {
2979 mvxpe_rx_unlockq(sc, arg->queue);
2980 mvxpe_sc_unlock(sc);
2981 return EINVAL;
2982 }
2983 time_mvtclk =
2984 (uint64_t)mvTclk * (uint64_t)time_us / (1000ULL * 1000ULL);
2985 rx->rx_queue_th_time = time_mvtclk;
2986 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
2987 MVXPE_WRITE(sc, MVXPE_PRXITTH(arg->queue), reg);
2988 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n", arg->queue, reg);
2989 mvxpe_rx_unlockq(sc, arg->queue);
2990 mvxpe_sc_unlock(sc);
2991
2992 return 0;
2993 }
2994
2995
2996 STATIC void
2997 sysctl_mvxpe_init(struct mvxpe_softc *sc)
2998 {
2999 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3000 const struct sysctlnode *node;
3001 int mvxpe_nodenum;
3002 int mvxpe_mibnum;
3003 int mvxpe_rxqueuenum;
3004 int mvxpe_txqueuenum;
3005 int q, i;
3006
3007 /* hw.mvxpe.mvxpe[unit] */
3008 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3009 0, CTLTYPE_NODE, ifp->if_xname,
3010 SYSCTL_DESCR("mvxpe per-controller controls"),
3011 NULL, 0, NULL, 0,
3012 CTL_HW, mvxpe_root_num, CTL_CREATE,
3013 CTL_EOL) != 0) {
3014 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3015 return;
3016 }
3017 mvxpe_nodenum = node->sysctl_num;
3018
3019 /* hw.mvxpe.mvxpe[unit].mib */
3020 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3021 0, CTLTYPE_NODE, "mib",
3022 SYSCTL_DESCR("mvxpe per-controller MIB counters"),
3023 NULL, 0, NULL, 0,
3024 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3025 CTL_EOL) != 0) {
3026 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3027 return;
3028 }
3029 mvxpe_mibnum = node->sysctl_num;
3030
3031 /* hw.mvxpe.mvxpe[unit].rx */
3032 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3033 0, CTLTYPE_NODE, "rx",
3034 SYSCTL_DESCR("Rx Queues"),
3035 NULL, 0, NULL, 0,
3036 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3037 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3038 return;
3039 }
3040 mvxpe_rxqueuenum = node->sysctl_num;
3041
3042 /* hw.mvxpe.mvxpe[unit].tx */
3043 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3044 0, CTLTYPE_NODE, "tx",
3045 SYSCTL_DESCR("Tx Queues"),
3046 NULL, 0, NULL, 0,
3047 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3048 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3049 return;
3050 }
3051 mvxpe_txqueuenum = node->sysctl_num;
3052
3053 #ifdef MVXPE_DEBUG
3054 /* hw.mvxpe.debug */
3055 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3056 CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
3057 SYSCTL_DESCR("mvxpe device driver debug control"),
3058 NULL, 0, &mvxpe_debug, 0,
3059 CTL_HW, mvxpe_root_num, CTL_CREATE, CTL_EOL) != 0) {
3060 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3061 return;
3062 }
3063 #endif
3064 /*
3065 * MIB access
3066 */
3067 /* hw.mvxpe.mvxpe[unit].mib.<mibs> */
3068 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3069 const char *name = mvxpe_mib_list[i].sysctl_name;
3070 const char *desc = mvxpe_mib_list[i].desc;
3071 struct mvxpe_sysctl_mib *mib_arg = &sc->sc_sysctl_mib[i];
3072
3073 mib_arg->sc = sc;
3074 mib_arg->index = i;
3075 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3076 CTLFLAG_READONLY, CTLTYPE_QUAD, name, desc,
3077 sysctl_read_mib, 0, (void *)mib_arg, 0,
3078 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_mibnum,
3079 CTL_CREATE, CTL_EOL) != 0) {
3080 aprint_normal_dev(sc->sc_dev,
3081 "couldn't create sysctl node\n");
3082 break;
3083 }
3084 }
3085
3086 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
3087 struct mvxpe_sysctl_queue *rxarg = &sc->sc_sysctl_rx_queue[q];
3088 struct mvxpe_sysctl_queue *txarg = &sc->sc_sysctl_tx_queue[q];
3089 #define MVXPE_SYSCTL_NAME(num) "queue" # num
3090 static const char *sysctl_queue_names[] = {
3091 MVXPE_SYSCTL_NAME(0), MVXPE_SYSCTL_NAME(1),
3092 MVXPE_SYSCTL_NAME(2), MVXPE_SYSCTL_NAME(3),
3093 MVXPE_SYSCTL_NAME(4), MVXPE_SYSCTL_NAME(5),
3094 MVXPE_SYSCTL_NAME(6), MVXPE_SYSCTL_NAME(7),
3095 };
3096 #undef MVXPE_SYSCTL_NAME
3097 #ifdef SYSCTL_INCLUDE_DESCR
3098 #define MVXPE_SYSCTL_DESCR(num) "configuration parameters for queue " # num
3099 static const char *sysctl_queue_descrs[] = {
3100 MVXPE_SYSCTL_DESCR(0), MVXPE_SYSCTL_DESCR(1),
3101 MVXPE_SYSCTL_DESCR(2), MVXPE_SYSCTL_DESCR(3),
3102 MVXPE_SYSCTL_DESCR(4), MVXPE_SYSCTL_DESCR(5),
3103 MVXPE_SYSCTL_DESCR(6), MVXPE_SYSCTL_DESCR(7),
3104 };
3105 #undef MVXPE_SYSCTL_DESCR
3106 #endif /* SYSCTL_INCLUDE_DESCR */
3107 int mvxpe_curnum;
3108
3109 rxarg->sc = txarg->sc = sc;
3110 rxarg->queue = txarg->queue = q;
3111 rxarg->rxtx = MVXPE_SYSCTL_RX;
3112 txarg->rxtx = MVXPE_SYSCTL_TX;
3113
3114 /* hw.mvxpe.mvxpe[unit].rx.[queue] */
3115 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3116 0, CTLTYPE_NODE,
3117 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3118 NULL, 0, NULL, 0,
3119 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3120 CTL_CREATE, CTL_EOL) != 0) {
3121 aprint_normal_dev(sc->sc_dev,
3122 "couldn't create sysctl node\n");
3123 break;
3124 }
3125 mvxpe_curnum = node->sysctl_num;
3126
3127 /* hw.mvxpe.mvxpe[unit].rx.[queue].length */
3128 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3129 CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3130 SYSCTL_DESCR("maximum length of the queue"),
3131 sysctl_set_queue_length, 0, (void *)rxarg, 0,
3132 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3133 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3134 aprint_normal_dev(sc->sc_dev,
3135 "couldn't create sysctl node\n");
3136 break;
3137 }
3138
3139 /* hw.mvxpe.mvxpe[unit].rx.[queue].threshold_timer_us */
3140 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3141 CTLFLAG_READWRITE, CTLTYPE_INT, "threshold_timer_us",
3142 SYSCTL_DESCR("interrupt coalescing threshold timer [us]"),
3143 sysctl_set_queue_rxthtime, 0, (void *)rxarg, 0,
3144 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3145 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3146 aprint_normal_dev(sc->sc_dev,
3147 "couldn't create sysctl node\n");
3148 break;
3149 }
3150
3151 /* hw.mvxpe.mvxpe[unit].tx.[queue] */
3152 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3153 0, CTLTYPE_NODE,
3154 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3155 NULL, 0, NULL, 0,
3156 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3157 CTL_CREATE, CTL_EOL) != 0) {
3158 aprint_normal_dev(sc->sc_dev,
3159 "couldn't create sysctl node\n");
3160 break;
3161 }
3162 mvxpe_curnum = node->sysctl_num;
3163
3164 /* hw.mvxpe.mvxpe[unit].tx.[queue].length */
3165 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3166 CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3167 SYSCTL_DESCR("maximum length of the queue"),
3168 sysctl_set_queue_length, 0, (void *)txarg, 0,
3169 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3170 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3171 aprint_normal_dev(sc->sc_dev,
3172 "couldn't create sysctl node\n");
3173 break;
3174 }
3175 }
3176
3177 /* hw.mvxpe.mvxpe[unit].clear_mib */
3178 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3179 CTLFLAG_READWRITE, CTLTYPE_INT, "clear_mib",
3180 SYSCTL_DESCR("clear MIB counters"),
3181 sysctl_clear_mib, 0, (void *)sc, 0,
3182 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3183 CTL_EOL) != 0) {
3184 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3185 return;
3186 }
3188 }
3189
3190 /*
3191 * MIB
3192 */
3193 STATIC void
3194 mvxpe_clear_mib(struct mvxpe_softc *sc)
3195 {
3196 int i;
3197
3198 KASSERT_SC_MTX(sc);
3199
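	/*
	 * The MIB counters are assumed to clear on read, so reading
	 * and discarding each register is sufficient to zero it.
	 */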
3200 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3201 if (mvxpe_mib_list[i].reg64)
3202 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum + 4));
3203 MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3204 sc->sc_sysctl_mib[i].counter = 0;
3205 }
3206 }
3207
3208 STATIC void
3209 mvxpe_update_mib(struct mvxpe_softc *sc)
3210 {
3211 int i;
3212
3213 KASSERT_SC_MTX(sc);
3214
3215 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3216 uint32_t val_hi;
3217 uint32_t val_lo;
3218
3219 if (mvxpe_mib_list[i].reg64) {
3220 /* XXX: implement bus_space_read_8() */
3221 val_lo = MVXPE_READ_MIB(sc,
3222 (mvxpe_mib_list[i].regnum + 4));
3223 val_hi = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3224 }
3225 else {
3226 val_lo = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3227 val_hi = 0;
3228 }
3229
3230 if ((val_lo | val_hi) == 0)
3231 continue;
3232
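		/* fold the value read (and presumably cleared) above into the soft counter */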
3233 sc->sc_sysctl_mib[i].counter +=
3234 ((uint64_t)val_hi << 32) | (uint64_t)val_lo;
3235 }
3236 }
3237
3238 /*
3239 * for Debug
3240 */
3241 STATIC void
3242 mvxpe_dump_txdesc(struct mvxpe_tx_desc *desc, int idx)
3243 {
3244 #define DESC_PRINT(X) \
3245 if (X) \
3246 printf("txdesc[%d]." #X "=%#x\n", idx, X);
3247
3248 DESC_PRINT(desc->command);
3249 DESC_PRINT(desc->l4ichk);
3250 DESC_PRINT(desc->bytecnt);
3251 DESC_PRINT(desc->bufptr);
3252 DESC_PRINT(desc->flags);
3253 #undef DESC_PRINT
3254 }
3255
3256 STATIC void
3257 mvxpe_dump_rxdesc(struct mvxpe_rx_desc *desc, int idx)
3258 {
3259 #define DESC_PRINT(X) \
3260 if (X) \
3261 printf("rxdesc[%d]." #X "=%#x\n", idx, X);
3262
3263 DESC_PRINT(desc->status);
3264 DESC_PRINT(desc->bytecnt);
3265 DESC_PRINT(desc->bufptr);
3266 DESC_PRINT(desc->l4chk);
3267 #undef DESC_PRINT
3268 }
3269