/*	$NetBSD: if_mvxpe.c,v 1.3 2016/02/09 08:32:11 ozaki-r Exp $	*/
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mvxpe.c,v 1.3 2016/02/09 08:32:11 ozaki-r Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>
#include <dev/marvell/mvxpbmvar.h>
#include <dev/marvell/if_mvxpereg.h>
#include <dev/marvell/if_mvxpevar.h>

#include "locators.h"

#if BYTE_ORDER == BIG_ENDIAN
#error "BIG ENDIAN not supported"
#endif

#ifdef MVXPE_DEBUG
#define STATIC /* nothing */
#else
#define STATIC static
#endif

/* autoconf(9) */
STATIC int mvxpe_match(device_t, struct cfdata *, void *);
STATIC void mvxpe_attach(device_t, device_t, void *);
STATIC int mvxpe_evcnt_attach(struct mvxpe_softc *);
CFATTACH_DECL_NEW(mvxpe_mbus, sizeof(struct mvxpe_softc),
    mvxpe_match, mvxpe_attach, NULL, NULL);
STATIC void mvxpe_sc_lock(struct mvxpe_softc *);
STATIC void mvxpe_sc_unlock(struct mvxpe_softc *);

/* MII */
STATIC int mvxpe_miibus_readreg(device_t, int, int);
STATIC void mvxpe_miibus_writereg(device_t, int, int, int);
STATIC void mvxpe_miibus_statchg(struct ifnet *);

/* Address Decoding Window */
STATIC void mvxpe_wininit(struct mvxpe_softc *, enum marvell_tags *);

/* Device Register Initialization */
STATIC int mvxpe_initreg(struct ifnet *);

/* Descriptor Ring Control for each queue */
STATIC void *mvxpe_dma_memalloc(struct mvxpe_softc *, bus_dmamap_t *, size_t);
STATIC int mvxpe_ring_alloc_queue(struct mvxpe_softc *, int);
STATIC void mvxpe_ring_dealloc_queue(struct mvxpe_softc *, int);
STATIC void mvxpe_ring_init_queue(struct mvxpe_softc *, int);
STATIC void mvxpe_ring_flush_queue(struct mvxpe_softc *, int);
STATIC void mvxpe_ring_sync_rx(struct mvxpe_softc *, int, int, int, int);
STATIC void mvxpe_ring_sync_tx(struct mvxpe_softc *, int, int, int, int);

/* Rx/Tx Queue Control */
STATIC int mvxpe_rx_queue_init(struct ifnet *, int);
STATIC int mvxpe_tx_queue_init(struct ifnet *, int);
STATIC int mvxpe_rx_queue_enable(struct ifnet *, int);
STATIC int mvxpe_tx_queue_enable(struct ifnet *, int);
STATIC void mvxpe_rx_lockq(struct mvxpe_softc *, int);
STATIC void mvxpe_rx_unlockq(struct mvxpe_softc *, int);
STATIC void mvxpe_tx_lockq(struct mvxpe_softc *, int);
STATIC void mvxpe_tx_unlockq(struct mvxpe_softc *, int);

/* Interrupt Handlers */
STATIC void mvxpe_disable_intr(struct mvxpe_softc *);
STATIC void mvxpe_enable_intr(struct mvxpe_softc *);
STATIC int mvxpe_rxtxth_intr(void *);
STATIC int mvxpe_misc_intr(void *);
STATIC int mvxpe_rxtx_intr(void *);
STATIC void mvxpe_tick(void *);

/* struct ifnet and mii callbacks */
STATIC void mvxpe_start(struct ifnet *);
STATIC int mvxpe_ioctl(struct ifnet *, u_long, void *);
STATIC int mvxpe_init(struct ifnet *);
STATIC void mvxpe_stop(struct ifnet *, int);
STATIC void mvxpe_watchdog(struct ifnet *);
STATIC int mvxpe_ifflags_cb(struct ethercom *);
STATIC int mvxpe_mediachange(struct ifnet *);
STATIC void mvxpe_mediastatus(struct ifnet *, struct ifmediareq *);

/* Link State Notify */
STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc);
STATIC void mvxpe_linkup(struct mvxpe_softc *);
STATIC void mvxpe_linkdown(struct mvxpe_softc *);
STATIC void mvxpe_linkreset(struct mvxpe_softc *);

/* Tx Subroutines */
STATIC int mvxpe_tx_queue_select(struct mvxpe_softc *, struct mbuf *);
STATIC int mvxpe_tx_queue(struct mvxpe_softc *, struct mbuf *, int);
STATIC void mvxpe_tx_set_csumflag(struct ifnet *,
    struct mvxpe_tx_desc *, struct mbuf *);
STATIC void mvxpe_tx_complete(struct mvxpe_softc *, uint32_t);
STATIC void mvxpe_tx_queue_complete(struct mvxpe_softc *, int);

/* Rx Subroutines */
STATIC void mvxpe_rx(struct mvxpe_softc *, uint32_t);
STATIC void mvxpe_rx_queue(struct mvxpe_softc *, int, int);
STATIC int mvxpe_rx_queue_select(struct mvxpe_softc *, uint32_t, int *);
STATIC void mvxpe_rx_refill(struct mvxpe_softc *, uint32_t);
STATIC void mvxpe_rx_queue_refill(struct mvxpe_softc *, int);
STATIC int mvxpe_rx_queue_add(struct mvxpe_softc *, int);
STATIC void mvxpe_rx_set_csumflag(struct ifnet *,
    struct mvxpe_rx_desc *, struct mbuf *);

/* MAC address filter */
STATIC uint8_t mvxpe_crc8(const uint8_t *, size_t);
STATIC void mvxpe_filter_setup(struct mvxpe_softc *);

/* sysctl(9) */
STATIC int sysctl_read_mib(SYSCTLFN_PROTO);
STATIC int sysctl_clear_mib(SYSCTLFN_PROTO);
STATIC int sysctl_set_queue_length(SYSCTLFN_PROTO);
STATIC int sysctl_set_queue_rxthtime(SYSCTLFN_PROTO);
STATIC void sysctl_mvxpe_init(struct mvxpe_softc *);

/* MIB */
STATIC void mvxpe_clear_mib(struct mvxpe_softc *);
STATIC void mvxpe_update_mib(struct mvxpe_softc *);

/* for Debug */
STATIC void mvxpe_dump_txdesc(struct mvxpe_tx_desc *, int) __attribute__((__unused__));
STATIC void mvxpe_dump_rxdesc(struct mvxpe_rx_desc *, int) __attribute__((__unused__));

STATIC int mvxpe_root_num;
STATIC kmutex_t mii_mutex;
STATIC int mii_init = 0;
#ifdef MVXPE_DEBUG
STATIC int mvxpe_debug = MVXPE_DEBUG;
#endif

/*
 * List of MIB registers and names
 */
STATIC struct mvxpe_mib_def {
	uint32_t regnum;
	int reg64;
	const char *sysctl_name;
	const char *desc;
} mvxpe_mib_list[] = {
	{MVXPE_MIB_RX_GOOD_OCT, 1, "rx_good_oct",
	    "Good Octets Rx"},
	{MVXPE_MIB_RX_BAD_OCT, 0, "rx_bad_oct",
	    "Bad Octets Rx"},
	{MVXPE_MIB_RX_MAC_TRNS_ERR, 0, "rx_mac_err",
	    "MAC Transmit Error"},
	{MVXPE_MIB_RX_GOOD_FRAME, 0, "rx_good_frame",
	    "Good Frames Rx"},
	{MVXPE_MIB_RX_BAD_FRAME, 0, "rx_bad_frame",
	    "Bad Frames Rx"},
	{MVXPE_MIB_RX_BCAST_FRAME, 0, "rx_bcast_frame",
	    "Broadcast Frames Rx"},
	{MVXPE_MIB_RX_MCAST_FRAME, 0, "rx_mcast_frame",
	    "Multicast Frames Rx"},
	{MVXPE_MIB_RX_FRAME64_OCT, 0, "rx_frame_1_64",
	    "Frame Size 1 - 64"},
	{MVXPE_MIB_RX_FRAME127_OCT, 0, "rx_frame_65_127",
	    "Frame Size 65 - 127"},
	{MVXPE_MIB_RX_FRAME255_OCT, 0, "rx_frame_128_255",
	    "Frame Size 128 - 255"},
	{MVXPE_MIB_RX_FRAME511_OCT, 0, "rx_frame_256_511",
	    "Frame Size 256 - 511"},
	{MVXPE_MIB_RX_FRAME1023_OCT, 0, "rx_frame_512_1023",
	    "Frame Size 512 - 1023"},
	{MVXPE_MIB_RX_FRAMEMAX_OCT, 0, "rx_frame_1024_max",
	    "Frame Size 1024 - Max"},
	{MVXPE_MIB_TX_GOOD_OCT, 1, "tx_good_oct",
	    "Good Octets Tx"},
	{MVXPE_MIB_TX_GOOD_FRAME, 0, "tx_good_frame",
	    "Good Frames Tx"},
	{MVXPE_MIB_TX_EXCES_COL, 0, "tx_exces_collision",
	    "Excessive Collision"},
	{MVXPE_MIB_TX_MCAST_FRAME, 0, "tx_mcast_frame",
	    "Multicast Frames Tx"},
	{MVXPE_MIB_TX_BCAST_FRAME, 0, "tx_bcast_frame",
	    "Broadcast Frames Tx"},
	{MVXPE_MIB_TX_MAC_CTL_ERR, 0, "tx_mac_err",
	    "Unknown MAC Control"},
	{MVXPE_MIB_FC_SENT, 0, "fc_tx",
	    "Flow Control Tx"},
	{MVXPE_MIB_FC_GOOD, 0, "fc_rx_good",
	    "Good Flow Control Rx"},
	{MVXPE_MIB_FC_BAD, 0, "fc_rx_bad",
	    "Bad Flow Control Rx"},
	{MVXPE_MIB_PKT_UNDERSIZE, 0, "pkt_undersize",
	    "Undersized Packets Rx"},
	{MVXPE_MIB_PKT_FRAGMENT, 0, "pkt_fragment",
	    "Fragmented Packets Rx"},
	{MVXPE_MIB_PKT_OVERSIZE, 0, "pkt_oversize",
	    "Oversized Packets Rx"},
	{MVXPE_MIB_PKT_JABBER, 0, "pkt_jabber",
	    "Jabber Packets Rx"},
	{MVXPE_MIB_MAC_RX_ERR, 0, "mac_rx_err",
	    "MAC Rx Errors"},
	{MVXPE_MIB_MAC_CRC_ERR, 0, "mac_crc_err",
	    "MAC CRC Errors"},
	{MVXPE_MIB_MAC_COL, 0, "mac_collision",
	    "MAC Collision"},
	{MVXPE_MIB_MAC_LATE_COL, 0, "mac_late_collision",
	    "MAC Late Collision"},
};

/*
 * autoconf(9)
 */
/* ARGSUSED */
STATIC int
mvxpe_match(device_t parent, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	bus_size_t pv_off;
	uint32_t pv;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
		return 0;

	/* check port version */
	pv_off = mva->mva_offset + MVXPE_PV;
	pv = bus_space_read_4(mva->mva_iot, mva->mva_ioh, pv_off);
	if (MVXPE_PV_GET_VERSION(pv) < 0x10)
		return 0; /* old version is not supported */

	return 1;
}

/* ARGSUSED */
STATIC void
mvxpe_attach(device_t parent, device_t self, void *aux)
{
	struct mvxpe_softc *sc = device_private(self);
	struct mii_softc *mii;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct marvell_attach_args *mva = aux;
	prop_dictionary_t dict;
	prop_data_t enaddrp = NULL;
	uint32_t phyaddr, maddrh, maddrl;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int q;

	aprint_naive("\n");
	aprint_normal(": Marvell ARMADA GbE Controller\n");
	memset(sc, 0, sizeof(*sc));
	sc->sc_dev = self;
	sc->sc_port = mva->mva_unit;
	sc->sc_iot = mva->mva_iot;
	sc->sc_dmat = mva->mva_dmat;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, mvxpe_tick, sc);

	/*
	 * BUS space
	 */
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		goto fail;
	}
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    mva->mva_offset + MVXPE_PORTMIB_BASE, MVXPE_PORTMIB_SIZE,
	    &sc->sc_mibh)) {
		aprint_error_dev(self,
		    "Cannot map MIB registers\n");
		goto fail;
	}
	sc->sc_version = MVXPE_READ(sc, MVXPE_PV);
	aprint_normal_dev(self, "Port Version %#x\n", sc->sc_version);

	/*
	 * Buffer Manager (BM) subsystem.
	 */
	sc->sc_bm = mvxpbm_device(mva);
	if (sc->sc_bm == NULL) {
		aprint_error_dev(self, "no Buffer Manager.\n");
		goto fail;
	}
	aprint_normal_dev(self,
	    "Using Buffer Manager: %s\n", mvxpbm_xname(sc->sc_bm));
	aprint_normal_dev(sc->sc_dev,
	    "%zu kbytes managed buffer, %zu bytes * %u entries allocated.\n",
	    mvxpbm_buf_size(sc->sc_bm) / 1024,
	    mvxpbm_chunk_size(sc->sc_bm), mvxpbm_chunk_count(sc->sc_bm));

	/*
	 * make sure DMA engines are in reset state
	 */
	MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
	MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);

	/*
	 * Address decoding window
	 */
	mvxpe_wininit(sc, mva->mva_tags);

	/*
	 * MAC address
	 */
	dict = device_properties(self);
	if (dict)
		enaddrp = prop_dictionary_get(dict, "mac-address");
	if (enaddrp) {
		memcpy(enaddr, prop_data_data_nocopy(enaddrp), ETHER_ADDR_LEN);
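		/* MACAH holds octets 0-3 of the MAC address, MACAL octets 4-5 */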
		maddrh  = enaddr[0] << 24;
		maddrh |= enaddr[1] << 16;
		maddrh |= enaddr[2] << 8;
		maddrh |= enaddr[3];
		maddrl  = enaddr[4] << 8;
		maddrl |= enaddr[5];
		MVXPE_WRITE(sc, MVXPE_MACAH, maddrh);
		MVXPE_WRITE(sc, MVXPE_MACAL, maddrl);
	}
	else {
		/*
		 * Even if enaddr is not found in the dictionary, the port
		 * may have been initialized by an IPL program such as U-Boot.
		 */
		maddrh = MVXPE_READ(sc, MVXPE_MACAH);
		maddrl = MVXPE_READ(sc, MVXPE_MACAL);
		if ((maddrh | maddrl) == 0) {
			aprint_error_dev(self, "No Ethernet address\n");
			return;
		}
	}
	sc->sc_enaddr[0] = maddrh >> 24;
	sc->sc_enaddr[1] = maddrh >> 16;
	sc->sc_enaddr[2] = maddrh >> 8;
	sc->sc_enaddr[3] = maddrh >> 0;
	sc->sc_enaddr[4] = maddrl >> 8;
	sc->sc_enaddr[5] = maddrl >> 0;
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Register interrupt handlers
	 * XXX: handle Ethernet unit intr. and Error intr.
	 */
	mvxpe_disable_intr(sc);
	marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpe_rxtxth_intr, sc);

	/*
	 * MIB buffer allocation
	 */
	sc->sc_sysctl_mib_size =
	    __arraycount(mvxpe_mib_list) * sizeof(struct mvxpe_sysctl_mib);
	sc->sc_sysctl_mib = kmem_alloc(sc->sc_sysctl_mib_size, KM_NOSLEEP);
	if (sc->sc_sysctl_mib == NULL)
		goto fail;
	memset(sc->sc_sysctl_mib, 0, sc->sc_sysctl_mib_size);

	/*
	 * Device DMA Buffer allocation
	 */
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		if (mvxpe_ring_alloc_queue(sc, q) != 0)
			goto fail;
		mvxpe_ring_init_queue(sc, q);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = mvxpe_start;
	ifp->if_ioctl = mvxpe_ioctl;
	ifp->if_init = mvxpe_init;
	ifp->if_stop = mvxpe_stop;
	ifp->if_watchdog = mvxpe_watchdog;

	/*
	 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
	 */
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;

	/*
	 * Initialize struct ifnet
	 */
	IFQ_SET_MAXLEN(&ifp->if_snd, max(MVXPE_TX_RING_CNT - 1, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));

	/*
	 * Enable DMA engines and initialize device registers.
	 */
	MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
	MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
	MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
	mvxpe_sc_lock(sc); /* XXX */
	mvxpe_filter_setup(sc);
	mvxpe_sc_unlock(sc);
	mvxpe_initreg(ifp);

	/*
	 * Now MAC is working, setup MII.
	 */
	if (mii_init == 0) {
		/*
		 * The MII bus is shared by all MACs and all PHYs in the SoC.
		 * Serializing the bus access should be safe.
		 */
		mutex_init(&mii_mutex, MUTEX_DEFAULT, IPL_NET);
		mii_init = 1;
	}
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mvxpe_miibus_readreg;
	sc->sc_mii.mii_writereg = mvxpe_miibus_writereg;
	sc->sc_mii.mii_statchg = mvxpe_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    mvxpe_mediachange, mvxpe_mediastatus);
	/*
	 * XXX: PHY addressing depends highly on the board design.
	 * We assume phyaddr == MAC unit number here, but some boards
	 * may differ.
	 */
	mii_attach(self, &sc->sc_mii, 0xffffffff,
	    MII_PHY_ANY, sc->sc_dev->dv_unit, 0);
	mii = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (mii == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		phyaddr = MVXPE_PHYADDR_PHYAD(mii->mii_phy);
		MVXPE_WRITE(sc, MVXPE_PHYADDR, phyaddr);
		DPRINTSC(sc, 1, "PHYADDR: %#x\n", MVXPE_READ(sc, MVXPE_PHYADDR));
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);

	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, mvxpe_ifflags_cb);

	sysctl_mvxpe_init(sc);
	mvxpe_evcnt_attach(sc);
	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	return;

fail:
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++)
		mvxpe_ring_dealloc_queue(sc, q);
	if (sc->sc_sysctl_mib)
		kmem_free(sc->sc_sysctl_mib, sc->sc_sysctl_mib_size);

	return;
}

STATIC int
mvxpe_evcnt_attach(struct mvxpe_softc *sc)
{
#ifdef MVXPE_EVENT_COUNTERS
	int q;

	/* Master Interrupt Handler */
	evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtxth, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTxTH Intr.");
	evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtx, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Intr.");
	evcnt_attach_dynamic(&sc->sc_ev.ev_i_misc, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Intr.");

	/* RXTXTH Interrupt */
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtxth_txerr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTxTH Tx error summary");

	/* MISC Interrupt */
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_phystatuschng, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC phy status changed");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_linkchange, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC link status changed");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_iae, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC internal address error");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxoverrun, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Rx FIFO overrun");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxcrc, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Rx CRC error");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxlargepacket, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Rx too large frame");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txunderrun, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Tx FIFO underrun");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_prbserr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC SERDES loopback test err");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_srse, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC SERDES sync error");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txreq, EVCNT_TYPE_INTR,
560 NULL, device_xname(sc->sc_dev), "MISC Tx resource erorr");

	/* RxTx Interrupt */
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rreq, EVCNT_TYPE_INTR,
564 NULL, device_xname(sc->sc_dev), "RxTx Rx resource erorr");
565 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rpq, EVCNT_TYPE_INTR,
566 NULL, device_xname(sc->sc_dev), "RxTx Rx pakcet");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_tbrq, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Tx complete");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rxtxth, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx RxTxTH summary");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_txerr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Tx error summary");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_misc, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx MISC summary");

	/* Link */
	evcnt_attach_dynamic(&sc->sc_ev.ev_link_up, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "link up");
	evcnt_attach_dynamic(&sc->sc_ev.ev_link_down, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "link down");

	/* Rx Descriptor */
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_ce, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx CRC error counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_or, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx FIFO overrun counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_mf, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx too large frame counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_re, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx resource error counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_scat, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx unexpected scatter bufs");

	/* Tx Descriptor */
	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_lc, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx late collision counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_rl, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx excess. collision counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_ur, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx FIFO underrun counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_oth, EVCNT_TYPE_MISC,
602 NULL, device_xname(sc->sc_dev), "Tx unkonwn erorr counter");

	/* Status Registers */
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pdfc, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx discard counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pofc, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx overrun counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txbadfcs, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx bad FCS counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txdropped, EVCNT_TYPE_MISC,
612 NULL, device_xname(sc->sc_dev), "Tx dorpped counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_lpic, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "LP_IDLE counter");

	/* Device Driver Errors */
	evcnt_attach_dynamic(&sc->sc_ev.ev_drv_wdogsoft, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "watchdog timer expired");
	evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txerr, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx descriptor alloc failed");
#define MVXPE_QUEUE_DESC(q) "Rx success in queue " # q
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		static const char *rxq_desc[] = {
			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
		};
		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxq[q], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), rxq_desc[q]);
	}
#undef MVXPE_QUEUE_DESC
#define MVXPE_QUEUE_DESC(q) "Tx success in queue " # q
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		static const char *txq_desc[] = {
			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
		};
		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txq[q], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), txq_desc[q]);
	}
#undef MVXPE_QUEUE_DESC
#define MVXPE_QUEUE_DESC(q) "Rx error in queue " # q
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		static const char *rxqe_desc[] = {
			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
		};
		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxqe[q], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), rxqe_desc[q]);
	}
#undef MVXPE_QUEUE_DESC
#define MVXPE_QUEUE_DESC(q) "Tx error in queue " # q
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		static const char *txqe_desc[] = {
			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
		};
		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txqe[q], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), txqe_desc[q]);
	}
#undef MVXPE_QUEUE_DESC

#endif /* MVXPE_EVENT_COUNTERS */
	return 0;
}

STATIC void
mvxpe_sc_lock(struct mvxpe_softc *sc)
{
	mutex_enter(&sc->sc_mtx);
}

STATIC void
mvxpe_sc_unlock(struct mvxpe_softc *sc)
{
	mutex_exit(&sc->sc_mtx);
}

/*
 * MII
 */
STATIC int
mvxpe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mvxpe_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi, val;
	int i;

	mutex_enter(&mii_mutex);

	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
			break;
	}
	if (i == MVXPE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&mii_mutex);
		return -1;
	}

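	/* issue the read command, then poll until the read data is valid */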
	smi =
	    MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) | MVXPE_SMI_OPCODE_READ;
	MVXPE_WRITE(sc, MVXPE_SMI, smi);

	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
		DELAY(1);
		smi = MVXPE_READ(sc, MVXPE_SMI);
		if (smi & MVXPE_SMI_READVALID)
			break;
	}

	mutex_exit(&mii_mutex);

	DPRINTDEV(dev, 9, "i=%d, timeout=%d\n", i, MVXPE_PHY_TIMEOUT);

	val = smi & MVXPE_SMI_DATA_MASK;

	DPRINTDEV(dev, 9, "phy=%d, reg=%#x, val=%#x\n", phy, reg, val);

	return val;
}

STATIC void
mvxpe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct mvxpe_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi;
	int i;

	DPRINTDEV(dev, 9, "phy=%d reg=%#x val=%#x\n", phy, reg, val);

	mutex_enter(&mii_mutex);

	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
			break;
	}
	if (i == MVXPE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&mii_mutex);
		return;
	}

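	/* compose and issue the write command, then wait for the bus to idle */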
	smi = MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) |
	    MVXPE_SMI_OPCODE_WRITE | (val & MVXPE_SMI_DATA_MASK);
	MVXPE_WRITE(sc, MVXPE_SMI, smi);

	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
			break;
	}

	mutex_exit(&mii_mutex);

	if (i == MVXPE_PHY_TIMEOUT)
		aprint_error_ifnet(ifp, "phy write timed out\n");
}

STATIC void
mvxpe_miibus_statchg(struct ifnet *ifp)
{

	/* nothing to do */
}

/*
 * Address Decoding Window
 */
STATIC void
mvxpe_wininit(struct mvxpe_softc *sc, enum marvell_tags *tags)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t en, ac, size;
	int window, target, attr, rv, i;

	/* First disable all address decode windows */
	en = MVXPE_BARE_EN_MASK;
	MVXPE_WRITE(sc, MVXPE_BARE, en);

	ac = 0;
	for (window = 0, i = 0;
	    tags[i] != MARVELL_TAG_UNDEFINED && window < MVXPE_NWINDOW; i++) {
		rv = marvell_winparams_by_tag(pdev, tags[i],
		    &target, &attr, &base, &size);
		if (rv != 0 || size == 0)
			continue;

		if (base > 0xffffffffULL) {
			if (window >= MVXPE_NREMAP) {
				aprint_error_dev(sc->sc_dev,
				    "can't remap window %d\n", window);
				continue;
			}
			MVXPE_WRITE(sc, MVXPE_HA(window),
			    (base >> 32) & 0xffffffff);
		}

		MVXPE_WRITE(sc, MVXPE_BASEADDR(window),
		    MVXPE_BASEADDR_TARGET(target) |
		    MVXPE_BASEADDR_ATTR(attr) |
		    MVXPE_BASEADDR_BASE(base));
		MVXPE_WRITE(sc, MVXPE_S(window), MVXPE_S_SIZE(size));

		DPRINTSC(sc, 1, "Window %d Base 0x%016llx: Size 0x%08x\n",
		    window, base, size);

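		/* BARE bits are active-low: clearing a window's bit enables it */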
		en &= ~(1 << window);
		/* set full access (r/w) */
		ac |= MVXPE_EPAP_EPAR(window, MVXPE_EPAP_AC_FA);
		window++;
	}
	/* allow access to the decode windows */
	MVXPE_WRITE(sc, MVXPE_EPAP, ac);

	MVXPE_WRITE(sc, MVXPE_BARE, en);
}

/*
 * Device Register Initialization
 * Reset device registers to the device driver's default values.
 * The device is not enabled here.
 */
STATIC int
mvxpe_initreg(struct ifnet *ifp)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	int serdes = 0;
	uint32_t reg;
	int q, i;

	DPRINTIFNET(ifp, 1, "initializing device register\n");

	/* Init TX/RX Queue Registers */
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		mvxpe_rx_lockq(sc, q);
		if (mvxpe_rx_queue_init(ifp, q) != 0) {
			aprint_error_ifnet(ifp,
			    "initialization failed: cannot initialize queue\n");
			mvxpe_rx_unlockq(sc, q);
			return ENOBUFS;
		}
		mvxpe_rx_unlockq(sc, q);

		mvxpe_tx_lockq(sc, q);
		if (mvxpe_tx_queue_init(ifp, q) != 0) {
			aprint_error_ifnet(ifp,
			    "initialization failed: cannot initialize queue\n");
			mvxpe_tx_unlockq(sc, q);
			return ENOBUFS;
		}
		mvxpe_tx_unlockq(sc, q);
	}

	/* Tx MTU Limit */
	MVXPE_WRITE(sc, MVXPE_TXMTU, MVXPE_MTU);

	/* Check SGMII or SERDES (assume IPL/U-Boot initialized this) */
	reg = MVXPE_READ(sc, MVXPE_PMACC0);
	if ((reg & MVXPE_PMACC0_PORTTYPE) != 0)
		serdes = 1;

	/* Ethernet Unit Control */
	reg = MVXPE_READ(sc, MVXPE_EUC);
	reg |= MVXPE_EUC_POLLING;
	MVXPE_WRITE(sc, MVXPE_EUC, reg);

	/* Auto Negotiation */
	reg = MVXPE_PANC_MUSTSET;	/* must write 0x1 */
	reg |= MVXPE_PANC_FORCELINKFAIL;	/* force link state down */
	reg |= MVXPE_PANC_ANSPEEDEN;	/* interface speed negotiation */
	reg |= MVXPE_PANC_ANDUPLEXEN;	/* negotiate duplex mode */
	if (serdes) {
		reg |= MVXPE_PANC_INBANDANEN;	/* In Band negotiation */
		reg |= MVXPE_PANC_INBANDANBYPASSEN;	/* bypass negotiation */
		reg |= MVXPE_PANC_SETFULLDX;	/* set full-duplex on failure */
	}
	MVXPE_WRITE(sc, MVXPE_PANC, reg);

	/* EEE: Low Power Idle */
	reg = MVXPE_LPIC0_LILIMIT(MVXPE_LPI_LI);
	reg |= MVXPE_LPIC0_TSLIMIT(MVXPE_LPI_TS);
	MVXPE_WRITE(sc, MVXPE_LPIC0, reg);

	reg = MVXPE_LPIC1_TWLIMIT(MVXPE_LPI_TS);
	MVXPE_WRITE(sc, MVXPE_LPIC1, reg);

	reg = MVXPE_LPIC2_MUSTSET;
	MVXPE_WRITE(sc, MVXPE_LPIC2, reg);

	/* Port MAC Control set 0 */
	reg = MVXPE_PMACC0_MUSTSET;	/* must write 0x1 */
	reg &= ~MVXPE_PMACC0_PORTEN;	/* port is still disabled */
	reg |= MVXPE_PMACC0_FRAMESIZELIMIT(MVXPE_MRU);
	if (serdes)
		reg |= MVXPE_PMACC0_PORTTYPE;
	MVXPE_WRITE(sc, MVXPE_PMACC0, reg);

	/* Port MAC Control set 1 is only used for loop-back test */

	/* Port MAC Control set 2 */
	reg = MVXPE_READ(sc, MVXPE_PMACC2);
	reg &= (MVXPE_PMACC2_PCSEN | MVXPE_PMACC2_RGMIIEN);
	reg |= MVXPE_PMACC2_MUSTSET;
	MVXPE_WRITE(sc, MVXPE_PMACC2, reg);

	/* Port MAC Control set 3 is used for IPG tune */

	/* Port MAC Control set 4 is not used */

	/* Port Configuration Extended: enable Tx CRC generation */
	reg = MVXPE_READ(sc, MVXPE_PXCX);
	reg &= ~MVXPE_PXCX_TXCRCDIS;
	MVXPE_WRITE(sc, MVXPE_PXCX, reg);

	/* clear MIB counter registers (clear by read) */
	for (i = 0; i < __arraycount(mvxpe_mib_list); i++)
		MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum));

	/* Set SDC register except IPGINT bits */
	reg = MVXPE_SDC_RXBSZ_16_64BITWORDS;
	reg |= MVXPE_SDC_TXBSZ_16_64BITWORDS;
	reg |= MVXPE_SDC_BLMR;
	reg |= MVXPE_SDC_BLMT;
	MVXPE_WRITE(sc, MVXPE_SDC, reg);

	return 0;
}

/*
 * Descriptor Ring Control for each queue
 */
STATIC void *
mvxpe_dma_memalloc(struct mvxpe_softc *sc, bus_dmamap_t *map, size_t size)
{
	bus_dma_segment_t segs;
	void *kva = NULL;
	int nsegs;

	/*
	 * Allocate the descriptor queues.
	 * struct mvxpe_ring_data contains an array of descriptors per queue.
	 */
	if (bus_dmamem_alloc(sc->sc_dmat,
	    size, PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't alloc device memory (%zu bytes)\n", size);
		return NULL;
	}
	if (bus_dmamem_map(sc->sc_dmat,
	    &segs, nsegs, size, &kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't map dma buffers (%zu bytes)\n", size);
		goto fail1;
	}

	if (bus_dmamap_create(sc->sc_dmat,
	    size, 1, size, 0, BUS_DMA_NOWAIT, map)) {
		aprint_error_dev(sc->sc_dev, "can't create dma map\n");
		goto fail2;
	}
	if (bus_dmamap_load(sc->sc_dmat,
	    *map, kva, size, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't load dma map\n");
		goto fail3;
	}
	memset(kva, 0, size);
	return kva;

fail3:
	bus_dmamap_destroy(sc->sc_dmat, *map);
	memset(map, 0, sizeof(*map));
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, size);
fail1:
	bus_dmamem_free(sc->sc_dmat, &segs, nsegs);
	return NULL;
}

STATIC int
mvxpe_ring_alloc_queue(struct mvxpe_softc *sc, int q)
{
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);

	/*
	 * MVXPE_RX_RING_CNT and MVXPE_TX_RING_CNT are hard limits on
	 * queue length. The real queue length is limited by
	 * sc->sc_rx_ring[q].rx_queue_len and sc->sc_tx_ring[q].tx_queue_len.
	 *
	 * Because descriptor ring reallocation needs reprogramming of
	 * DMA registers, we allocate enough descriptors for the hard
	 * limit of queue length.
	 */
	rx->rx_descriptors =
	    mvxpe_dma_memalloc(sc, &rx->rx_descriptors_map,
	    (sizeof(struct mvxpe_rx_desc) * MVXPE_RX_RING_CNT));
	if (rx->rx_descriptors == NULL)
		goto fail;

	tx->tx_descriptors =
	    mvxpe_dma_memalloc(sc, &tx->tx_descriptors_map,
	    (sizeof(struct mvxpe_tx_desc) * MVXPE_TX_RING_CNT));
	if (tx->tx_descriptors == NULL)
		goto fail;

	return 0;
fail:
	mvxpe_ring_dealloc_queue(sc, q);
	aprint_error_dev(sc->sc_dev, "DMA Ring buffer allocation failure.\n");
	return ENOMEM;
}

STATIC void
mvxpe_ring_dealloc_queue(struct mvxpe_softc *sc, int q)
{
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	bus_dma_segment_t *segs;
	bus_size_t size;
	void *kva;
	int nsegs;

	/* Rx */
	kva = (void *)MVXPE_RX_RING_MEM_VA(sc, q);
	if (kva) {
		segs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_segs;
		nsegs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_nsegs;
		size = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_mapsize;

		bus_dmamap_unload(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
		bus_dmamap_destroy(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
		bus_dmamem_unmap(sc->sc_dmat, kva, size);
		bus_dmamem_free(sc->sc_dmat, segs, nsegs);
	}

	/* Tx */
	kva = (void *)MVXPE_TX_RING_MEM_VA(sc, q);
	if (kva) {
		segs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_segs;
		nsegs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_nsegs;
		size = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_mapsize;

		bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
		bus_dmamap_destroy(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
		bus_dmamem_unmap(sc->sc_dmat, kva, size);
		bus_dmamem_free(sc->sc_dmat, segs, nsegs);
	}

	/* Clear all dangling pointers */
	memset(rx, 0, sizeof(*rx));
	memset(tx, 0, sizeof(*tx));
}

STATIC void
mvxpe_ring_init_queue(struct mvxpe_softc *sc, int q)
{
	struct mvxpe_rx_desc *rxd = MVXPE_RX_RING_MEM_VA(sc, q);
	struct mvxpe_tx_desc *txd = MVXPE_TX_RING_MEM_VA(sc, q);
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	static const int rx_default_queue_len[] = {
		MVXPE_RX_QUEUE_LIMIT_0, MVXPE_RX_QUEUE_LIMIT_1,
		MVXPE_RX_QUEUE_LIMIT_2, MVXPE_RX_QUEUE_LIMIT_3,
		MVXPE_RX_QUEUE_LIMIT_4, MVXPE_RX_QUEUE_LIMIT_5,
		MVXPE_RX_QUEUE_LIMIT_6, MVXPE_RX_QUEUE_LIMIT_7,
	};
	static const int tx_default_queue_len[] = {
		MVXPE_TX_QUEUE_LIMIT_0, MVXPE_TX_QUEUE_LIMIT_1,
		MVXPE_TX_QUEUE_LIMIT_2, MVXPE_TX_QUEUE_LIMIT_3,
		MVXPE_TX_QUEUE_LIMIT_4, MVXPE_TX_QUEUE_LIMIT_5,
		MVXPE_TX_QUEUE_LIMIT_6, MVXPE_TX_QUEUE_LIMIT_7,
	};
	extern uint32_t mvTclk;
	int i;

	/* Rx handle */
	for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
		MVXPE_RX_DESC(sc, q, i) = &rxd[i];
		MVXPE_RX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_rx_desc) * i;
		MVXPE_RX_PKTBUF(sc, q, i) = NULL;
	}
	mutex_init(&rx->rx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
	rx->rx_dma = rx->rx_cpu = 0;
	rx->rx_queue_len = rx_default_queue_len[q];
	if (rx->rx_queue_len > MVXPE_RX_RING_CNT)
		rx->rx_queue_len = MVXPE_RX_RING_CNT;
	rx->rx_queue_th_received = rx->rx_queue_len / MVXPE_RXTH_RATIO;
	rx->rx_queue_th_free = rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
	rx->rx_queue_th_time = (mvTclk / 1000) / 2; /* 0.5 [ms] */

	/* Tx handle */
	for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
		MVXPE_TX_DESC(sc, q, i) = &txd[i];
		MVXPE_TX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_tx_desc) * i;
		MVXPE_TX_MBUF(sc, q, i) = NULL;
		/* Tx handle needs a DMA map for bus_dmamap_load_mbuf() */
		if (bus_dmamap_create(sc->sc_dmat,
		    mvxpbm_chunk_size(sc->sc_bm),
		    MVXPE_TX_SEGLIMIT, mvxpbm_chunk_size(sc->sc_bm), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &MVXPE_TX_MAP(sc, q, i))) {
			aprint_error_dev(sc->sc_dev,
			    "can't create dma map (tx ring %d)\n", i);
		}
	}
	mutex_init(&tx->tx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
	tx->tx_dma = tx->tx_cpu = 0;
	tx->tx_queue_len = tx_default_queue_len[q];
	if (tx->tx_queue_len > MVXPE_TX_RING_CNT)
		tx->tx_queue_len = MVXPE_TX_RING_CNT;
	tx->tx_used = 0;
	tx->tx_queue_th_free = tx->tx_queue_len / MVXPE_TXTH_RATIO;
}

STATIC void
mvxpe_ring_flush_queue(struct mvxpe_softc *sc, int q)
{
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	int i;

	KASSERT_RX_MTX(sc, q);
	KASSERT_TX_MTX(sc, q);

	/* Rx handle */
	for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
		if (MVXPE_RX_PKTBUF(sc, q, i) == NULL)
			continue;
		mvxpbm_free_chunk(MVXPE_RX_PKTBUF(sc, q, i));
		MVXPE_RX_PKTBUF(sc, q, i) = NULL;
	}
	rx->rx_dma = rx->rx_cpu = 0;

	/* Tx handle */
	for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
		if (MVXPE_TX_MBUF(sc, q, i) == NULL)
			continue;
		bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, i));
		m_freem(MVXPE_TX_MBUF(sc, q, i));
		MVXPE_TX_MBUF(sc, q, i) = NULL;
	}
	tx->tx_dma = tx->tx_cpu = 0;
	tx->tx_used = 0;
}

STATIC void
mvxpe_ring_sync_rx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
{
	int wrap;

	KASSERT_RX_MTX(sc, q);
	KASSERT(count > 0 && count <= MVXPE_RX_RING_CNT);
	KASSERT(idx >= 0 && idx < MVXPE_RX_RING_CNT);

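	/*
	 * A sync that crosses the end of the ring is done in two parts:
	 * first the wrapped part at the start of the ring, then the part
	 * from idx up to the end of the ring.
	 */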
	wrap = (idx + count) - MVXPE_RX_RING_CNT;
	if (wrap > 0) {
		count -= wrap;
		KASSERT(count > 0);
		bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
		    0, sizeof(struct mvxpe_rx_desc) * wrap, ops);
	}
	bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
	    MVXPE_RX_DESC_OFF(sc, q, idx),
	    sizeof(struct mvxpe_rx_desc) * count, ops);
}

STATIC void
mvxpe_ring_sync_tx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
{
	int wrap = 0;

	KASSERT_TX_MTX(sc, q);
	KASSERT(count > 0 && count <= MVXPE_TX_RING_CNT);
	KASSERT(idx >= 0 && idx < MVXPE_TX_RING_CNT);

	wrap = (idx + count) - MVXPE_TX_RING_CNT;
	if (wrap > 0) {
		count -= wrap;
		bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
		    0, sizeof(struct mvxpe_tx_desc) * wrap, ops);
	}
	bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
	    MVXPE_TX_DESC_OFF(sc, q, idx),
	    sizeof(struct mvxpe_tx_desc) * count, ops);
}

/*
 * Rx/Tx Queue Control
 */
STATIC int
mvxpe_rx_queue_init(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	uint32_t reg;

	KASSERT_RX_MTX(sc, q);
	KASSERT(MVXPE_RX_RING_MEM_PA(sc, q) != 0);

	/* descriptor address */
	MVXPE_WRITE(sc, MVXPE_PRXDQA(q), MVXPE_RX_RING_MEM_PA(sc, q));

	/* Rx buffer size and descriptor ring size */
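	/*
	 * NB: the buffer size field appears to be in units of 8 bytes
	 * (hence the >> 3).
	 */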
	reg = MVXPE_PRXDQS_BUFFERSIZE(mvxpbm_chunk_size(sc->sc_bm) >> 3);
	reg |= MVXPE_PRXDQS_DESCRIPTORSQUEUESIZE(MVXPE_RX_RING_CNT);
	MVXPE_WRITE(sc, MVXPE_PRXDQS(q), reg);
	DPRINTIFNET(ifp, 1, "PRXDQS(%d): %#x\n",
	    q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));

	/* Rx packet offset address */
	reg = MVXPE_PRXC_PACKETOFFSET(mvxpbm_packet_offset(sc->sc_bm) >> 3);
	MVXPE_WRITE(sc, MVXPE_PRXC(q), reg);
	DPRINTIFNET(ifp, 1, "PRXC(%d): %#x\n",
	    q, MVXPE_READ(sc, MVXPE_PRXC(q)));

	/* Rx DMA SNOOP */
	reg = MVXPE_PRXSNP_SNOOPNOOFBYTES(MVXPE_MRU);
	reg |= MVXPE_PRXSNP_L2DEPOSITNOOFBYTES(MVXPE_MRU);
	MVXPE_WRITE(sc, MVXPE_PRXSNP(q), reg);

	/* if DMA is not working, register is not updated */
	KASSERT(MVXPE_READ(sc, MVXPE_PRXDQA(q)) == MVXPE_RX_RING_MEM_PA(sc, q));
	return 0;
}

STATIC int
mvxpe_tx_queue_init(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	uint32_t reg;

	KASSERT_TX_MTX(sc, q);
	KASSERT(MVXPE_TX_RING_MEM_PA(sc, q) != 0);

	/* descriptor address */
	MVXPE_WRITE(sc, MVXPE_PTXDQA(q), MVXPE_TX_RING_MEM_PA(sc, q));

	/* Tx threshold, and descriptor ring size */
	reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
	reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
	MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
	DPRINTIFNET(ifp, 1, "PTXDQS(%d): %#x\n",
	    q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));

	/* if DMA is not working, register is not updated */
	KASSERT(MVXPE_READ(sc, MVXPE_PTXDQA(q)) == MVXPE_TX_RING_MEM_PA(sc, q));
	return 0;
}

STATIC int
mvxpe_rx_queue_enable(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	uint32_t reg;

	KASSERT_RX_MTX(sc, q);

	/* Set Rx interrupt threshold */
	reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
	reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
	MVXPE_WRITE(sc, MVXPE_PRXDQTH(q), reg);

	reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
	MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);

	/* Unmask RXTX Intr. */
	reg = MVXPE_READ(sc, MVXPE_PRXTXIM);
	reg |= MVXPE_PRXTXI_RREQ(q); /* Rx resource error */
	MVXPE_WRITE(sc, MVXPE_PRXTXIM, reg);

	/* Unmask RXTX_TH Intr. */
	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
	reg |= MVXPE_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
	reg |= MVXPE_PRXTXTI_RDTAQ(q); /* Rx Descriptor Alert */
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);

	/* Enable Rx queue */
	reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
	reg |= MVXPE_RQC_ENQ(q);
	MVXPE_WRITE(sc, MVXPE_RQC, reg);

	return 0;
}

STATIC int
mvxpe_tx_queue_enable(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	uint32_t reg;

	KASSERT_TX_MTX(sc, q);

	/* Set Tx interrupt threshold */
	reg = MVXPE_READ(sc, MVXPE_PTXDQS(q));
	reg &= ~MVXPE_PTXDQS_TBT_MASK; /* keep queue size */
	reg |= MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
	MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);

	/* Unmask RXTX_TH Intr. */
	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
	reg |= MVXPE_PRXTXTI_TBTCQ(q); /* Tx Threshold cross */
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);

	/* Don't update MVXPE_TQC here, there is no packet yet. */
	return 0;
}

STATIC void
mvxpe_rx_lockq(struct mvxpe_softc *sc, int q)
{
	KASSERT(q >= 0);
	KASSERT(q < MVXPE_QUEUE_SIZE);
	mutex_enter(&sc->sc_rx_ring[q].rx_ring_mtx);
}

STATIC void
mvxpe_rx_unlockq(struct mvxpe_softc *sc, int q)
{
	KASSERT(q >= 0);
	KASSERT(q < MVXPE_QUEUE_SIZE);
	mutex_exit(&sc->sc_rx_ring[q].rx_ring_mtx);
}

STATIC void
mvxpe_tx_lockq(struct mvxpe_softc *sc, int q)
{
	KASSERT(q >= 0);
	KASSERT(q < MVXPE_QUEUE_SIZE);
	mutex_enter(&sc->sc_tx_ring[q].tx_ring_mtx);
}

STATIC void
mvxpe_tx_unlockq(struct mvxpe_softc *sc, int q)
{
	KASSERT(q >= 0);
	KASSERT(q < MVXPE_QUEUE_SIZE);
	mutex_exit(&sc->sc_tx_ring[q].tx_ring_mtx);
}

/*
 * Interrupt Handlers
 */
STATIC void
mvxpe_disable_intr(struct mvxpe_softc *sc)
{
	MVXPE_WRITE(sc, MVXPE_EUIM, 0);
	MVXPE_WRITE(sc, MVXPE_EUIC, 0);
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, 0);
	MVXPE_WRITE(sc, MVXPE_PRXTXTIC, 0);
	MVXPE_WRITE(sc, MVXPE_PRXTXIM, 0);
	MVXPE_WRITE(sc, MVXPE_PRXTXIC, 0);
	MVXPE_WRITE(sc, MVXPE_PMIM, 0);
	MVXPE_WRITE(sc, MVXPE_PMIC, 0);
	MVXPE_WRITE(sc, MVXPE_PIE, 0);
}

STATIC void
mvxpe_enable_intr(struct mvxpe_softc *sc)
{
	uint32_t reg;

	/* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
	reg = MVXPE_READ(sc, MVXPE_PMIM);
	reg |= MVXPE_PMI_PHYSTATUSCHNG;
	reg |= MVXPE_PMI_LINKCHANGE;
	reg |= MVXPE_PMI_IAE;
	reg |= MVXPE_PMI_RXOVERRUN;
	reg |= MVXPE_PMI_RXCRCERROR;
	reg |= MVXPE_PMI_RXLARGEPACKET;
	reg |= MVXPE_PMI_TXUNDRN;
	reg |= MVXPE_PMI_PRBSERROR;
	reg |= MVXPE_PMI_SRSE;
	reg |= MVXPE_PMI_TREQ_MASK;
	MVXPE_WRITE(sc, MVXPE_PMIM, reg);

	/* Enable RXTX Intr. (via RXTX_TH Summary bit) */
	reg = MVXPE_READ(sc, MVXPE_PRXTXIM);
	reg |= MVXPE_PRXTXI_RREQ_MASK; /* Rx resource error */
	MVXPE_WRITE(sc, MVXPE_PRXTXIM, reg);

	/* Enable summary bits to check all interrupt causes. */
	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
	reg |= MVXPE_PRXTXTI_PMISCICSUMMARY;
	reg |= MVXPE_PRXTXTI_PTXERRORSUMMARY;
	reg |= MVXPE_PRXTXTI_PRXTXICSUMMARY;
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);

	/* Enable All Queue Interrupt */
	reg = MVXPE_READ(sc, MVXPE_PIE);
	reg |= MVXPE_PIE_RXPKTINTRPTENB_MASK;
	reg |= MVXPE_PIE_TXPKTINTRPTENB_MASK;
	MVXPE_WRITE(sc, MVXPE_PIE, reg);
}

STATIC int
mvxpe_rxtxth_intr(void *arg)
{
	struct mvxpe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t ic, queues, datum = 0;

	DPRINTSC(sc, 2, "got RXTX_TH_Intr\n");
	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtxth);

	mvxpe_sc_lock(sc);
	ic = MVXPE_READ(sc, MVXPE_PRXTXTIC);
	if (ic == 0) {
		mvxpe_sc_unlock(sc);
		return 0;
	}
	MVXPE_WRITE(sc, MVXPE_PRXTXTIC, ~ic);
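	/* fold the handled cause bits into an entropy sample for rnd(9) */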
	datum = datum ^ ic;

	DPRINTIFNET(ifp, 2, "PRXTXTIC: %#x\n", ic);

	/* ack maintenance interrupts first */
	if (ic & MVXPE_PRXTXTI_PTXERRORSUMMARY) {
		DPRINTIFNET(ifp, 1, "PRXTXTIC: +PTXERRORSUMMARY\n");
		MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtxth_txerr);
	}
	if ((ic & MVXPE_PRXTXTI_PMISCICSUMMARY)) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +PMISCICSUMMARY\n");
		mvxpe_misc_intr(sc);
	}
	if (ic & MVXPE_PRXTXTI_PRXTXICSUMMARY) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +PRXTXICSUMMARY\n");
		mvxpe_rxtx_intr(sc);
	}
	if (!(ifp->if_flags & IFF_RUNNING)) {
		mvxpe_sc_unlock(sc);
		return 1;
	}

	/* RxTxTH interrupt */
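	/* each MVXPE_PRXTXTI_GET_* extracts a per-queue bitmap from the cause */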
	queues = MVXPE_PRXTXTI_GET_RBICTAPQ(ic);
	if (queues) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +RXEOF\n");
		mvxpe_rx(sc, queues);
	}
	queues = MVXPE_PRXTXTI_GET_TBTCQ(ic);
	if (queues) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +TBTCQ\n");
		mvxpe_tx_complete(sc, queues);
	}
	queues = MVXPE_PRXTXTI_GET_RDTAQ(ic);
	if (queues) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +RDTAQ\n");
		mvxpe_rx_refill(sc, queues);
	}
	mvxpe_sc_unlock(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mvxpe_start(ifp);

	rnd_add_uint32(&sc->sc_rnd_source, datum);

	return 1;
}

STATIC int
mvxpe_misc_intr(void *arg)
{
	struct mvxpe_softc *sc = arg;
#ifdef MVXPE_DEBUG
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#endif
	uint32_t ic;
	uint32_t datum = 0;
	int claimed = 0;

	DPRINTSC(sc, 2, "got MISC_INTR\n");
	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_misc);

	KASSERT_SC_MTX(sc);

	for (;;) {
		ic = MVXPE_READ(sc, MVXPE_PMIC);
		ic &= MVXPE_READ(sc, MVXPE_PMIM);
		if (ic == 0)
			break;
		MVXPE_WRITE(sc, MVXPE_PMIC, ~ic);
		datum = datum ^ ic;
		claimed = 1;

		DPRINTIFNET(ifp, 2, "PMIC=%#x\n", ic);
		if (ic & MVXPE_PMI_PHYSTATUSCHNG) {
			DPRINTIFNET(ifp, 2, "+PHYSTATUSCHNG\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_phystatuschng);
		}
		if (ic & MVXPE_PMI_LINKCHANGE) {
			DPRINTIFNET(ifp, 2, "+LINKCHANGE\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_linkchange);
			mvxpe_linkupdate(sc);
		}
		if (ic & MVXPE_PMI_IAE) {
			DPRINTIFNET(ifp, 2, "+IAE\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_iae);
		}
		if (ic & MVXPE_PMI_RXOVERRUN) {
			DPRINTIFNET(ifp, 2, "+RXOVERRUN\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxoverrun);
		}
		if (ic & MVXPE_PMI_RXCRCERROR) {
			DPRINTIFNET(ifp, 2, "+RXCRCERROR\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxcrc);
		}
		if (ic & MVXPE_PMI_RXLARGEPACKET) {
			DPRINTIFNET(ifp, 2, "+RXLARGEPACKET\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxlargepacket);
		}
		if (ic & MVXPE_PMI_TXUNDRN) {
			DPRINTIFNET(ifp, 2, "+TXUNDRN\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txunderrun);
		}
		if (ic & MVXPE_PMI_PRBSERROR) {
			DPRINTIFNET(ifp, 2, "+PRBSERROR\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_prbserr);
		}
		if (ic & MVXPE_PMI_TREQ_MASK) {
			DPRINTIFNET(ifp, 2, "+TREQ\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txreq);
		}
	}
	if (datum)
		rnd_add_uint32(&sc->sc_rnd_source, datum);

	return claimed;
}

STATIC int
mvxpe_rxtx_intr(void *arg)
{
	struct mvxpe_softc *sc = arg;
#ifdef MVXPE_DEBUG
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#endif
	uint32_t datum = 0;
	uint32_t prxtxic;
	int claimed = 0;

	DPRINTSC(sc, 2, "got RXTX_Intr\n");
	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtx);

	KASSERT_SC_MTX(sc);

	for (;;) {
		prxtxic = MVXPE_READ(sc, MVXPE_PRXTXIC);
		prxtxic &= MVXPE_READ(sc, MVXPE_PRXTXIM);
		if (prxtxic == 0)
			break;
		MVXPE_WRITE(sc, MVXPE_PRXTXIC, ~prxtxic);
		datum = datum ^ prxtxic;
		claimed = 1;

		DPRINTSC(sc, 2, "PRXTXIC: %#x\n", prxtxic);

		if (prxtxic & MVXPE_PRXTXI_RREQ_MASK) {
			DPRINTIFNET(ifp, 1, "Rx Resource Error.\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rreq);
		}
		if (prxtxic & MVXPE_PRXTXI_RPQ_MASK) {
			DPRINTIFNET(ifp, 1, "Rx Packet in Queue.\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rpq);
		}
		if (prxtxic & MVXPE_PRXTXI_TBRQ_MASK) {
			DPRINTIFNET(ifp, 1, "Tx Buffer Return.\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_tbrq);
		}
		if (prxtxic & MVXPE_PRXTXI_PRXTXTHICSUMMARY) {
1583 DPRINTIFNET(ifp, 1, "PRXTXTHIC Sumary\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rxtxth);
		}
		if (prxtxic & MVXPE_PRXTXI_PTXERRORSUMMARY) {
1587 DPRINTIFNET(ifp, 1, "PTXERROR Sumary\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_txerr);
		}
		if (prxtxic & MVXPE_PRXTXI_PMISCICSUMMARY) {
1591 DPRINTIFNET(ifp, 1, "PMISCIC Sumary\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_misc);
		}
	}
	if (datum)
		rnd_add_uint32(&sc->sc_rnd_source, datum);

	return claimed;
}

STATIC void
mvxpe_tick(void *arg)
{
	struct mvxpe_softc *sc = arg;
	struct mii_data *mii = &sc->sc_mii;

	mvxpe_sc_lock(sc);

	mii_tick(mii);
	mii_pollstat(&sc->sc_mii);

	/* read MIB registers (clear by read) */
	mvxpe_update_mib(sc);

	/* read counter registers (clear by read) */
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pdfc,
	    MVXPE_READ(sc, MVXPE_PDFC));
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pofc,
	    MVXPE_READ(sc, MVXPE_POFC));
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txbadfcs,
	    MVXPE_READ(sc, MVXPE_TXBADFCS));
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txdropped,
	    MVXPE_READ(sc, MVXPE_TXDROPPED));
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_lpic,
	    MVXPE_READ(sc, MVXPE_LPIC));

	mvxpe_sc_unlock(sc);

	callout_schedule(&sc->sc_tick_ch, hz);
}


/*
 * struct ifnet and mii callbacks
 */
STATIC void
mvxpe_start(struct ifnet *ifp)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int q;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
		DPRINTIFNET(ifp, 1, "not running\n");
		return;
	}

	mvxpe_sc_lock(sc);
	if (!MVXPE_IS_LINKUP(sc)) {
		/* If Link is DOWN, can't start TX */
		DPRINTIFNET(ifp, 1, "link fail\n");
		for (;;) {
			/*
			 * Discard all stale packets; they may confuse
			 * DAD, ARP, or timer-based protocols.
			 */
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
			m_freem(m);
		}
		mvxpe_sc_unlock(sc);
		return;
	}
	for (;;) {
		/*
		 * Don't use IFQ_POLL(); there is a locking problem
		 * between IFQ_POLL and IFQ_DEQUEUE on an SMP-enabled
		 * networking stack.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		q = mvxpe_tx_queue_select(sc, m);
		if (q < 0)
			break;
		/* mutex is held in mvxpe_tx_queue_select() */

		if (mvxpe_tx_queue(sc, m, q) != 0) {
			DPRINTIFNET(ifp, 1, "cannot add packet to tx ring\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txerr);
			mvxpe_tx_unlockq(sc, q);
			break;
		}
		mvxpe_tx_unlockq(sc, q);
		KASSERT(sc->sc_tx_ring[q].tx_used >= 0);
		KASSERT(sc->sc_tx_ring[q].tx_used <=
		    sc->sc_tx_ring[q].tx_queue_len);
		DPRINTIFNET(ifp, 1, "a packet is added to tx ring\n");
		sc->sc_tx_pending++;
		ifp->if_timer = 1;
		sc->sc_wdogsoft = 1;
		bpf_mtap(ifp, m);
	}
	mvxpe_sc_unlock(sc);

	return;
}

STATIC int
mvxpe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = data;
	int error = 0;
	int s;

	switch (cmd) {
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTIFNET(ifp, 2, "mvxpe_ioctl MEDIA\n");
		s = splnet(); /* XXX: is there a suitable mutex? */
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		splx(s);
		break;
	default:
		DPRINTIFNET(ifp, 2, "mvxpe_ioctl ETHER\n");
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				mvxpe_sc_lock(sc);
				mvxpe_filter_setup(sc);
				mvxpe_sc_unlock(sc);
			}
			error = 0;
		}
		break;
	}

	return error;
}

STATIC int
mvxpe_init(struct ifnet *ifp)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t reg;
	int q;

	mvxpe_sc_lock(sc);

	/* Start DMA Engine */
	MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
	MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
	MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);

	/* Enable port */
	reg = MVXPE_READ(sc, MVXPE_PMACC0);
	reg |= MVXPE_PMACC0_PORTEN;
	MVXPE_WRITE(sc, MVXPE_PMACC0, reg);

	/* Link up */
	mvxpe_linkup(sc);

	/* Enable all queues and the interrupts of each queue */
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		mvxpe_rx_lockq(sc, q);
		mvxpe_rx_queue_enable(ifp, q);
		mvxpe_rx_queue_refill(sc, q);
		mvxpe_rx_unlockq(sc, q);

		mvxpe_tx_lockq(sc, q);
		mvxpe_tx_queue_enable(ifp, q);
		mvxpe_tx_unlockq(sc, q);
	}

	/* Enable interrupt */
	mvxpe_enable_intr(sc);

1772 /* Start the one-second tick timer */
1773 callout_schedule(&sc->sc_tick_ch, hz);
1774
1775 /* Media check */
1776 mii_mediachg(mii);
1777
1778 ifp->if_flags |= IFF_RUNNING;
1779 ifp->if_flags &= ~IFF_OACTIVE;
1780
1781 mvxpe_sc_unlock(sc);
1782 return 0;
1783 }
1784
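/*
 * mvxpe_stop quiesces the interface: stop the tick, mask Rx/Tx
 * interrupts, wait for the DMA engines and the Tx FIFO to drain,
 * then disable the MAC port. With `disable' set, the DMA engine is
 * additionally held in reset and the rings are flushed.
 */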
1785 /* ARGSUSED */
1786 STATIC void
1787 mvxpe_stop(struct ifnet *ifp, int disable)
1788 {
1789 struct mvxpe_softc *sc = ifp->if_softc;
1790 uint32_t reg;
1791 int q, cnt;
1792
1793 DPRINTIFNET(ifp, 1, "stop device dma and interrupts.\n");
1794
1795 mvxpe_sc_lock(sc);
1796
1797 callout_stop(&sc->sc_tick_ch);
1798
1799 /* Link down */
1800 mvxpe_linkdown(sc);
1801
1802 /* Disable Rx interrupt */
1803 reg = MVXPE_READ(sc, MVXPE_PIE);
1804 reg &= ~MVXPE_PIE_RXPKTINTRPTENB_MASK;
1805 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1806
1807 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1808 reg &= ~MVXPE_PRXTXTI_RBICTAPQ_MASK;
1809 reg &= ~MVXPE_PRXTXTI_RDTAQ_MASK;
1810 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1811
1812 /* Wait for all Rx activity to terminate. */
1813 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
1814 reg = MVXPE_RQC_DIS(reg);
1815 MVXPE_WRITE(sc, MVXPE_RQC, reg);
1816 cnt = 0;
1817 do {
1818 if (cnt >= RX_DISABLE_TIMEOUT) {
1819 aprint_error_ifnet(ifp,
1820 "timeout for RX stopped. rqc 0x%x\n", reg);
1821 break;
1822 }
1823 cnt++;
1824 reg = MVXPE_READ(sc, MVXPE_RQC);
1825 } while (reg & MVXPE_RQC_EN_MASK);
1826
1827 /* Wait for all Tx activity to terminate. */
1828 reg = MVXPE_READ(sc, MVXPE_PIE);
1829 reg &= ~MVXPE_PIE_TXPKTINTRPTENB_MASK;
1830 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1831
1832 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1833 reg &= ~MVXPE_PRXTXTI_TBTCQ_MASK;
1834 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1835
1836 reg = MVXPE_READ(sc, MVXPE_TQC) & MVXPE_TQC_EN_MASK;
1837 reg = MVXPE_TQC_DIS(reg);
1838 MVXPE_WRITE(sc, MVXPE_TQC, reg);
1839 cnt = 0;
1840 do {
1841 if (cnt >= TX_DISABLE_TIMEOUT) {
1842 aprint_error_ifnet(ifp,
1843 "timeout for TX stopped. tqc 0x%x\n", reg);
1844 break;
1845 }
1846 cnt++;
1847 reg = MVXPE_READ(sc, MVXPE_TQC);
1848 } while (reg & MVXPE_TQC_EN_MASK);
1849
1850 /* Wait until the Tx FIFO is empty */
1851 cnt = 0;
1852 do {
1853 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
1854 aprint_error_ifnet(ifp,
1855 "timeout for TX FIFO drained. ps0 0x%x\n", reg);
1856 break;
1857 }
1858 cnt++;
1859 reg = MVXPE_READ(sc, MVXPE_PS0);
1860 } while (!(reg & MVXPE_PS0_TXFIFOEMP) && (reg & MVXPE_PS0_TXINPROG));
1861
1862 /* Reset the MAC Port Enable bit */
1863 reg = MVXPE_READ(sc, MVXPE_PMACC0);
1864 reg &= ~MVXPE_PMACC0_PORTEN;
1865 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1866
1867 /* Disable each queue */
1868 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1869 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1870
1871 mvxpe_rx_lockq(sc, q);
1872 mvxpe_tx_lockq(sc, q);
1873
1874 /* Disable Rx packet buffer refill request */
1875 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
1876 reg |= MVXPE_PRXDQTH_NODT(0);
1877 MVXPE_WRITE(sc, MVXPE_PRXDQTH(q), reg);
1878
1879 if (disable) {
1880 /*
1881 * Hold Reset state of DMA Engine
1882 * (must write 0x0 to restart it)
1883 */
1884 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
1885 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);
1886 mvxpe_ring_flush_queue(sc, q);
1887 }
1888
1889 mvxpe_tx_unlockq(sc, q);
1890 mvxpe_rx_unlockq(sc, q);
1891 }
1892
1893 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1894
1895 mvxpe_sc_unlock(sc);
1896 }
1897
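/*
 * mvxpe_watchdog reclaims finished Tx descriptors first, since a Tx
 * completion interrupt may have been lost; if work is still pending
 * it either re-kicks the queue (soft watchdog) or reinitializes the
 * interface entirely.
 */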
1898 STATIC void
1899 mvxpe_watchdog(struct ifnet *ifp)
1900 {
1901 struct mvxpe_softc *sc = ifp->if_softc;
1902 int q;
1903
1904 mvxpe_sc_lock(sc);
1905
1906 /*
1907 * Reclaim first as there is a possibility of losing Tx completion
1908 * interrupts.
1909 */
1910 mvxpe_tx_complete(sc, 0xff);
1911 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1912 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1913
1914 if (tx->tx_dma != tx->tx_cpu) {
1915 if (sc->sc_wdogsoft) {
1916 /*
1917 * There is a race condition between the CPU and the DMA
1918 * engine: when the DMA engine reaches the end of the queue,
1919 * it clears the MVXPE_TQC_ENQ bit, so re-set it here.
1920 * XXX: how about enhanced mode?
1921 */
1922 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
1923 ifp->if_timer = 5;
1924 sc->sc_wdogsoft = 0;
1925 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_wdogsoft);
1926 } else {
1927 aprint_error_ifnet(ifp, "watchdog timeout\n");
1928 ifp->if_oerrors++;
1929 mvxpe_linkreset(sc);
1930 mvxpe_sc_unlock(sc);
1931
1932 /* trigger reinitialize sequence */
1933 mvxpe_stop(ifp, 1);
1934 mvxpe_init(ifp);
1935
1936 mvxpe_sc_lock(sc);
1937 }
1938 }
1939 }
1940 mvxpe_sc_unlock(sc);
1941 }
1942
1943 STATIC int
1944 mvxpe_ifflags_cb(struct ethercom *ec)
1945 {
1946 struct ifnet *ifp = &ec->ec_if;
1947 struct mvxpe_softc *sc = ifp->if_softc;
1948 int change = ifp->if_flags ^ sc->sc_if_flags;
1949
1950 mvxpe_sc_lock(sc);
1951
1952 if (change != 0)
1953 sc->sc_if_flags = ifp->if_flags;
1954
1955 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
1956 mvxpe_sc_unlock(sc);
1957 return ENETRESET;
1958 }
1959
1960 if ((change & IFF_PROMISC) != 0)
1961 mvxpe_filter_setup(sc);
1962
1963 if ((change & IFF_UP) != 0)
1964 mvxpe_linkreset(sc);
1965
1966 mvxpe_sc_unlock(sc);
1967 return 0;
1968 }
1969
1970 STATIC int
1971 mvxpe_mediachange(struct ifnet *ifp)
1972 {
1973 return ether_mediachange(ifp);
1974 }
1975
1976 STATIC void
1977 mvxpe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1978 {
1979 ether_mediastatus(ifp, ifmr);
1980 }
1981
1982 /*
1983 * Link State Notify
1984 */
1985 STATIC void
mvxpe_linkupdate(struct mvxpe_softc *sc)
1986 {
1987 int linkup; /* bool */
1988
1989 KASSERT_SC_MTX(sc);
1990
1991 /* tell miibus */
1992 mii_pollstat(&sc->sc_mii);
1993
1994 /* syslog */
1995 linkup = MVXPE_IS_LINKUP(sc);
1996 if (sc->sc_linkstate == linkup)
1997 return;
1998
1999 #ifdef DEBUG
2000 log(LOG_DEBUG,
2001 "%s: link %s\n", device_xname(sc->sc_dev), linkup ? "up" : "down");
2002 #endif
2003 if (linkup)
2004 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up);
2005 else
2006 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_down);
2007
2008 sc->sc_linkstate = linkup;
2009 }
2010
2011 STATIC void
2012 mvxpe_linkup(struct mvxpe_softc *sc)
2013 {
2014 uint32_t reg;
2015
2016 KASSERT_SC_MTX(sc);
2017
2018 /* set EEE parameters */
2019 reg = MVXPE_READ(sc, MVXPE_LPIC1);
2020 if (sc->sc_cf.cf_lpi)
2021 reg |= MVXPE_LPIC1_LPIRE;
2022 else
2023 reg &= ~MVXPE_LPIC1_LPIRE;
2024 MVXPE_WRITE(sc, MVXPE_LPIC1, reg);
2025
2026 /* set auto-negotiation parameters */
2027 reg = MVXPE_READ(sc, MVXPE_PANC);
2028 if (sc->sc_cf.cf_fc) {
2029 /* flow control negotiation */
2030 reg |= MVXPE_PANC_PAUSEADV;
2031 reg |= MVXPE_PANC_ANFCEN;
2032 }
2033 else {
2034 reg &= ~MVXPE_PANC_PAUSEADV;
2035 reg &= ~MVXPE_PANC_ANFCEN;
2036 }
2037 reg &= ~MVXPE_PANC_FORCELINKFAIL;
2038 reg &= ~MVXPE_PANC_FORCELINKPASS;
2039 MVXPE_WRITE(sc, MVXPE_PANC, reg);
2040
2041 mii_mediachg(&sc->sc_mii);
2042 }
2043
2044 STATIC void
2045 mvxpe_linkdown(struct mvxpe_softc *sc)
2046 {
2047 struct mii_softc *mii;
2048 uint32_t reg;
2049
2050 KASSERT_SC_MTX(sc);
2052
2053 reg = MVXPE_READ(sc, MVXPE_PANC);
2054 reg |= MVXPE_PANC_FORCELINKFAIL;
2055 reg &= ~MVXPE_PANC_FORCELINKPASS;
2056 MVXPE_WRITE(sc, MVXPE_PANC, reg);
2057
2058 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2059 if (mii)
2060 mii_phy_down(mii);
2061 }
2062
2063 STATIC void
2064 mvxpe_linkreset(struct mvxpe_softc *sc)
2065 {
2066 struct mii_softc *mii;
2067
2068 KASSERT_SC_MTX(sc);
2069
2070 /* force reset PHY first */
2071 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2072 if (mii)
2073 mii_phy_reset(mii);
2074
2075 /* reinit MAC and PHY */
2076 mvxpe_linkdown(sc);
2077 if ((sc->sc_if_flags & IFF_UP) != 0)
2078 mvxpe_linkup(sc);
2079 }
2080
2081 /*
2082 * Tx Subroutines
2083 */
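/*
 * mvxpe_tx_queue_select returns the Tx queue to use for the packet
 * and leaves that queue's mutex held; the caller unlocks it with
 * mvxpe_tx_unlockq(). Currently only queue 0 is used.
 */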
2084 STATIC int
2085 mvxpe_tx_queue_select(struct mvxpe_softc *sc, struct mbuf *m)
2086 {
2087 int q = 0;
2088
2089 /* XXX: get attribute from ALTQ framework? */
2090 mvxpe_tx_lockq(sc, q);
2091 return 0;
2092 }
2093
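/*
 * mvxpe_tx_queue loads the mbuf for DMA, writes one Tx descriptor
 * per DMA segment (first-descriptor flags and checksum offload on
 * the first, the LAST bit on the final one), then reports the new
 * descriptors to the hardware and kicks the queue.
 */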
2094 STATIC int
2095 mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q)
2096 {
2097 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2098 bus_dma_segment_t *txsegs;
2099 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2100 struct mvxpe_tx_desc *t = NULL;
2101 uint32_t ptxsu;
2102 int txnsegs;
2103 int start, used;
2104 int i;
2105
2106 KASSERT_TX_MTX(sc, q);
2107 KASSERT(tx->tx_used >= 0);
2108 KASSERT(tx->tx_used <= tx->tx_queue_len);
2109
2110 /* load mbuf using dmamap of 1st descriptor */
2111 if (bus_dmamap_load_mbuf(sc->sc_dmat,
2112 MVXPE_TX_MAP(sc, q, tx->tx_cpu), m, BUS_DMA_NOWAIT) != 0) {
2113 m_freem(m);
2114 return ENOBUFS;
2115 }
2116 txsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_segs;
2117 txnsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_nsegs;
2118 if (txnsegs <= 0 || (txnsegs + tx->tx_used) > tx->tx_queue_len) {
2119 /* not enough free descriptors, or the mbuf is broken */
2120 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, tx->tx_cpu));
2121 m_freem(m);
2122 return ENOBUFS;
2123 }
2124 DPRINTSC(sc, 2, "send packet %p descriptor %d\n", m, tx->tx_cpu);
2125 KASSERT(MVXPE_TX_MBUF(sc, q, tx->tx_cpu) == NULL);
2126
2127 /* remember mbuf using 1st descriptor */
2128 MVXPE_TX_MBUF(sc, q, tx->tx_cpu) = m;
2129 bus_dmamap_sync(sc->sc_dmat,
2130 MVXPE_TX_MAP(sc, q, tx->tx_cpu), 0, m->m_pkthdr.len,
2131 BUS_DMASYNC_PREWRITE);
2132
2133 /* load to tx descriptors */
2134 start = tx->tx_cpu;
2135 used = 0;
2136 for (i = 0; i < txnsegs; i++) {
2137 if (__predict_false(txsegs[i].ds_len == 0))
2138 continue;
2139 t = MVXPE_TX_DESC(sc, q, tx->tx_cpu);
2140 t->command = 0;
2141 t->l4ichk = 0;
2142 t->flags = 0;
2143 if (i == 0) {
2144 /* 1st descriptor */
2145 t->command |= MVXPE_TX_CMD_W_PACKET_OFFSET(0);
2146 t->command |= MVXPE_TX_CMD_PADDING;
2147 t->command |= MVXPE_TX_CMD_F;
2148 mvxpe_tx_set_csumflag(ifp, t, m);
2149 }
2150 t->bufptr = txsegs[i].ds_addr;
2151 t->bytecnt = txsegs[i].ds_len;
2152 tx->tx_cpu = tx_counter_adv(tx->tx_cpu, 1);
2153 tx->tx_used++;
2154 used++;
2155 }
2156 /* t is last descriptor here */
2157 KASSERT(t != NULL);
2158 t->command |= MVXPE_TX_CMD_L;
2159
2160 DPRINTSC(sc, 2, "queue %d, %d descriptors used\n", q, used);
2161 #ifdef MVXPE_DEBUG
2162 if (mvxpe_debug > 2)
2163 for (i = start; i <= tx->tx_cpu; i++) {
2164 t = MVXPE_TX_DESC(sc, q, i);
2165 mvxpe_dump_txdesc(t, i);
2166 }
2167 #endif
2168 mvxpe_ring_sync_tx(sc, q, start, used,
2169 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2170
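/*
 * The PTXSU written-descriptor count is limited to 255 per write,
 * so larger updates are issued in chunks.
 */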
2171 while (used > 255) {
2172 ptxsu = MVXPE_PTXSU_NOWD(255);
2173 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2174 used -= 255;
2175 }
2176 if (used > 0) {
2177 ptxsu = MVXPE_PTXSU_NOWD(used);
2178 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2179 }
2180 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
2181
2182 DPRINTSC(sc, 2,
2183 "PTXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQA(q)));
2184 DPRINTSC(sc, 2,
2185 "PTXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));
2186 DPRINTSC(sc, 2,
2187 "PTXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXS(q)));
2188 DPRINTSC(sc, 2,
2189 "PTXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PTXDI(q)));
2190 DPRINTSC(sc, 2, "TQC: %#x\n", MVXPE_READ(sc, MVXPE_TQC));
2191 DPRINTIFNET(ifp, 2,
2192 "Tx: tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
2193 tx->tx_cpu, tx->tx_dma, tx->tx_used);
2194 return 0;
2195 }
2196
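/*
 * mvxpe_tx_set_csumflag translates the mbuf's checksum-offload flags
 * into Tx command bits: L3 protocol and offsets, L4 protocol, and
 * which checksums the hardware should generate.
 */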
2197 STATIC void
2198 mvxpe_tx_set_csumflag(struct ifnet *ifp,
2199 struct mvxpe_tx_desc *t, struct mbuf *m)
2200 {
2201 struct ether_header *eh;
2202 int csum_flags;
2203 uint32_t iphl = 0, ipoff = 0;
2204
2205
2206 csum_flags = ifp->if_csum_flags_tx & m->m_pkthdr.csum_flags;
2207
2208 eh = mtod(m, struct ether_header *);
2209 switch (htons(eh->ether_type)) {
2210 case ETHERTYPE_IP:
2211 case ETHERTYPE_IPV6:
2212 ipoff = ETHER_HDR_LEN;
2213 break;
2214 case ETHERTYPE_VLAN:
2215 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2216 break;
2217 }
2218
2219 if (csum_flags & (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2220 iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2221 t->command |= MVXPE_TX_CMD_L3_IP4;
2222 }
2223 else if (csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2224 iphl = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data);
2225 t->command |= MVXPE_TX_CMD_L3_IP6;
2226 }
2227 else {
2228 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
2229 return;
2230 }
2231
2232
2233 /* L3 */
2234 if (csum_flags & M_CSUM_IPv4) {
2235 t->command |= MVXPE_TX_CMD_IP4_CHECKSUM;
2236 }
2237
2238 /* L4 */
2239 if ((csum_flags &
2240 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)) == 0) {
2241 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
2242 }
2243 else if (csum_flags & M_CSUM_TCPv4) {
2244 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2245 t->command |= MVXPE_TX_CMD_L4_TCP;
2246 }
2247 else if (csum_flags & M_CSUM_UDPv4) {
2248 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2249 t->command |= MVXPE_TX_CMD_L4_UDP;
2250 }
2251 else if (csum_flags & M_CSUM_TCPv6) {
2252 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2253 t->command |= MVXPE_TX_CMD_L4_TCP;
2254 }
2255 else if (csum_flags & M_CSUM_UDPv6) {
2256 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2257 t->command |= MVXPE_TX_CMD_L4_UDP;
2258 }
2259
2260 t->l4ichk = 0;
2261 t->command |= MVXPE_TX_CMD_IP_HEADER_LEN(iphl >> 2);
2262 t->command |= MVXPE_TX_CMD_L3_OFFSET(ipoff);
2263 }
2264
2265 STATIC void
2266 mvxpe_tx_complete(struct mvxpe_softc *sc, uint32_t queues)
2267 {
2268 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2269 int q;
2270
2271 DPRINTSC(sc, 2, "tx completed.\n");
2272
2273 KASSERT_SC_MTX(sc);
2274
2275 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2276 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2277 continue;
2278 mvxpe_tx_lockq(sc, q);
2279 mvxpe_tx_queue_complete(sc, q);
2280 mvxpe_tx_unlockq(sc, q);
2281 }
2282 KASSERT(sc->sc_tx_pending >= 0);
2283 if (sc->sc_tx_pending == 0)
2284 ifp->if_timer = 0;
2285 }
2286
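/*
 * mvxpe_tx_queue_complete reclaims the descriptors the hardware has
 * transmitted: it records Tx errors, unloads and frees the mbuf held
 * by each first descriptor, and acknowledges the reclaimed
 * descriptors via PTXSU.
 */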
2287 STATIC void
2288 mvxpe_tx_queue_complete(struct mvxpe_softc *sc, int q)
2289 {
2290 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2291 struct mvxpe_tx_desc *t;
2292 uint32_t ptxs, ptxsu, ndesc;
2293 int i;
2294
2295 KASSERT_TX_MTX(sc, q);
2296
2297 ptxs = MVXPE_READ(sc, MVXPE_PTXS(q));
2298 ndesc = MVXPE_PTXS_GET_TBC(ptxs);
2299 if (ndesc == 0)
2300 return;
2301
2302 DPRINTSC(sc, 2,
2303 "tx complete queue %d, %d descriptors.\n", q, ndesc);
2304
2305 mvxpe_ring_sync_tx(sc, q, tx->tx_dma, ndesc,
2306 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2307
2308 for (i = 0; i < ndesc; i++) {
2309 int error = 0;
2310
2311 t = MVXPE_TX_DESC(sc, q, tx->tx_dma);
2312 if (t->flags & MVXPE_TX_F_ES) {
2313 DPRINTSC(sc, 1,
2314 "tx error queue %d desc %d\n",
2315 q, tx->tx_dma);
2316 switch (t->flags & MVXPE_TX_F_EC_MASK) {
2317 case MVXPE_TX_F_EC_LC:
2318 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_lc);
2319 case MVXPE_TX_F_EC_UR:
2320 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_ur);
2321 case MVXPE_TX_F_EC_RL:
2322 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_rl);
2323 default:
2324 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_oth);
2325 }
2326 error = 1;
2327 }
2328 if (MVXPE_TX_MBUF(sc, q, tx->tx_dma) != NULL) {
2329 KASSERT((t->command & MVXPE_TX_CMD_F) != 0);
2330 bus_dmamap_unload(sc->sc_dmat,
2331 MVXPE_TX_MAP(sc, q, tx->tx_dma));
2332 m_freem(MVXPE_TX_MBUF(sc, q, tx->tx_dma));
2333 MVXPE_TX_MBUF(sc, q, tx->tx_dma) = NULL;
2334 sc->sc_tx_pending--;
2335 }
2336 else
2337 KASSERT((t->flags & MVXPE_TX_CMD_F) == 0);
2338 tx->tx_dma = tx_counter_adv(tx->tx_dma, 1);
2339 tx->tx_used--;
2340 if (error)
2341 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txqe[q]);
2342 else
2343 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txq[q]);
2344 }
2345 KASSERT(tx->tx_used >= 0);
2346 KASSERT(tx->tx_used <= tx->tx_queue_len);
2347 while (ndesc > 255) {
2348 ptxsu = MVXPE_PTXSU_NORB(255);
2349 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2350 ndesc -= 255;
2351 }
2352 if (ndesc > 0) {
2353 ptxsu = MVXPE_PTXSU_NORB(ndesc);
2354 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2355 }
2356 DPRINTSC(sc, 2,
2357 "Tx complete q %d, tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
2358 q, tx->tx_cpu, tx->tx_dma, tx->tx_used);
2359 }
2360
2361 /*
2362 * Rx Subroutines
2363 */
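/*
 * mvxpe_rx services every Rx queue flagged in `queues', one queue at
 * a time, via mvxpe_rx_queue_select()/mvxpe_rx_queue().
 */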
2364 STATIC void
2365 mvxpe_rx(struct mvxpe_softc *sc, uint32_t queues)
2366 {
2367 int q, npkt;
2368
2369 KASSERT_SC_MTX(sc);
2370
2371 while ((npkt = mvxpe_rx_queue_select(sc, queues, &q)) != 0) {
2372 /* mutex is held by rx_queue_select */
2373 mvxpe_rx_queue(sc, q, npkt);
2374 mvxpe_rx_unlockq(sc, q);
2375 }
2376 }
2377
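/*
 * mvxpe_rx_queue processes `npkt' received frames on queue `q': it
 * checks the descriptor error bits, turns each buffer-manager chunk
 * into an mbuf, hands it to the stack, and acknowledges the
 * processed descriptors via PRXSU.
 */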
2378 STATIC void
2379 mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt)
2380 {
2381 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2382 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2383 struct mvxpe_rx_desc *r;
2384 struct mvxpbm_chunk *chunk;
2385 struct mbuf *m;
2386 uint32_t prxsu;
2387 int error = 0;
2388 int i;
2389
2390 KASSERT_RX_MTX(sc, q);
2391
2392 mvxpe_ring_sync_rx(sc, q, rx->rx_dma, npkt,
2393 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2394
2395 for (i = 0; i < npkt; i++) {
2396 /* get descriptor and packet */
2397 chunk = MVXPE_RX_PKTBUF(sc, q, rx->rx_dma);
2398 MVXPE_RX_PKTBUF(sc, q, rx->rx_dma) = NULL;
2399 r = MVXPE_RX_DESC(sc, q, rx->rx_dma);
2400 mvxpbm_dmamap_sync(chunk, r->bytecnt, BUS_DMASYNC_POSTREAD);
2401
2402 /* check errors */
2403 if (r->status & MVXPE_RX_ES) {
2404 switch (r->status & MVXPE_RX_EC_MASK) {
2405 case MVXPE_RX_EC_CE:
2406 DPRINTIFNET(ifp, 1, "CRC error\n");
2407 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_ce);
2408 break;
2409 case MVXPE_RX_EC_OR:
2410 DPRINTIFNET(ifp, 1, "Rx FIFO overrun\n");
2411 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_or);
2412 break;
2413 case MVXPE_RX_EC_MF:
2414 DPRINTIFNET(ifp, 1, "Rx too large frame\n");
2415 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_mf);
2416 break;
2417 case MVXPE_RX_EC_RE:
2418 DPRINTIFNET(ifp, 1, "Rx resource error\n");
2419 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_re);
2420 break;
2421 }
2422 error = 1;
2423 goto rx_done;
2424 }
2425 if (!(r->status & MVXPE_RX_F) || !(r->status & MVXPE_RX_L)) {
2426 DPRINTIFNET(ifp, 1, "not support scatter buf\n");
2427 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_scat);
2428 error = 1;
2429 goto rx_done;
2430 }
2431
2432 if (chunk == NULL) {
2433 device_printf(sc->sc_dev,
2434 "got rx interrupt, but no chunk\n");
2435 error = 1;
2436 goto rx_done;
2437 }
2438
2439 /* extract packet buffer */
2440 if (mvxpbm_init_mbuf_hdr(chunk) != 0) {
2441 error = 1;
2442 goto rx_done;
2443 }
2444 m = chunk->m;
2445 m->m_pkthdr.rcvif = ifp;
2446 m->m_pkthdr.len = m->m_len = r->bytecnt - ETHER_CRC_LEN;
2447 m_adj(m, MVXPE_HWHEADER_SIZE); /* strip the hardware (Marvell) header */
2448 mvxpe_rx_set_csumflag(ifp, r, m);
2449 ifp->if_ipackets++;
2450 bpf_mtap(ifp, m);
2451 if_percpuq_enqueue(ifp->if_percpuq, m);
2452 chunk = NULL; /* the BM chunk now belongs to the networking stack */
2453 rx_done:
2454 if (chunk) {
2455 /* rx error. just return the chunk to BM. */
2456 mvxpbm_free_chunk(chunk);
2457 }
2458 if (error)
2459 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxqe[q]);
2460 else
2461 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxq[q]);
2462 rx->rx_dma = rx_counter_adv(rx->rx_dma, 1);
2463 }
2464 /* DMA status update */
2465 DPRINTSC(sc, 2, "%d packets received from queue %d\n", npkt, q);
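/*
 * As with PTXSU above, the processed-descriptor count written to
 * PRXSU is limited to 255 per register write.
 */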
2466 while (npkt > 255) {
2467 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2468 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2469 npkt -= 255;
2470 }
2471 if (npkt > 0) {
2472 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(npkt);
2473 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2474 }
2475
2476 DPRINTSC(sc, 2,
2477 "PRXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQA(q)));
2478 DPRINTSC(sc, 2,
2479 "PRXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
2480 DPRINTSC(sc, 2,
2481 "PRXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXS(q)));
2482 DPRINTSC(sc, 2,
2483 "PRXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PRXDI(q)));
2484 DPRINTSC(sc, 2, "RQC: %#x\n", MVXPE_READ(sc, MVXPE_RQC));
2485 DPRINTIFNET(ifp, 2, "Rx: rx_cpu = %d, rx_dma = %d\n",
2486 rx->rx_cpu, rx->rx_dma);
2487 }
2488
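/*
 * mvxpe_rx_queue_select scans the flagged queues from the highest
 * index down and returns the number of packets waiting on the first
 * non-empty one; that queue's mutex is left held for the caller.
 */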
2489 STATIC int
2490 mvxpe_rx_queue_select(struct mvxpe_softc *sc, uint32_t queues, int *queue)
2491 {
2492 uint32_t prxs, npkt;
2493 int q;
2494
2495 KASSERT_SC_MTX(sc);
2496 KASSERT(queue != NULL);
2497 DPRINTSC(sc, 2, "selecting rx queue\n");
2498
2499 for (q = MVXPE_QUEUE_SIZE - 1; q >= 0; q--) {
2500 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2501 continue;
2502
2503 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2504 npkt = MVXPE_PRXS_GET_ODC(prxs);
2505 if (npkt == 0)
2506 continue;
2507
2508 DPRINTSC(sc, 2,
2509 "queue %d selected: prxs=%#x, %u pakcet received.\n",
2510 q, prxs, npkt);
2511 *queue = q;
2512 mvxpe_rx_lockq(sc, q);
2513 return npkt;
2514 }
2515
2516 return 0;
2517 }
2518
2519 STATIC void
2520 mvxpe_rx_refill(struct mvxpe_softc *sc, uint32_t queues)
2521 {
2522 int q;
2523
2524 KASSERT_SC_MTX(sc);
2525
2526 /* XXX: check rx bit array */
2527 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2528 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2529 continue;
2530
2531 mvxpe_rx_lockq(sc, q);
2532 mvxpe_rx_queue_refill(sc, q);
2533 mvxpe_rx_unlockq(sc, q);
2534 }
2535 }
2536
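/*
 * mvxpe_rx_queue_refill tops the Rx ring up to its configured length
 * with fresh buffer-manager chunks and reports the number of new
 * descriptors to the hardware.
 */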
2537 STATIC void
2538 mvxpe_rx_queue_refill(struct mvxpe_softc *sc, int q)
2539 {
2540 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2541 uint32_t prxs, prxsu, ndesc;
2542 int idx, refill = 0;
2543 int npkt;
2544
2545 KASSERT_RX_MTX(sc, q);
2546
2547 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2548 ndesc = MVXPE_PRXS_GET_NODC(prxs) + MVXPE_PRXS_GET_ODC(prxs);
2549 refill = rx->rx_queue_len - ndesc;
2550 if (refill <= 0)
2551 return;
2552 DPRINTPRXS(2, q);
2553 DPRINTSC(sc, 2, "%d buffers to refill.\n", refill);
2554
2555 idx = rx->rx_cpu;
2556 for (npkt = 0; npkt < refill; npkt++)
2557 if (mvxpe_rx_queue_add(sc, q) != 0)
2558 break;
2559 DPRINTSC(sc, 2, "queue %d, %d buffer refilled.\n", q, npkt);
2560 if (npkt == 0)
2561 return;
2562
2563 mvxpe_ring_sync_rx(sc, q, idx, npkt,
2564 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2565
2566 while (npkt > 255) {
2567 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(255);
2568 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2569 npkt -= 255;
2570 }
2571 if (npkt > 0) {
2572 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(npkt);
2573 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2574 }
2575 DPRINTPRXS(2, q);
2576 return;
2577 }
2578
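/*
 * mvxpe_rx_queue_add attaches one newly allocated buffer-manager
 * chunk to the next free Rx descriptor and advances the CPU index.
 */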
2579 STATIC int
2580 mvxpe_rx_queue_add(struct mvxpe_softc *sc, int q)
2581 {
2582 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2583 struct mvxpe_rx_desc *r;
2584 struct mvxpbm_chunk *chunk = NULL;
2585
2586 KASSERT_RX_MTX(sc, q);
2587
2588 /* Allocate the packet buffer */
2589 chunk = mvxpbm_alloc(sc->sc_bm);
2590 if (chunk == NULL) {
2591 DPRINTSC(sc, 1, "BM chunk allocation failed.\n");
2592 return ENOBUFS;
2593 }
2594
2595 /* Add the packet buffer to the descriptor */
2596 KASSERT(MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) == NULL);
2597 MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) = chunk;
2598 mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
2599
2600 r = MVXPE_RX_DESC(sc, q, rx->rx_cpu);
2601 r->bufptr = chunk->buf_pa;
2602 DPRINTSC(sc, 9, "chunk added to index %d\n", rx->rx_cpu);
2603 rx->rx_cpu = rx_counter_adv(rx->rx_cpu, 1);
2604 return 0;
2605 }
2606
2607 STATIC void
2608 mvxpe_rx_set_csumflag(struct ifnet *ifp,
2609 struct mvxpe_rx_desc *r, struct mbuf *m0)
2610 {
2611 uint32_t csum_flags = 0;
2612
2613 if ((r->status & (MVXPE_RX_IP_HEADER_OK|MVXPE_RX_L3_IP)) == 0)
2614 return; /* not an IP packet */
2615
2616 /* L3 */
2617 if (r->status & MVXPE_RX_L3_IP) {
2618 csum_flags |= M_CSUM_IPv4;
2619 if ((r->status & MVXPE_RX_IP_HEADER_OK) == 0) {
2620 csum_flags |= M_CSUM_IPv4_BAD;
2621 goto finish;
2622 }
2623 else if (r->status & MVXPE_RX_IPV4_FRAGMENT) {
2624 /*
2625 * r->l4chk has the partial checksum of each fragment,
2626 * but there is no way to use it in NetBSD.
2627 */
2628 return;
2629 }
2630 }
2631
2632 /* L4 */
2633 switch (r->status & MVXPE_RX_L4_MASK) {
2634 case MVXPE_RX_L4_TCP:
2635 if (r->status & MVXPE_RX_L3_IP)
2636 csum_flags |= M_CSUM_TCPv4;
2637 else
2638 csum_flags |= M_CSUM_TCPv6;
2639 if ((r->status & MVXPE_RX_L4_CHECKSUM_OK) == 0)
2640 csum_flags |= M_CSUM_TCP_UDP_BAD;
2641 break;
2642 case MVXPE_RX_L4_UDP:
2643 if (r->status & MVXPE_RX_L3_IP)
2644 csum_flags |= M_CSUM_UDPv4;
2645 else
2646 csum_flags |= M_CSUM_UDPv6;
2647 if ((r->status & MVXPE_RX_L4_CHECKSUM_OK) == 0)
2648 csum_flags |= M_CSUM_TCP_UDP_BAD;
2649 break;
2650 case MVXPE_RX_L4_OTH:
2651 default:
2652 break;
2653 }
2654 finish:
2655 m0->m_pkthdr.csum_flags |= (csum_flags & ifp->if_csum_flags_rx);
2656 }
2657
2658 /*
2659 * MAC address filter
2660 */
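/*
 * mvxpe_crc8 computes a CRC-8 (polynomial 0x07, i.e. x^8+x^2+x+1)
 * over a MAC address; the result indexes the chip's "other"
 * multicast filter table.
 */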
2661 STATIC uint8_t
2662 mvxpe_crc8(const uint8_t *data, size_t size)
2663 {
2664 int bit;
2665 uint8_t byte;
2666 uint8_t crc = 0;
2667 const uint8_t poly = 0x07;
2668
2669 while (size--)
2670 for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
2671 crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
2672
2673 return crc;
2674 }
2675
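/*
 * mvxpe_filter_setup() fills both multicast tables in a single loop
 * on the allmulti path, so they must have the same size.
 */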
2676 CTASSERT(MVXPE_NDFSMT == MVXPE_NDFOMT);
2677
2678 STATIC void
2679 mvxpe_filter_setup(struct mvxpe_softc *sc)
2680 {
2681 struct ethercom *ec = &sc->sc_ethercom;
2682 struct ifnet *ifp= &sc->sc_ethercom.ec_if;
2683 struct ether_multi *enm;
2684 struct ether_multistep step;
2685 uint32_t dfut[MVXPE_NDFUT], dfsmt[MVXPE_NDFSMT], dfomt[MVXPE_NDFOMT];
2686 uint32_t pxc;
2687 int i;
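/*
 * 01:00:5e:00:00:xx covers the IPv4 groups 224.0.0.0/24; the chip's
 * special multicast table matches these on the last octet.
 */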
2688 const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};
2689
2690 KASSERT_SC_MTX(sc);
2691
2692 memset(dfut, 0, sizeof(dfut));
2693 memset(dfsmt, 0, sizeof(dfsmt));
2694 memset(dfomt, 0, sizeof(dfomt));
2695
2696 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
2697 goto allmulti;
2698 }
2699
2700 ETHER_FIRST_MULTI(step, ec, enm);
2701 while (enm != NULL) {
2702 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2703 /* ranges are complex and somewhat rare */
2704 goto allmulti;
2705 }
2706 /* chip handles some IPv4 multicast specially */
2707 if (memcmp(enm->enm_addrlo, special, 5) == 0) {
2708 i = enm->enm_addrlo[5];
2709 dfsmt[i>>2] |=
2710 MVXPE_DF(i&3, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS);
2711 } else {
2712 i = mvxpe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
2713 dfomt[i>>2] |=
2714 MVXPE_DF(i&3, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS);
2715 }
2716
2717 ETHER_NEXT_MULTI(step, enm);
2718 }
2719 goto set;
2720
2721 allmulti:
2722 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
2723 for (i = 0; i < MVXPE_NDFSMT; i++) {
2724 dfsmt[i] = dfomt[i] =
2725 MVXPE_DF(0, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS) |
2726 MVXPE_DF(1, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS) |
2727 MVXPE_DF(2, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS) |
2728 MVXPE_DF(3, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS);
2729 }
2730 }
2731
2732 set:
2733 pxc = MVXPE_READ(sc, MVXPE_PXC);
2734 pxc &= ~MVXPE_PXC_UPM;
2735 pxc |= MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP;
2736 if (ifp->if_flags & IFF_BROADCAST) {
2737 pxc &= ~(MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP);
2738 }
2739 if (ifp->if_flags & IFF_PROMISC) {
2740 pxc |= MVXPE_PXC_UPM;
2741 }
2742 MVXPE_WRITE(sc, MVXPE_PXC, pxc);
2743
2744 /* Set Destination Address Filter Unicast Table */
2745 i = sc->sc_enaddr[5] & 0xf; /* last nibble */
2746 dfut[i>>2] = MVXPE_DF(i&3, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS);
2747 MVXPE_WRITE_REGION(sc, MVXPE_DFUT(0), dfut, MVXPE_NDFUT);
2748
2749 /* Set Destination Address Filter Multicast Tables */
2750 MVXPE_WRITE_REGION(sc, MVXPE_DFSMT(0), dfsmt, MVXPE_NDFSMT);
2751 MVXPE_WRITE_REGION(sc, MVXPE_DFOMT(0), dfomt, MVXPE_NDFOMT);
2752 }
2753
2754 /*
2755 * sysctl(9)
2756 */
2757 SYSCTL_SETUP(sysctl_mvxpe, "sysctl mvxpe subtree setup")
2758 {
2759 int rc;
2760 const struct sysctlnode *node;
2761
2762 if ((rc = sysctl_createv(clog, 0, NULL, &node,
2763 0, CTLTYPE_NODE, "mvxpe",
2764 SYSCTL_DESCR("mvxpe interface controls"),
2765 NULL, 0, NULL, 0,
2766 CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
2767 goto err;
2768 }
2769
2770 mvxpe_root_num = node->sysctl_num;
2771 return;
2772
2773 err:
2774 aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc);
2775 }
2776
2777 STATIC int
2778 sysctl_read_mib(SYSCTLFN_ARGS)
2779 {
2780 struct mvxpe_sysctl_mib *arg;
2781 struct mvxpe_softc *sc;
2782 struct sysctlnode node;
2783 uint64_t val;
2784 int err;
2785
2786 node = *rnode;
2787 arg = (struct mvxpe_sysctl_mib *)rnode->sysctl_data;
2788 if (arg == NULL)
2789 return EINVAL;
2790
2791 sc = arg->sc;
2792 if (sc == NULL)
2793 return EINVAL;
2794 if (arg->index < 0 || arg->index > __arraycount(mvxpe_mib_list))
2795 return EINVAL;
2796
2797 mvxpe_sc_lock(sc);
2798 val = arg->counter;
2799 mvxpe_sc_unlock(sc);
2800
2801 node.sysctl_data = &val;
2802 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2803 if (err)
2804 return err;
2805 if (newp)
2806 return EINVAL;
2807
2808 return 0;
2809 }
2810
2811
2812 STATIC int
2813 sysctl_clear_mib(SYSCTLFN_ARGS)
2814 {
2815 struct mvxpe_softc *sc;
2816 struct sysctlnode node;
2817 int val;
2818 int err;
2819
2820 node = *rnode;
2821 sc = (struct mvxpe_softc *)rnode->sysctl_data;
2822 if (sc == NULL)
2823 return EINVAL;
2824
2825 val = 0;
2826 node.sysctl_data = &val;
2827 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2828 if (err || newp == NULL)
2829 return err;
2830 if (val < 0 || val > 1)
2831 return EINVAL;
2832 if (val == 1) {
2833 mvxpe_sc_lock(sc);
2834 mvxpe_clear_mib(sc);
2835 mvxpe_sc_unlock(sc);
2836 }
2837
2838 return 0;
2839 }
2840
2841 STATIC int
2842 sysctl_set_queue_length(SYSCTLFN_ARGS)
2843 {
2844 struct mvxpe_sysctl_queue *arg;
2845 struct mvxpe_rx_ring *rx = NULL;
2846 struct mvxpe_tx_ring *tx = NULL;
2847 struct mvxpe_softc *sc;
2848 struct sysctlnode node;
2849 uint32_t reg;
2850 int val;
2851 int err;
2852
2853 node = *rnode;
2854
2855 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
2856 if (arg == NULL)
2857 return EINVAL;
2858 if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT)
2859 return EINVAL;
2860 if (arg->rxtx != MVXPE_SYSCTL_RX && arg->rxtx != MVXPE_SYSCTL_TX)
2861 return EINVAL;
2862
2863 sc = arg->sc;
2864 if (sc == NULL)
2865 return EINVAL;
2866
2867 /* read queue length */
2868 mvxpe_sc_lock(sc);
2869 switch (arg->rxtx) {
2870 case MVXPE_SYSCTL_RX:
2871 mvxpe_rx_lockq(sc, arg->queue);
2872 rx = MVXPE_RX_RING(sc, arg->queue);
2873 val = rx->rx_queue_len;
2874 mvxpe_rx_unlockq(sc, arg->queue);
2875 break;
2876 case MVXPE_SYSCTL_TX:
2877 mvxpe_tx_lockq(sc, arg->queue);
2878 tx = MVXPE_TX_RING(sc, arg->queue);
2879 val = tx->tx_queue_len;
2880 mvxpe_tx_unlockq(sc, arg->queue);
2881 break;
2882 }
2883
2884 node.sysctl_data = &val;
2885 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2886 if (err || newp == NULL) {
2887 mvxpe_sc_unlock(sc);
2888 return err;
2889 }
2890
2891 /* update queue length */
2892 if (val < 8 || val > MVXPE_RX_RING_CNT) {
2893 mvxpe_sc_unlock(sc);
2894 return EINVAL;
2895 }
2896 switch (arg->rxtx) {
2897 case MVXPE_SYSCTL_RX:
2898 mvxpe_rx_lockq(sc, arg->queue);
2899 rx->rx_queue_len = val;
2900 rx->rx_queue_th_received =
2901 rx->rx_queue_len / MVXPE_RXTH_RATIO;
2902 rx->rx_queue_th_free =
2903 rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
2904
2905 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
2906 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
2907 MVXPE_WRITE(sc, MVXPE_PRXDQTH(arg->queue), reg);
2908
2909 mvxpe_rx_unlockq(sc, arg->queue);
2910 break;
2911 case MVXPE_SYSCTL_TX:
2912 mvxpe_tx_lockq(sc, arg->queue);
2913 tx->tx_queue_len = val;
2914 tx->tx_queue_th_free =
2915 tx->tx_queue_len / MVXPE_TXTH_RATIO;
2916
2917 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
2918 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
2919 MVXPE_WRITE(sc, MVXPE_PTXDQS(arg->queue), reg);
2920
2921 mvxpe_tx_unlockq(sc, arg->queue);
2922 break;
2923 }
2924 mvxpe_sc_unlock(sc);
2925
2926 return 0;
2927 }
2928
2929 STATIC int
2930 sysctl_set_queue_rxthtime(SYSCTLFN_ARGS)
2931 {
2932 struct mvxpe_sysctl_queue *arg;
2933 struct mvxpe_rx_ring *rx = NULL;
2934 struct mvxpe_softc *sc;
2935 struct sysctlnode node;
2936 extern uint32_t mvTclk;
2937 uint32_t reg, time_mvtclk;
2938 int time_us;
2939 int err;
2940
2941 node = *rnode;
2942
2943 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
2944 if (arg == NULL)
2945 return EINVAL;
2946 if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT)
2947 return EINVAL;
2948 if (arg->rxtx != MVXPE_SYSCTL_RX)
2949 return EINVAL;
2950
2951 sc = arg->sc;
2952 if (sc == NULL)
2953 return EINVAL;
2954
2955 /* read the current interrupt-coalescing threshold timer */
2956 mvxpe_sc_lock(sc);
2957 mvxpe_rx_lockq(sc, arg->queue);
2958 rx = MVXPE_RX_RING(sc, arg->queue);
2959 time_mvtclk = rx->rx_queue_th_time;
2960 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvTclk;
2961 node.sysctl_data = &time_us;
2962 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n",
2963 arg->queue, MVXPE_READ(sc, MVXPE_PRXITTH(arg->queue)));
2964 err = sysctl_lookup(SYSCTLFN_CALL(&node));
2965 if (err || newp == NULL) {
2966 mvxpe_rx_unlockq(sc, arg->queue);
2967 mvxpe_sc_unlock(sc);
2968 return err;
2969 }
2970
2971 /* update the threshold timer (valid range: 0 to 1 second) */
2972 if (time_us < 0 || time_us > (1000 * 1000)) {
2973 mvxpe_rx_unlockq(sc, arg->queue);
2974 mvxpe_sc_unlock(sc);
2975 return EINVAL;
2976 }
2977 time_mvtclk =
2978 (uint64_t)mvTclk * (uint64_t)time_us / (1000ULL * 1000ULL);
2979 rx->rx_queue_th_time = time_mvtclk;
2980 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
2981 MVXPE_WRITE(sc, MVXPE_PRXITTH(arg->queue), reg);
2982 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n", arg->queue, reg);
2983 mvxpe_rx_unlockq(sc, arg->queue);
2984 mvxpe_sc_unlock(sc);
2985
2986 return 0;
2987 }
2988
2989
2990 STATIC void
2991 sysctl_mvxpe_init(struct mvxpe_softc *sc)
2992 {
2993 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2994 const struct sysctlnode *node;
2995 int mvxpe_nodenum;
2996 int mvxpe_mibnum;
2997 int mvxpe_rxqueuenum;
2998 int mvxpe_txqueuenum;
2999 int q, i;
3000
3001 /* hw.mvxpe.mvxpe[unit] */
3002 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3003 0, CTLTYPE_NODE, ifp->if_xname,
3004 SYSCTL_DESCR("mvxpe per-controller controls"),
3005 NULL, 0, NULL, 0,
3006 CTL_HW, mvxpe_root_num, CTL_CREATE,
3007 CTL_EOL) != 0) {
3008 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3009 return;
3010 }
3011 mvxpe_nodenum = node->sysctl_num;
3012
3013 /* hw.mvxpe.mvxpe[unit].mib */
3014 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3015 0, CTLTYPE_NODE, "mib",
3016 SYSCTL_DESCR("mvxpe per-controller MIB counters"),
3017 NULL, 0, NULL, 0,
3018 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3019 CTL_EOL) != 0) {
3020 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3021 return;
3022 }
3023 mvxpe_mibnum = node->sysctl_num;
3024
3025 /* hw.mvxpe.mvxpe[unit].rx */
3026 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3027 0, CTLTYPE_NODE, "rx",
3028 SYSCTL_DESCR("Rx Queues"),
3029 NULL, 0, NULL, 0,
3030 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3031 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3032 return;
3033 }
3034 mvxpe_rxqueuenum = node->sysctl_num;
3035
3036 /* hw.mvxpe.mvxpe[unit].tx */
3037 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3038 0, CTLTYPE_NODE, "tx",
3039 SYSCTL_DESCR("Tx Queues"),
3040 NULL, 0, NULL, 0,
3041 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3042 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3043 return;
3044 }
3045 mvxpe_txqueuenum = node->sysctl_num;
3046
3047 #ifdef MVXPE_DEBUG
3048 /* hw.mvxpe.debug */
3049 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3050 CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
3051 SYSCTL_DESCR("mvgbe device driver debug control"),
3052 NULL, 0, &mvxpe_debug, 0,
3053 CTL_HW, mvxpe_root_num, CTL_CREATE, CTL_EOL) != 0) {
3054 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3055 return;
3056 }
3057 #endif
3058 /*
3059 * MIB access
3060 */
3061 /* hw.mvxpe.mvxpe[unit].mib.<mibs> */
3062 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3063 const char *name = mvxpe_mib_list[i].sysctl_name;
3064 const char *desc = mvxpe_mib_list[i].desc;
3065 struct mvxpe_sysctl_mib *mib_arg = &sc->sc_sysctl_mib[i];
3066
3067 mib_arg->sc = sc;
3068 mib_arg->index = i;
3069 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3070 CTLFLAG_READONLY, CTLTYPE_QUAD, name, desc,
3071 sysctl_read_mib, 0, (void *)mib_arg, 0,
3072 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_mibnum,
3073 CTL_CREATE, CTL_EOL) != 0) {
3074 aprint_normal_dev(sc->sc_dev,
3075 "couldn't create sysctl node\n");
3076 break;
3077 }
3078 }
3079
3080 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
3081 struct mvxpe_sysctl_queue *rxarg = &sc->sc_sysctl_rx_queue[q];
3082 struct mvxpe_sysctl_queue *txarg = &sc->sc_sysctl_tx_queue[q];
3083 #define MVXPE_SYSCTL_NAME(num) "queue" # num
3084 static const char *sysctl_queue_names[] = {
3085 MVXPE_SYSCTL_NAME(0), MVXPE_SYSCTL_NAME(1),
3086 MVXPE_SYSCTL_NAME(2), MVXPE_SYSCTL_NAME(3),
3087 MVXPE_SYSCTL_NAME(4), MVXPE_SYSCTL_NAME(5),
3088 MVXPE_SYSCTL_NAME(6), MVXPE_SYSCTL_NAME(7),
3089 };
3090 #undef MVXPE_SYSCTL_NAME
3091 #ifdef SYSCTL_INCLUDE_DESCR
3092 #define MVXPE_SYSCTL_DESCR(num) "configuration parameters for queue " # num
3093 static const char *sysctl_queue_descrs[] = {
3094 MVXPE_SYSCTL_DESCR(0), MVXPE_SYSCTL_DESCR(1),
3095 MVXPE_SYSCTL_DESCR(2), MVXPE_SYSCTL_DESCR(3),
3096 MVXPE_SYSCTL_DESCR(4), MVXPE_SYSCTL_DESCR(5),
3097 MVXPE_SYSCTL_DESCR(6), MVXPE_SYSCTL_DESCR(7),
3098 };
3099 #undef MVXPE_SYSCTL_DESCR
3100 #endif /* SYSCTL_INCLUDE_DESCR */
3101 int mvxpe_curnum;
3102
3103 rxarg->sc = txarg->sc = sc;
3104 rxarg->queue = txarg->queue = q;
3105 rxarg->rxtx = MVXPE_SYSCTL_RX;
3106 txarg->rxtx = MVXPE_SYSCTL_TX;
3107
3108 /* hw.mvxpe.mvxpe[unit].rx.[queue] */
3109 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3110 0, CTLTYPE_NODE,
3111 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3112 NULL, 0, NULL, 0,
3113 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3114 CTL_CREATE, CTL_EOL) != 0) {
3115 aprint_normal_dev(sc->sc_dev,
3116 "couldn't create sysctl node\n");
3117 break;
3118 }
3119 mvxpe_curnum = node->sysctl_num;
3120
3121 /* hw.mvxpe.mvxpe[unit].rx.[queue].length */
3122 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3123 CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3124 SYSCTL_DESCR("maximum length of the queue"),
3125 sysctl_set_queue_length, 0, (void *)rxarg, 0,
3126 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3127 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3128 aprint_normal_dev(sc->sc_dev,
3129 "couldn't create sysctl node\n");
3130 break;
3131 }
3132
3133 /* hw.mvxpe.mvxpe[unit].rx.[queue].threshold_timer_us */
3134 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3135 CTLFLAG_READWRITE, CTLTYPE_INT, "threshold_timer_us",
3136 SYSCTL_DESCR("interrupt coalescing threshold timer [us]"),
3137 sysctl_set_queue_rxthtime, 0, (void *)rxarg, 0,
3138 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3139 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3140 aprint_normal_dev(sc->sc_dev,
3141 "couldn't create sysctl node\n");
3142 break;
3143 }
3144
3145 /* hw.mvxpe.mvxpe[unit].tx.[queue] */
3146 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3147 0, CTLTYPE_NODE,
3148 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3149 NULL, 0, NULL, 0,
3150 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3151 CTL_CREATE, CTL_EOL) != 0) {
3152 aprint_normal_dev(sc->sc_dev,
3153 "couldn't create sysctl node\n");
3154 break;
3155 }
3156 mvxpe_curnum = node->sysctl_num;
3157
3158 /* hw.mvxpe.mvxpe[unit].tx.[queue].length */
3159 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3160 CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3161 SYSCTL_DESCR("maximum length of the queue"),
3162 sysctl_set_queue_length, 0, (void *)txarg, 0,
3163 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3164 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3165 aprint_normal_dev(sc->sc_dev,
3166 "couldn't create sysctl node\n");
3167 break;
3168 }
3169 }
3170
3171 /* hw.mvxpe.mvxpe[unit].clear_mib */
3172 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3173 CTLFLAG_READWRITE, CTLTYPE_INT, "clear_mib",
3174 SYSCTL_DESCR("mvgbe device driver debug control"),
3175 sysctl_clear_mib, 0, (void *)sc, 0,
3176 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3177 CTL_EOL) != 0) {
3178 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3179 return;
3180 }
3181
3182 }
3183
3184 /*
3185 * MIB
3186 */
3187 STATIC void
3188 mvxpe_clear_mib(struct mvxpe_softc *sc)
3189 {
3190 int i;
3191
3192 KASSERT_SC_MTX(sc);
3193
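/*
 * The MIB registers clear on read, so reading and discarding each
 * register resets the hardware counter; the cached 64-bit software
 * counters are zeroed as well.
 */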
3194 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3195 if (mvxpe_mib_list[i].reg64)
3196 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum + 4));
3197 MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3198 sc->sc_sysctl_mib[i].counter = 0;
3199 }
3200 }
3201
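/*
 * mvxpe_update_mib accumulates the clear-on-read hardware MIB
 * registers into the 64-bit software counters exported via sysctl.
 */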
3202 STATIC void
3203 mvxpe_update_mib(struct mvxpe_softc *sc)
3204 {
3205 int i;
3206
3207 KASSERT_SC_MTX(sc);
3208
3209 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3210 uint32_t val_hi;
3211 uint32_t val_lo;
3212
3213 if (mvxpe_mib_list[i].reg64) {
3214 /* XXX: implement bus_space_read_8() */
3215 val_lo = MVXPE_READ_MIB(sc,
3216 (mvxpe_mib_list[i].regnum + 4));
3217 val_hi = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3218 }
3219 else {
3220 val_lo = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3221 val_hi = 0;
3222 }
3223
3224 if ((val_lo | val_hi) == 0)
3225 continue;
3226
3227 sc->sc_sysctl_mib[i].counter +=
3228 ((uint64_t)val_hi << 32) | (uint64_t)val_lo;
3229 }
3230 }
3231
3232 /*
3233 * for Debug
3234 */
3235 STATIC void
3236 mvxpe_dump_txdesc(struct mvxpe_tx_desc *desc, int idx)
3237 {
3238 #define DESC_PRINT(X) \
3239 if (X) \
3240 printf("txdesc[%d]." #X "=%#x\n", idx, X);
3241
3242 DESC_PRINT(desc->command);
3243 DESC_PRINT(desc->l4ichk);
3244 DESC_PRINT(desc->bytecnt);
3245 DESC_PRINT(desc->bufptr);
3246 DESC_PRINT(desc->flags);
3247 #undef DESC_PRINT
3248 }
3249
3250 STATIC void
3251 mvxpe_dump_rxdesc(struct mvxpe_rx_desc *desc, int idx)
3252 {
3253 #define DESC_PRINT(X) \
3254 if (X) \
3255 printf("rxdesc[%d]." #X "=%#x\n", idx, X);
3256
3257 DESC_PRINT(desc->status);
3258 DESC_PRINT(desc->bytecnt);
3259 DESC_PRINT(desc->bufptr);
3260 DESC_PRINT(desc->l4chk);
3261 #undef DESC_PRINT
3262 }
3263