/*	$NetBSD: if_mvxpe.c,v 1.1 2015/05/03 14:38:10 hsuenaga Exp $	*/
2 /*
3 * Copyright (c) 2015 Internet Initiative Japan Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: if_mvxpe.c,v 1.1 2015/05/03 14:38:10 hsuenaga Exp $");
29
30 #include "opt_multiprocessor.h"
31
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/callout.h>
35 #include <sys/device.h>
36 #include <sys/endian.h>
37 #include <sys/errno.h>
38 #include <sys/evcnt.h>
39 #include <sys/kernel.h>
40 #include <sys/kmem.h>
41 #include <sys/mutex.h>
42 #include <sys/sockio.h>
43 #include <sys/sysctl.h>
44 #include <sys/syslog.h>
45 #include <sys/rndsource.h>
46
47 #include <net/if.h>
48 #include <net/if_ether.h>
49 #include <net/if_media.h>
50 #include <net/bpf.h>
51
52 #include <netinet/in.h>
53 #include <netinet/in_systm.h>
54 #include <netinet/ip.h>
55
56 #include <dev/mii/mii.h>
57 #include <dev/mii/miivar.h>
58
59 #include <dev/marvell/marvellreg.h>
60 #include <dev/marvell/marvellvar.h>
61 #include <dev/marvell/if_mvxpereg.h>
62 #include <dev/marvell/if_mvxpevar.h>
63
64 #include "locators.h"
65
66 #if BYTE_ORDER == BIG_ENDIAN
67 #error "BIG ENDIAN not supported"
68 #endif
69
70 #ifdef MVXPE_DEBUG
71 #define STATIC /* nothing */
72 #else
73 #define STATIC static
74 #endif
75
76 /* autoconf(9) */
77 STATIC int mvxpe_match(device_t, struct cfdata *, void *);
78 STATIC void mvxpe_attach(device_t, device_t, void *);
79 STATIC int mvxpe_evcnt_attach(struct mvxpe_softc *);
80 CFATTACH_DECL_NEW(mvxpe_mbus, sizeof(struct mvxpe_softc),
81 mvxpe_match, mvxpe_attach, NULL, NULL);
82 STATIC void mvxpe_sc_lock(struct mvxpe_softc *);
83 STATIC void mvxpe_sc_unlock(struct mvxpe_softc *);
84
85 /* MII */
86 STATIC int mvxpe_miibus_readreg(device_t, int, int);
87 STATIC void mvxpe_miibus_writereg(device_t, int, int, int);
88 STATIC void mvxpe_miibus_statchg(struct ifnet *);
89
/* Address Decoding Window */
91 STATIC void mvxpe_wininit(struct mvxpe_softc *, enum marvell_tags *);
92
93 /* Device Register Initialization */
94 STATIC int mvxpe_initreg(struct ifnet *);
95
/* Descriptor Ring Control for each queue */
97 STATIC void *mvxpe_dma_memalloc(struct mvxpe_softc *, bus_dmamap_t *, size_t);
98 STATIC int mvxpe_ring_alloc_queue(struct mvxpe_softc *, int);
99 STATIC void mvxpe_ring_dealloc_queue(struct mvxpe_softc *, int);
100 STATIC void mvxpe_ring_init_queue(struct mvxpe_softc *, int);
101 STATIC void mvxpe_ring_flush_queue(struct mvxpe_softc *, int);
102 STATIC void mvxpe_ring_sync_rx(struct mvxpe_softc *, int, int, int, int);
103 STATIC void mvxpe_ring_sync_tx(struct mvxpe_softc *, int, int, int, int);
104
105 /* Rx/Tx Queue Control */
106 STATIC int mvxpe_rx_queue_init(struct ifnet *, int);
107 STATIC int mvxpe_tx_queue_init(struct ifnet *, int);
108 STATIC int mvxpe_rx_queue_enable(struct ifnet *, int);
109 STATIC int mvxpe_tx_queue_enable(struct ifnet *, int);
110 STATIC void mvxpe_rx_lockq(struct mvxpe_softc *, int);
111 STATIC void mvxpe_rx_unlockq(struct mvxpe_softc *, int);
112 STATIC void mvxpe_tx_lockq(struct mvxpe_softc *, int);
113 STATIC void mvxpe_tx_unlockq(struct mvxpe_softc *, int);
114
115 /* Interrupt Handlers */
116 STATIC void mvxpe_disable_intr(struct mvxpe_softc *);
117 STATIC void mvxpe_enable_intr(struct mvxpe_softc *);
118 STATIC int mvxpe_rxtxth_intr(void *);
119 STATIC int mvxpe_misc_intr(void *);
120 STATIC int mvxpe_rxtx_intr(void *);
121 STATIC void mvxpe_tick(void *);
122
123 /* struct ifnet and mii callbacks*/
124 STATIC void mvxpe_start(struct ifnet *);
125 STATIC int mvxpe_ioctl(struct ifnet *, u_long, void *);
126 STATIC int mvxpe_init(struct ifnet *);
127 STATIC void mvxpe_stop(struct ifnet *, int);
128 STATIC void mvxpe_watchdog(struct ifnet *);
129 STATIC int mvxpe_ifflags_cb(struct ethercom *);
130 STATIC int mvxpe_mediachange(struct ifnet *);
131 STATIC void mvxpe_mediastatus(struct ifnet *, struct ifmediareq *);
132
133 /* Link State Notify */
134 STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc);
135 STATIC void mvxpe_linkup(struct mvxpe_softc *);
136 STATIC void mvxpe_linkdown(struct mvxpe_softc *);
137 STATIC void mvxpe_linkreset(struct mvxpe_softc *);
138
139 /* Packet Buffer Manager(BM) */
140 STATIC int mvxpe_bm_init(struct mvxpe_softc *);
141 STATIC int mvxpe_bm_init_mbuf_hdr(struct mvxpe_bm_chunk *);
142 STATIC struct mvxpe_bm_chunk *mvxpe_bm_alloc(struct mvxpe_softc *);
143 STATIC void mvxpe_bm_free_mbuf(struct mbuf *, void *, size_t, void *);
144 STATIC void mvxpe_bm_free_chunk(struct mvxpe_bm_chunk *);
145 STATIC void mvxpe_bm_sync(struct mvxpe_bm_chunk *, size_t, int);
146 STATIC void mvxpe_bm_lock(struct mvxpe_softc *);
147 STATIC void mvxpe_bm_unlock(struct mvxpe_softc *);
148
149 /* Tx Subroutines */
150 STATIC int mvxpe_tx_queue_select(struct mvxpe_softc *, struct mbuf *);
151 STATIC int mvxpe_tx_queue(struct mvxpe_softc *, struct mbuf *, int);
152 STATIC void mvxpe_tx_set_csumflag(struct ifnet *,
153 struct mvxpe_tx_desc *, struct mbuf *);
154 STATIC void mvxpe_tx_complete(struct mvxpe_softc *);
155 STATIC void mvxpe_tx_queue_del(struct mvxpe_softc *, int);
156
157 /* Rx Subroutines */
158 STATIC void mvxpe_rx(struct mvxpe_softc *);
159 STATIC void mvxpe_rx_queue(struct mvxpe_softc *, int, int);
160 STATIC int mvxpe_rx_queue_select(struct mvxpe_softc *, int *);
161 STATIC void mvxpe_rx_reload(struct mvxpe_softc *);
162 STATIC void mvxpe_rx_queue_reload(struct mvxpe_softc *, int);
163 STATIC int mvxpe_rx_queue_add(struct mvxpe_softc *, int);
164 STATIC void mvxpe_rx_set_csumflag(struct ifnet *,
165 struct mvxpe_rx_desc *, struct mbuf *);
166
167 /* MAC address filter */
168 STATIC uint8_t mvxpe_crc8(const uint8_t *, size_t);
169 STATIC void mvxpe_filter_setup(struct mvxpe_softc *);
170
171 /* sysctl(9) */
172 STATIC int sysctl_read_mib(SYSCTLFN_PROTO);
173 STATIC int sysctl_clear_mib(SYSCTLFN_PROTO);
174 STATIC int sysctl_set_queue_length(SYSCTLFN_PROTO);
175 STATIC int sysctl_set_queue_rxthtime(SYSCTLFN_PROTO);
176 STATIC void sysctl_mvxpe_init(struct mvxpe_softc *);
177
178 /* MIB */
179 STATIC void mvxpe_clear_mib(struct mvxpe_softc *);
180 STATIC void mvxpe_update_mib(struct mvxpe_softc *);
181
182 /* for Debug */
183 STATIC void mvxpe_dump_txdesc(struct mvxpe_tx_desc *, int) __attribute__((__unused__));
184 STATIC void mvxpe_dump_rxdesc(struct mvxpe_rx_desc *, int) __attribute__((__unused__));
185
186 STATIC int mvxpe_root_num;
187 STATIC kmutex_t mii_mutex;
188 STATIC int mii_init = 0;
189 #ifdef MVXPE_DEBUG
190 STATIC int mvxpe_debug = MVXPE_DEBUG;
191 #endif
192
193 /*
 * List of MIB registers and names
195 */
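/*
 * Each entry maps one MIB counter register to a sysctl node.  The reg64
 * flag presumably marks counters the hardware keeps as 64-bit values
 * split across two consecutive 32-bit registers (the good-octet counters
 * below); readers of this table should fetch both halves for those
 * entries.
 */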
196 STATIC struct mvxpe_mib_def {
197 uint32_t regnum;
198 int reg64;
199 const char *sysctl_name;
200 const char *desc;
201 } mvxpe_mib_list[] = {
202 {MVXPE_MIB_RX_GOOD_OCT, 1, "rx_good_oct",
203 "Good Octets Rx"},
204 {MVXPE_MIB_RX_BAD_OCT, 0, "rx_bad_oct",
205 "Bad Octets Rx"},
206 {MVXPE_MIB_RX_MAC_TRNS_ERR, 0, "rx_mac_err",
207 "MAC Transmit Error"},
208 {MVXPE_MIB_RX_GOOD_FRAME, 0, "rx_good_frame",
209 "Good Frames Rx"},
210 {MVXPE_MIB_RX_BAD_FRAME, 0, "rx_bad_frame",
211 "Bad Frames Rx"},
212 {MVXPE_MIB_RX_BCAST_FRAME, 0, "rx_bcast_frame",
213 "Broadcast Frames Rx"},
214 {MVXPE_MIB_RX_MCAST_FRAME, 0, "rx_mcast_frame",
215 "Multicast Frames Rx"},
216 {MVXPE_MIB_RX_FRAME64_OCT, 0, "rx_frame_1_64",
217 "Frame Size 1 - 64"},
218 {MVXPE_MIB_RX_FRAME127_OCT, 0, "rx_frame_65_127",
219 "Frame Size 65 - 127"},
220 {MVXPE_MIB_RX_FRAME255_OCT, 0, "rx_frame_128_255",
221 "Frame Size 128 - 255"},
222 {MVXPE_MIB_RX_FRAME511_OCT, 0, "rx_frame_256_511",
223 "Frame Size 256 - 511"},
224 {MVXPE_MIB_RX_FRAME1023_OCT, 0, "rx_frame_512_1023",
225 "Frame Size 512 - 1023"},
	{MVXPE_MIB_RX_FRAMEMAX_OCT, 0, "rx_frame_1024_max",
227 "Frame Size 1024 - Max"},
228 {MVXPE_MIB_TX_GOOD_OCT, 1, "tx_good_oct",
229 "Good Octets Tx"},
230 {MVXPE_MIB_TX_GOOD_FRAME, 0, "tx_good_frame",
231 "Good Frames Tx"},
232 {MVXPE_MIB_TX_EXCES_COL, 0, "tx_exces_collision",
233 "Excessive Collision"},
234 {MVXPE_MIB_TX_MCAST_FRAME, 0, "tx_mcast_frame",
235 "Multicast Frames Tx"},
236 {MVXPE_MIB_TX_BCAST_FRAME, 0, "tx_bcast_frame",
237 "Broadcast Frames Tx"},
238 {MVXPE_MIB_TX_MAC_CTL_ERR, 0, "tx_mac_err",
239 "Unknown MAC Control"},
240 {MVXPE_MIB_FC_SENT, 0, "fc_tx",
241 "Flow Control Tx"},
242 {MVXPE_MIB_FC_GOOD, 0, "fc_rx_good",
243 "Good Flow Control Rx"},
244 {MVXPE_MIB_FC_BAD, 0, "fc_rx_bad",
245 "Bad Flow Control Rx"},
246 {MVXPE_MIB_PKT_UNDERSIZE, 0, "pkt_undersize",
247 "Undersized Packets Rx"},
248 {MVXPE_MIB_PKT_FRAGMENT, 0, "pkt_fragment",
249 "Fragmented Packets Rx"},
250 {MVXPE_MIB_PKT_OVERSIZE, 0, "pkt_oversize",
251 "Oversized Packets Rx"},
252 {MVXPE_MIB_PKT_JABBER, 0, "pkt_jabber",
253 "Jabber Packets Rx"},
254 {MVXPE_MIB_MAC_RX_ERR, 0, "mac_rx_err",
255 "MAC Rx Errors"},
256 {MVXPE_MIB_MAC_CRC_ERR, 0, "mac_crc_err",
257 "MAC CRC Errors"},
258 {MVXPE_MIB_MAC_COL, 0, "mac_collision",
259 "MAC Collision"},
260 {MVXPE_MIB_MAC_LATE_COL, 0, "mac_late_collision",
261 "MAC Late Collision"},
262 };
263
264 /*
265 * autoconf(9)
266 */
267 /* ARGSUSED */
268 STATIC int
269 mvxpe_match(device_t parent, cfdata_t match, void *aux)
270 {
271 struct marvell_attach_args *mva = aux;
272 bus_size_t pv_off;
273 uint32_t pv;
274
275 if (strcmp(mva->mva_name, match->cf_name) != 0)
276 return 0;
277 if (mva->mva_offset == MVA_OFFSET_DEFAULT)
278 return 0;
279
280 /* check port version */
281 pv_off = mva->mva_offset + MVXPE_PV;
282 pv = bus_space_read_4(mva->mva_iot, mva->mva_ioh, pv_off);
283 if (MVXPE_PV_GET_VERSION(pv) < 0x10)
284 return 0; /* old version is not supported */
285
286 return 1;
287 }
288
289 /* ARGSUSED */
290 STATIC void
291 mvxpe_attach(device_t parent, device_t self, void *aux)
292 {
293 struct mvxpe_softc *sc = device_private(self);
294 struct mii_softc *mii;
295 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
296 struct marvell_attach_args *mva = aux;
297 prop_dictionary_t dict;
298 prop_data_t enaddrp = NULL;
299 uint32_t phyaddr, maddrh, maddrl;
300 uint8_t enaddr[ETHER_ADDR_LEN];
301 int q;
302
303 aprint_naive("\n");
304 aprint_normal(": Marvell ARMADA GbE Controller\n");
305 memset(sc, 0, sizeof(*sc));
306 sc->sc_dev = self;
307 sc->sc_port = mva->mva_unit;
308 sc->sc_iot = mva->mva_iot;
309 sc->sc_dmat = mva->mva_dmat;
310 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
311 callout_init(&sc->sc_tick_ch, 0);
312 callout_setfunc(&sc->sc_tick_ch, mvxpe_tick, sc);
313
314 /*
315 * BUS space
316 */
317 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
318 mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
319 aprint_error_dev(self, "Cannot map registers\n");
320 goto fail;
321 }
322 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
323 mva->mva_offset + MVXPE_PORTMIB_BASE, MVXPE_PORTMIB_SIZE,
324 &sc->sc_mibh)) {
325 aprint_error_dev(self,
326 "Cannot map destination address filter registers\n");
327 goto fail;
328 }
329 sc->sc_version = MVXPE_READ(sc, MVXPE_PV);
330 aprint_normal_dev(self, "Port Version %#x\n", sc->sc_version);
331
332 /*
	 * Software-based Buffer Manager (BM) subsystem.
	 * Try to allocate special memory chunks for Rx packets.
	 * Some versions of the SoC have a hardware-based BM (not supported yet).
336 */
337 if (mvxpe_bm_init(sc) != 0) {
338 aprint_error_dev(self, "BM pool allocation failure\n");
339 goto fail;
340 }
341
342 /*
343 * make sure DMA engines are in reset state
344 */
345 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
346 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);
347
348 /*
349 * Address decoding window
350 */
351 mvxpe_wininit(sc, mva->mva_tags);
352
353 /*
354 * MAC address
355 */
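	/*
	 * Register layout, as derived from the packing below: MVXPE_MACAH
	 * holds the four most significant address bytes and MVXPE_MACAL the
	 * remaining two in its low 16 bits.  For example, 00:11:22:33:44:55
	 * is programmed as MACAH = 0x00112233 and MACAL = 0x00004455.
	 */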
356 dict = device_properties(self);
357 if (dict)
358 enaddrp = prop_dictionary_get(dict, "mac-address");
359 if (enaddrp) {
360 memcpy(enaddr, prop_data_data_nocopy(enaddrp), ETHER_ADDR_LEN);
361 maddrh = enaddr[0] << 24;
362 maddrh |= enaddr[1] << 16;
363 maddrh |= enaddr[2] << 8;
364 maddrh |= enaddr[3];
365 maddrl = enaddr[4] << 8;
366 maddrl |= enaddr[5];
367 MVXPE_WRITE(sc, MVXPE_MACAH, maddrh);
368 MVXPE_WRITE(sc, MVXPE_MACAL, maddrl);
369 }
370 else {
371 /*
		 * even if enaddr is not found in the dictionary,
		 * the port may have been initialized by an IPL program such as U-Boot.
374 */
375 maddrh = MVXPE_READ(sc, MVXPE_MACAH);
376 maddrl = MVXPE_READ(sc, MVXPE_MACAL);
377 if ((maddrh | maddrl) == 0) {
378 aprint_error_dev(self, "No Ethernet address\n");
379 return;
380 }
381 }
382 sc->sc_enaddr[0] = maddrh >> 24;
383 sc->sc_enaddr[1] = maddrh >> 16;
384 sc->sc_enaddr[2] = maddrh >> 8;
385 sc->sc_enaddr[3] = maddrh >> 0;
386 sc->sc_enaddr[4] = maddrl >> 8;
387 sc->sc_enaddr[5] = maddrl >> 0;
388 aprint_normal_dev(self, "Ethernet address %s\n",
389 ether_sprintf(sc->sc_enaddr));
390
391 /*
392 * Register interrupt handlers
393 * XXX: handle Ethernet unit intr. and Error intr.
394 */
395 mvxpe_disable_intr(sc);
396 marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpe_rxtxth_intr, sc);
397
398 /*
399 * MIB buffer allocation
400 */
401 sc->sc_sysctl_mib_size =
402 __arraycount(mvxpe_mib_list) * sizeof(struct mvxpe_sysctl_mib);
403 sc->sc_sysctl_mib = kmem_alloc(sc->sc_sysctl_mib_size, KM_NOSLEEP);
404 if (sc->sc_sysctl_mib == NULL)
405 goto fail;
406 memset(sc->sc_sysctl_mib, 0, sc->sc_sysctl_mib_size);
407
408 /*
409 * Device DMA Buffer allocation
410 */
411 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
412 if (mvxpe_ring_alloc_queue(sc, q) != 0)
413 goto fail;
414 mvxpe_ring_init_queue(sc, q);
415 }
416
417 /*
418 * We can support 802.1Q VLAN-sized frames and jumbo
419 * Ethernet frames.
420 */
421 sc->sc_ethercom.ec_capabilities |=
422 ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
423 ifp->if_softc = sc;
424 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
425 ifp->if_start = mvxpe_start;
426 ifp->if_ioctl = mvxpe_ioctl;
427 ifp->if_init = mvxpe_init;
428 ifp->if_stop = mvxpe_stop;
429 ifp->if_watchdog = mvxpe_watchdog;
430
431 /*
432 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
433 */
434 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx;
435 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx;
436 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx;
437 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx;
438 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
439 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
440 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx;
441 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Rx;
442 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
443 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
444
445 /*
446 * Initialize struct ifnet
447 */
448 IFQ_SET_MAXLEN(&ifp->if_snd, max(MVXPE_TX_RING_CNT - 1, IFQ_MAXLEN));
449 IFQ_SET_READY(&ifp->if_snd);
450 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
451
452 /*
	 * Enable DMA engines and initialize device registers.
454 */
455 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
456 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
457 MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
458 mvxpe_sc_lock(sc); /* XXX */
459 mvxpe_filter_setup(sc);
460 mvxpe_sc_unlock(sc);
461 mvxpe_initreg(ifp);
462
463 /*
464 * Now MAC is working, setup MII.
465 */
466 if (mii_init == 0) {
467 /*
		 * The MII bus is shared by all MACs and all PHYs in the SoC.
		 * Serializing the bus access should be safe.
470 */
471 mutex_init(&mii_mutex, MUTEX_DEFAULT, IPL_NET);
472 mii_init = 1;
473 }
474 sc->sc_mii.mii_ifp = ifp;
475 sc->sc_mii.mii_readreg = mvxpe_miibus_readreg;
476 sc->sc_mii.mii_writereg = mvxpe_miibus_writereg;
477 sc->sc_mii.mii_statchg = mvxpe_miibus_statchg;
478
479 sc->sc_ethercom.ec_mii = &sc->sc_mii;
480 ifmedia_init(&sc->sc_mii.mii_media, 0,
481 mvxpe_mediachange, mvxpe_mediastatus);
482 /*
	 * XXX: PHY addressing highly depends on the board design.
	 * We assume phyaddress == MAC unit number here,
	 * but some boards may not follow this convention.
486 */
487 mii_attach(self, &sc->sc_mii, 0xffffffff,
488 MII_PHY_ANY, sc->sc_dev->dv_unit, 0);
489 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
490 if (mii == NULL) {
491 aprint_error_dev(self, "no PHY found!\n");
492 ifmedia_add(&sc->sc_mii.mii_media,
493 IFM_ETHER|IFM_MANUAL, 0, NULL);
494 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
495 } else {
496 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
497 phyaddr = MVXPE_PHYADDR_PHYAD(mii->mii_phy);
498 MVXPE_WRITE(sc, MVXPE_PHYADDR, phyaddr);
499 DPRINTSC(sc, 1, "PHYADDR: %#x\n", MVXPE_READ(sc, MVXPE_PHYADDR));
500 }
501
502 /*
503 * Call MI attach routines.
504 */
505 if_attach(ifp);
506
507 ether_ifattach(ifp, sc->sc_enaddr);
508 ether_set_ifflags_cb(&sc->sc_ethercom, mvxpe_ifflags_cb);
509
510 sysctl_mvxpe_init(sc);
511 mvxpe_evcnt_attach(sc);
512 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
513 RND_TYPE_NET, RND_FLAG_DEFAULT);
514
515 return;
516
517 fail:
518 for (q = 0; q < MVXPE_QUEUE_SIZE; q++)
519 mvxpe_ring_dealloc_queue(sc, q);
520 if (sc->sc_sysctl_mib)
521 kmem_free(sc->sc_sysctl_mib, sc->sc_sysctl_mib_size);
522
523 return;
524 }
525
526 STATIC int
527 mvxpe_evcnt_attach(struct mvxpe_softc *sc)
528 {
529 int q;
530
531 #ifdef MVXPE_EVENT_COUNTERS
532 /* Master Interrupt Handler */
533 evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtxth, EVCNT_TYPE_INTR,
534 NULL, device_xname(sc->sc_dev), "RxTxTH Intr.");
535 evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtx, EVCNT_TYPE_INTR,
536 NULL, device_xname(sc->sc_dev), "RxTx Intr.");
537 evcnt_attach_dynamic(&sc->sc_ev.ev_i_misc, EVCNT_TYPE_INTR,
538 NULL, device_xname(sc->sc_dev), "MISC Intr.");
539
540 /* RXTXTH Interrupt */
541 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtxth_txerr, EVCNT_TYPE_INTR,
542 NULL, device_xname(sc->sc_dev), "RxTxTH Tx error summary");
543
544 /* MISC Interrupt */
545 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_phystatuschng, EVCNT_TYPE_INTR,
546 NULL, device_xname(sc->sc_dev), "MISC phy status changed");
547 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_linkchange, EVCNT_TYPE_INTR,
548 NULL, device_xname(sc->sc_dev), "MISC link status changed");
549 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_iae, EVCNT_TYPE_INTR,
550 NULL, device_xname(sc->sc_dev), "MISC internal address error");
551 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxoverrun, EVCNT_TYPE_INTR,
552 NULL, device_xname(sc->sc_dev), "MISC Rx FIFO overrun");
553 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxcrc, EVCNT_TYPE_INTR,
554 NULL, device_xname(sc->sc_dev), "MISC Rx CRC error");
555 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxlargepacket, EVCNT_TYPE_INTR,
556 NULL, device_xname(sc->sc_dev), "MISC Rx too large frame");
557 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txunderrun, EVCNT_TYPE_INTR,
558 NULL, device_xname(sc->sc_dev), "MISC Tx FIFO underrun");
559 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_prbserr, EVCNT_TYPE_INTR,
560 NULL, device_xname(sc->sc_dev), "MISC SERDES loopback test err");
561 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_srse, EVCNT_TYPE_INTR,
562 NULL, device_xname(sc->sc_dev), "MISC SERDES sync error");
563 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txreq, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Tx resource error");
565
566 /* RxTx Interrupt */
567 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rreq, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Rx resource error");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rpq, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Rx packet");
571 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_tbrq, EVCNT_TYPE_INTR,
572 NULL, device_xname(sc->sc_dev), "RxTx Tx complete");
573 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rxtxth, EVCNT_TYPE_INTR,
574 NULL, device_xname(sc->sc_dev), "RxTx RxTxTH summary");
575 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_txerr, EVCNT_TYPE_INTR,
576 NULL, device_xname(sc->sc_dev), "RxTx Tx error summary");
577 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_misc, EVCNT_TYPE_INTR,
578 NULL, device_xname(sc->sc_dev), "RxTx MISC summary");
579
580 /* Link */
581 evcnt_attach_dynamic(&sc->sc_ev.ev_link_up, EVCNT_TYPE_MISC,
582 NULL, device_xname(sc->sc_dev), "link up");
583 evcnt_attach_dynamic(&sc->sc_ev.ev_link_down, EVCNT_TYPE_MISC,
584 NULL, device_xname(sc->sc_dev), "link down");
585
586 /* Rx Descriptor */
587 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_ce, EVCNT_TYPE_MISC,
588 NULL, device_xname(sc->sc_dev), "Rx CRC error counter");
589 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_or, EVCNT_TYPE_MISC,
590 NULL, device_xname(sc->sc_dev), "Rx FIFO overrun counter");
591 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_mf, EVCNT_TYPE_MISC,
592 NULL, device_xname(sc->sc_dev), "Rx too large frame counter");
593 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_re, EVCNT_TYPE_MISC,
594 NULL, device_xname(sc->sc_dev), "Rx resource error counter");
595 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_scat, EVCNT_TYPE_MISC,
596 NULL, device_xname(sc->sc_dev), "Rx unexpected scatter bufs");
597
598 /* Tx Descriptor */
599 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_lc, EVCNT_TYPE_MISC,
600 NULL, device_xname(sc->sc_dev), "Tx late collision counter");
601 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_rl, EVCNT_TYPE_MISC,
602 NULL, device_xname(sc->sc_dev), "Tx excess. collision counter");
603 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_ur, EVCNT_TYPE_MISC,
604 NULL, device_xname(sc->sc_dev), "Tx FIFO underrun counter");
605 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_oth, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx unknown error counter");
607
608 /* Status Registers */
609 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pdfc, EVCNT_TYPE_MISC,
610 NULL, device_xname(sc->sc_dev), "Rx discard counter");
611 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pofc, EVCNT_TYPE_MISC,
612 NULL, device_xname(sc->sc_dev), "Rx overrun counter");
613 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txbadfcs, EVCNT_TYPE_MISC,
614 NULL, device_xname(sc->sc_dev), "Tx bad FCS counter");
615 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txdropped, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx dropped counter");
617 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_lpic, EVCNT_TYPE_MISC,
618 NULL, device_xname(sc->sc_dev), "LP_IDLE counter");
619
620 /* Device Driver Errors */
621 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_wdogsoft, EVCNT_TYPE_MISC,
622 NULL, device_xname(sc->sc_dev), "watchdog timer expired");
623 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txerr, EVCNT_TYPE_MISC,
624 NULL, device_xname(sc->sc_dev), "Tx descriptor alloc failed");
625 #define MVXPE_QUEUE_DESC(q) "Rx success in queue " # q
626 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
627 static const char *rxq_desc[] = {
628 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
629 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
630 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
631 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
632 };
633 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxq[q], EVCNT_TYPE_MISC,
634 NULL, device_xname(sc->sc_dev), rxq_desc[q]);
635 }
636 #undef MVXPE_QUEUE_DESC
637 #define MVXPE_QUEUE_DESC(q) "Tx success in queue " # q
638 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
639 static const char *txq_desc[] = {
640 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
641 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
642 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
643 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
644 };
645 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txq[q], EVCNT_TYPE_MISC,
646 NULL, device_xname(sc->sc_dev), txq_desc[q]);
647 }
648 #undef MVXPE_QUEUE_DESC
649 #define MVXPE_QUEUE_DESC(q) "Rx error in queue " # q
650 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
651 static const char *rxqe_desc[] = {
652 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
653 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
654 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
655 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
656 };
657 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxqe[q], EVCNT_TYPE_MISC,
658 NULL, device_xname(sc->sc_dev), rxqe_desc[q]);
659 }
660 #undef MVXPE_QUEUE_DESC
661 #define MVXPE_QUEUE_DESC(q) "Tx error in queue " # q
662 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
663 static const char *txqe_desc[] = {
664 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
665 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
666 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
667 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
668 };
669 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txqe[q], EVCNT_TYPE_MISC,
670 NULL, device_xname(sc->sc_dev), txqe_desc[q]);
671 }
672 #undef MVXPE_QUEUE_DESC
673
674 #endif /* MVXPE_EVENT_COUNTERS */
675 return 0;
676 }
677
678 STATIC void
679 mvxpe_sc_lock(struct mvxpe_softc *sc)
680 {
681 mutex_enter(&sc->sc_mtx);
682 }
683
684 STATIC void
685 mvxpe_sc_unlock(struct mvxpe_softc *sc)
686 {
687 mutex_exit(&sc->sc_mtx);
688 }
689
690 /*
691 * MII
692 */
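/*
 * SMI access sequence used by the read/write helpers below: wait for
 * MVXPE_SMI_BUSY to clear, write a command word built from
 * MVXPE_SMI_PHYAD()/MVXPE_SMI_REGAD() plus an opcode (and the data word
 * for writes), then poll for completion -- MVXPE_SMI_READVALID for reads,
 * !MVXPE_SMI_BUSY for writes.  All of this is serialized by mii_mutex
 * because the SMI block is shared by every port.
 */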
693 STATIC int
694 mvxpe_miibus_readreg(device_t dev, int phy, int reg)
695 {
696 struct mvxpe_softc *sc = device_private(dev);
697 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
698 uint32_t smi, val;
699 int i;
700
701 mutex_enter(&mii_mutex);
702
703 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
704 DELAY(1);
705 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
706 break;
707 }
708 if (i == MVXPE_PHY_TIMEOUT) {
709 aprint_error_ifnet(ifp, "SMI busy timeout\n");
710 mutex_exit(&mii_mutex);
711 return -1;
712 }
713
714 smi =
715 MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) | MVXPE_SMI_OPCODE_READ;
716 MVXPE_WRITE(sc, MVXPE_SMI, smi);
717
718 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
719 DELAY(1);
720 smi = MVXPE_READ(sc, MVXPE_SMI);
721 if (smi & MVXPE_SMI_READVALID)
722 break;
723 }
724
725 mutex_exit(&mii_mutex);
726
727 DPRINTDEV(dev, 9, "i=%d, timeout=%d\n", i, MVXPE_PHY_TIMEOUT);
728
729 val = smi & MVXPE_SMI_DATA_MASK;
730
731 DPRINTDEV(dev, 9, "phy=%d, reg=%#x, val=%#x\n", phy, reg, val);
732
733 return val;
734 }
735
736 STATIC void
737 mvxpe_miibus_writereg(device_t dev, int phy, int reg, int val)
738 {
739 struct mvxpe_softc *sc = device_private(dev);
740 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
741 uint32_t smi;
742 int i;
743
744 DPRINTDEV(dev, 9, "phy=%d reg=%#x val=%#x\n", phy, reg, val);
745
746 mutex_enter(&mii_mutex);
747
748 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
749 DELAY(1);
750 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
751 break;
752 }
753 if (i == MVXPE_PHY_TIMEOUT) {
754 aprint_error_ifnet(ifp, "SMI busy timeout\n");
755 mutex_exit(&mii_mutex);
756 return;
757 }
758
759 smi = MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) |
760 MVXPE_SMI_OPCODE_WRITE | (val & MVXPE_SMI_DATA_MASK);
761 MVXPE_WRITE(sc, MVXPE_SMI, smi);
762
763 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
764 DELAY(1);
765 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
766 break;
767 }
768
769 mutex_exit(&mii_mutex);
770
771 if (i == MVXPE_PHY_TIMEOUT)
772 aprint_error_ifnet(ifp, "phy write timed out\n");
773 }
774
775 STATIC void
776 mvxpe_miibus_statchg(struct ifnet *ifp)
777 {
778
779 /* nothing to do */
780 }
781
782 /*
783 * Address Decoding Window
784 */
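/*
 * Summary of what mvxpe_wininit() does: every marvell tag with a known
 * target/attribute/base/size is programmed into one of the MVXPE_NWINDOW
 * decode windows; bases above 4GB additionally need a high-address remap
 * entry and are therefore limited to the first MVXPE_NREMAP windows.
 * Unused windows stay disabled via MVXPE_BARE, and each programmed window
 * is granted full (r/w) access through MVXPE_EPAP.
 */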
785 STATIC void
786 mvxpe_wininit(struct mvxpe_softc *sc, enum marvell_tags *tags)
787 {
788 device_t pdev = device_parent(sc->sc_dev);
789 uint64_t base;
790 uint32_t en, ac, size;
791 int window, target, attr, rv, i;
792
793 /* First disable all address decode windows */
794 en = MVXPE_BARE_EN_MASK;
795 MVXPE_WRITE(sc, MVXPE_BARE, en);
796
797 ac = 0;
798 for (window = 0, i = 0;
799 tags[i] != MARVELL_TAG_UNDEFINED && window < MVXPE_NWINDOW; i++) {
800 rv = marvell_winparams_by_tag(pdev, tags[i],
801 &target, &attr, &base, &size);
802 if (rv != 0 || size == 0)
803 continue;
804
805 if (base > 0xffffffffULL) {
806 if (window >= MVXPE_NREMAP) {
807 aprint_error_dev(sc->sc_dev,
808 "can't remap window %d\n", window);
809 continue;
810 }
811 MVXPE_WRITE(sc, MVXPE_HA(window),
812 (base >> 32) & 0xffffffff);
813 }
814
815 MVXPE_WRITE(sc, MVXPE_BASEADDR(window),
816 MVXPE_BASEADDR_TARGET(target) |
817 MVXPE_BASEADDR_ATTR(attr) |
818 MVXPE_BASEADDR_BASE(base));
819 MVXPE_WRITE(sc, MVXPE_S(window), MVXPE_S_SIZE(size));
820
821 DPRINTSC(sc, 1, "Window %d Base 0x%016llx: Size 0x%08x\n",
822 window, base, size);
823
824 en &= ~(1 << window);
825 /* set full access (r/w) */
826 ac |= MVXPE_EPAP_EPAR(window, MVXPE_EPAP_AC_FA);
827 window++;
828 }
	/* allow access to the decode windows */
830 MVXPE_WRITE(sc, MVXPE_EPAP, ac);
831
832 MVXPE_WRITE(sc, MVXPE_BARE, en);
833 }
834
835 /*
836 * Device Register Initialization
 * Reset device registers to the device driver's default values.
 * The device is not enabled here.
839 */
840 STATIC int
841 mvxpe_initreg(struct ifnet *ifp)
842 {
843 struct mvxpe_softc *sc = ifp->if_softc;
844 int serdes = 0;
845 uint32_t reg;
846 int q, i;
847
848 DPRINTIFNET(ifp, 1, "initializing device register\n");
849
850 /* Init TX/RX Queue Registers */
851 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
852 mvxpe_rx_lockq(sc, q);
853 if (mvxpe_rx_queue_init(ifp, q) != 0) {
854 aprint_error_ifnet(ifp,
855 "initialization failed: cannot initialize queue\n");
856 mvxpe_rx_unlockq(sc, q);
857 mvxpe_tx_unlockq(sc, q);
858 return ENOBUFS;
859 }
860 mvxpe_rx_unlockq(sc, q);
861
862 mvxpe_tx_lockq(sc, q);
863 if (mvxpe_tx_queue_init(ifp, q) != 0) {
864 aprint_error_ifnet(ifp,
865 "initialization failed: cannot initialize queue\n");
866 mvxpe_rx_unlockq(sc, q);
867 mvxpe_tx_unlockq(sc, q);
868 return ENOBUFS;
869 }
870 mvxpe_tx_unlockq(sc, q);
871 }
872
873 /* Tx MTU Limit */
874 MVXPE_WRITE(sc, MVXPE_TXMTU, MVXPE_MTU);
875
	/* Check SGMII or SERDES (assume IPL/U-Boot initialized this) */
877 reg = MVXPE_READ(sc, MVXPE_PMACC0);
878 if ((reg & MVXPE_PMACC0_PORTTYPE) != 0)
879 serdes = 1;
880
881 /* Ethernet Unit Control */
882 reg = MVXPE_READ(sc, MVXPE_EUC);
883 reg |= MVXPE_EUC_POLLING;
884 MVXPE_WRITE(sc, MVXPE_EUC, reg);
885
886 /* Auto Negotiation */
887 reg = MVXPE_PANC_MUSTSET; /* must write 0x1 */
888 reg |= MVXPE_PANC_FORCELINKFAIL;/* force link state down */
889 reg |= MVXPE_PANC_ANSPEEDEN; /* interface speed negotiation */
890 reg |= MVXPE_PANC_ANDUPLEXEN; /* negotiate duplex mode */
891 if (serdes) {
892 reg |= MVXPE_PANC_INBANDANEN; /* In Band negotiation */
893 reg |= MVXPE_PANC_INBANDANBYPASSEN; /* bypass negotiation */
894 reg |= MVXPE_PANC_SETFULLDX; /* set full-duplex on failure */
895 }
896 MVXPE_WRITE(sc, MVXPE_PANC, reg);
897
898 /* EEE: Low Power Idle */
899 reg = MVXPE_LPIC0_LILIMIT(MVXPE_LPI_LI);
900 reg |= MVXPE_LPIC0_TSLIMIT(MVXPE_LPI_TS);
901 MVXPE_WRITE(sc, MVXPE_LPIC0, reg);
902
903 reg = MVXPE_LPIC1_TWLIMIT(MVXPE_LPI_TS);
904 MVXPE_WRITE(sc, MVXPE_LPIC1, reg);
905
906 reg = MVXPE_LPIC2_MUSTSET;
907 MVXPE_WRITE(sc, MVXPE_LPIC2, reg);
908
909 /* Port MAC Control set 0 */
910 reg = MVXPE_PMACC0_MUSTSET; /* must write 0x1 */
911 reg &= ~MVXPE_PMACC0_PORTEN; /* port is still disabled */
912 reg |= MVXPE_PMACC0_FRAMESIZELIMIT(MVXPE_MRU);
913 if (serdes)
914 reg |= MVXPE_PMACC0_PORTTYPE;
915 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
916
917 /* Port MAC Control set 1 is only used for loop-back test */
918
919 /* Port MAC Control set 2 */
920 reg = MVXPE_READ(sc, MVXPE_PMACC2);
921 reg &= (MVXPE_PMACC2_PCSEN | MVXPE_PMACC2_RGMIIEN);
922 reg |= MVXPE_PMACC2_MUSTSET;
923 MVXPE_WRITE(sc, MVXPE_PMACC2, reg);
924
925 /* Port MAC Control set 3 is used for IPG tune */
926
927 /* Port MAC Control set 4 is not used */
928
929 /* Port Configuration Extended: enable Tx CRC generation */
930 reg = MVXPE_READ(sc, MVXPE_PXCX);
931 reg &= ~MVXPE_PXCX_TXCRCDIS;
932 MVXPE_WRITE(sc, MVXPE_PXCX, reg);
933
	/* clear MIB counter registers (cleared by read) */
935 for (i = 0; i < __arraycount(mvxpe_mib_list); i++)
936 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum));
937
938 /* Set SDC register except IPGINT bits */
939 reg = MVXPE_SDC_RXBSZ_16_64BITWORDS;
940 reg |= MVXPE_SDC_TXBSZ_16_64BITWORDS;
941 reg |= MVXPE_SDC_BLMR;
942 reg |= MVXPE_SDC_BLMT;
943 MVXPE_WRITE(sc, MVXPE_SDC, reg);
944
945 return 0;
946 }
947
948 /*
 * Descriptor Ring Controls for each queue
950 */
951 STATIC void *
952 mvxpe_dma_memalloc(struct mvxpe_softc *sc, bus_dmamap_t *map, size_t size)
953 {
954 bus_dma_segment_t segs;
955 void *kva = NULL;
956 int nsegs;
957
958 /*
959 * Allocate the descriptor queues.
	 * struct mvxpe_ring_data contains an array of descriptors per queue.
961 */
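	/*
	 * Standard bus_dma(9) sequence: bus_dmamem_alloc() a single segment,
	 * bus_dmamem_map() it into KVA, bus_dmamap_create() a map, and
	 * bus_dmamap_load() the KVA into it.  The fail1..fail3 labels unwind
	 * these steps in reverse order on error.
	 */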
962 if (bus_dmamem_alloc(sc->sc_dmat,
963 size, PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
964 aprint_error_dev(sc->sc_dev,
965 "can't alloc device memory (%zu bytes)\n", size);
966 return NULL;
967 }
968 if (bus_dmamem_map(sc->sc_dmat,
969 &segs, nsegs, size, &kva, BUS_DMA_NOWAIT)) {
970 aprint_error_dev(sc->sc_dev,
971 "can't map dma buffers (%zu bytes)\n", size);
972 goto fail1;
973 }
974
975 if (bus_dmamap_create(sc->sc_dmat,
976 size, 1, size, 0, BUS_DMA_NOWAIT, map)) {
977 aprint_error_dev(sc->sc_dev, "can't create dma map\n");
978 goto fail2;
979 }
980 if (bus_dmamap_load(sc->sc_dmat,
981 *map, kva, size, NULL, BUS_DMA_NOWAIT)) {
982 aprint_error_dev(sc->sc_dev, "can't load dma map\n");
983 goto fail3;
984 }
985 memset(kva, 0, size);
986 return kva;
987
988 fail3:
989 bus_dmamap_destroy(sc->sc_dmat, *map);
990 memset(map, 0, sizeof(*map));
991 fail2:
992 bus_dmamem_unmap(sc->sc_dmat, kva, size);
993 fail1:
994 bus_dmamem_free(sc->sc_dmat, &segs, nsegs);
995 return NULL;
996 }
997
998 STATIC int
999 mvxpe_ring_alloc_queue(struct mvxpe_softc *sc, int q)
1000 {
1001 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1002 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1003
1004 /*
	 * MVXPE_RX_RING_CNT and MVXPE_TX_RING_CNT are hard limits on the
	 * queue length.  The real queue length is limited by
	 * sc->sc_rx_ring[q].rx_queue_len and sc->sc_tx_ring[q].tx_queue_len.
	 *
	 * Because reallocating a descriptor ring requires reprogramming the
	 * DMA registers, we allocate enough descriptors for the hard limit
	 * of the queue length up front.
1012 */
1013 rx->rx_descriptors =
1014 mvxpe_dma_memalloc(sc, &rx->rx_descriptors_map,
1015 (sizeof(struct mvxpe_rx_desc) * MVXPE_RX_RING_CNT));
1016 if (rx->rx_descriptors == NULL)
1017 goto fail;
1018
1019 tx->tx_descriptors =
1020 mvxpe_dma_memalloc(sc, &tx->tx_descriptors_map,
1021 (sizeof(struct mvxpe_tx_desc) * MVXPE_TX_RING_CNT));
1022 if (tx->tx_descriptors == NULL)
1023 goto fail;
1024
1025 return 0;
1026 fail:
1027 mvxpe_ring_dealloc_queue(sc, q);
1028 aprint_error_dev(sc->sc_dev, "DMA Ring buffer allocation failure.\n");
1029 return ENOMEM;
1030 }
1031
1032 STATIC void
1033 mvxpe_ring_dealloc_queue(struct mvxpe_softc *sc, int q)
1034 {
1035 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1036 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1037 bus_dma_segment_t *segs;
1038 bus_size_t size;
1039 void *kva;
1040 int nsegs;
1041
1042 /* Rx */
1043 kva = (void *)MVXPE_RX_RING_MEM_VA(sc, q);
1044 if (kva) {
1045 segs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_segs;
1046 nsegs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_nsegs;
1047 size = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_mapsize;
1048
1049 bus_dmamap_unload(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
1050 bus_dmamap_destroy(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
1051 bus_dmamem_unmap(sc->sc_dmat, kva, size);
1052 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
1053 }
1054
1055 /* Tx */
1056 kva = (void *)MVXPE_TX_RING_MEM_VA(sc, q);
1057 if (kva) {
1058 segs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_segs;
1059 nsegs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_nsegs;
1060 size = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_mapsize;
1061
1062 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
1063 bus_dmamap_destroy(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
1064 bus_dmamem_unmap(sc->sc_dmat, kva, size);
1065 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
1066 }
1067
	/* Clear all dangling pointers */
1069 memset(rx, 0, sizeof(*rx));
1070 memset(tx, 0, sizeof(*tx));
1071 }
1072
1073 STATIC void
1074 mvxpe_ring_init_queue(struct mvxpe_softc *sc, int q)
1075 {
1076 struct mvxpe_rx_desc *rxd = MVXPE_RX_RING_MEM_VA(sc, q);
1077 struct mvxpe_tx_desc *txd = MVXPE_TX_RING_MEM_VA(sc, q);
1078 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1079 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1080 static const int rx_default_queue_len[] = {
1081 MVXPE_RX_QUEUE_LIMIT_0, MVXPE_RX_QUEUE_LIMIT_1,
1082 MVXPE_RX_QUEUE_LIMIT_2, MVXPE_RX_QUEUE_LIMIT_3,
1083 MVXPE_RX_QUEUE_LIMIT_4, MVXPE_RX_QUEUE_LIMIT_5,
1084 MVXPE_RX_QUEUE_LIMIT_6, MVXPE_RX_QUEUE_LIMIT_7,
1085 };
1086 static const int tx_default_queue_len[] = {
1087 MVXPE_TX_QUEUE_LIMIT_0, MVXPE_TX_QUEUE_LIMIT_1,
1088 MVXPE_TX_QUEUE_LIMIT_2, MVXPE_TX_QUEUE_LIMIT_3,
1089 MVXPE_TX_QUEUE_LIMIT_4, MVXPE_TX_QUEUE_LIMIT_5,
1090 MVXPE_TX_QUEUE_LIMIT_6, MVXPE_TX_QUEUE_LIMIT_7,
1091 };
1092 extern uint32_t mvTclk;
1093 int i;
1094
1095 /* Rx handle */
1096 for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
1097 MVXPE_RX_DESC(sc, q, i) = &rxd[i];
1098 MVXPE_RX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_rx_desc) * i;
1099 MVXPE_RX_PKTBUF(sc, q, i) = NULL;
1100 }
1101 mutex_init(&rx->rx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
1102 rx->rx_dma = rx->rx_cpu = 0;
1103 rx->rx_queue_len = rx_default_queue_len[q];
1104 if (rx->rx_queue_len > MVXPE_RX_RING_CNT)
1105 rx->rx_queue_len = MVXPE_RX_RING_CNT;
1106 rx->rx_queue_th_received = rx->rx_queue_len / 4;
1107 rx->rx_queue_th_free = rx->rx_queue_len / 2;
1108 rx->rx_queue_th_time = (mvTclk / 1000) / 2; /* 0.5 [ms] */
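	/*
	 * Example (assuming a 250MHz TCLK, i.e. mvTclk == 250000000):
	 * rx_queue_th_time becomes 125000 clock ticks, which is later
	 * programmed into MVXPE_PRXITTH(q) as a 0.5ms Rx interrupt time
	 * threshold by mvxpe_rx_queue_enable().
	 */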
1109
1110 /* Tx handle */
1111 for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
1112 MVXPE_TX_DESC(sc, q, i) = &txd[i];
1113 MVXPE_TX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_tx_desc) * i;
1114 MVXPE_TX_MBUF(sc, q, i) = NULL;
		/* Tx handle needs a DMA map for bus_dmamap_load_mbuf() */
1116 if (bus_dmamap_create(sc->sc_dmat, sc->sc_bm.bm_chunk_size,
1117 MVXPE_TX_SEGLIMIT, sc->sc_bm.bm_chunk_size, 0,
1118 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
1119 &MVXPE_TX_MAP(sc, q, i))) {
1120 aprint_error_dev(sc->sc_dev,
1121 "can't create dma map (tx ring %d)\n", i);
1122 }
1123 }
1124 mutex_init(&tx->tx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
1125 tx->tx_dma = tx->tx_cpu = 0;
1126 tx->tx_queue_len = tx_default_queue_len[q];
1127 if (tx->tx_queue_len > MVXPE_TX_RING_CNT)
1128 tx->tx_queue_len = MVXPE_TX_RING_CNT;
1129 tx->tx_free_cnt = tx->tx_queue_len;
1130 tx->tx_queue_th_free = tx->tx_queue_len / 2;
1131 }
1132
1133 STATIC void
1134 mvxpe_ring_flush_queue(struct mvxpe_softc *sc, int q)
1135 {
1136 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1137 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1138 int i;
1139
1140 KASSERT_RX_MTX(sc, q);
1141 KASSERT_TX_MTX(sc, q);
1142
1143 /* Rx handle */
1144 for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
1145 if (MVXPE_RX_PKTBUF(sc, q, i) == NULL)
1146 continue;
1147 mvxpe_bm_free_chunk(MVXPE_RX_PKTBUF(sc, q, i));
1148 MVXPE_RX_PKTBUF(sc, q, i) = NULL;
1149 }
1150 rx->rx_dma = rx->rx_cpu = 0;
1151
1152 /* Tx handle */
1153 for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
1154 if (MVXPE_TX_MBUF(sc, q, i) == NULL)
1155 continue;
1156 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, i));
1157 m_freem(MVXPE_TX_MBUF(sc, q, i));
1158 MVXPE_TX_MBUF(sc, q, i) = NULL;
1159 }
1160 tx->tx_dma = tx->tx_cpu = 0;
1161 tx->tx_free_cnt = tx->tx_queue_len;
1162 }
1163
1164 STATIC void
1165 mvxpe_ring_sync_rx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
1166 {
1167 int wrap;
1168
1169 KASSERT_RX_MTX(sc, q);
1170 KASSERT(count > 0 && count <= MVXPE_RX_RING_CNT);
1171 KASSERT(idx >= 0 && idx < MVXPE_RX_RING_CNT);
1172
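	/*
	 * A worked example, assuming MVXPE_RX_RING_CNT == 256: for idx == 250
	 * and count == 10, wrap becomes 4, so the first sync below covers
	 * descriptors 0-3 at the start of the ring and the second covers
	 * descriptors 250-255 starting at idx.
	 */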
1173 wrap = (idx + count) - MVXPE_RX_RING_CNT;
1174 if (wrap > 0) {
1175 count -= wrap;
1176 KASSERT(count > 0);
1177 bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
1178 0, sizeof(struct mvxpe_rx_desc) * wrap, ops);
1179 }
1180 bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
1181 MVXPE_RX_DESC_OFF(sc, q, idx),
1182 sizeof(struct mvxpe_rx_desc) * count, ops);
1183 }
1184
1185 STATIC void
1186 mvxpe_ring_sync_tx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
1187 {
1188 int wrap = 0;
1189
1190 KASSERT_TX_MTX(sc, q);
1191 KASSERT(count > 0 && count <= MVXPE_TX_RING_CNT);
1192 KASSERT(idx >= 0 && idx < MVXPE_TX_RING_CNT);
1193
1194 wrap = (idx + count) - MVXPE_TX_RING_CNT;
1195 if (wrap > 0) {
1196 count -= wrap;
1197 bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
1198 0, sizeof(struct mvxpe_tx_desc) * wrap, ops);
1199 }
1200 bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
1201 MVXPE_TX_DESC_OFF(sc, q, idx),
1202 sizeof(struct mvxpe_tx_desc) * count, ops);
1203 }
1204
1205 /*
1206 * Rx/Tx Queue Control
1207 */
1208 STATIC int
1209 mvxpe_rx_queue_init(struct ifnet *ifp, int q)
1210 {
1211 struct mvxpe_softc *sc = ifp->if_softc;
1212 uint32_t reg;
1213
1214 KASSERT_RX_MTX(sc, q);
1215 KASSERT(MVXPE_RX_RING_MEM_PA(sc, q) != 0);
1216
1217 /* descriptor address */
1218 MVXPE_WRITE(sc, MVXPE_PRXDQA(q), MVXPE_RX_RING_MEM_PA(sc, q));
1219
1220 /* Rx buffer size and descriptor ring size */
1221 reg = MVXPE_PRXDQS_BUFFERSIZE(sc->sc_bm.bm_chunk_size >> 3);
1222 reg |= MVXPE_PRXDQS_DESCRIPTORSQUEUESIZE(MVXPE_RX_RING_CNT);
1223 MVXPE_WRITE(sc, MVXPE_PRXDQS(q), reg);
1224 DPRINTIFNET(ifp, 1, "PRXDQS(%d): %#x\n",
1225 q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
1226
1227 /* Rx packet offset address */
1228 reg = MVXPE_PRXC_PACKETOFFSET(sc->sc_bm.bm_chunk_packet_offset >> 3);
1229 MVXPE_WRITE(sc, MVXPE_PRXC(q), reg);
1230 DPRINTIFNET(ifp, 1, "PRXC(%d): %#x\n",
1231 q, MVXPE_READ(sc, MVXPE_PRXC(q)));
1232
1233 /* if DMA is not working, register is not updated */
1234 KASSERT(MVXPE_READ(sc, MVXPE_PRXDQA(q)) == MVXPE_RX_RING_MEM_PA(sc, q));
1235 return 0;
1236 }
1237
1238 STATIC int
1239 mvxpe_tx_queue_init(struct ifnet *ifp, int q)
1240 {
1241 struct mvxpe_softc *sc = ifp->if_softc;
1242 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1243 uint32_t reg;
1244
1245 KASSERT_TX_MTX(sc, q);
1246 KASSERT(MVXPE_TX_RING_MEM_PA(sc, q) != 0);
1247
1248 /* descriptor address */
1249 MVXPE_WRITE(sc, MVXPE_PTXDQA(q), MVXPE_TX_RING_MEM_PA(sc, q));
1250
1251 /* Tx threshold, and descriptor ring size */
1252 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
1253 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
1254 MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
1255 DPRINTIFNET(ifp, 1, "PTXDQS(%d): %#x\n",
1256 q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));
1257
1258 /* if DMA is not working, register is not updated */
1259 KASSERT(MVXPE_READ(sc, MVXPE_PTXDQA(q)) == MVXPE_TX_RING_MEM_PA(sc, q));
1260 return 0;
1261 }
1262
1263 STATIC int
1264 mvxpe_rx_queue_enable(struct ifnet *ifp, int q)
1265 {
1266 struct mvxpe_softc *sc = ifp->if_softc;
1267 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1268 uint32_t reg;
1269
1270 KASSERT_RX_MTX(sc, q);
1271
1272 /* Set Rx interrupt threshold */
1273 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
1274 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
1275 MVXPE_WRITE(sc, MVXPE_PRXDQTH(q), reg);
1276
1277 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
1278 MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);
1279
1280 /* Unmask RXTX Intr. */
1281 reg = MVXPE_READ(sc, MVXPE_PRXTXIM);
1282 reg |= MVXPE_PRXTXI_RREQ(q); /* Rx resource error */
1283 MVXPE_WRITE(sc, MVXPE_PRXTXIM, reg);
1284
1285 /* Unmask RXTX_TH Intr. */
1286 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
	reg |= MVXPE_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
	reg |= MVXPE_PRXTXTI_RDTAQ(q); /* Rx Descriptor Alert */
1289 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1290
1291 /* Enable Rx queue */
1292 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
1293 reg |= MVXPE_RQC_ENQ(q);
1294 MVXPE_WRITE(sc, MVXPE_RQC, reg);
1295
1296 return 0;
1297 }
1298
1299 STATIC int
1300 mvxpe_tx_queue_enable(struct ifnet *ifp, int q)
1301 {
1302 struct mvxpe_softc *sc = ifp->if_softc;
1303 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1304 uint32_t reg;
1305
1306 KASSERT_TX_MTX(sc, q);
1307
1308 /* Set Tx interrupt threshold */
1309 reg = MVXPE_READ(sc, MVXPE_PTXDQS(q));
1310 reg &= ~MVXPE_PTXDQS_TBT_MASK; /* keep queue size */
1311 reg |= MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
1312 MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
1313
1314 /* Unmask RXTX_TH Intr. */
1315 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1316 reg |= MVXPE_PRXTXTI_TBTCQ(q); /* Tx Threshold cross */
1317 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1318
1319 /* Don't update MVXPE_TQC here, there is no packet yet. */
1320 return 0;
1321 }
1322
1323 STATIC void
1324 mvxpe_rx_lockq(struct mvxpe_softc *sc, int q)
1325 {
1326 KASSERT(q >= 0);
1327 KASSERT(q < MVXPE_QUEUE_SIZE);
1328 mutex_enter(&sc->sc_rx_ring[q].rx_ring_mtx);
1329 }
1330
1331 STATIC void
1332 mvxpe_rx_unlockq(struct mvxpe_softc *sc, int q)
1333 {
1334 KASSERT(q >= 0);
1335 KASSERT(q < MVXPE_QUEUE_SIZE);
1336 mutex_exit(&sc->sc_rx_ring[q].rx_ring_mtx);
1337 }
1338
1339 STATIC void
1340 mvxpe_tx_lockq(struct mvxpe_softc *sc, int q)
1341 {
1342 KASSERT(q >= 0);
1343 KASSERT(q < MVXPE_QUEUE_SIZE);
1344 mutex_enter(&sc->sc_tx_ring[q].tx_ring_mtx);
1345 }
1346
1347 STATIC void
1348 mvxpe_tx_unlockq(struct mvxpe_softc *sc, int q)
1349 {
1350 KASSERT(q >= 0);
1351 KASSERT(q < MVXPE_QUEUE_SIZE);
1352 mutex_exit(&sc->sc_tx_ring[q].tx_ring_mtx);
1353 }
1354
1355 /*
1356 * Interrupt Handlers
1357 */
1358 STATIC void
1359 mvxpe_disable_intr(struct mvxpe_softc *sc)
1360 {
1361 MVXPE_WRITE(sc, MVXPE_EUIM, 0);
1362 MVXPE_WRITE(sc, MVXPE_EUIC, 0);
1363 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, 0);
1364 MVXPE_WRITE(sc, MVXPE_PRXTXTIC, 0);
1365 MVXPE_WRITE(sc, MVXPE_PRXTXIM, 0);
1366 MVXPE_WRITE(sc, MVXPE_PRXTXIC, 0);
1367 MVXPE_WRITE(sc, MVXPE_PMIM, 0);
1368 MVXPE_WRITE(sc, MVXPE_PMIC, 0);
1369 MVXPE_WRITE(sc, MVXPE_PIE, 0);
1370 }
1371
1372 STATIC void
1373 mvxpe_enable_intr(struct mvxpe_softc *sc)
1374 {
1375 uint32_t reg;
1376
1377 /* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
1378 reg = MVXPE_READ(sc, MVXPE_PMIM);
1379 reg |= MVXPE_PMI_PHYSTATUSCHNG;
1380 reg |= MVXPE_PMI_LINKCHANGE;
1381 reg |= MVXPE_PMI_IAE;
1382 reg |= MVXPE_PMI_RXOVERRUN;
1383 reg |= MVXPE_PMI_RXCRCERROR;
1384 reg |= MVXPE_PMI_RXLARGEPACKET;
1385 reg |= MVXPE_PMI_TXUNDRN;
1386 reg |= MVXPE_PMI_PRBSERROR;
1387 reg |= MVXPE_PMI_SRSE;
1388 reg |= MVXPE_PMI_TREQ_MASK;
1389 MVXPE_WRITE(sc, MVXPE_PMIM, reg);
1390
1391 /* Enable RXTX Intr. (via RXTX_TH Summary bit) */
1392 reg = MVXPE_READ(sc, MVXPE_PRXTXIM);
1393 reg |= MVXPE_PRXTXI_RREQ_MASK; /* Rx resource error */
1394 MVXPE_WRITE(sc, MVXPE_PRXTXIM, reg);
1395
1396 /* Enable Summary Bit to check all interrupt cause. */
1397 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1398 reg |= MVXPE_PRXTXTI_PMISCICSUMMARY;
1399 reg |= MVXPE_PRXTXTI_PTXERRORSUMMARY;
1400 reg |= MVXPE_PRXTXTI_PRXTXICSUMMARY;
1401 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1402
1403 /* Enable All Queue Interrupt */
1404 reg = MVXPE_READ(sc, MVXPE_PIE);
1405 reg |= MVXPE_PIE_RXPKTINTRPTENB_MASK;
1406 reg |= MVXPE_PIE_TXPKTINTRPTENB_MASK;
1407 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1408 }
1409
1410 STATIC int
1411 mvxpe_rxtxth_intr(void *arg)
1412 {
1413 struct mvxpe_softc *sc = arg;
1414 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1415 uint32_t ic, datum = 0;
1416 int claimed = 0;
1417
1418
1419 DPRINTSC(sc, 2, "got RXTX_TH_Intr\n");
1420 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtxth);
1421
1422 mvxpe_sc_lock(sc);
1423 for (;;) {
1424 ic = MVXPE_READ(sc, MVXPE_PRXTXTIC);
1425 if (ic == 0)
1426 break;
1427 MVXPE_WRITE(sc, MVXPE_PRXTXTIC, ~ic);
1428 datum = datum ^ ic;
1429 claimed = 1;
1430
1431 DPRINTIFNET(ifp, 2, "PRXTXTIC: %#x\n", ic);
1432
		/* route maintenance interrupts first */
1434 if (ic & MVXPE_PRXTXTI_PTXERRORSUMMARY) {
1435 DPRINTIFNET(ifp, 1, "PRXTXTIC: +PTXERRORSUMMARY\n");
1436 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtxth_txerr);
1437 }
1438 if ((ic & MVXPE_PRXTXTI_PMISCICSUMMARY)) {
			DPRINTIFNET(ifp, 2, "PRXTXTIC: +PMISCICSUMMARY\n");
1440 mvxpe_misc_intr(sc);
1441 }
1442 if (ic & MVXPE_PRXTXTI_PRXTXICSUMMARY) {
			DPRINTIFNET(ifp, 2, "PRXTXTIC: +PRXTXICSUMMARY\n");
1444 mvxpe_rxtx_intr(sc);
1445 }
1446 if (!(ifp->if_flags & IFF_RUNNING))
1447 break;
1448
1449 /* RxTx interrupt */
1450 if (ic & (MVXPE_PRXTXTI_RBICTAPQ_MASK)) {
1451 DPRINTIFNET(ifp, 2, "PRXTXTIC: +RXEOF\n");
1452 mvxpe_rx(sc);
1453 }
1454
1455 if (ic & MVXPE_PRXTXTI_TBTCQ_MASK) {
1456 DPRINTIFNET(ifp, 2, "PRXTXTIC: +TBTCQ\n");
1457 mvxpe_tx_complete(sc);
1458 }
1459
1460 if (ic & MVXPE_PRXTXTI_RDTAQ_MASK) {
1461 DPRINTIFNET(ifp, 2, "PRXTXTIC: +RDTAQ\n");
1462 mvxpe_rx_reload(sc);
1463 }
1464
		/* don't loop here; we are using interrupt coalescing */
1466 break;
1467 }
1468 mvxpe_sc_unlock(sc);
1469
1470 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1471 mvxpe_start(ifp);
1472
1473 rnd_add_uint32(&sc->sc_rnd_source, datum);
1474
1475 return claimed;
1476 }
1477
1478 STATIC int
1479 mvxpe_misc_intr(void *arg)
1480 {
1481 struct mvxpe_softc *sc = arg;
1482 #ifdef MVXPE_DEBUG
1483 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1484 #endif
1485 uint32_t ic;
1486 uint32_t datum = 0;
1487 int claimed = 0;
1488
1489 DPRINTSC(sc, 2, "got MISC_INTR\n");
1490 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_misc);
1491
1492 KASSERT_SC_MTX(sc);
1493
1494 for (;;) {
1495 ic = MVXPE_READ(sc, MVXPE_PMIC);
1496 ic &= MVXPE_READ(sc, MVXPE_PMIM);
1497 if (ic == 0)
1498 break;
1499 MVXPE_WRITE(sc, MVXPE_PMIC, ~ic);
1500 datum = datum ^ ic;
1501 claimed = 1;
1502
1503 DPRINTIFNET(ifp, 2, "PMIC=%#x\n", ic);
1504 if (ic & MVXPE_PMI_PHYSTATUSCHNG) {
1505 DPRINTIFNET(ifp, 2, "+PHYSTATUSCHNG\n");
1506 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_phystatuschng);
1507 }
1508 if (ic & MVXPE_PMI_LINKCHANGE) {
1509 DPRINTIFNET(ifp, 2, "+LINKCHANGE\n");
1510 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_linkchange);
1511 mvxpe_linkupdate(sc);
1512 }
1513 if (ic & MVXPE_PMI_IAE) {
1514 DPRINTIFNET(ifp, 2, "+IAE\n");
1515 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_iae);
1516 }
1517 if (ic & MVXPE_PMI_RXOVERRUN) {
1518 DPRINTIFNET(ifp, 2, "+RXOVERRUN\n");
1519 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxoverrun);
1520 }
1521 if (ic & MVXPE_PMI_RXCRCERROR) {
1522 DPRINTIFNET(ifp, 2, "+RXCRCERROR\n");
1523 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxcrc);
1524 }
1525 if (ic & MVXPE_PMI_RXLARGEPACKET) {
1526 DPRINTIFNET(ifp, 2, "+RXLARGEPACKET\n");
1527 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxlargepacket);
1528 }
1529 if (ic & MVXPE_PMI_TXUNDRN) {
1530 DPRINTIFNET(ifp, 2, "+TXUNDRN\n");
1531 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txunderrun);
1532 }
1533 if (ic & MVXPE_PMI_PRBSERROR) {
1534 DPRINTIFNET(ifp, 2, "+PRBSERROR\n");
1535 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_prbserr);
1536 }
1537 if (ic & MVXPE_PMI_TREQ_MASK) {
1538 DPRINTIFNET(ifp, 2, "+TREQ\n");
1539 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txreq);
1540 }
1541 }
1542 if (datum)
1543 rnd_add_uint32(&sc->sc_rnd_source, datum);
1544
1545 return claimed;
1546 }
1547
1548 STATIC int
1549 mvxpe_rxtx_intr(void *arg)
1550 {
1551 struct mvxpe_softc *sc = arg;
1552 #ifdef MVXPE_DEBUG
1553 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1554 #endif
1555 uint32_t datum = 0;
1556 uint32_t prxtxic;
1557 int claimed = 0;
1558
1559 DPRINTSC(sc, 2, "got RXTX_Intr\n");
1560 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtx);
1561
1562 KASSERT_SC_MTX(sc);
1563
1564 for (;;) {
1565 prxtxic = MVXPE_READ(sc, MVXPE_PRXTXIC);
1566 prxtxic &= MVXPE_READ(sc, MVXPE_PRXTXIM);
1567 if (prxtxic == 0)
1568 break;
1569 MVXPE_WRITE(sc, MVXPE_PRXTXIC, ~prxtxic);
1570 datum = datum ^ prxtxic;
1571 claimed = 1;
1572
1573 DPRINTSC(sc, 2, "PRXTXIC: %#x\n", prxtxic);
1574
1575 if (prxtxic & MVXPE_PRXTXI_RREQ_MASK) {
1576 DPRINTIFNET(ifp, 1, "Rx Resource Error.\n");
1577 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rreq);
1578 }
1579 if (prxtxic & MVXPE_PRXTXI_RPQ_MASK) {
1580 DPRINTIFNET(ifp, 1, "Rx Packet in Queue.\n");
1581 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rpq);
1582 }
1583 if (prxtxic & MVXPE_PRXTXI_TBRQ_MASK) {
1584 DPRINTIFNET(ifp, 1, "Tx Buffer Return.\n");
1585 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_tbrq);
1586 }
1587 if (prxtxic & MVXPE_PRXTXI_PRXTXTHICSUMMARY) {
			DPRINTIFNET(ifp, 1, "PRXTXTHIC Summary\n");
1589 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rxtxth);
1590 }
1591 if (prxtxic & MVXPE_PRXTXI_PTXERRORSUMMARY) {
			DPRINTIFNET(ifp, 1, "PTXERROR Summary\n");
1593 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_txerr);
1594 }
1595 if (prxtxic & MVXPE_PRXTXI_PMISCICSUMMARY) {
			DPRINTIFNET(ifp, 1, "PMISCIC Summary\n");
1597 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_misc);
1598 }
1599 }
1600 if (datum)
1601 rnd_add_uint32(&sc->sc_rnd_source, datum);
1602
1603 return claimed;
1604 }
1605
1606 STATIC void
1607 mvxpe_tick(void *arg)
1608 {
1609 struct mvxpe_softc *sc = arg;
1610 struct mii_data *mii = &sc->sc_mii;
1611
1612 mvxpe_sc_lock(sc);
1613
1614 mii_tick(mii);
1615 mii_pollstat(&sc->sc_mii);
1616
	/* read MIB registers (cleared by read) */
1618 mvxpe_update_mib(sc);
1619
	/* read counter registers (cleared by read) */
1621 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pdfc,
1622 MVXPE_READ(sc, MVXPE_PDFC));
1623 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pofc,
1624 MVXPE_READ(sc, MVXPE_POFC));
1625 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txbadfcs,
1626 MVXPE_READ(sc, MVXPE_TXBADFCS));
1627 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txdropped,
1628 MVXPE_READ(sc, MVXPE_TXDROPPED));
1629 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_lpic,
1630 MVXPE_READ(sc, MVXPE_LPIC));
1631
1632 mvxpe_sc_unlock(sc);
1633
1634 callout_schedule(&sc->sc_tick_ch, hz);
1635 }
1636
1637
1638 /*
1639 * struct ifnet and mii callbacks
1640 */
1641 STATIC void
1642 mvxpe_start(struct ifnet *ifp)
1643 {
1644 struct mvxpe_softc *sc = ifp->if_softc;
1645 struct mbuf *m;
1646 int q;
1647
1648 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
1649 DPRINTIFNET(ifp, 1, "not running\n");
1650 return;
1651 }
1652
1653 mvxpe_sc_lock(sc);
1654 if (!MVXPE_IS_LINKUP(sc)) {
1655 /* If Link is DOWN, can't start TX */
1656 DPRINTIFNET(ifp, 1, "link fail\n");
1657 for (;;) {
1658 /*
			 * Discard all stale packets; they may confuse DAD,
			 * ARP, or other timer-based protocols.
1661 */
1662 IFQ_DEQUEUE(&ifp->if_snd, m);
1663 if (m == NULL)
1664 break;
1665 m_freem(m);
1666 }
1667 mvxpe_sc_unlock(sc);
1668 return;
1669 }
1670 for (;;) {
1671 /*
		 * Don't use IFQ_POLL().
		 * There is a locking problem between IFQ_POLL and IFQ_DEQUEUE
		 * on an SMP-enabled networking stack.
1675 */
1676 IFQ_DEQUEUE(&ifp->if_snd, m);
1677 if (m == NULL)
1678 break;
1679
1680 q = mvxpe_tx_queue_select(sc, m);
1681 if (q < 0)
1682 break;
1683 /* mutex is held in mvxpe_tx_queue_select() */
1684
1685 if (mvxpe_tx_queue(sc, m, q) != 0) {
1686 DPRINTIFNET(ifp, 1, "cannot add packet to tx ring\n");
1687 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txerr);
1688 mvxpe_tx_unlockq(sc, q);
1689 break;
1690 }
1691 mvxpe_tx_unlockq(sc, q);
1692 KASSERT(sc->sc_tx_ring[q].tx_free_cnt >= 0);
1693 KASSERT(sc->sc_tx_ring[q].tx_free_cnt <=
1694 sc->sc_tx_ring[q].tx_queue_len);
1695 DPRINTIFNET(ifp, 1, "a packet is added to tx ring\n");
1696 sc->sc_tx_pending++;
1697 ifp->if_timer = 1;
1698 sc->sc_wdogsoft = 1;
1699 bpf_mtap(ifp, m);
1700 }
1701 mvxpe_sc_unlock(sc);
1702
1703 return;
1704 }
1705
1706 STATIC int
1707 mvxpe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1708 {
1709 struct mvxpe_softc *sc = ifp->if_softc;
1710 struct ifreq *ifr = data;
1711 int error = 0;
1712 int s;
1713
1714 switch (cmd) {
1715 case SIOCGIFMEDIA:
1716 case SIOCSIFMEDIA:
1717 DPRINTIFNET(ifp, 2, "mvxpe_ioctl MEDIA\n");
1718 s = splnet(); /* XXX: is there suitable mutex? */
1719 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1720 splx(s);
1721 break;
1722 default:
1723 DPRINTIFNET(ifp, 2, "mvxpe_ioctl ETHER\n");
1724 error = ether_ioctl(ifp, cmd, data);
1725 if (error == ENETRESET) {
1726 if (ifp->if_flags & IFF_RUNNING) {
1727 mvxpe_sc_lock(sc);
1728 mvxpe_filter_setup(sc);
1729 mvxpe_sc_unlock(sc);
1730 }
1731 error = 0;
1732 }
1733 break;
1734 }
1735
1736 return error;
1737 }
1738
1739 STATIC int
1740 mvxpe_init(struct ifnet *ifp)
1741 {
1742 struct mvxpe_softc *sc = ifp->if_softc;
1743 struct mii_data *mii = &sc->sc_mii;
1744 uint32_t reg;
1745 int q;
1746
1747 mvxpe_sc_lock(sc);
1748
1749 /* Start DMA Engine */
1750 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
1751 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
1752 MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
1753
1754 /* Enable port */
1755 reg = MVXPE_READ(sc, MVXPE_PMACC0);
1756 reg |= MVXPE_PMACC0_PORTEN;
1757 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1758
1759 /* Link up */
1760 mvxpe_linkup(sc);
1761
	/* Enable all queues and the interrupt of each queue */
1763 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1764 mvxpe_rx_lockq(sc, q);
1765 mvxpe_rx_queue_enable(ifp, q);
1766 mvxpe_rx_queue_reload(sc, q);
1767 mvxpe_rx_unlockq(sc, q);
1768
1769 mvxpe_tx_lockq(sc, q);
1770 mvxpe_tx_queue_enable(ifp, q);
1771 mvxpe_tx_unlockq(sc, q);
1772 }
1773
1774 /* Enable interrupt */
1775 mvxpe_enable_intr(sc);
1776
	/* Start the periodic tick callout */
1778 callout_schedule(&sc->sc_tick_ch, hz);
1779
1780 /* Media check */
1781 mii_mediachg(mii);
1782
1783 ifp->if_flags |= IFF_RUNNING;
1784 ifp->if_flags &= ~IFF_OACTIVE;
1785
1786 mvxpe_sc_unlock(sc);
1787 return 0;
1788 }
1789
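/*
 * mvxpe_stop: if_stop handler.  Stop the tick callout, force the link
 * down, disable the queues and wait until Rx/Tx activity and the Tx FIFO
 * have drained, then disable the MAC port.  With "disable" set, the DMA
 * engines are held in reset and the descriptor rings are flushed.
 */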
1790 /* ARGSUSED */
1791 STATIC void
1792 mvxpe_stop(struct ifnet *ifp, int disable)
1793 {
1794 struct mvxpe_softc *sc = ifp->if_softc;
1795 uint32_t reg;
1796 int q, cnt;
1797
1798 DPRINTIFNET(ifp, 1, "stop device dma and interrupts.\n");
1799
1800 mvxpe_sc_lock(sc);
1801
1802 callout_stop(&sc->sc_tick_ch);
1803
1804 /* Link down */
1805 mvxpe_linkdown(sc);
1806
1807 /* Disable Rx interrupt */
1808 reg = MVXPE_READ(sc, MVXPE_PIE);
1809 reg &= ~MVXPE_PIE_RXPKTINTRPTENB_MASK;
1810 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1811
1812 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1813 reg &= ~MVXPE_PRXTXTI_RBICTAPQ_MASK;
1814 reg &= ~MVXPE_PRXTXTI_RDTAQ_MASK;
1815 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1816
1817 /* Wait for all Rx activity to terminate. */
1818 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
1819 reg = MVXPE_RQC_DIS(reg);
1820 MVXPE_WRITE(sc, MVXPE_RQC, reg);
1821 cnt = 0;
1822 do {
1823 if (cnt >= RX_DISABLE_TIMEOUT) {
1824 aprint_error_ifnet(ifp,
			    "timeout waiting for RX to stop. rqc 0x%x\n", reg);
1826 break;
1827 }
1828 cnt++;
1829 reg = MVXPE_READ(sc, MVXPE_RQC);
1830 } while (reg & MVXPE_RQC_EN_MASK);
1831
	/* Wait for all Tx activity to terminate. */
1833 reg = MVXPE_READ(sc, MVXPE_PIE);
1834 reg &= ~MVXPE_PIE_TXPKTINTRPTENB_MASK;
1835 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1836
1837 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1838 reg &= ~MVXPE_PRXTXTI_TBTCQ_MASK;
1839 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1840
1841 reg = MVXPE_READ(sc, MVXPE_TQC) & MVXPE_TQC_EN_MASK;
1842 reg = MVXPE_TQC_DIS(reg);
1843 MVXPE_WRITE(sc, MVXPE_TQC, reg);
1844 cnt = 0;
1845 do {
1846 if (cnt >= TX_DISABLE_TIMEOUT) {
1847 aprint_error_ifnet(ifp,
			    "timeout waiting for TX to stop. tqc 0x%x\n", reg);
1849 break;
1850 }
1851 cnt++;
1852 reg = MVXPE_READ(sc, MVXPE_TQC);
1853 } while (reg & MVXPE_TQC_EN_MASK);
1854
	/* Wait until the Tx FIFO is empty */
1856 cnt = 0;
1857 do {
1858 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
1859 aprint_error_ifnet(ifp,
			    "timeout waiting for TX FIFO to drain. ps0 0x%x\n", reg);
1861 break;
1862 }
1863 cnt++;
1864 reg = MVXPE_READ(sc, MVXPE_PS0);
1865 } while (!(reg & MVXPE_PS0_TXFIFOEMP) && (reg & MVXPE_PS0_TXINPROG));
1866
1867 /* Reset the MAC Port Enable bit */
1868 reg = MVXPE_READ(sc, MVXPE_PMACC0);
1869 reg &= ~MVXPE_PMACC0_PORTEN;
1870 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1871
	/* Disable each queue */
1873 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1874 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1875
1876 mvxpe_rx_lockq(sc, q);
1877 mvxpe_tx_lockq(sc, q);
1878
1879 /* Disable Rx packet buffer reloading */
1880 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
1881 reg |= MVXPE_PRXDQTH_NODT(0);
1882 MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);
1883
1884 if (disable) {
1885 /*
1886 * Hold Reset state of DMA Engine
1887 * (must write 0x0 to restart it)
1888 */
1889 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
1890 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);
1891 mvxpe_ring_flush_queue(sc, q);
1892 }
1893
1894 mvxpe_tx_unlockq(sc, q);
1895 mvxpe_rx_unlockq(sc, q);
1896 }
1897
1898 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1899
1900 mvxpe_sc_unlock(sc);
1901 }
1902
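/*
 * mvxpe_watchdog: if_watchdog handler.  Reclaim completed Tx descriptors
 * first; if a queue is still stuck, either re-kick the Tx queue (soft
 * watchdog) or reset the link and reinitialize the interface.
 */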
1903 STATIC void
1904 mvxpe_watchdog(struct ifnet *ifp)
1905 {
1906 struct mvxpe_softc *sc = ifp->if_softc;
1907 int q;
1908
1909 mvxpe_sc_lock(sc);
1910
1911 /*
1912 * Reclaim first as there is a possibility of losing Tx completion
1913 * interrupts.
1914 */
1915 mvxpe_tx_complete(sc);
1916 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1917 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1918
1919 if (tx->tx_dma != tx->tx_cpu) {
1920 if (sc->sc_wdogsoft) {
				/*
				 * There is a race between the CPU and the
				 * DMA engine: when the DMA engine reaches
				 * the end of the queue, it clears the
				 * MVXPE_TQC_ENQ bit.
				 * XXX: how about enhanced mode?
				 */
1927 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
1928 ifp->if_timer = 5;
1929 sc->sc_wdogsoft = 0;
1930 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_wdogsoft);
1931 } else {
1932 aprint_error_ifnet(ifp, "watchdog timeout\n");
1933 ifp->if_oerrors++;
1934 mvxpe_linkreset(sc);
1935 mvxpe_sc_unlock(sc);
1936
1937 /* trigger reinitialize sequence */
1938 mvxpe_stop(ifp, 1);
1939 mvxpe_init(ifp);
1940
1941 mvxpe_sc_lock(sc);
1942 }
1943 }
1944 }
1945 mvxpe_sc_unlock(sc);
1946 }
1947
1948 STATIC int
1949 mvxpe_ifflags_cb(struct ethercom *ec)
1950 {
1951 struct ifnet *ifp = &ec->ec_if;
1952 struct mvxpe_softc *sc = ifp->if_softc;
1953 int change = ifp->if_flags ^ sc->sc_if_flags;
1954
1955 mvxpe_sc_lock(sc);
1956
1957 if (change != 0)
1958 sc->sc_if_flags = ifp->if_flags;
1959
1960 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
1961 mvxpe_sc_unlock(sc);
1962 return ENETRESET;
1963 }
1964
1965 if ((change & IFF_PROMISC) != 0)
1966 mvxpe_filter_setup(sc);
1967
1968 if ((change & IFF_UP) != 0)
1969 mvxpe_linkreset(sc);
1970
1971 mvxpe_sc_unlock(sc);
1972 return 0;
1973 }
1974
1975 STATIC int
1976 mvxpe_mediachange(struct ifnet *ifp)
1977 {
1978 return ether_mediachange(ifp);
1979 }
1980
1981 STATIC void
1982 mvxpe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1983 {
1984 ether_mediastatus(ifp, ifmr);
1985 }
1986
1987 /*
1988 * Link State Notify
1989 */
STATIC void
mvxpe_linkupdate(struct mvxpe_softc *sc)
1991 {
1992 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1993 int linkup; /* bool */
1994
1995 KASSERT_SC_MTX(sc);
1996
1997 /* tell miibus */
1998 mii_pollstat(&sc->sc_mii);
1999
2000 /* syslog */
2001 linkup = MVXPE_IS_LINKUP(sc);
2002 if (sc->sc_linkstate == linkup)
2003 return;
2004
2005 log(LOG_CRIT, "%s: link %s\n", ifp->if_xname, linkup ? "up" : "down");
2006 if (linkup)
2007 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up);
2008 else
2009 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_down);
2010
2011 sc->sc_linkstate = linkup;
2012 }
2013
2014 STATIC void
2015 mvxpe_linkup(struct mvxpe_softc *sc)
2016 {
2017 uint32_t reg;
2018
2019 KASSERT_SC_MTX(sc);
2020
2021 /* set EEE parameters */
2022 reg = MVXPE_READ(sc, MVXPE_LPIC1);
2023 if (sc->sc_cf.cf_lpi)
2024 reg |= MVXPE_LPIC1_LPIRE;
2025 else
2026 reg &= ~MVXPE_LPIC1_LPIRE;
2027 MVXPE_WRITE(sc, MVXPE_LPIC1, reg);
2028
2029 /* set auto-negotiation parameters */
2030 reg = MVXPE_READ(sc, MVXPE_PANC);
2031 if (sc->sc_cf.cf_fc) {
2032 /* flow control negotiation */
2033 reg |= MVXPE_PANC_PAUSEADV;
2034 reg |= MVXPE_PANC_ANFCEN;
2035 }
2036 else {
2037 reg &= ~MVXPE_PANC_PAUSEADV;
2038 reg &= ~MVXPE_PANC_ANFCEN;
2039 }
2040 reg &= ~MVXPE_PANC_FORCELINKFAIL;
2041 reg &= ~MVXPE_PANC_FORCELINKPASS;
2042 MVXPE_WRITE(sc, MVXPE_PANC, reg);
2043
2044 mii_mediachg(&sc->sc_mii);
2045 }
2046
2047 STATIC void
2048 mvxpe_linkdown(struct mvxpe_softc *sc)
2049 {
2050 struct mii_softc *mii;
2051 uint32_t reg;
2052
2053 KASSERT_SC_MTX(sc);
2055
2056 reg = MVXPE_READ(sc, MVXPE_PANC);
2057 reg |= MVXPE_PANC_FORCELINKFAIL;
	reg &= ~MVXPE_PANC_FORCELINKPASS;
2059 MVXPE_WRITE(sc, MVXPE_PANC, reg);
2060
2061 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2062 if (mii)
2063 mii_phy_down(mii);
2064 }
2065
2066 STATIC void
2067 mvxpe_linkreset(struct mvxpe_softc *sc)
2068 {
2069 struct mii_softc *mii;
2070
2071 KASSERT_SC_MTX(sc);
2072
2073 /* force reset PHY first */
2074 mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2075 if (mii)
2076 mii_phy_reset(mii);
2077
2078 /* reinit MAC and PHY */
2079 mvxpe_linkdown(sc);
2080 if ((sc->sc_if_flags & IFF_UP) != 0)
2081 mvxpe_linkup(sc);
2082 }
2083
/*
 * Packet Buffer Manager (BM)
 */
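/*
 * mvxpe_bm_init: allocate one DMA-safe memory region, slice it into
 * fixed-size, properly aligned chunks and put every chunk on the free
 * list.
 */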
2087 STATIC int
2088 mvxpe_bm_init(struct mvxpe_softc *sc)
2089 {
2090 struct mvxpe_bm_softc *bm = &sc->sc_bm;
2091 bus_dma_segment_t segs;
2092 char *kva, *ptr, *ptr_next, *ptr_data;
2093 char *bm_buf_end;
2094 paddr_t bm_buf_pa;
2095 uint32_t align, pad;
2096 size_t bm_buf_size;
2097 int nsegs, error;
2098
2099 error = 0;
2100
2101 memset(bm, 0, sizeof(*bm));
2102 bm->bm_dmat = sc->sc_dmat;
2103 bm->bm_chunk_count = 0;
2104 bm->bm_chunk_size = MVXPE_BM_SIZE;
2105 bm->bm_chunk_header_size = sizeof(struct mvxpe_bm_chunk);
2106 bm->bm_chunk_packet_offset = 0;
2107 mutex_init(&bm->bm_mtx, MUTEX_DEFAULT, IPL_NET);
2108 LIST_INIT(&bm->bm_free);
2109 LIST_INIT(&bm->bm_inuse);
2110
	/*
	 * Adjust bm_chunk_size, bm_chunk_header_size and bm_slotsize
	 * to satisfy the alignment restrictions.
	 *
	 *   <----------------- bm_slotsize [oct.] ------------------>
	 *                              <---- bm_chunk_size [oct.] --->
	 *   <-- header_size [oct.] --> <---- MVXPE_BM_SIZE [oct.] --->
	 *   +-----------------+--------+---------+-----------------+--+
	 *   | bm_chunk hdr    |pad     |pkt_off  |   packet data   |  |
	 *   +-----------------+--------+---------+-----------------+--+
	 *   ^                          ^         ^                    ^
	 *   |                          |         |                    |
	 *   ptr                        ptr_data  DMA here             ptr_next
	 *
	 * Restrictions:
	 *   - ptr must be aligned to MVXPE_BM_ADDR_ALIGN
	 *   - data must be aligned to MVXPE_RXBUF_ALIGN
	 *   - the data size must be a multiple of 8.
	 */
2130 /* assume start of buffer at 0x0000.0000 */
2131 ptr = (char *)0;
2132 /* align start of packet data */
2133 ptr_data = ptr + bm->bm_chunk_header_size;
2134 align = (unsigned long)ptr_data & MVXPE_RXBUF_MASK;
2135 if (align != 0) {
2136 pad = MVXPE_RXBUF_ALIGN - align;
2137 bm->bm_chunk_header_size += pad;
2138 DPRINTSC(sc, 1, "added padding to BM header, %u bytes\n", pad);
2139 }
2140 /* align size of packet data */
2141 ptr_data = ptr + bm->bm_chunk_header_size;
2142 ptr_next = ptr_data + MVXPE_BM_SIZE;
2143 align = (unsigned long)ptr_next & MVXPE_BM_ADDR_MASK;
2144 if (align != 0) {
2145 pad = MVXPE_BM_ADDR_ALIGN - align;
2146 ptr_next += pad;
2147 DPRINTSC(sc, 1, "added padding to BM pktbuf, %u bytes\n", pad);
2148 }
2149 bm->bm_slotsize = ptr_next - ptr;
2150 bm->bm_chunk_size = ptr_next - ptr_data;
2151 KASSERT((bm->bm_chunk_size % 8) == 0);
2152 /* align total buffer size to page boundary */
2153 bm_buf_size = bm->bm_slotsize * MVXPE_BM_SLOTS;
2154 align = (unsigned long)bm_buf_size & (PAGE_SIZE - 1);
2155 if (align != 0) {
2156 pad = PAGE_SIZE - align;
2157 bm_buf_size += pad;
2158 DPRINTSC(sc, 1,
2159 "expand buffer to fit page boundary, %u bytes\n", pad);
2160 }
2161
	/*
	 * Get an aligned buffer from the busdma(9) framework.
	 */
2165 if (bus_dmamem_alloc(bm->bm_dmat, bm_buf_size, PAGE_SIZE, 0,
2166 &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
2167 aprint_error_dev(sc->sc_dev, "can't alloc BM buffers\n");
2168 return ENOBUFS;
2169 }
2170 if (bus_dmamem_map(bm->bm_dmat, &segs, nsegs, bm_buf_size,
2171 (void **)&kva, BUS_DMA_NOWAIT)) {
2172 aprint_error_dev(sc->sc_dev,
2173 "can't map dma buffers (%zu bytes)\n", bm_buf_size);
2174 error = ENOBUFS;
2175 goto fail1;
2176 }
2177 KASSERT(((unsigned long)kva & MVXPE_BM_ADDR_MASK) == 0);
2178 if (bus_dmamap_create(bm->bm_dmat, bm_buf_size, 1, bm_buf_size, 0,
2179 BUS_DMA_NOWAIT, &bm->bm_map)) {
2180 aprint_error_dev(sc->sc_dev, "can't create dma map\n");
2181 error = ENOBUFS;
2182 goto fail2;
2183 }
2184 if (bus_dmamap_load(bm->bm_dmat, bm->bm_map,
2185 kva, bm_buf_size, NULL, BUS_DMA_NOWAIT)) {
2186 aprint_error_dev(sc->sc_dev, "can't load dma map\n");
2187 error = ENOBUFS;
2188 goto fail3;
2189 }
2190 bm->bm_buf = (void *)kva;
2191 bm_buf_end = (void *)(kva + bm_buf_size);
2192 bm_buf_pa = segs.ds_addr;
2193 DPRINTSC(sc, 1, "memory pool at %p\n", bm->bm_buf);
2194
2195 /* slice the buffer */
2196 mvxpe_bm_lock(sc);
2197 for (ptr = bm->bm_buf; ptr + bm->bm_slotsize <= bm_buf_end;
2198 ptr += bm->bm_slotsize) {
2199 struct mvxpe_bm_chunk *chunk;
2200
		/* initialize chunk */
2202 ptr_data = ptr + bm->bm_chunk_header_size;
2203 chunk = (struct mvxpe_bm_chunk *)ptr;
2204 chunk->m = NULL;
2205 chunk->sc = sc;
2206 chunk->off = (ptr - bm->bm_buf);
2207 chunk->pa = (paddr_t)(bm_buf_pa + chunk->off);
2208 chunk->buf_off = (ptr_data - bm->bm_buf);
2209 chunk->buf_pa = (paddr_t)(bm_buf_pa + chunk->buf_off);
2210 chunk->buf_va = (vaddr_t)(bm->bm_buf + chunk->buf_off);
2211 chunk->buf_size = bm->bm_chunk_size;
2212
2213 /* add to array */
2214 bm->bm_slots[bm->bm_chunk_count++] = chunk;
2215
2216 /* add to free list (for software management) */
2217 LIST_INSERT_HEAD(&bm->bm_free, chunk, link);
2218 mvxpe_bm_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
2219
2220 DPRINTSC(sc, 9, "new chunk %p\n", (void *)chunk->buf_va);
2221 }
2222 mvxpe_bm_unlock(sc);
2223 aprint_normal_dev(sc->sc_dev,
2224 "%zu bytes packet buffer, %zu bytes * %zu entries allocated.\n",
2225 bm_buf_size, bm->bm_chunk_size, bm->bm_chunk_count);
2226 return 0;
2227
2228 fail3:
2229 bus_dmamap_destroy(bm->bm_dmat, bm->bm_map);
2230 fail2:
2231 bus_dmamem_unmap(bm->bm_dmat, kva, bm_buf_size);
2232 fail1:
2233 bus_dmamem_free(bm->bm_dmat, &segs, nsegs);
2234
2235 return error;
2236 }
2237
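/*
 * mvxpe_bm_init_mbuf_hdr: attach an mbuf header whose external storage
 * points into the BM chunk, so that the chunk can be handed to the
 * networking stack and returned via mvxpe_bm_free_mbuf().
 */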
2238 STATIC int
2239 mvxpe_bm_init_mbuf_hdr(struct mvxpe_bm_chunk *chunk)
2240 {
2241 struct mvxpe_softc *sc = chunk->sc;
2242
2243 KASSERT(chunk->m == NULL);
2244
2245 /* add mbuf header */
2246 MGETHDR(chunk->m, M_DONTWAIT, MT_DATA);
2247 if (chunk->m == NULL) {
2248 aprint_error_dev(sc->sc_dev, "cannot get mbuf\n");
2249 return ENOBUFS;
2250 }
2251 MEXTADD(chunk->m, chunk->buf_va, chunk->buf_size, 0,
2252 mvxpe_bm_free_mbuf, chunk);
2253 chunk->m->m_flags |= M_EXT_RW;
2254 chunk->m->m_len = chunk->m->m_pkthdr.len = chunk->buf_size;
2255 if (sc->sc_bm.bm_chunk_packet_offset)
2256 m_adj(chunk->m, sc->sc_bm.bm_chunk_packet_offset);
2257
2258 return 0;
2259 }
2260
2261 STATIC struct mvxpe_bm_chunk *
2262 mvxpe_bm_alloc(struct mvxpe_softc *sc)
2263 {
2264 struct mvxpe_bm_chunk *chunk;
2265 struct mvxpe_bm_softc *bm = &sc->sc_bm;
2266
2267 mvxpe_bm_lock(sc);
2268
2269 chunk = LIST_FIRST(&bm->bm_free);
2270 if (chunk == NULL) {
2271 mvxpe_bm_unlock(sc);
2272 return NULL;
2273 }
2274
2275 LIST_REMOVE(chunk, link);
2276 LIST_INSERT_HEAD(&bm->bm_inuse, chunk, link);
2277
2278 mvxpe_bm_unlock(sc);
2279 return chunk;
2280 }
2281
2282 STATIC void
2283 mvxpe_bm_free_mbuf(struct mbuf *m, void *buf, size_t size, void *arg)
2284 {
2285 struct mvxpe_bm_chunk *chunk = (struct mvxpe_bm_chunk *)arg;
2286 int s;
2287
2288 KASSERT(m != NULL);
2289 KASSERT(arg != NULL);
2290
2291 DPRINTFN(3, "free packet %p\n", m);
2292 if (m->m_flags & M_PKTHDR)
2293 m_tag_delete_chain((m), NULL);
2294 chunk->m = NULL;
2295 s = splvm();
2296 pool_cache_put(mb_cache, m);
2297 splx(s);
2298 return mvxpe_bm_free_chunk(chunk);
2299 }
2300
2301 STATIC void
2302 mvxpe_bm_free_chunk(struct mvxpe_bm_chunk *chunk)
2303 {
2304 struct mvxpe_softc *sc = chunk->sc;
2305 struct mvxpe_bm_softc *bm = &sc->sc_bm;
2306
2307 DPRINTFN(3, "bm chunk free\n");
2308
2309 mvxpe_bm_lock(sc);
2310
2311 LIST_REMOVE(chunk, link);
2312 LIST_INSERT_HEAD(&bm->bm_free, chunk, link);
2313
2314 mvxpe_bm_unlock(sc);
2315 }
2316
2317 STATIC void
2318 mvxpe_bm_sync(struct mvxpe_bm_chunk *chunk, size_t size, int ops)
2319 {
2320 struct mvxpe_softc *sc = (struct mvxpe_softc *)chunk->sc;
2321 struct mvxpe_bm_softc *bm = &sc->sc_bm;
2322
2323 KASSERT(size <= chunk->buf_size);
2324 if (size == 0)
2325 size = chunk->buf_size;
2326
2327 bus_dmamap_sync(bm->bm_dmat, bm->bm_map, chunk->buf_off, size, ops);
2328 }
2329
2330 STATIC void
2331 mvxpe_bm_lock(struct mvxpe_softc *sc)
2332 {
2333 mutex_enter(&sc->sc_bm.bm_mtx);
2334 }
2335
2336 STATIC void
2337 mvxpe_bm_unlock(struct mvxpe_softc *sc)
2338 {
2339 mutex_exit(&sc->sc_bm.bm_mtx);
2340 }
2341
2342 /*
2343 * Tx Subroutines
2344 */
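/*
 * mvxpe_tx_queue_select: pick a Tx queue for the mbuf (currently always
 * queue 0) and return with that queue's mutex held.
 */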
2345 STATIC int
2346 mvxpe_tx_queue_select(struct mvxpe_softc *sc, struct mbuf *m)
2347 {
2348 int q = 0;
2349
2350 /* XXX: get attribute from ALTQ framework? */
2351 mvxpe_tx_lockq(sc, q);
2352 return 0;
2353 }
2354
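/*
 * mvxpe_tx_queue: map the mbuf for DMA, fill one Tx descriptor per
 * segment (F flag on the first, L flag on the last), sync the ring and
 * tell the controller how many descriptors were added.
 */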
2355 STATIC int
2356 mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q)
2357 {
2358 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2359 bus_dma_segment_t *txsegs;
2360 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2361 struct mvxpe_tx_desc *t = NULL;
2362 uint32_t ptxsu;
2363 int txnsegs;
2364 int start, used;
2365 int i;
2366
2367 KASSERT(mutex_owned(&tx->tx_ring_mtx));
2368 KASSERT(tx->tx_free_cnt >= 0);
2369 KASSERT(tx->tx_free_cnt <= tx->tx_queue_len);
2370
	/* load the mbuf using the dmamap of the 1st descriptor */
2372 if (bus_dmamap_load_mbuf(sc->sc_dmat,
2373 MVXPE_TX_MAP(sc, q, tx->tx_cpu), m, BUS_DMA_NOWAIT) != 0) {
2374 m_freem(m);
2375 return ENOBUFS;
2376 }
2377 txsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_segs;
2378 txnsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_nsegs;
2379 if (txnsegs <= 0 || txnsegs > tx->tx_free_cnt) {
		/* not enough free descriptors, or the mbuf is broken */
2381 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, tx->tx_cpu));
2382 m_freem(m);
2383 return ENOBUFS;
2384 }
2385 DPRINTSC(sc, 2, "send packet %p descriptor %d\n", m, tx->tx_cpu);
2386 KASSERT(MVXPE_TX_MBUF(sc, q, tx->tx_cpu) == NULL);
2387
2388 /* remember mbuf using 1st descriptor */
2389 MVXPE_TX_MBUF(sc, q, tx->tx_cpu) = m;
2390 bus_dmamap_sync(sc->sc_dmat,
2391 MVXPE_TX_MAP(sc, q, tx->tx_cpu), 0, m->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);
2393
2394 /* load to tx descriptors */
2395 start = tx->tx_cpu;
2396 used = 0;
2397 for (i = 0; i < txnsegs; i++) {
2398 if (__predict_false(txsegs[i].ds_len == 0))
2399 continue;
2400 t = MVXPE_TX_DESC(sc, q, tx->tx_cpu);
2401 t->command = 0;
2402 t->l4ichk = 0;
2403 t->flags = 0;
2404 if (i == 0) {
2405 /* 1st descriptor */
2406 t->command |= MVXPE_TX_CMD_W_PACKET_OFFSET(0);
2407 t->command |= MVXPE_TX_CMD_PADDING;
2408 t->command |= MVXPE_TX_CMD_F;
2409 mvxpe_tx_set_csumflag(ifp, t, m);
2410 }
2411 t->bufptr = txsegs[i].ds_addr;
2412 t->bytecnt = txsegs[i].ds_len;
2413 tx->tx_cpu = tx_counter_adv(tx->tx_cpu, 1);
2414 tx->tx_free_cnt--;
2415 used++;
2416 }
2417 /* t is last descriptor here */
2418 KASSERT(t != NULL);
2419 t->command |= MVXPE_TX_CMD_L;
2420
2421 DPRINTSC(sc, 2, "queue %d, %d descriptors used\n", q, used);
2422 #ifdef MVXPE_DEBUG
2423 if (mvxpe_debug > 2)
2424 for (i = start; i <= tx->tx_cpu; i++) {
2425 t = MVXPE_TX_DESC(sc, q, i);
2426 mvxpe_dump_txdesc(t, i);
2427 }
2428 #endif
2429 mvxpe_ring_sync_tx(sc, q, start, used,
2430 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2431
2432 while (used > 255) {
2433 ptxsu = MVXPE_PTXSU_NOWD(255);
2434 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2435 used -= 255;
2436 }
2437 if (used > 0) {
2438 ptxsu = MVXPE_PTXSU_NOWD(used);
2439 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2440 }
2441 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
2442
2443 DPRINTSC(sc, 2,
2444 "PTXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQA(q)));
2445 DPRINTSC(sc, 2,
2446 "PTXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));
2447 DPRINTSC(sc, 2,
2448 "PTXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXS(q)));
2449 DPRINTSC(sc, 2,
2450 "PTXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PTXDI(q)));
2451 DPRINTSC(sc, 2, "TQC: %#x\n", MVXPE_READ(sc, MVXPE_TQC));
2452 DPRINTIFNET(ifp, 2,
2453 "Tx: tx_cpu = %d, tx_dma = %d, tx_free_cnt = %d\n",
2454 tx->tx_cpu, tx->tx_dma, tx->tx_free_cnt);
2455 return 0;
2456 }
2457
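/*
 * mvxpe_tx_set_csumflag: translate the mbuf's checksum-offload flags
 * into Tx descriptor command bits.
 */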
2458 STATIC void
2459 mvxpe_tx_set_csumflag(struct ifnet *ifp,
2460 struct mvxpe_tx_desc *t, struct mbuf *m)
2461 {
2462 int csum_flags;
2463 uint32_t iphl = 0, ipoff = 0;
2464
2465
2466 csum_flags = ifp->if_csum_flags_tx & m->m_pkthdr.csum_flags;
2467
2468 if (csum_flags & (M_CSUM_IPv4| M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2469 iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2470 ipoff = M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data);
2471 }
2472 else if (csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2473 iphl = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data);
2474 ipoff = M_CSUM_DATA_IPv6_OFFSET(m->m_pkthdr.csum_data);
2475 }
2476 else {
2477 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
2478 return;
2479 }
2480
2481 /* L3 */
2482 if (csum_flags & M_CSUM_IPv4) {
2483 t->command |= MVXPE_TX_CMD_L3_IP4;
2484 t->command |= MVXPE_TX_CMD_IP4_CHECKSUM;
2485 }
2486
2487 /* L4 */
2488 if (csum_flags & M_CSUM_TCPv4) {
2489 t->command |= MVXPE_TX_CMD_L3_IP4;
2490 t->command |= MVXPE_TX_CMD_L4_TCP;
2491 }
2492 else if (csum_flags & M_CSUM_UDPv4) {
2493 t->command |= MVXPE_TX_CMD_L3_IP4;
2494 t->command |= MVXPE_TX_CMD_L4_UDP;
2495 }
2496 else if (csum_flags & M_CSUM_TCPv6) {
2497 t->command |= MVXPE_TX_CMD_L3_IP6;
2498 t->command |= MVXPE_TX_CMD_L4_TCP;
2499 }
2500 else if (csum_flags & M_CSUM_UDPv6) {
2501 t->command |= MVXPE_TX_CMD_L3_IP6;
2502 t->command |= MVXPE_TX_CMD_L4_UDP;
2503 }
2504
	/*
	 * NetBSD's networking stack does not request hardware
	 * checksumming of fragmented packets.
	 */
2509 t->l4ichk = 0;
2510 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2511 t->command |= MVXPE_TX_CMD_W_IP_HEADER_LEN(iphl >> 2);
2512 t->command |= MVXPE_TX_CMD_W_L3_OFFSET(ipoff);
2513 }
2514
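/*
 * mvxpe_tx_complete: reclaim finished Tx descriptors on every queue and
 * clear the watchdog timer once nothing is pending.
 */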
2515 STATIC void
2516 mvxpe_tx_complete(struct mvxpe_softc *sc)
2517 {
2518 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2519 int q;
2520
2521 DPRINTSC(sc, 2, "tx completed.\n");
2522
2523 KASSERT_SC_MTX(sc);
2524
2525 /* XXX: check queue bit array */
2526 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2527 mvxpe_tx_lockq(sc, q);
2528 mvxpe_tx_queue_del(sc, q);
2529 mvxpe_tx_unlockq(sc, q);
2530 }
2531 KASSERT(sc->sc_tx_pending >= 0);
2532 if (sc->sc_tx_pending == 0)
2533 ifp->if_timer = 0;
2534 }
2535
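/*
 * mvxpe_tx_queue_del: free the mbufs and descriptors the DMA engine has
 * finished with on queue q and acknowledge them to the controller
 * (at most 255 per PTXSU write).
 */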
2536 STATIC void
2537 mvxpe_tx_queue_del(struct mvxpe_softc *sc, int q)
2538 {
2539 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2540 struct mvxpe_tx_desc *t;
2541 uint32_t ptxs, ptxsu, ndesc;
2542 int i;
2543
2544 KASSERT_TX_MTX(sc, q);
2545
2546 ptxs = MVXPE_READ(sc, MVXPE_PTXS(q));
2547 ndesc = MVXPE_PTXS_GET_TBC(ptxs);
2548 if (ndesc == 0)
2549 return;
2550
2551 DPRINTSC(sc, 2,
2552 "tx complete queue %d, %d descriptors.\n", q, ndesc);
2553
2554 mvxpe_ring_sync_tx(sc, q, tx->tx_dma, ndesc,
2555 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2556
2557 for (i = 0; i < ndesc; i++) {
2558 int error = 0;
2559
2560 t = MVXPE_TX_DESC(sc, q, tx->tx_dma);
2561 if (t->flags & MVXPE_TX_F_ES) {
2562 DPRINTSC(sc, 1,
2563 "tx error queue %d desc %d\n",
2564 q, tx->tx_dma);
			switch (t->flags & MVXPE_TX_F_EC_MASK) {
			case MVXPE_TX_F_EC_LC:
				MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_lc);
				break;
			case MVXPE_TX_F_EC_UR:
				MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_ur);
				break;
			case MVXPE_TX_F_EC_RL:
				MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_rl);
				break;
			default:
				MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_oth);
				break;
			}
2575 error = 1;
2576 }
2577 if (MVXPE_TX_MBUF(sc, q, tx->tx_dma) != NULL) {
2578 KASSERT((t->command & MVXPE_TX_CMD_F) != 0);
2579 bus_dmamap_unload(sc->sc_dmat,
2580 MVXPE_TX_MAP(sc, q, tx->tx_dma));
2581 m_freem(MVXPE_TX_MBUF(sc, q, tx->tx_dma));
2582 MVXPE_TX_MBUF(sc, q, tx->tx_dma) = NULL;
2583 sc->sc_tx_pending--;
2584 }
2585 else
			KASSERT((t->command & MVXPE_TX_CMD_F) == 0);
2587 tx->tx_dma = tx_counter_adv(tx->tx_dma, 1);
2588 tx->tx_free_cnt++;
2589 if (error)
2590 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txqe[q]);
2591 else
2592 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txq[q]);
2593 }
2594 KASSERT(tx->tx_free_cnt >= 0);
2595 KASSERT(tx->tx_free_cnt <= tx->tx_queue_len);
2596 while (ndesc > 255) {
2597 ptxsu = MVXPE_PTXSU_NORB(255);
2598 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2599 ndesc -= 255;
2600 }
2601 if (ndesc > 0) {
2602 ptxsu = MVXPE_PTXSU_NORB(ndesc);
2603 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2604 }
2605 DPRINTSC(sc, 2,
2606 "Tx complete q %d, tx_cpu = %d, tx_dma = %d, tx_free_cnt = %d\n",
2607 q, tx->tx_cpu, tx->tx_dma, tx->tx_free_cnt);
2608 }
2609
2610 /*
2611 * Rx Subroutines
2612 */
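/*
 * mvxpe_rx: service every Rx queue that has received packets pending.
 */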
2613 STATIC void
2614 mvxpe_rx(struct mvxpe_softc *sc)
2615 {
2616 int q, npkt;
2617
2618 KASSERT_SC_MTX(sc);
2619
2620 while ( (npkt = mvxpe_rx_queue_select(sc, &q))) {
2621 /* mutex is held by rx_queue_sel */
2622 mvxpe_rx_queue(sc, q, npkt);
2623 mvxpe_rx_unlockq(sc, q);
2624 }
2625 }
2626
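/*
 * mvxpe_rx_queue: process npkt received descriptors on queue q.  Good
 * packets are handed to the networking stack, errored chunks go back to
 * the BM, and the processed descriptors are acknowledged via PRXSU.
 */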
2627 STATIC void
2628 mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt)
2629 {
2630 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2631 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2632 struct mvxpe_rx_desc *r;
2633 struct mvxpe_bm_chunk *chunk;
2634 struct mbuf *m;
2635 uint32_t prxsu;
2636 int error = 0;
2637 int i;
2638
2639 KASSERT_RX_MTX(sc, q);
2640
2641 mvxpe_ring_sync_rx(sc, q, rx->rx_dma, npkt,
2642 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2643
2644 for (i = 0; i < npkt; i++) {
2645 /* get descriptor and packet */
2646 chunk = MVXPE_RX_PKTBUF(sc, q, rx->rx_dma);
2647 MVXPE_RX_PKTBUF(sc, q, rx->rx_dma) = NULL;
2648 r = MVXPE_RX_DESC(sc, q, rx->rx_dma);
2649 mvxpe_bm_sync(chunk, r->bytecnt, BUS_DMASYNC_POSTREAD);
2650
2651 /* check errors */
2652 if (r->status & MVXPE_RX_ES) {
2653 switch (r->status & MVXPE_RX_EC_MASK) {
2654 case MVXPE_RX_EC_CE:
2655 DPRINTIFNET(ifp, 1, "CRC error\n");
2656 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_ce);
2657 break;
2658 case MVXPE_RX_EC_OR:
2659 DPRINTIFNET(ifp, 1, "Rx FIFO overrun\n");
2660 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_or);
2661 break;
2662 case MVXPE_RX_EC_MF:
				DPRINTIFNET(ifp, 1, "Rx frame too large\n");
2664 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_mf);
2665 break;
2666 case MVXPE_RX_EC_RE:
2667 DPRINTIFNET(ifp, 1, "Rx resource error\n");
2668 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_re);
2669 break;
2670 }
2671 error = 1;
2672 goto rx_done;
2673 }
2674 if (!(r->status & MVXPE_RX_F) || !(r->status & MVXPE_RX_L)) {
			DPRINTIFNET(ifp, 1, "scatter buffers are not supported\n");
2676 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_scat);
2677 error = 1;
2678 goto rx_done;
2679 }
2680
2681 if (chunk == NULL) {
2682 device_printf(sc->sc_dev,
2683 "got rx interrupt, but no chunk\n");
2684 error = 1;
2685 goto rx_done;
2686 }
2687
2688 /* extract packet buffer */
2689 mvxpe_bm_init_mbuf_hdr(chunk);
2690 m = chunk->m;
2691 m->m_pkthdr.rcvif = ifp;
2692 m->m_pkthdr.len = m->m_len = r->bytecnt - ETHER_CRC_LEN;
		m_adj(m, MVXPE_HWHEADER_SIZE); /* strip the Marvell header */
2694 mvxpe_rx_set_csumflag(ifp, r, m);
2695 ifp->if_ipackets++;
2696 bpf_mtap(ifp, m);
2697 (*ifp->if_input)(ifp, m);
		chunk = NULL; /* the BM chunk now belongs to the networking stack */
2699 rx_done:
2700 if (chunk) {
2701 /* rx error. just return the chunk to BM. */
2702 mvxpe_bm_free_chunk(chunk);
2703 }
2704 if (error)
2705 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxqe[q]);
2706 else
2707 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxq[q]);
2708 rx->rx_dma = rx_counter_adv(rx->rx_dma, 1);
2709 }
2710 /* DMA status update */
2711 DPRINTSC(sc, 2, "%d packets received from queue %d\n", npkt, q);
2712 while (npkt > 255) {
2713 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2714 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2715 npkt -= 255;
2716 }
2717 if (npkt > 0) {
2718 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(npkt);
2719 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2720 }
2721
2722 DPRINTSC(sc, 2,
2723 "PRXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQA(q)));
2724 DPRINTSC(sc, 2,
2725 "PRXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
2726 DPRINTSC(sc, 2,
2727 "PRXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXS(q)));
2728 DPRINTSC(sc, 2,
2729 "PRXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PRXDI(q)));
2730 DPRINTSC(sc, 2, "RQC: %#x\n", MVXPE_READ(sc, MVXPE_RQC));
2731 DPRINTIFNET(ifp, 2, "Rx: rx_cpu = %d, rx_dma = %d\n",
2732 rx->rx_cpu, rx->rx_dma);
2733 }
2734
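/*
 * mvxpe_rx_queue_select: scan the Rx queues from the highest index down,
 * return the number of packets pending on the first non-empty queue and
 * leave that queue's mutex held.
 */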
2735 STATIC int
2736 mvxpe_rx_queue_select(struct mvxpe_softc *sc, int *queue)
2737 {
2738 uint32_t prxs, npkt;
2739 int q;
2740
2741 KASSERT_SC_MTX(sc);
2742 KASSERT(queue != NULL);
2743 DPRINTSC(sc, 2, "selecting rx queue\n");
2744
2745 for (q = MVXPE_QUEUE_SIZE - 1; q >= 0; q--) {
2746 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2747 npkt = MVXPE_PRXS_GET_ODC(prxs);
2748 if (npkt == 0)
2749 continue;
2750
2751 DPRINTSC(sc, 2,
		    "queue %d selected: prxs=%#x, %u packets received.\n",
2753 q, prxs, npkt);
2754 *queue = q;
2755 mvxpe_rx_lockq(sc, q);
2756 return npkt;
2757 }
2758
2759 return 0;
2760 }
2761
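/*
 * mvxpe_rx_reload: refill the Rx descriptors of every queue.
 */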
2762 STATIC void
2763 mvxpe_rx_reload(struct mvxpe_softc *sc)
2764 {
2765 int q;
2766
2767 KASSERT_SC_MTX(sc);
2768
2769 /* XXX: check rx bit array */
2770 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2771 mvxpe_rx_lockq(sc, q);
2772
2773 mvxpe_rx_queue_reload(sc, q);
2774
2775 mvxpe_rx_unlockq(sc, q);
2776 }
2777 }
2778
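/*
 * mvxpe_rx_queue_reload: refill queue q with fresh BM chunks up to its
 * configured length and report the number of new descriptors via PRXSU.
 */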
2779 STATIC void
2780 mvxpe_rx_queue_reload(struct mvxpe_softc *sc, int q)
2781 {
2782 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2783 uint32_t prxs, prxsu, ndesc;
2784 int idx, reload = 0;
2785 int npkt;
2786
2787 KASSERT_RX_MTX(sc, q);
2788
2789 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2790 ndesc = MVXPE_PRXS_GET_NODC(prxs) + MVXPE_PRXS_GET_ODC(prxs);
2791 reload = rx->rx_queue_len - ndesc;
2792 if (reload <= 0)
2793 return;
2794 DPRINTPRXS(2, q);
2795 DPRINTSC(sc, 2, "%d buffers to reload.\n", reload);
2796
2797 idx = rx->rx_cpu;
2798 for (npkt = 0; npkt < reload; npkt++)
2799 if (mvxpe_rx_queue_add(sc, q) != 0)
2800 break;
	DPRINTSC(sc, 2, "queue %d, %d buffers reloaded.\n", q, npkt);
2802 if (npkt == 0)
2803 return;
2804
2805 mvxpe_ring_sync_rx(sc, q, idx, npkt,
2806 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2807
2808 while (npkt > 255) {
2809 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(255);
2810 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2811 npkt -= 255;
2812 }
2813 if (npkt > 0) {
2814 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(npkt);
2815 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2816 }
2817 DPRINTPRXS(2, q);
2818 return;
2819 }
2820
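/*
 * mvxpe_rx_queue_add: attach one BM chunk to the next free Rx descriptor
 * of queue q.
 */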
2821 STATIC int
2822 mvxpe_rx_queue_add(struct mvxpe_softc *sc, int q)
2823 {
2824 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2825 struct mvxpe_rx_desc *r;
2826 struct mvxpe_bm_chunk *chunk = NULL;
2827
2828 KASSERT_RX_MTX(sc, q);
2829
2830 /* Allocate the packet buffer */
2831 chunk = mvxpe_bm_alloc(sc);
2832 if (chunk == NULL) {
2833 DPRINTSC(sc, 1, "BM chunk allocation failed.\n");
2834 return ENOBUFS;
2835 }
2836
	/* Add the packet buffer to the descriptor */
2838 KASSERT(MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) == NULL);
2839 MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) = chunk;
2840 mvxpe_bm_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
2841
2842 r = MVXPE_RX_DESC(sc, q, rx->rx_cpu);
2843 r->bufptr = chunk->buf_pa;
2844 DPRINTSC(sc, 9, "chunk added to index %d\n", rx->rx_cpu);
2845 rx->rx_cpu = rx_counter_adv(rx->rx_cpu, 1);
2846 return 0;
2847 }
2848
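/*
 * mvxpe_rx_set_csumflag: translate Rx descriptor status bits into mbuf
 * checksum flags, masked by the checksum capabilities enabled on ifp.
 */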
2849 STATIC void
2850 mvxpe_rx_set_csumflag(struct ifnet *ifp,
2851 struct mvxpe_rx_desc *r, struct mbuf *m0)
2852 {
2853 uint32_t csum_flags = 0;
2854
2855 if ((r->status & (MVXPE_RX_IP_HEADER_OK|MVXPE_RX_L3_IP)) == 0)
		return; /* not an IP packet */
2857
2858 /* L3 */
2859 if (r->status & MVXPE_RX_L3_IP) {
2860 csum_flags |= M_CSUM_IPv4;
2861 if ((r->status & MVXPE_RX_IP_HEADER_OK) == 0) {
2862 csum_flags |= M_CSUM_IPv4_BAD;
2863 goto finish;
2864 }
2865 else if (r->status & MVXPE_RX_IPV4_FRAGMENT) {
			/*
			 * r->l4chk has the partial checksum of each
			 * fragment, but there is no way to use it in NetBSD.
			 */
2870 return;
2871 }
2872 }
2873
2874 /* L4 */
2875 switch (r->status & MVXPE_RX_L4_MASK) {
2876 case MVXPE_RX_L4_TCP:
2877 if (r->status & MVXPE_RX_L3_IP)
2878 csum_flags |= M_CSUM_TCPv4;
2879 else
2880 csum_flags |= M_CSUM_TCPv6;
2881 if ((r->status & MVXPE_RX_L4_CHECKSUM_OK) == 0)
2882 csum_flags |= M_CSUM_TCP_UDP_BAD;
2883 break;
2884 case MVXPE_RX_L4_UDP:
2885 if (r->status & MVXPE_RX_L3_IP)
2886 csum_flags |= M_CSUM_UDPv4;
2887 else
2888 csum_flags |= M_CSUM_UDPv6;
2889 if ((r->status & MVXPE_RX_L4_CHECKSUM_OK) == 0)
2890 csum_flags |= M_CSUM_TCP_UDP_BAD;
2891 break;
2892 case MVXPE_RX_L4_OTH:
2893 default:
2894 break;
2895 }
2896 finish:
2897 m0->m_pkthdr.csum_flags |= (csum_flags & ifp->if_csum_flags_rx);
2898 }
2899
2900 /*
2901 * MAC address filter
2902 */
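/*
 * mvxpe_crc8: CRC-8 (polynomial 0x07) over a MAC address, used to index
 * the "other multicast" hash table.
 */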
2903 STATIC uint8_t
2904 mvxpe_crc8(const uint8_t *data, size_t size)
2905 {
2906 int bit;
2907 uint8_t byte;
2908 uint8_t crc = 0;
2909 const uint8_t poly = 0x07;
2910
	while (size--)
2912 for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
2913 crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
2914
2915 return crc;
2916 }
2917
2918 CTASSERT(MVXPE_NDFSMT == MVXPE_NDFOMT);
2919
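/*
 * mvxpe_filter_setup: program the unicast and multicast destination
 * address filter tables (DFUT/DFSMT/DFOMT) and the promiscuous/broadcast
 * bits of PXC according to the interface flags and the multicast list.
 */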
2920 STATIC void
2921 mvxpe_filter_setup(struct mvxpe_softc *sc)
2922 {
2923 struct ethercom *ec = &sc->sc_ethercom;
2924 struct ifnet *ifp= &sc->sc_ethercom.ec_if;
2925 struct ether_multi *enm;
2926 struct ether_multistep step;
2927 uint32_t dfut[MVXPE_NDFUT], dfsmt[MVXPE_NDFSMT], dfomt[MVXPE_NDFOMT];
2928 uint32_t pxc;
2929 int i;
2930 const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};
2931
2932 KASSERT_SC_MTX(sc);
2933
2934 memset(dfut, 0, sizeof(dfut));
2935 memset(dfsmt, 0, sizeof(dfsmt));
2936 memset(dfomt, 0, sizeof(dfomt));
2937
2938 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
2939 goto allmulti;
2940 }
2941
2942 ETHER_FIRST_MULTI(step, ec, enm);
2943 while (enm != NULL) {
2944 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2945 /* ranges are complex and somewhat rare */
2946 goto allmulti;
2947 }
2948 /* chip handles some IPv4 multicast specially */
2949 if (memcmp(enm->enm_addrlo, special, 5) == 0) {
2950 i = enm->enm_addrlo[5];
2951 dfsmt[i>>2] |=
2952 MVXPE_DF(i&3, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS);
2953 } else {
2954 i = mvxpe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
2955 dfomt[i>>2] |=
2956 MVXPE_DF(i&3, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS);
2957 }
2958
2959 ETHER_NEXT_MULTI(step, enm);
2960 }
2961 goto set;
2962
2963 allmulti:
2964 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
2965 for (i = 0; i < MVXPE_NDFSMT; i++) {
2966 dfsmt[i] = dfomt[i] =
2967 MVXPE_DF(0, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS) |
2968 MVXPE_DF(1, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS) |
2969 MVXPE_DF(2, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS) |
2970 MVXPE_DF(3, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS);
2971 }
2972 }
2973
2974 set:
2975 pxc = MVXPE_READ(sc, MVXPE_PXC);
2976 pxc &= ~MVXPE_PXC_UPM;
2977 pxc |= MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP;
2978 if (ifp->if_flags & IFF_BROADCAST) {
2979 pxc &= ~(MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP);
2980 }
2981 if (ifp->if_flags & IFF_PROMISC) {
2982 pxc |= MVXPE_PXC_UPM;
2983 }
2984 MVXPE_WRITE(sc, MVXPE_PXC, pxc);
2985
2986 /* Set Destination Address Filter Unicast Table */
2987 i = sc->sc_enaddr[5] & 0xf; /* last nibble */
2988 dfut[i>>2] = MVXPE_DF(i&3, MVXPE_DF_QUEUE_ALL | MVXPE_DF_PASS);
2989 MVXPE_WRITE_REGION(sc, MVXPE_DFUT(0), dfut, MVXPE_NDFUT);
2990
2991 /* Set Destination Address Filter Multicast Tables */
2992 MVXPE_WRITE_REGION(sc, MVXPE_DFSMT(0), dfsmt, MVXPE_NDFSMT);
2993 MVXPE_WRITE_REGION(sc, MVXPE_DFOMT(0), dfomt, MVXPE_NDFOMT);
2994 }
2995
2996 /*
2997 * sysctl(9)
2998 */
2999 SYSCTL_SETUP(sysctl_mvxpe, "sysctl mvxpe subtree setup")
3000 {
3001 int rc;
3002 const struct sysctlnode *node;
3003
3004 if ((rc = sysctl_createv(clog, 0, NULL, &node,
3005 0, CTLTYPE_NODE, "mvxpe",
3006 SYSCTL_DESCR("mvxpe interface controls"),
3007 NULL, 0, NULL, 0,
3008 CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
3009 goto err;
3010 }
3011
3012 mvxpe_root_num = node->sysctl_num;
3013 return;
3014
3015 err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
3017 }
3018
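/*
 * sysctl handler for hw.mvxpe.mvxpe[unit].mib.<counter>: return the
 * cached 64-bit MIB counter (read-only).
 */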
3019 STATIC int
3020 sysctl_read_mib(SYSCTLFN_ARGS)
3021 {
3022 struct mvxpe_sysctl_mib *arg;
3023 struct mvxpe_softc *sc;
3024 struct sysctlnode node;
3025 uint64_t val;
3026 int err;
3027
3028 node = *rnode;
3029 arg = (struct mvxpe_sysctl_mib *)rnode->sysctl_data;
3030 if (arg == NULL)
3031 return EINVAL;
3032
3033 sc = arg->sc;
3034 if (sc == NULL)
3035 return EINVAL;
	if (arg->index < 0 || arg->index >= __arraycount(mvxpe_mib_list))
3037 return EINVAL;
3038
3039 mvxpe_sc_lock(sc);
3040 val = arg->counter;
3041 mvxpe_sc_unlock(sc);
3042
3043 node.sysctl_data = &val;
3044 err = sysctl_lookup(SYSCTLFN_CALL(&node));
3045 if (err)
3046 return err;
3047 if (newp)
3048 return EINVAL;
3049
3050 return 0;
3051 }
3052
3053
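/*
 * sysctl handler for hw.mvxpe.mvxpe[unit].clear_mib: writing 1 clears
 * all MIB counters of the port.
 */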
3054 STATIC int
3055 sysctl_clear_mib(SYSCTLFN_ARGS)
3056 {
3057 struct mvxpe_softc *sc;
3058 struct sysctlnode node;
3059 int val;
3060 int err;
3061
3062 node = *rnode;
3063 sc = (struct mvxpe_softc *)rnode->sysctl_data;
3064 if (sc == NULL)
3065 return EINVAL;
3066
3067 val = 0;
3068 node.sysctl_data = &val;
3069 err = sysctl_lookup(SYSCTLFN_CALL(&node));
3070 if (err || newp == NULL)
3071 return err;
3072 if (val < 0 || val > 1)
3073 return EINVAL;
3074 if (val == 1) {
3075 mvxpe_sc_lock(sc);
3076 mvxpe_clear_mib(sc);
3077 mvxpe_sc_unlock(sc);
3078 }
3079
3080 return 0;
3081 }
3082
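/*
 * sysctl handler for the per-queue "length" node: read or set the ring
 * length of an Rx or Tx queue and reprogram the derived thresholds.
 */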
3083 STATIC int
3084 sysctl_set_queue_length(SYSCTLFN_ARGS)
3085 {
3086 struct mvxpe_sysctl_queue *arg;
3087 struct mvxpe_rx_ring *rx = NULL;
3088 struct mvxpe_tx_ring *tx = NULL;
3089 struct mvxpe_softc *sc;
3090 struct sysctlnode node;
3091 uint32_t reg;
3092 int val;
3093 int err;
3094
3095 node = *rnode;
3096
3097 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
3098 if (arg == NULL)
3099 return EINVAL;
	if (arg->queue < 0 || arg->queue >= MVXPE_QUEUE_SIZE)
3101 return EINVAL;
3102 if (arg->rxtx != MVXPE_SYSCTL_RX && arg->rxtx != MVXPE_SYSCTL_TX)
3103 return EINVAL;
3104
3105 sc = arg->sc;
3106 if (sc == NULL)
3107 return EINVAL;
3108
3109 /* read queue length */
3110 mvxpe_sc_lock(sc);
3111 switch (arg->rxtx) {
3112 case MVXPE_SYSCTL_RX:
3113 mvxpe_rx_lockq(sc, arg->queue);
3114 rx = MVXPE_RX_RING(sc, arg->queue);
3115 val = rx->rx_queue_len;
3116 mvxpe_rx_unlockq(sc, arg->queue);
3117 break;
3118 case MVXPE_SYSCTL_TX:
3119 mvxpe_tx_lockq(sc, arg->queue);
3120 tx = MVXPE_TX_RING(sc, arg->queue);
3121 val = tx->tx_queue_len;
3122 mvxpe_tx_unlockq(sc, arg->queue);
3123 break;
3124 }
3125
3126 node.sysctl_data = &val;
3127 err = sysctl_lookup(SYSCTLFN_CALL(&node));
3128 if (err || newp == NULL) {
3129 mvxpe_sc_unlock(sc);
3130 return err;
3131 }
3132
3133 /* update queue length */
3134 if (val < 8 || val > MVXPE_RX_RING_CNT) {
3135 mvxpe_sc_unlock(sc);
3136 return EINVAL;
3137 }
3138 switch (arg->rxtx) {
3139 case MVXPE_SYSCTL_RX:
3140 mvxpe_rx_lockq(sc, arg->queue);
3141 rx->rx_queue_len = val;
3142 rx->rx_queue_th_received = rx->rx_queue_len / 4;
3143 rx->rx_queue_th_free = rx->rx_queue_len / 2;
3144
3145 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
3146 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
3147 MVXPE_WRITE(sc, MVXPE_PRXDQTH(arg->queue), reg);
3148
3149 mvxpe_rx_unlockq(sc, arg->queue);
3150 break;
3151 case MVXPE_SYSCTL_TX:
3152 mvxpe_tx_lockq(sc, arg->queue);
3153 tx->tx_queue_len = val;
3154 tx->tx_queue_th_free = tx->tx_queue_len / 2;
3155
3156 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
3157 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
3158 MVXPE_WRITE(sc, MVXPE_PTXDQS(arg->queue), reg);
3159
3160 mvxpe_tx_unlockq(sc, arg->queue);
3161 break;
3162 }
3163 mvxpe_sc_unlock(sc);
3164
3165 return 0;
3166 }
3167
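/*
 * sysctl handler for the per-queue "threshold_timer_us" node: read or set
 * the Rx interrupt coalescing timer, converting between microseconds and
 * Tclk cycles.
 */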
3168 STATIC int
3169 sysctl_set_queue_rxthtime(SYSCTLFN_ARGS)
3170 {
3171 struct mvxpe_sysctl_queue *arg;
3172 struct mvxpe_rx_ring *rx = NULL;
3173 struct mvxpe_softc *sc;
3174 struct sysctlnode node;
3175 extern uint32_t mvTclk;
3176 uint32_t reg, time_mvtclk;
3177 int time_us;
3178 int err;
3179
3180 node = *rnode;
3181
3182 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
3183 if (arg == NULL)
3184 return EINVAL;
	if (arg->queue < 0 || arg->queue >= MVXPE_QUEUE_SIZE)
3186 return EINVAL;
3187 if (arg->rxtx != MVXPE_SYSCTL_RX)
3188 return EINVAL;
3189
3190 sc = arg->sc;
3191 if (sc == NULL)
3192 return EINVAL;
3193
	/* read the current threshold time */
3195 mvxpe_sc_lock(sc);
3196 mvxpe_rx_lockq(sc, arg->queue);
3197 rx = MVXPE_RX_RING(sc, arg->queue);
3198 time_mvtclk = rx->rx_queue_th_time;
3199 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvTclk;
3200 node.sysctl_data = &time_us;
3201 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n",
3202 arg->queue, MVXPE_READ(sc, MVXPE_PRXITTH(arg->queue)));
3203 err = sysctl_lookup(SYSCTLFN_CALL(&node));
3204 if (err || newp == NULL) {
3205 mvxpe_rx_unlockq(sc, arg->queue);
3206 mvxpe_sc_unlock(sc);
3207 return err;
3208 }
3209
	/* update the threshold time (0 - 1 sec) */
3211 if (time_us < 0 || time_us > (1000 * 1000)) {
3212 mvxpe_rx_unlockq(sc, arg->queue);
3213 mvxpe_sc_unlock(sc);
3214 return EINVAL;
3215 }
3216 time_mvtclk =
3217 (uint64_t)mvTclk * (uint64_t)time_us / (1000ULL * 1000ULL);
3218 rx->rx_queue_th_time = time_mvtclk;
3219 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
3220 MVXPE_WRITE(sc, MVXPE_PRXITTH(arg->queue), reg);
3221 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n", arg->queue, reg);
3222 mvxpe_rx_unlockq(sc, arg->queue);
3223 mvxpe_sc_unlock(sc);
3224
3225 return 0;
3226 }
3227
3228
3229 STATIC void
3230 sysctl_mvxpe_init(struct mvxpe_softc *sc)
3231 {
3232 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3233 const struct sysctlnode *node;
3234 int mvxpe_nodenum;
3235 int mvxpe_mibnum;
3236 int mvxpe_rxqueuenum;
3237 int mvxpe_txqueuenum;
3238 int q, i;
3239
3240 /* hw.mvxpe.mvxpe[unit] */
3241 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3242 0, CTLTYPE_NODE, ifp->if_xname,
3243 SYSCTL_DESCR("mvxpe per-controller controls"),
3244 NULL, 0, NULL, 0,
3245 CTL_HW, mvxpe_root_num, CTL_CREATE,
3246 CTL_EOL) != 0) {
3247 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3248 return;
3249 }
3250 mvxpe_nodenum = node->sysctl_num;
3251
3252 /* hw.mvxpe.mvxpe[unit].mib */
3253 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3254 0, CTLTYPE_NODE, "mib",
3255 SYSCTL_DESCR("mvxpe per-controller MIB counters"),
3256 NULL, 0, NULL, 0,
3257 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3258 CTL_EOL) != 0) {
3259 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3260 return;
3261 }
3262 mvxpe_mibnum = node->sysctl_num;
3263
3264 /* hw.mvxpe.mvxpe[unit].rx */
3265 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3266 0, CTLTYPE_NODE, "rx",
3267 SYSCTL_DESCR("Rx Queues"),
3268 NULL, 0, NULL, 0,
3269 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3270 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3271 return;
3272 }
3273 mvxpe_rxqueuenum = node->sysctl_num;
3274
3275 /* hw.mvxpe.mvxpe[unit].tx */
3276 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3277 0, CTLTYPE_NODE, "tx",
3278 SYSCTL_DESCR("Tx Queues"),
3279 NULL, 0, NULL, 0,
3280 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3281 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3282 return;
3283 }
3284 mvxpe_txqueuenum = node->sysctl_num;
3285
3286 #ifdef MVXPE_DEBUG
3287 /* hw.mvxpe.debug */
3288 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3289 CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
	    SYSCTL_DESCR("mvxpe device driver debug control"),
3291 NULL, 0, &mvxpe_debug, 0,
3292 CTL_HW, mvxpe_root_num, CTL_CREATE, CTL_EOL) != 0) {
3293 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3294 return;
3295 }
3296 #endif
3297 /*
3298 * MIB access
3299 */
3300 /* hw.mvxpe.mvxpe[unit].mib.<mibs> */
3301 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3302 const char *name = mvxpe_mib_list[i].sysctl_name;
3303 const char *desc = mvxpe_mib_list[i].desc;
3304 struct mvxpe_sysctl_mib *mib_arg = &sc->sc_sysctl_mib[i];
3305
3306 mib_arg->sc = sc;
3307 mib_arg->index = i;
3308 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3309 CTLFLAG_READONLY, CTLTYPE_QUAD, name, desc,
3310 sysctl_read_mib, 0, (void *)mib_arg, 0,
3311 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_mibnum,
3312 CTL_CREATE, CTL_EOL) != 0) {
3313 aprint_normal_dev(sc->sc_dev,
3314 "couldn't create sysctl node\n");
3315 break;
3316 }
3317 }
3318
3319 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
3320 struct mvxpe_sysctl_queue *rxarg = &sc->sc_sysctl_rx_queue[q];
3321 struct mvxpe_sysctl_queue *txarg = &sc->sc_sysctl_tx_queue[q];
3322 #define MVXPE_SYSCTL_NAME(num) "queue" # num
3323 static const char *sysctl_queue_names[] = {
3324 MVXPE_SYSCTL_NAME(0), MVXPE_SYSCTL_NAME(1),
3325 MVXPE_SYSCTL_NAME(2), MVXPE_SYSCTL_NAME(3),
3326 MVXPE_SYSCTL_NAME(4), MVXPE_SYSCTL_NAME(5),
3327 MVXPE_SYSCTL_NAME(6), MVXPE_SYSCTL_NAME(7),
3328 };
3329 #undef MVXPE_SYSCTL_NAME
3330 #ifdef SYSCTL_INCLUDE_DESCR
3331 #define MVXPE_SYSCTL_DESCR(num) "configuration parameters for queue " # num
3332 static const char *sysctl_queue_descrs[] = {
			MVXPE_SYSCTL_DESCR(0), MVXPE_SYSCTL_DESCR(1),
			MVXPE_SYSCTL_DESCR(2), MVXPE_SYSCTL_DESCR(3),
			MVXPE_SYSCTL_DESCR(4), MVXPE_SYSCTL_DESCR(5),
			MVXPE_SYSCTL_DESCR(6), MVXPE_SYSCTL_DESCR(7),
3337 };
3338 #undef MVXPE_SYSCTL_DESCR
3339 #endif /* SYSCTL_INCLUDE_DESCR */
3340 int mvxpe_curnum;
3341
3342 rxarg->sc = txarg->sc = sc;
3343 rxarg->queue = txarg->queue = q;
3344 rxarg->rxtx = MVXPE_SYSCTL_RX;
3345 txarg->rxtx = MVXPE_SYSCTL_TX;
3346
3347 /* hw.mvxpe.mvxpe[unit].rx.[queue] */
3348 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3349 0, CTLTYPE_NODE,
3350 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3351 NULL, 0, NULL, 0,
3352 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3353 CTL_CREATE, CTL_EOL) != 0) {
3354 aprint_normal_dev(sc->sc_dev,
3355 "couldn't create sysctl node\n");
3356 break;
3357 }
3358 mvxpe_curnum = node->sysctl_num;
3359
3360 /* hw.mvxpe.mvxpe[unit].rx.[queue].length */
3361 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3362 CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3363 SYSCTL_DESCR("maximum length of the queue"),
3364 sysctl_set_queue_length, 0, (void *)rxarg, 0,
3365 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3366 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3367 aprint_normal_dev(sc->sc_dev,
3368 "couldn't create sysctl node\n");
3369 break;
3370 }
3371
3372 /* hw.mvxpe.mvxpe[unit].rx.[queue].threshold_timer_us */
3373 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3374 CTLFLAG_READWRITE, CTLTYPE_INT, "threshold_timer_us",
3375 SYSCTL_DESCR("interrupt coalescing threshold timer [us]"),
3376 sysctl_set_queue_rxthtime, 0, (void *)rxarg, 0,
3377 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3378 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3379 aprint_normal_dev(sc->sc_dev,
3380 "couldn't create sysctl node\n");
3381 break;
3382 }
3383
3384 /* hw.mvxpe.mvxpe[unit].tx.[queue] */
3385 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3386 0, CTLTYPE_NODE,
		    sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3388 NULL, 0, NULL, 0,
3389 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3390 CTL_CREATE, CTL_EOL) != 0) {
3391 aprint_normal_dev(sc->sc_dev,
3392 "couldn't create sysctl node\n");
3393 break;
3394 }
3395 mvxpe_curnum = node->sysctl_num;
3396
		/* hw.mvxpe.mvxpe[unit].tx.[queue].length */
3398 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3399 CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3400 SYSCTL_DESCR("maximum length of the queue"),
3401 sysctl_set_queue_length, 0, (void *)txarg, 0,
3402 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3403 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3404 aprint_normal_dev(sc->sc_dev,
3405 "couldn't create sysctl node\n");
3406 break;
3407 }
3408 }
3409
3410 /* hw.mvxpe.mvxpe[unit].clear_mib */
3411 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3412 CTLFLAG_READWRITE, CTLTYPE_INT, "clear_mib",
	    SYSCTL_DESCR("Write 1 to clear all MIB counters"),
3414 sysctl_clear_mib, 0, (void *)sc, 0,
3415 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3416 CTL_EOL) != 0) {
3417 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3418 return;
3419 }
3420
3421 }
3422
3423 /*
3424 * MIB
3425 */
3426 STATIC void
3427 mvxpe_clear_mib(struct mvxpe_softc *sc)
3428 {
3429 int i;
3430
3431 KASSERT_SC_MTX(sc);
3432
3433 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3434 if (mvxpe_mib_list[i].reg64)
3435 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum + 4));
3436 MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3437 sc->sc_sysctl_mib[i].counter = 0;
3438 }
3439 }
3440
3441 STATIC void
3442 mvxpe_update_mib(struct mvxpe_softc *sc)
3443 {
3444 int i;
3445
3446 KASSERT_SC_MTX(sc);
3447
3448 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3449 uint32_t val_hi;
3450 uint32_t val_lo;
3451
3452 if (mvxpe_mib_list[i].reg64) {
3453 /* XXX: implement bus_space_read_8() */
3454 val_lo = MVXPE_READ_MIB(sc,
3455 (mvxpe_mib_list[i].regnum + 4));
3456 val_hi = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3457 }
3458 else {
3459 val_lo = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3460 val_hi = 0;
3461 }
3462
3463 if ((val_lo | val_hi) == 0)
3464 continue;
3465
3466 sc->sc_sysctl_mib[i].counter +=
3467 ((uint64_t)val_hi << 32) | (uint64_t)val_lo;
3468 }
3469 }
3470
3471 /*
3472 * for Debug
3473 */
3474 STATIC void
3475 mvxpe_dump_txdesc(struct mvxpe_tx_desc *desc, int idx)
3476 {
3477 #define DESC_PRINT(X) \
3478 if (X) \
3479 printf("txdesc[%d]." #X "=%#x\n", idx, X);
3480
3481 DESC_PRINT(desc->command);
3482 DESC_PRINT(desc->l4ichk);
3483 DESC_PRINT(desc->bytecnt);
3484 DESC_PRINT(desc->bufptr);
3485 DESC_PRINT(desc->flags);
3486 #undef DESC_PRINT
3487 }
3488
3489 STATIC void
3490 mvxpe_dump_rxdesc(struct mvxpe_rx_desc *desc, int idx)
3491 {
3492 #define DESC_PRINT(X) \
3493 if (X) \
3494 printf("rxdesc[%d]." #X "=%#x\n", idx, X);
3495
3496 DESC_PRINT(desc->status);
3497 DESC_PRINT(desc->bytecnt);
3498 DESC_PRINT(desc->bufptr);
3499 DESC_PRINT(desc->l4chk);
3500 #undef DESC_PRINT
3501 }
3502