Home | History | Annotate | Line # | Download | only in dev
pq3etsec.c revision 1.16.16.3
      1  1.16.16.3  skrll /*	$NetBSD: pq3etsec.c,v 1.16.16.3 2016/07/09 20:24:55 skrll Exp $	*/
      2        1.2   matt /*-
      3        1.2   matt  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
      4        1.2   matt  * All rights reserved.
      5        1.2   matt  *
      6        1.2   matt  * This code is derived from software contributed to The NetBSD Foundation
      7        1.2   matt  * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
      8        1.2   matt  * Agency and which was developed by Matt Thomas of 3am Software Foundry.
      9        1.2   matt  *
     10        1.2   matt  * This material is based upon work supported by the Defense Advanced Research
     11        1.2   matt  * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
     12        1.2   matt  * Contract No. N66001-09-C-2073.
     13        1.2   matt  * Approved for Public Release, Distribution Unlimited
     14        1.2   matt  *
     15        1.2   matt  * Redistribution and use in source and binary forms, with or without
     16        1.2   matt  * modification, are permitted provided that the following conditions
     17        1.2   matt  * are met:
     18        1.2   matt  * 1. Redistributions of source code must retain the above copyright
     19        1.2   matt  *    notice, this list of conditions and the following disclaimer.
     20        1.2   matt  * 2. Redistributions in binary form must reproduce the above copyright
     21        1.2   matt  *    notice, this list of conditions and the following disclaimer in the
     22        1.2   matt  *    documentation and/or other materials provided with the distribution.
     23        1.2   matt  *
     24        1.2   matt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     25        1.2   matt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     26        1.2   matt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     27        1.2   matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     28        1.2   matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     29        1.2   matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     30        1.2   matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     31        1.2   matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     32        1.2   matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     33        1.2   matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     34        1.2   matt  * POSSIBILITY OF SUCH DAMAGE.
     35        1.2   matt  */
     36        1.2   matt 
     37        1.2   matt #include "opt_inet.h"
     38       1.15   matt #include "opt_mpc85xx.h"
     39  1.16.16.1  skrll #include "opt_multiprocessor.h"
     40  1.16.16.1  skrll #include "opt_net_mpsafe.h"
     41        1.2   matt 
     42        1.2   matt #include <sys/cdefs.h>
     43        1.2   matt 
     44  1.16.16.3  skrll __KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.16.16.3 2016/07/09 20:24:55 skrll Exp $");
     45        1.7   matt 
     46        1.2   matt #include <sys/param.h>
     47        1.2   matt #include <sys/cpu.h>
     48        1.2   matt #include <sys/device.h>
     49        1.2   matt #include <sys/mbuf.h>
     50        1.2   matt #include <sys/ioctl.h>
     51        1.2   matt #include <sys/intr.h>
     52        1.2   matt #include <sys/bus.h>
     53        1.2   matt #include <sys/kernel.h>
     54        1.2   matt #include <sys/kmem.h>
     55        1.2   matt #include <sys/proc.h>
     56        1.2   matt #include <sys/atomic.h>
     57        1.2   matt #include <sys/callout.h>
     58  1.16.16.1  skrll #include <sys/sysctl.h>
     59        1.2   matt 
     60        1.2   matt #include <net/if.h>
     61        1.2   matt #include <net/if_dl.h>
     62        1.2   matt #include <net/if_ether.h>
     63        1.2   matt #include <net/if_media.h>
     64        1.2   matt 
     65        1.2   matt #include <dev/mii/miivar.h>
     66        1.2   matt 
     67        1.2   matt #include <net/bpf.h>
     68        1.2   matt 
     69        1.2   matt #ifdef INET
     70        1.2   matt #include <netinet/in.h>
     71        1.2   matt #include <netinet/in_systm.h>
     72        1.2   matt #include <netinet/ip.h>
     73        1.2   matt #include <netinet/in_offload.h>
     74        1.2   matt #endif /* INET */
     75        1.2   matt #ifdef INET6
     76        1.2   matt #include <netinet6/in6.h>
     77        1.2   matt #include <netinet/ip6.h>
     78        1.2   matt #endif
     79        1.2   matt #include <netinet6/in6_offload.h>
     80        1.2   matt 
     81        1.2   matt #include <powerpc/spr.h>
     82        1.2   matt #include <powerpc/booke/spr.h>
     83        1.2   matt 
     84        1.2   matt #include <powerpc/booke/cpuvar.h>
     85        1.2   matt #include <powerpc/booke/e500var.h>
     86        1.2   matt #include <powerpc/booke/e500reg.h>
     87        1.2   matt #include <powerpc/booke/etsecreg.h>
     88        1.2   matt 
     89        1.2   matt #define	M_HASFCB		M_LINK2	/* tx packet has FCB prepended */
     90        1.2   matt 
     91        1.2   matt #define	ETSEC_MAXTXMBUFS	30
     92        1.2   matt #define	ETSEC_NTXSEGS		30
     93        1.2   matt #define	ETSEC_MAXRXMBUFS	511
     94        1.2   matt #define	ETSEC_MINRXMBUFS	32
     95        1.2   matt #define	ETSEC_NRXSEGS		1
     96        1.2   matt 
     97        1.2   matt #define	IFCAP_RCTRL_IPCSEN	IFCAP_CSUM_IPv4_Rx
     98        1.2   matt #define	IFCAP_RCTRL_TUCSEN	(IFCAP_CSUM_TCPv4_Rx\
     99        1.2   matt 				 |IFCAP_CSUM_UDPv4_Rx\
    100        1.2   matt 				 |IFCAP_CSUM_TCPv6_Rx\
    101        1.2   matt 				 |IFCAP_CSUM_UDPv6_Rx)
    102        1.2   matt 
    103        1.2   matt #define	IFCAP_TCTRL_IPCSEN	IFCAP_CSUM_IPv4_Tx
    104        1.2   matt #define	IFCAP_TCTRL_TUCSEN	(IFCAP_CSUM_TCPv4_Tx\
    105        1.2   matt 				 |IFCAP_CSUM_UDPv4_Tx\
    106        1.2   matt 				 |IFCAP_CSUM_TCPv6_Tx\
    107        1.2   matt 				 |IFCAP_CSUM_UDPv6_Tx)
    108        1.2   matt 
    109        1.2   matt #define	IFCAP_ETSEC		(IFCAP_RCTRL_IPCSEN|IFCAP_RCTRL_TUCSEN\
    110        1.2   matt 				 |IFCAP_TCTRL_IPCSEN|IFCAP_TCTRL_TUCSEN)
    111        1.2   matt 
    112        1.2   matt #define	M_CSUM_IP	(M_CSUM_CIP|M_CSUM_CTU)
    113        1.2   matt #define	M_CSUM_IP6	(M_CSUM_TCPv6|M_CSUM_UDPv6)
    114        1.2   matt #define	M_CSUM_TUP	(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)
    115        1.2   matt #define	M_CSUM_UDP	(M_CSUM_UDPv4|M_CSUM_UDPv6)
    116        1.2   matt #define	M_CSUM_IP4	(M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4)
    117        1.2   matt #define	M_CSUM_CIP	(M_CSUM_IPv4)
    118        1.2   matt #define	M_CSUM_CTU	(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)
    119        1.2   matt 
    120        1.2   matt struct pq3etsec_txqueue {
    121        1.2   matt 	bus_dmamap_t txq_descmap;
    122        1.2   matt 	volatile struct txbd *txq_consumer;
    123        1.2   matt 	volatile struct txbd *txq_producer;
    124        1.2   matt 	volatile struct txbd *txq_first;
    125        1.2   matt 	volatile struct txbd *txq_last;
    126        1.2   matt 	struct ifqueue txq_mbufs;
    127        1.2   matt 	struct mbuf *txq_next;
    128        1.2   matt #ifdef ETSEC_DEBUG
    129        1.2   matt 	struct mbuf *txq_lmbufs[512];
    130        1.2   matt #endif
    131        1.2   matt 	uint32_t txq_qmask;
    132        1.2   matt 	uint32_t txq_free;
    133        1.2   matt 	uint32_t txq_threshold;
    134        1.2   matt 	uint32_t txq_lastintr;
    135        1.2   matt 	bus_size_t txq_reg_tbase;
    136        1.2   matt 	bus_dma_segment_t txq_descmap_seg;
    137        1.2   matt };
    138        1.2   matt 
    139        1.2   matt struct pq3etsec_rxqueue {
    140        1.2   matt 	bus_dmamap_t rxq_descmap;
    141        1.2   matt 	volatile struct rxbd *rxq_consumer;
    142        1.2   matt 	volatile struct rxbd *rxq_producer;
    143        1.2   matt 	volatile struct rxbd *rxq_first;
    144        1.2   matt 	volatile struct rxbd *rxq_last;
    145        1.2   matt 	struct mbuf *rxq_mhead;
    146        1.2   matt 	struct mbuf **rxq_mtail;
    147        1.2   matt 	struct mbuf *rxq_mconsumer;
    148        1.2   matt #ifdef ETSEC_DEBUG
    149        1.2   matt 	struct mbuf *rxq_mbufs[512];
    150        1.2   matt #endif
    151        1.2   matt 	uint32_t rxq_qmask;
    152        1.2   matt 	uint32_t rxq_inuse;
    153        1.2   matt 	uint32_t rxq_threshold;
    154        1.2   matt 	bus_size_t rxq_reg_rbase;
    155        1.2   matt 	bus_size_t rxq_reg_rbptr;
    156        1.2   matt 	bus_dma_segment_t rxq_descmap_seg;
    157        1.2   matt };
    158        1.2   matt 
    159        1.2   matt struct pq3etsec_mapcache {
    160        1.2   matt 	u_int dmc_nmaps;
    161        1.2   matt 	u_int dmc_maxseg;
    162        1.2   matt 	u_int dmc_maxmaps;
    163        1.2   matt 	u_int dmc_maxmapsize;
    164        1.2   matt 	bus_dmamap_t dmc_maps[0];
    165        1.2   matt };
    166        1.2   matt 
    167        1.2   matt struct pq3etsec_softc {
    168        1.2   matt 	device_t sc_dev;
    169       1.16   matt 	device_t sc_mdio_dev;
    170        1.2   matt 	struct ethercom sc_ec;
    171        1.2   matt #define sc_if		sc_ec.ec_if
    172        1.2   matt 	struct mii_data sc_mii;
    173        1.2   matt 	bus_space_tag_t sc_bst;
    174        1.2   matt 	bus_space_handle_t sc_bsh;
    175       1.15   matt 	bus_space_handle_t sc_mdio_bsh;
    176        1.2   matt 	bus_dma_tag_t sc_dmat;
    177        1.2   matt 	int sc_phy_addr;
    178        1.2   matt 	prop_dictionary_t sc_intrmap;
    179        1.2   matt 	uint32_t sc_intrmask;
    180        1.2   matt 
    181        1.2   matt 	uint32_t sc_soft_flags;
    182        1.2   matt #define	SOFT_RESET		0x0001
    183        1.2   matt #define	SOFT_RXINTR		0x0010
    184        1.2   matt #define	SOFT_RXBSY		0x0020
    185        1.2   matt #define	SOFT_TXINTR		0x0100
    186        1.2   matt #define	SOFT_TXERROR		0x0200
    187        1.2   matt 
    188        1.2   matt 	struct pq3etsec_txqueue sc_txq;
    189        1.2   matt 	struct pq3etsec_rxqueue sc_rxq;
    190        1.2   matt 	uint32_t sc_txerrors;
    191        1.2   matt 	uint32_t sc_rxerrors;
    192        1.2   matt 
    193        1.2   matt 	size_t sc_rx_adjlen;
    194        1.2   matt 
    195        1.2   matt 	/*
    196        1.2   matt 	 * Copies of various ETSEC registers.
    197        1.2   matt 	 */
    198        1.2   matt 	uint32_t sc_imask;
    199        1.2   matt 	uint32_t sc_maccfg1;
    200        1.2   matt 	uint32_t sc_maccfg2;
    201        1.2   matt 	uint32_t sc_maxfrm;
    202        1.2   matt 	uint32_t sc_ecntrl;
    203        1.2   matt 	uint32_t sc_dmactrl;
    204        1.2   matt 	uint32_t sc_macstnaddr1;
    205        1.2   matt 	uint32_t sc_macstnaddr2;
    206        1.2   matt 	uint32_t sc_tctrl;
    207        1.2   matt 	uint32_t sc_rctrl;
    208        1.2   matt 	uint32_t sc_gaddr[16];
    209        1.2   matt 	uint64_t sc_macaddrs[15];
    210        1.2   matt 
    211        1.2   matt 	void *sc_tx_ih;
    212        1.2   matt 	void *sc_rx_ih;
    213        1.2   matt 	void *sc_error_ih;
    214        1.2   matt 	void *sc_soft_ih;
    215        1.2   matt 
    216        1.2   matt 	kmutex_t *sc_lock;
    217  1.16.16.1  skrll 	kmutex_t *sc_hwlock;
    218        1.2   matt 
    219        1.2   matt 	struct evcnt sc_ev_tx_stall;
    220        1.2   matt 	struct evcnt sc_ev_tx_intr;
    221        1.2   matt 	struct evcnt sc_ev_rx_stall;
    222        1.2   matt 	struct evcnt sc_ev_rx_intr;
    223        1.2   matt 	struct evcnt sc_ev_error_intr;
    224        1.2   matt 	struct evcnt sc_ev_soft_intr;
    225        1.2   matt 	struct evcnt sc_ev_tx_pause;
    226        1.2   matt 	struct evcnt sc_ev_rx_pause;
    227        1.2   matt 	struct evcnt sc_ev_mii_ticks;
    228        1.2   matt 
    229        1.2   matt 	struct callout sc_mii_callout;
    230        1.2   matt 	uint64_t sc_mii_last_tick;
    231        1.2   matt 
    232        1.2   matt 	struct ifqueue sc_rx_bufcache;
    233        1.2   matt 	struct pq3etsec_mapcache *sc_rx_mapcache;
    234        1.2   matt 	struct pq3etsec_mapcache *sc_tx_mapcache;
    235  1.16.16.1  skrll 
    236  1.16.16.1  skrll 	/* Interrupt Coalescing parameters */
    237  1.16.16.1  skrll 	int sc_ic_rx_time;
    238  1.16.16.1  skrll 	int sc_ic_rx_count;
    239  1.16.16.1  skrll 	int sc_ic_tx_time;
    240  1.16.16.1  skrll 	int sc_ic_tx_count;
    241        1.2   matt };
    242        1.2   matt 
    243  1.16.16.1  skrll #define	ETSEC_IC_RX_ENABLED(sc)						\
    244  1.16.16.1  skrll 	((sc)->sc_ic_rx_time != 0 && (sc)->sc_ic_rx_count != 0)
    245  1.16.16.1  skrll #define	ETSEC_IC_TX_ENABLED(sc)						\
    246  1.16.16.1  skrll 	((sc)->sc_ic_tx_time != 0 && (sc)->sc_ic_tx_count != 0)
    247  1.16.16.1  skrll 
    248       1.16   matt struct pq3mdio_softc {
    249       1.16   matt 	device_t mdio_dev;
    250       1.16   matt 
    251       1.16   matt 	kmutex_t *mdio_lock;
    252       1.16   matt 
    253       1.16   matt 	bus_space_tag_t mdio_bst;
    254       1.16   matt 	bus_space_handle_t mdio_bsh;
    255       1.16   matt };
    256       1.16   matt 
    257        1.2   matt static int pq3etsec_match(device_t, cfdata_t, void *);
    258        1.2   matt static void pq3etsec_attach(device_t, device_t, void *);
    259        1.2   matt 
    260       1.16   matt static int pq3mdio_match(device_t, cfdata_t, void *);
    261       1.16   matt static void pq3mdio_attach(device_t, device_t, void *);
    262       1.16   matt 
    263        1.2   matt static void pq3etsec_ifstart(struct ifnet *);
    264        1.2   matt static void pq3etsec_ifwatchdog(struct ifnet *);
    265        1.2   matt static int pq3etsec_ifinit(struct ifnet *);
    266        1.2   matt static void pq3etsec_ifstop(struct ifnet *, int);
    267        1.2   matt static int pq3etsec_ifioctl(struct ifnet *, u_long, void *);
    268        1.2   matt 
    269        1.2   matt static int pq3etsec_mapcache_create(struct pq3etsec_softc *,
    270       1.10   matt     struct pq3etsec_mapcache **, size_t, size_t, size_t);
    271        1.2   matt static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *,
    272        1.2   matt     struct pq3etsec_mapcache *);
    273        1.2   matt static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *,
    274        1.2   matt     struct pq3etsec_mapcache *);
    275        1.2   matt static void pq3etsec_mapcache_put(struct pq3etsec_softc *,
    276        1.2   matt     struct pq3etsec_mapcache *, bus_dmamap_t);
    277        1.2   matt 
    278        1.2   matt static int pq3etsec_txq_attach(struct pq3etsec_softc *,
    279        1.2   matt     struct pq3etsec_txqueue *, u_int);
    280        1.2   matt static void pq3etsec_txq_purge(struct pq3etsec_softc *,
    281        1.2   matt     struct pq3etsec_txqueue *);
    282        1.2   matt static void pq3etsec_txq_reset(struct pq3etsec_softc *,
    283        1.2   matt     struct pq3etsec_txqueue *);
    284        1.2   matt static bool pq3etsec_txq_consume(struct pq3etsec_softc *,
    285        1.2   matt     struct pq3etsec_txqueue *);
    286        1.2   matt static bool pq3etsec_txq_produce(struct pq3etsec_softc *,
    287        1.2   matt     struct pq3etsec_txqueue *, struct mbuf *m);
    288        1.2   matt static bool pq3etsec_txq_active_p(struct pq3etsec_softc *,
    289        1.2   matt     struct pq3etsec_txqueue *);
    290        1.2   matt 
    291        1.2   matt static int pq3etsec_rxq_attach(struct pq3etsec_softc *,
    292        1.2   matt     struct pq3etsec_rxqueue *, u_int);
    293        1.2   matt static bool pq3etsec_rxq_produce(struct pq3etsec_softc *,
    294        1.2   matt     struct pq3etsec_rxqueue *);
    295        1.2   matt static void pq3etsec_rxq_purge(struct pq3etsec_softc *,
    296        1.2   matt     struct pq3etsec_rxqueue *, bool);
    297        1.2   matt static void pq3etsec_rxq_reset(struct pq3etsec_softc *,
    298        1.2   matt     struct pq3etsec_rxqueue *);
    299        1.2   matt 
    300        1.2   matt static void pq3etsec_mc_setup(struct pq3etsec_softc *);
    301        1.2   matt 
    302        1.2   matt static void pq3etsec_mii_tick(void *);
    303        1.2   matt static int pq3etsec_rx_intr(void *);
    304        1.2   matt static int pq3etsec_tx_intr(void *);
    305        1.2   matt static int pq3etsec_error_intr(void *);
    306        1.2   matt static void pq3etsec_soft_intr(void *);
    307        1.2   matt 
    308  1.16.16.1  skrll static void pq3etsec_set_ic_rx(struct pq3etsec_softc *);
    309  1.16.16.1  skrll static void pq3etsec_set_ic_tx(struct pq3etsec_softc *);
    310  1.16.16.1  skrll 
    311  1.16.16.1  skrll static void pq3etsec_sysctl_setup(struct sysctllog **, struct pq3etsec_softc *);
    312  1.16.16.1  skrll 
    313        1.2   matt CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc),
    314        1.2   matt     pq3etsec_match, pq3etsec_attach, NULL, NULL);
    315        1.2   matt 
    316       1.16   matt CFATTACH_DECL_NEW(pq3mdio_tsec, sizeof(struct pq3mdio_softc),
    317       1.16   matt     pq3mdio_match, pq3mdio_attach, NULL, NULL);
    318       1.16   matt 
    319       1.16   matt CFATTACH_DECL_NEW(pq3mdio_cpunode, sizeof(struct pq3mdio_softc),
    320       1.16   matt     pq3mdio_match, pq3mdio_attach, NULL, NULL);
    321       1.16   matt 
    322       1.16   matt static inline uint32_t
    323       1.16   matt etsec_mdio_read(struct pq3mdio_softc *mdio, bus_size_t off)
    324        1.2   matt {
    325       1.16   matt 	return bus_space_read_4(mdio->mdio_bst, mdio->mdio_bsh, off);
    326       1.16   matt }
    327        1.2   matt 
    328       1.16   matt static inline void
    329       1.16   matt etsec_mdio_write(struct pq3mdio_softc *mdio, bus_size_t off, uint32_t data)
    330       1.16   matt {
    331       1.16   matt 	bus_space_write_4(mdio->mdio_bst, mdio->mdio_bsh, off, data);
    332        1.2   matt }
    333        1.2   matt 
    334        1.2   matt static inline uint32_t
    335        1.2   matt etsec_read(struct pq3etsec_softc *sc, bus_size_t off)
    336        1.2   matt {
    337        1.2   matt 	return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off);
    338        1.2   matt }
    339        1.2   matt 
    340       1.16   matt static int
    341       1.16   matt pq3mdio_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
    342        1.2   matt {
    343       1.16   matt 	return strcmp(cf->cf_name, "mdio") == 0;
    344        1.2   matt }
    345        1.2   matt 
    346       1.16   matt static int
    347       1.16   matt pq3mdio_match(device_t parent, cfdata_t cf, void *aux)
    348       1.15   matt {
    349       1.16   matt 	const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16;
    350       1.16   matt 	const bool p1025_p = (svr == (SVR_P1025v1 >> 16)
    351       1.16   matt 	    || svr == (SVR_P1016v1 >> 16));
    352       1.16   matt 
    353       1.16   matt 	if (device_is_a(parent, "cpunode")) {
    354       1.16   matt 		if (!p1025_p
    355       1.16   matt 		    || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
    356       1.16   matt 			return 0;
    357       1.16   matt 
    358       1.16   matt 		return 1;
    359       1.16   matt 	}
    360       1.16   matt 
    361       1.16   matt 	if (device_is_a(parent, "tsec")) {
    362       1.16   matt 		if (p1025_p
    363       1.16   matt 		    || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
    364       1.16   matt 			return 0;
    365       1.16   matt 
    366       1.16   matt 		return 1;
    367       1.16   matt 	}
    368       1.16   matt 
    369       1.16   matt 	return 0;
    370       1.15   matt }
    371       1.15   matt 
    372       1.16   matt static void
    373       1.16   matt pq3mdio_attach(device_t parent, device_t self, void *aux)
    374       1.15   matt {
    375       1.16   matt 	struct pq3mdio_softc * const mdio = device_private(self);
    376       1.16   matt 	struct cpunode_attach_args * const cna = aux;
    377       1.16   matt 	struct cpunode_locators * const cnl = &cna->cna_locs;
    378       1.16   matt 
    379       1.16   matt 	mdio->mdio_dev = self;
    380       1.16   matt 	mdio->mdio_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
    381       1.16   matt 
    382       1.16   matt 	if (device_is_a(parent, "cpunode")) {
    383       1.16   matt 		struct cpunode_softc * const psc = device_private(parent);
    384       1.16   matt 		psc->sc_children |= cna->cna_childmask;
    385       1.16   matt 
    386       1.16   matt 		mdio->mdio_bst = cna->cna_memt;
    387       1.16   matt 		if (bus_space_map(mdio->mdio_bst, cnl->cnl_addr,
    388       1.16   matt 				cnl->cnl_size, 0, &mdio->mdio_bsh) != 0) {
    389       1.16   matt 			aprint_error(": error mapping registers @ %#x\n",
    390       1.16   matt 			    cnl->cnl_addr);
    391       1.16   matt 			return;
    392       1.16   matt 		}
    393       1.16   matt 	} else {
    394       1.16   matt 		struct pq3etsec_softc * const sc = device_private(parent);
    395       1.16   matt 
    396       1.16   matt 		KASSERT(device_is_a(parent, "tsec"));
    397       1.16   matt 		KASSERTMSG(cnl->cnl_addr == ETSEC1_BASE
    398       1.16   matt 		    || cnl->cnl_addr == ETSEC2_BASE
    399       1.16   matt 		    || cnl->cnl_addr == ETSEC3_BASE
    400       1.16   matt 		    || cnl->cnl_addr == ETSEC4_BASE,
    401       1.16   matt 		    "unknown tsec addr %x", cnl->cnl_addr);
    402       1.16   matt 
    403       1.16   matt 		mdio->mdio_bst = sc->sc_bst;
    404       1.16   matt 		mdio->mdio_bsh = sc->sc_bsh;
    405       1.16   matt 	}
    406       1.16   matt 
    407       1.16   matt 	aprint_normal("\n");
    408       1.15   matt }
    409       1.15   matt 
    410        1.2   matt static int
    411       1.16   matt pq3mdio_mii_readreg(device_t self, int phy, int reg)
    412        1.2   matt {
    413       1.16   matt 	struct pq3mdio_softc * const mdio = device_private(self);
    414       1.16   matt 	uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);
    415        1.2   matt 
    416       1.16   matt 	mutex_enter(mdio->mdio_lock);
    417        1.2   matt 
    418       1.16   matt 	etsec_mdio_write(mdio, MIIMADD,
    419        1.2   matt 	    __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
    420        1.2   matt 
    421       1.16   matt 	etsec_mdio_write(mdio, MIIMCOM, 0);	/* clear any past bits */
    422       1.16   matt 	etsec_mdio_write(mdio, MIIMCOM, MIIMCOM_READ);
    423        1.2   matt 
    424       1.16   matt 	while (etsec_mdio_read(mdio, MIIMIND) != 0) {
    425        1.2   matt 			delay(1);
    426        1.2   matt 	}
    427       1.16   matt 	int data = etsec_mdio_read(mdio, MIIMSTAT);
    428        1.2   matt 
    429        1.2   matt 	if (miimcom == MIIMCOM_SCAN)
    430       1.16   matt 		etsec_mdio_write(mdio, MIIMCOM, miimcom);
    431        1.2   matt 
    432        1.2   matt #if 0
    433       1.16   matt 	aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
    434        1.2   matt 	    __func__, phy, reg, data);
    435        1.2   matt #endif
    436       1.16   matt 	mutex_exit(mdio->mdio_lock);
    437        1.2   matt 	return data;
    438        1.2   matt }
    439        1.2   matt 
    440        1.2   matt static void
    441       1.16   matt pq3mdio_mii_writereg(device_t self, int phy, int reg, int data)
    442        1.2   matt {
    443       1.16   matt 	struct pq3mdio_softc * const mdio = device_private(self);
    444       1.16   matt 	uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);
    445        1.2   matt 
    446        1.2   matt #if 0
    447       1.16   matt 	aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
    448        1.2   matt 	    __func__, phy, reg, data);
    449        1.2   matt #endif
    450        1.2   matt 
    451       1.16   matt 	mutex_enter(mdio->mdio_lock);
    452       1.16   matt 
    453       1.16   matt 	etsec_mdio_write(mdio, MIIMADD,
    454        1.2   matt 	    __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
    455       1.16   matt 	etsec_mdio_write(mdio, MIIMCOM, 0);	/* clear any past bits */
    456       1.16   matt 	etsec_mdio_write(mdio, MIIMCON, data);
    457        1.2   matt 
    458        1.2   matt 	int timo = 1000;	/* 1ms */
    459       1.16   matt 	while ((etsec_mdio_read(mdio, MIIMIND) & MIIMIND_BUSY) && --timo > 0) {
    460        1.2   matt 			delay(1);
    461        1.2   matt 	}
    462        1.2   matt 
    463        1.2   matt 	if (miimcom == MIIMCOM_SCAN)
    464       1.16   matt 		etsec_mdio_write(mdio, MIIMCOM, miimcom);
    465       1.16   matt 
    466       1.16   matt 	mutex_exit(mdio->mdio_lock);
    467       1.16   matt }
    468       1.16   matt 
    469       1.16   matt static inline void
    470       1.16   matt etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data)
    471       1.16   matt {
    472       1.16   matt 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data);
    473        1.2   matt }
    474        1.2   matt 
    475        1.2   matt static void
    476       1.16   matt pq3etsec_mii_statchg(struct ifnet *ifp)
    477        1.2   matt {
    478       1.16   matt 	struct pq3etsec_softc * const sc = ifp->if_softc;
    479        1.2   matt 	struct mii_data * const mii = &sc->sc_mii;
    480        1.2   matt 
    481        1.2   matt 	uint32_t maccfg1 = sc->sc_maccfg1;
    482        1.2   matt 	uint32_t maccfg2 = sc->sc_maccfg2;
    483        1.2   matt 	uint32_t ecntrl = sc->sc_ecntrl;
    484        1.2   matt 
    485        1.2   matt 	maccfg1 &= ~(MACCFG1_TX_FLOW|MACCFG1_RX_FLOW);
    486        1.2   matt 	maccfg2 &= ~(MACCFG2_IFMODE|MACCFG2_FD);
    487        1.2   matt 
    488        1.2   matt 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
    489        1.2   matt 		maccfg2 |= MACCFG2_FD;
    490        1.2   matt 	}
    491        1.2   matt 
    492        1.2   matt 	/*
    493        1.2   matt 	 * Now deal with the flow control bits.
    494        1.2   matt 	 */
    495        1.2   matt 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO
    496        1.2   matt 	    && (mii->mii_media_active & IFM_ETH_FMASK)) {
    497        1.2   matt 		if (mii->mii_media_active & IFM_ETH_RXPAUSE)
    498        1.2   matt 			maccfg1 |= MACCFG1_RX_FLOW;
    499        1.2   matt 		if (mii->mii_media_active & IFM_ETH_TXPAUSE)
    500        1.2   matt 			maccfg1 |= MACCFG1_TX_FLOW;
    501        1.2   matt 	}
    502        1.2   matt 
    503        1.2   matt 	/*
    504        1.2   matt 	 * Now deal with the speed.
    505        1.2   matt 	 */
    506        1.2   matt 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
    507        1.2   matt 		maccfg2 |= MACCFG2_IFMODE_GMII;
    508        1.2   matt 	} else {
    509        1.2   matt 		maccfg2 |= MACCFG2_IFMODE_MII;
    510        1.2   matt 		ecntrl &= ~ECNTRL_R100M;
    511        1.2   matt 		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) {
    512        1.2   matt 			ecntrl |= ECNTRL_R100M;
    513        1.2   matt 		}
    514        1.2   matt 	}
    515        1.2   matt 
    516        1.2   matt 	/*
    517        1.2   matt 	 * If things are different, re-init things.
    518        1.2   matt 	 */
    519        1.2   matt 	if (maccfg1 != sc->sc_maccfg1
    520        1.2   matt 	    || maccfg2 != sc->sc_maccfg2
    521        1.2   matt 	    || ecntrl != sc->sc_ecntrl) {
    522        1.2   matt 		if (sc->sc_if.if_flags & IFF_RUNNING)
    523        1.2   matt 			atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET);
    524        1.2   matt 		sc->sc_maccfg1 = maccfg1;
    525        1.2   matt 		sc->sc_maccfg2 = maccfg2;
    526        1.2   matt 		sc->sc_ecntrl = ecntrl;
    527        1.2   matt 	}
    528        1.2   matt }
    529        1.2   matt 
    530        1.2   matt #if 0
    531        1.2   matt static void
    532        1.2   matt pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
    533        1.2   matt {
    534        1.2   matt 	struct pq3etsec_softc * const sc = ifp->if_softc;
    535        1.2   matt 
    536        1.2   matt 	mii_pollstat(&sc->sc_mii);
    537        1.2   matt 	ether_mediastatus(ifp, ifmr);
    538        1.2   matt         ifmr->ifm_status = sc->sc_mii.mii_media_status;
    539        1.2   matt         ifmr->ifm_active = sc->sc_mii.mii_media_active;
    540        1.2   matt }
    541        1.2   matt 
    542        1.2   matt static int
    543        1.2   matt pq3etsec_mediachange(struct ifnet *ifp)
    544        1.2   matt {
    545        1.2   matt 	struct pq3etsec_softc * const sc = ifp->if_softc;
    546        1.2   matt 
    547        1.2   matt 	if ((ifp->if_flags & IFF_UP) == 0)
    548        1.2   matt 		return 0;
    549        1.2   matt 
    550        1.2   matt 	int rv = mii_mediachg(&sc->sc_mii);
    551        1.2   matt 	return (rv == ENXIO) ? 0 : rv;
    552        1.2   matt }
    553        1.2   matt #endif
    554        1.2   matt 
    555       1.16   matt static int
    556       1.16   matt pq3etsec_match(device_t parent, cfdata_t cf, void *aux)
    557       1.16   matt {
    558       1.15   matt 
    559       1.16   matt 	if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
    560       1.16   matt 		return 0;
    561       1.15   matt 
    562       1.16   matt 	return 1;
    563       1.15   matt }
    564       1.15   matt 
    565        1.2   matt static void
    566        1.2   matt pq3etsec_attach(device_t parent, device_t self, void *aux)
    567        1.2   matt {
    568        1.2   matt 	struct cpunode_softc * const psc = device_private(parent);
    569        1.2   matt 	struct pq3etsec_softc * const sc = device_private(self);
    570        1.2   matt 	struct cpunode_attach_args * const cna = aux;
    571        1.2   matt 	struct cpunode_locators * const cnl = &cna->cna_locs;
    572        1.5   matt 	cfdata_t cf = device_cfdata(self);
    573        1.2   matt 	int error;
    574        1.2   matt 
    575        1.2   matt 	psc->sc_children |= cna->cna_childmask;
    576        1.2   matt 	sc->sc_dev = self;
    577        1.2   matt 	sc->sc_bst = cna->cna_memt;
    578        1.2   matt 	sc->sc_dmat = &booke_bus_dma_tag;
    579        1.2   matt 
    580        1.2   matt 	/*
    581       1.16   matt 	 * Pull out the mdio bus and phy we are supposed to use.
    582        1.2   matt 	 */
    583       1.16   matt 	const int mdio = cf->cf_loc[CPUNODECF_MDIO];
    584       1.16   matt 	const int phy = cf->cf_loc[CPUNODECF_PHY];
    585       1.16   matt 	if (mdio != CPUNODECF_MDIO_DEFAULT)
    586       1.16   matt 		aprint_normal(" mdio %d", mdio);
    587        1.2   matt 
    588        1.2   matt 	/*
    589        1.2   matt 	 * See if the phy is in the config file...
    590        1.2   matt 	 */
    591       1.16   matt 	if (phy != CPUNODECF_PHY_DEFAULT) {
    592       1.16   matt 		sc->sc_phy_addr = phy;
    593        1.2   matt 	} else {
    594        1.2   matt 		unsigned char prop_name[20];
    595        1.2   matt 		snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr",
    596        1.2   matt 		    cnl->cnl_instance);
    597        1.2   matt 		sc->sc_phy_addr = board_info_get_number(prop_name);
    598        1.2   matt 	}
    599        1.9   matt 	if (sc->sc_phy_addr != MII_PHY_ANY)
    600        1.9   matt 		aprint_normal(" phy %d", sc->sc_phy_addr);
    601        1.2   matt 
    602        1.2   matt 	error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0,
    603        1.2   matt 	    &sc->sc_bsh);
    604        1.2   matt 	if (error) {
    605        1.2   matt 		aprint_error(": error mapping registers: %d\n", error);
    606        1.2   matt 		return;
    607        1.2   matt 	}
    608        1.2   matt 
    609        1.2   matt 	/*
    610        1.2   matt 	 * Assume firmware has aready set the mac address and fetch it
    611        1.2   matt 	 * before we reinit it.
    612        1.2   matt 	 */
    613        1.2   matt 	sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2);
    614        1.2   matt 	sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1);
    615        1.2   matt 	sc->sc_rctrl = RCTRL_DEFAULT;
    616       1.12   matt 	sc->sc_ecntrl = etsec_read(sc, ECNTRL);
    617       1.12   matt 	sc->sc_maccfg1 = etsec_read(sc, MACCFG1);
    618       1.13   matt 	sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT;
    619        1.2   matt 
    620        1.2   matt 	if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) {
    621        1.2   matt 		size_t len;
    622        1.2   matt 		const uint8_t *mac_addr =
    623        1.2   matt 		    board_info_get_data("tsec-mac-addr-base", &len);
    624        1.2   matt 		KASSERT(len == ETHER_ADDR_LEN);
    625        1.2   matt 		sc->sc_macstnaddr2 =
    626        1.2   matt 		    (mac_addr[1] << 24)
    627        1.2   matt 		    | (mac_addr[0] << 16);
    628        1.2   matt 		sc->sc_macstnaddr1 =
    629        1.2   matt 		    ((mac_addr[5] + cnl->cnl_instance - 1) << 24)
    630        1.2   matt 		    | (mac_addr[4] << 16)
    631        1.2   matt 		    | (mac_addr[3] << 8)
    632        1.2   matt 		    | (mac_addr[2] << 0);
    633        1.2   matt #if 0
    634        1.2   matt 		aprint_error(": mac-address unknown\n");
    635        1.2   matt 		return;
    636        1.2   matt #endif
    637        1.2   matt 	}
    638        1.2   matt 
    639  1.16.16.1  skrll 	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
    640  1.16.16.1  skrll 	sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);
    641  1.16.16.1  skrll 
    642  1.16.16.1  skrll 	callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE);
    643  1.16.16.1  skrll 	callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc);
    644  1.16.16.1  skrll 
    645  1.16.16.1  skrll 	/* Disable interrupts */
    646  1.16.16.1  skrll 	etsec_write(sc, IMASK, 0);
    647        1.2   matt 
    648        1.2   matt 	error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0);
    649        1.2   matt 	if (error) {
    650        1.2   matt 		aprint_error(": failed to init rxq: %d\n", error);
    651        1.2   matt 		return;
    652        1.2   matt 	}
    653        1.2   matt 
    654        1.2   matt 	error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0);
    655        1.2   matt 	if (error) {
    656        1.2   matt 		aprint_error(": failed to init txq: %d\n", error);
    657        1.2   matt 		return;
    658        1.2   matt 	}
    659        1.2   matt 
    660        1.2   matt 	error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache,
    661       1.10   matt 	    ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS);
    662        1.2   matt 	if (error) {
    663        1.2   matt 		aprint_error(": failed to allocate rx dmamaps: %d\n", error);
    664        1.2   matt 		return;
    665        1.2   matt 	}
    666        1.2   matt 
    667        1.2   matt 	error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
    668       1.10   matt 	    ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS);
    669        1.2   matt 	if (error) {
    670        1.2   matt 		aprint_error(": failed to allocate tx dmamaps: %d\n", error);
    671        1.2   matt 		return;
    672        1.2   matt 	}
    673        1.2   matt 
    674        1.2   matt 	sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP,
    675        1.2   matt 	    pq3etsec_tx_intr, sc);
    676        1.2   matt 	if (sc->sc_tx_ih == NULL) {
    677        1.2   matt 		aprint_error(": failed to establish tx interrupt: %d\n",
    678        1.2   matt 		    cnl->cnl_intrs[0]);
    679        1.2   matt 		return;
    680        1.2   matt 	}
    681        1.2   matt 
    682        1.2   matt 	sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP,
    683        1.2   matt 	    pq3etsec_rx_intr, sc);
    684        1.2   matt 	if (sc->sc_rx_ih == NULL) {
    685        1.2   matt 		aprint_error(": failed to establish rx interrupt: %d\n",
    686        1.2   matt 		    cnl->cnl_intrs[1]);
    687        1.2   matt 		return;
    688        1.2   matt 	}
    689        1.2   matt 
    690        1.2   matt 	sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP,
    691        1.2   matt 	    pq3etsec_error_intr, sc);
    692        1.2   matt 	if (sc->sc_error_ih == NULL) {
    693        1.2   matt 		aprint_error(": failed to establish error interrupt: %d\n",
    694        1.2   matt 		    cnl->cnl_intrs[2]);
    695        1.2   matt 		return;
    696        1.2   matt 	}
    697        1.2   matt 
    698  1.16.16.1  skrll 	int softint_flags = SOFTINT_NET;
    699  1.16.16.1  skrll #if !defined(MULTIPROCESSOR) || defined(NET_MPSAFE)
    700  1.16.16.1  skrll 	softint_flags |= SOFTINT_MPSAFE;
    701  1.16.16.1  skrll #endif	/* !MULTIPROCESSOR || NET_MPSAFE */
    702  1.16.16.1  skrll 	sc->sc_soft_ih = softint_establish(softint_flags,
    703        1.2   matt 	    pq3etsec_soft_intr, sc);
    704        1.2   matt 	if (sc->sc_soft_ih == NULL) {
    705        1.2   matt 		aprint_error(": failed to establish soft interrupt\n");
    706        1.2   matt 		return;
    707        1.2   matt 	}
    708        1.2   matt 
    709       1.16   matt 	/*
    710  1.16.16.1  skrll 	 * If there was no MDIO
    711       1.16   matt 	 */
    712       1.16   matt 	if (mdio == CPUNODECF_MDIO_DEFAULT) {
    713       1.16   matt 		aprint_normal("\n");
    714       1.16   matt 		cfdata_t mdio_cf = config_search_ia(pq3mdio_find, self, NULL, cna);
    715       1.16   matt 		if (mdio_cf != NULL) {
    716       1.16   matt 			sc->sc_mdio_dev = config_attach(self, mdio_cf, cna, NULL);
    717       1.16   matt 		}
    718       1.16   matt 	} else {
    719       1.16   matt 		sc->sc_mdio_dev = device_find_by_driver_unit("mdio", mdio);
    720       1.16   matt 		if (sc->sc_mdio_dev == NULL) {
    721       1.16   matt 			aprint_error(": failed to locate mdio device\n");
    722       1.16   matt 			return;
    723       1.16   matt 		}
    724       1.16   matt 		aprint_normal("\n");
    725       1.16   matt 	}
    726        1.2   matt 
    727        1.4   matt 	etsec_write(sc, ATTR, ATTR_DEFAULT);
    728        1.4   matt 	etsec_write(sc, ATTRELI, ATTRELI_DEFAULT);
    729        1.4   matt 
    730  1.16.16.1  skrll 	/* Enable interrupt coalesing */
    731  1.16.16.1  skrll 	sc->sc_ic_rx_time = 768;
    732  1.16.16.1  skrll 	sc->sc_ic_rx_count = 16;
    733  1.16.16.1  skrll 	sc->sc_ic_tx_time = 768;
    734  1.16.16.1  skrll 	sc->sc_ic_tx_count = 16;
    735  1.16.16.1  skrll 	pq3etsec_set_ic_rx(sc);
    736  1.16.16.1  skrll 	pq3etsec_set_ic_tx(sc);
    737  1.16.16.1  skrll 	pq3etsec_sysctl_setup(NULL, sc);
    738        1.2   matt 
    739  1.16.16.1  skrll 	char enaddr[ETHER_ADDR_LEN] = {
    740  1.16.16.1  skrll 	    [0] = sc->sc_macstnaddr2 >> 16,
    741  1.16.16.1  skrll 	    [1] = sc->sc_macstnaddr2 >> 24,
    742  1.16.16.1  skrll 	    [2] = sc->sc_macstnaddr1 >>  0,
    743  1.16.16.1  skrll 	    [3] = sc->sc_macstnaddr1 >>  8,
    744  1.16.16.1  skrll 	    [4] = sc->sc_macstnaddr1 >> 16,
    745  1.16.16.1  skrll 	    [5] = sc->sc_macstnaddr1 >> 24,
    746  1.16.16.1  skrll 	};
    747        1.2   matt 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    748        1.2   matt 	   ether_sprintf(enaddr));
    749        1.2   matt 
    750        1.2   matt 	const char * const xname = device_xname(sc->sc_dev);
    751        1.2   matt 	struct ethercom * const ec = &sc->sc_ec;
    752        1.2   matt 	struct ifnet * const ifp = &ec->ec_if;
    753        1.2   matt 
    754        1.2   matt 	ec->ec_mii = &sc->sc_mii;
    755        1.2   matt 
    756        1.2   matt 	sc->sc_mii.mii_ifp = ifp;
    757       1.16   matt 	sc->sc_mii.mii_readreg = pq3mdio_mii_readreg;
    758       1.16   matt 	sc->sc_mii.mii_writereg = pq3mdio_mii_writereg;
    759        1.2   matt 	sc->sc_mii.mii_statchg = pq3etsec_mii_statchg;
    760        1.2   matt 
    761        1.2   matt 	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
    762        1.2   matt 	    ether_mediastatus);
    763        1.2   matt 
    764       1.16   matt 	if (sc->sc_mdio_dev != NULL && sc->sc_phy_addr < 32) {
    765       1.16   matt 		mii_attach(sc->sc_mdio_dev, &sc->sc_mii, 0xffffffff,
    766        1.3   matt 		    sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
    767        1.3   matt 
    768        1.3   matt 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
    769        1.3   matt 			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
    770        1.3   matt 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
    771        1.3   matt 		} else {
    772        1.3   matt 			callout_schedule(&sc->sc_mii_callout, hz);
    773        1.3   matt 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
    774        1.3   matt 		}
    775        1.2   matt 	} else {
    776        1.3   matt 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
    777        1.3   matt 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX);
    778        1.2   matt 	}
    779        1.2   matt 
    780        1.2   matt 	ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
    781        1.2   matt 	    | ETHERCAP_JUMBO_MTU;
    782        1.2   matt 
    783        1.2   matt 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
    784        1.2   matt 	ifp->if_softc = sc;
    785        1.2   matt 	ifp->if_capabilities = IFCAP_ETSEC;
    786        1.2   matt 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    787        1.2   matt 	ifp->if_ioctl = pq3etsec_ifioctl;
    788        1.2   matt 	ifp->if_start = pq3etsec_ifstart;
    789        1.2   matt 	ifp->if_watchdog = pq3etsec_ifwatchdog;
    790        1.2   matt 	ifp->if_init = pq3etsec_ifinit;
    791        1.2   matt 	ifp->if_stop = pq3etsec_ifstop;
    792        1.2   matt 	IFQ_SET_READY(&ifp->if_snd);
    793        1.2   matt 
    794        1.2   matt 	pq3etsec_ifstop(ifp, true);
    795        1.2   matt 
    796        1.2   matt 	/*
    797        1.2   matt 	 * Attach the interface.
    798        1.2   matt 	 */
    799  1.16.16.1  skrll 	if_initialize(ifp);
    800        1.2   matt 	ether_ifattach(ifp, enaddr);
    801  1.16.16.1  skrll 	if_register(ifp);
    802        1.2   matt 
    803        1.2   matt 	evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC,
    804        1.2   matt 	    NULL, xname, "rx stall");
    805        1.2   matt 	evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
    806        1.2   matt 	    NULL, xname, "tx stall");
    807        1.2   matt 	evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR,
    808        1.2   matt 	    NULL, xname, "tx intr");
    809        1.2   matt 	evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR,
    810        1.2   matt 	    NULL, xname, "rx intr");
    811        1.2   matt 	evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR,
    812        1.2   matt 	    NULL, xname, "error intr");
    813        1.2   matt 	evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
    814        1.2   matt 	    NULL, xname, "soft intr");
    815        1.2   matt 	evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC,
    816        1.2   matt 	    NULL, xname, "tx pause");
    817        1.2   matt 	evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC,
    818        1.2   matt 	    NULL, xname, "rx pause");
    819        1.2   matt 	evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC,
    820        1.2   matt 	    NULL, xname, "mii ticks");
    821        1.2   matt }
    822        1.2   matt 
    823        1.2   matt static uint64_t
    824        1.2   matt pq3etsec_macaddr_create(const uint8_t *lladdr)
    825        1.2   matt {
    826        1.2   matt 	uint64_t macaddr = 0;
    827        1.2   matt 
    828        1.2   matt 	lladdr += ETHER_ADDR_LEN;
    829        1.2   matt 	for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) {
    830        1.2   matt 		macaddr = (macaddr << 8) | *--lladdr;
    831        1.2   matt 	}
    832        1.2   matt 	return macaddr << 16;
    833        1.2   matt }
    834        1.2   matt 
    835        1.2   matt static int
    836        1.2   matt pq3etsec_ifinit(struct ifnet *ifp)
    837        1.2   matt {
    838        1.2   matt 	struct pq3etsec_softc * const sc = ifp->if_softc;
    839        1.2   matt 	int error = 0;
    840        1.2   matt 
    841        1.2   matt 	sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
    842        1.2   matt 	if (ifp->if_mtu > ETHERMTU_JUMBO)
    843        1.2   matt 		return error;
    844        1.2   matt 
    845        1.2   matt 	KASSERT(ifp->if_flags & IFF_UP);
    846        1.2   matt 
    847        1.2   matt 	/*
    848        1.2   matt 	 * Stop the interface (steps 1 to 4 in the Soft Reset and
    849        1.2   matt 	 * Reconfigurating Procedure.
    850        1.2   matt 	 */
    851        1.2   matt 	pq3etsec_ifstop(ifp, 0);
    852        1.2   matt 
    853        1.2   matt 	/*
    854        1.2   matt 	 * If our frame size has changed (or it's our first time through)
    855        1.2   matt 	 * destroy the existing transmit mapcache.
    856        1.2   matt 	 */
    857        1.2   matt 	if (sc->sc_tx_mapcache != NULL
    858        1.2   matt 	    && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
    859        1.2   matt 		pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
    860        1.2   matt 		sc->sc_tx_mapcache = NULL;
    861        1.2   matt 	}
    862        1.2   matt 
    863        1.2   matt 	if (sc->sc_tx_mapcache == NULL) {
    864        1.2   matt 		error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
    865       1.10   matt 		    ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS);
    866        1.2   matt 		if (error)
    867        1.2   matt 			return error;
    868        1.2   matt 	}
    869        1.2   matt 
    870        1.2   matt 	sc->sc_ev_mii_ticks.ev_count++;
    871        1.2   matt 	mii_tick(&sc->sc_mii);
    872        1.2   matt 
    873        1.2   matt 	if (ifp->if_flags & IFF_PROMISC) {
    874        1.2   matt 		sc->sc_rctrl |= RCTRL_PROM;
    875        1.2   matt 	} else {
    876        1.2   matt 		sc->sc_rctrl &= ~RCTRL_PROM;
    877        1.2   matt 	}
    878        1.2   matt 
    879        1.2   matt 	uint32_t rctrl_prsdep = 0;
    880        1.2   matt 	sc->sc_rctrl &= ~(RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP);
    881        1.2   matt 	if (VLAN_ATTACHED(&sc->sc_ec)) {
    882        1.2   matt 		sc->sc_rctrl |= RCTRL_VLEX;
    883        1.2   matt 		rctrl_prsdep = RCTRL_PRSDEP_L2;
    884        1.2   matt 	}
    885        1.2   matt 	if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) {
    886        1.2   matt 		sc->sc_rctrl |= RCTRL_IPCSEN;
    887        1.2   matt 		rctrl_prsdep = RCTRL_PRSDEP_L3;
    888        1.2   matt 	}
    889        1.2   matt 	if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) {
    890        1.2   matt 		sc->sc_rctrl |= RCTRL_TUCSEN;
    891        1.2   matt 		rctrl_prsdep = RCTRL_PRSDEP_L4;
    892        1.2   matt 	}
    893        1.2   matt 	sc->sc_rctrl |= rctrl_prsdep;
    894        1.2   matt #if 0
    895        1.2   matt 	if (sc->sc_rctrl & (RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP))
    896        1.2   matt 		aprint_normal_dev(sc->sc_dev,
    897        1.2   matt 		    "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n",
    898        1.2   matt 		    sc->sc_rctrl,
    899        1.2   matt 		    __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN),
    900        1.2   matt 		    __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN),
    901        1.2   matt 		    __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX),
    902        1.2   matt 		    __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP));
    903        1.2   matt #endif
    904        1.2   matt 
    905        1.2   matt 	sc->sc_tctrl &= ~(TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS);
    906        1.2   matt 	if (VLAN_ATTACHED(&sc->sc_ec))		/* is this really true */
    907        1.2   matt 		sc->sc_tctrl |= TCTRL_VLINS;
    908        1.2   matt 	if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN)
    909        1.2   matt 		sc->sc_tctrl |= TCTRL_IPCSEN;
    910        1.2   matt 	if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN)
    911        1.2   matt 		sc->sc_tctrl |= TCTRL_TUCSEN;
    912        1.2   matt #if 0
    913        1.2   matt 	if (sc->sc_tctrl & (TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS))
    914        1.2   matt 		aprint_normal_dev(sc->sc_dev,
    915        1.2   matt 		    "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n",
    916        1.2   matt 		    sc->sc_tctrl,
    917        1.2   matt 		    __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN),
    918        1.2   matt 		    __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN),
    919        1.2   matt 		    __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS));
    920        1.2   matt #endif
    921        1.2   matt 
    922        1.2   matt 	sc->sc_maccfg1 &= ~(MACCFG1_TX_EN|MACCFG1_RX_EN);
    923        1.2   matt 
    924        1.2   matt 	const uint64_t macstnaddr =
    925        1.2   matt 	    pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl));
    926        1.2   matt 
    927        1.2   matt 	sc->sc_imask = IEVENT_DPE;
    928        1.2   matt 
    929        1.2   matt 	/* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */
    930        1.2   matt 	pq3etsec_rxq_reset(sc, &sc->sc_rxq);
    931        1.2   matt 	pq3etsec_rxq_produce(sc, &sc->sc_rxq);	/* fill with rx buffers */
    932        1.2   matt 
    933        1.2   matt 	/* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */
    934        1.2   matt 	pq3etsec_txq_reset(sc, &sc->sc_txq);
    935        1.2   matt 
    936        1.2   matt 	/* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */
    937        1.2   matt 	KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2);
    938        1.2   matt 	etsec_write(sc, MAXFRM, sc->sc_maxfrm);
    939        1.2   matt 	etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32));
    940        1.2   matt 	etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >>  0));
    941        1.2   matt 	etsec_write(sc, MACCFG1, sc->sc_maccfg1);
    942        1.2   matt 	etsec_write(sc, MACCFG2, sc->sc_maccfg2);
    943        1.2   matt 	etsec_write(sc, ECNTRL, sc->sc_ecntrl);
    944        1.2   matt 
    945        1.2   matt 	/* 8. Setup group address hash table (GADDR0-GADDR15) */
    946        1.2   matt 	pq3etsec_mc_setup(sc);
    947        1.2   matt 
    948        1.2   matt 	/* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */
    949        1.2   matt 	etsec_write(sc, MRBLR, MCLBYTES);
    950        1.2   matt 
    951        1.2   matt 	/* 10. Setup WWR, WOP, TOD bits in DMACTRL register */
    952        1.2   matt 	sc->sc_dmactrl |= DMACTRL_DEFAULT;
    953        1.2   matt 	etsec_write(sc, DMACTRL, sc->sc_dmactrl);
    954        1.2   matt 
    955        1.2   matt 	/* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */
    956        1.2   matt 	etsec_write(sc, TQUEUE, TQUEUE_EN0);
    957        1.2   matt 	sc->sc_imask |= IEVENT_TXF|IEVENT_TXE|IEVENT_TXC;
    958        1.2   matt 
    959        1.2   matt 	etsec_write(sc, TCTRL, sc->sc_tctrl);	/* for TOE stuff */
    960        1.2   matt 
    961        1.2   matt 	/* 12. Enable receive queues in RQUEUE, */
    962        1.2   matt 	etsec_write(sc, RQUEUE, RQUEUE_EN0|RQUEUE_EX0);
    963        1.2   matt 	sc->sc_imask |= IEVENT_RXF|IEVENT_BSY|IEVENT_RXC;
    964        1.2   matt 
    965        1.2   matt 	/*     and optionally set TOE functionality in RCTRL. */
    966        1.2   matt 	etsec_write(sc, RCTRL, sc->sc_rctrl);
    967        1.2   matt 	sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL);
    968        1.2   matt 	if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF)
    969        1.2   matt 		sc->sc_rx_adjlen += sizeof(struct rxfcb);
    970        1.2   matt 
    971        1.2   matt 	/* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */
    972        1.2   matt 	etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF);
    973        1.2   matt 
    974        1.2   matt 	/* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/
    975        1.2   matt 	etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF);
    976        1.2   matt 
    977        1.2   matt 	/* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */
    978        1.2   matt 	sc->sc_dmactrl &= ~(DMACTRL_GRS|DMACTRL_GTS);
    979        1.2   matt 	etsec_write(sc, DMACTRL, sc->sc_dmactrl);
    980        1.2   matt 
    981        1.2   matt 	/* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */
    982        1.2   matt 	etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
    983        1.2   matt 	etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
    984        1.2   matt 
    985        1.2   matt 	sc->sc_soft_flags = 0;
    986        1.2   matt 
    987        1.2   matt 	etsec_write(sc, IMASK, sc->sc_imask);
    988        1.2   matt 
    989        1.2   matt 	ifp->if_flags |= IFF_RUNNING;
    990        1.2   matt 
    991        1.2   matt 	return error;
    992        1.2   matt }
    993        1.2   matt 
    994        1.2   matt static void
    995        1.2   matt pq3etsec_ifstop(struct ifnet *ifp, int disable)
    996        1.2   matt {
    997        1.2   matt 	struct pq3etsec_softc * const sc = ifp->if_softc;
    998        1.2   matt 
    999        1.2   matt 	KASSERT(!cpu_intr_p());
   1000        1.2   matt 	const uint32_t imask_gsc_mask = IEVENT_GTSC|IEVENT_GRSC;
   1001        1.2   matt 	/*
   1002        1.2   matt 	 * Clear the GTSC and GRSC from the interrupt mask until
   1003        1.2   matt 	 * we are ready for them.  Then clear them from IEVENT,
   1004        1.2   matt 	 * request the graceful shutdown, and then enable the
   1005        1.2   matt 	 * GTSC and GRSC bits in the mask.  This should cause the
   1006        1.2   matt 	 * error interrupt to fire which will issue a wakeup to
   1007        1.2   matt 	 * allow us to resume.
   1008        1.2   matt 	 */
   1009        1.2   matt 
   1010        1.2   matt 	/*
   1011        1.2   matt 	 * 1. Set GRS/GTS bits in DMACTRL register
   1012        1.2   matt 	 */
   1013        1.2   matt 	sc->sc_dmactrl |= DMACTRL_GRS|DMACTRL_GTS;
   1014        1.2   matt 	etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask);
   1015        1.2   matt 	etsec_write(sc, IEVENT, imask_gsc_mask);
   1016        1.2   matt 	etsec_write(sc, DMACTRL, sc->sc_dmactrl);
   1017        1.2   matt 
   1018        1.2   matt 	if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN|MACCFG1_RX_EN)) {
   1019        1.2   matt 		/*
   1020        1.2   matt 		 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set
   1021        1.2   matt 		 */
   1022        1.2   matt 		etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask);
   1023        1.2   matt 
   1024        1.2   matt 		u_int timo = 1000;
   1025        1.2   matt 		uint32_t ievent = etsec_read(sc, IEVENT);
   1026        1.2   matt 		while ((ievent & imask_gsc_mask) != imask_gsc_mask) {
   1027        1.2   matt 			if (--timo == 0) {
   1028        1.2   matt 				aprint_error_dev(sc->sc_dev,
   1029        1.2   matt 				    "WARNING: "
   1030        1.2   matt 				    "request to stop failed (IEVENT=%#x)\n",
   1031        1.2   matt 				    ievent);
   1032        1.2   matt 				break;
   1033        1.2   matt 			}
   1034        1.2   matt 			delay(10);
   1035        1.2   matt 			ievent = etsec_read(sc, IEVENT);
   1036        1.2   matt 		}
   1037        1.2   matt 	}
   1038        1.2   matt 
   1039        1.2   matt 	/*
   1040        1.2   matt 	 * Now reset the controller.
   1041        1.2   matt 	 *
   1042        1.2   matt 	 * 3. Set SOFT_RESET bit in MACCFG1 register
   1043        1.2   matt 	 * 4. Clear SOFT_RESET bit in MACCFG1 register
   1044        1.2   matt 	 */
   1045        1.2   matt 	etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET);
   1046        1.2   matt 	etsec_write(sc, MACCFG1, 0);
   1047        1.2   matt 	etsec_write(sc, IMASK, 0);
   1048        1.2   matt 	etsec_write(sc, IEVENT, ~0);
   1049        1.2   matt 	sc->sc_imask = 0;
   1050        1.2   matt 	ifp->if_flags &= ~IFF_RUNNING;
   1051        1.2   matt 
   1052        1.2   matt 	uint32_t tbipa = etsec_read(sc, TBIPA);
   1053        1.2   matt 	if (tbipa == sc->sc_phy_addr) {
   1054        1.2   matt 		aprint_normal_dev(sc->sc_dev, "relocating TBI\n");
   1055        1.2   matt 		etsec_write(sc, TBIPA, 0x1f);
   1056        1.2   matt 	}
   1057        1.2   matt 	uint32_t miimcfg = etsec_read(sc, MIIMCFG);
   1058        1.2   matt 	etsec_write(sc, MIIMCFG, MIIMCFG_RESET);
   1059        1.2   matt 	etsec_write(sc, MIIMCFG, miimcfg);
   1060        1.2   matt 
   1061        1.2   matt 	/*
   1062        1.2   matt 	 * Let's consume any remaing transmitted packets.  And if we are
   1063        1.2   matt 	 * disabling the interface, purge ourselves of any untransmitted
   1064        1.2   matt 	 * packets.  But don't consume any received packets, just drop them.
   1065        1.2   matt 	 * If we aren't disabling the interface, save the mbufs in the
   1066        1.2   matt 	 * receive queue for reuse.
   1067        1.2   matt 	 */
   1068        1.2   matt 	pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable);
   1069        1.2   matt 	pq3etsec_txq_consume(sc, &sc->sc_txq);
   1070        1.2   matt 	if (disable) {
   1071        1.2   matt 		pq3etsec_txq_purge(sc, &sc->sc_txq);
   1072  1.16.16.1  skrll 		IFQ_PURGE(&ifp->if_snd);
   1073        1.2   matt 	}
   1074        1.2   matt }
   1075        1.2   matt 
   1076        1.2   matt static void
   1077        1.2   matt pq3etsec_ifwatchdog(struct ifnet *ifp)
   1078        1.2   matt {
   1079        1.2   matt }
   1080        1.2   matt 
   1081        1.2   matt static void
   1082        1.2   matt pq3etsec_mc_setup(
   1083        1.2   matt 	struct pq3etsec_softc *sc)
   1084        1.2   matt {
   1085        1.2   matt 	struct ethercom * const ec = &sc->sc_ec;
   1086        1.2   matt 	struct ifnet * const ifp = &sc->sc_if;
   1087        1.2   matt 	struct ether_multi *enm;
   1088        1.2   matt 	struct ether_multistep step;
   1089        1.2   matt 	uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8);
   1090        1.2   matt 	const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8);
   1091        1.2   matt 
   1092        1.2   matt 	memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr));
   1093        1.2   matt 	memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
   1094        1.2   matt 
   1095        1.2   matt 	ifp->if_flags &= ~IFF_ALLMULTI;
   1096        1.2   matt 
   1097        1.2   matt 	ETHER_FIRST_MULTI(step, ec, enm);
   1098        1.2   matt 	for (u_int i = 0; enm != NULL; ) {
   1099        1.2   matt 		const char *addr = enm->enm_addrlo;
   1100        1.2   matt 		if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
   1101        1.2   matt 			ifp->if_flags |= IFF_ALLMULTI;
   1102        1.2   matt 			memset(gaddr, 0xff, 32 << (crc_shift & 1));
   1103        1.2   matt 			memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
   1104        1.2   matt 			break;
   1105        1.2   matt 		}
   1106        1.2   matt 		if ((sc->sc_rctrl & RCTRL_EMEN)
   1107        1.2   matt 		    && i < __arraycount(sc->sc_macaddrs)) {
   1108        1.2   matt 			sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr);
   1109        1.2   matt 		} else {
   1110        1.2   matt 			uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
   1111        1.2   matt #if 0
   1112        1.2   matt 			printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__,
   1113        1.2   matt 			    ether_sprintf(addr), crc,
   1114        1.2   matt 			    crc >> crc_shift,
   1115        1.2   matt 			    crc >> (crc_shift + 5),
   1116        1.2   matt 			    (crc >> crc_shift) & 31,
   1117        1.2   matt 			    1 << (((crc >> crc_shift) & 31) ^ 31));
   1118        1.2   matt #endif
   1119        1.2   matt 			/*
   1120        1.2   matt 			 * The documentation doesn't completely follow PowerPC
   1121        1.2   matt 			 * bit order.  The BE crc32 (H) for 01:00:5E:00:00:01
   1122        1.2   matt 			 * is 0x7fa32d9b.  By empirical testing, the
   1123        1.2   matt 			 * corresponding hash bit is word 3, bit 31 (ppc bit
   1124        1.2   matt 			 * order).  Since 3 << 31 | 31 is 0x7f, we deduce
   1125        1.2   matt 			 * H[0:2] selects the register while H[3:7] selects
   1126        1.2   matt 			 * the bit (ppc bit order).
   1127        1.2   matt 			 */
   1128        1.2   matt 			crc >>= crc_shift;
   1129        1.2   matt 			gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31);
   1130        1.2   matt 		}
   1131        1.2   matt 		ETHER_NEXT_MULTI(step, enm);
   1132        1.2   matt 	}
   1133        1.2   matt 	for (u_int i = 0; i < 8; i++) {
   1134        1.2   matt 		etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]);
   1135        1.2   matt 		etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]);
   1136        1.2   matt #if 0
   1137        1.2   matt 		if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8])
   1138        1.2   matt 		printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__,
   1139        1.2   matt 		    i, IGADDR(i), etsec_read(sc, IGADDR(i)),
   1140        1.2   matt 		    i, GADDR(i), etsec_read(sc, GADDR(i)));
   1141        1.2   matt #endif
   1142        1.2   matt 	}
   1143        1.2   matt 	for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) {
   1144        1.2   matt 		uint64_t macaddr = sc->sc_macaddrs[i];
   1145        1.2   matt 		etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32));
   1146        1.2   matt 		etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >>  0));
   1147        1.2   matt #if 0
   1148        1.2   matt 		if (macaddr)
   1149        1.2   matt 		printf("%s: MAC%02uADDR2(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__,
   1150        1.2   matt 		    i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)),
   1151        1.2   matt 		    i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i)));
   1152        1.2   matt #endif
   1153        1.2   matt 	}
   1154        1.2   matt }
   1155        1.2   matt 
   1156        1.2   matt static int
   1157        1.2   matt pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
   1158        1.2   matt {
   1159        1.2   matt 	struct pq3etsec_softc *sc  = ifp->if_softc;
   1160        1.2   matt 	struct ifreq * const ifr = data;
   1161        1.2   matt 	const int s = splnet();
   1162        1.2   matt 	int error;
   1163        1.2   matt 
   1164        1.2   matt 	switch (cmd) {
   1165        1.2   matt 	case SIOCSIFMEDIA:
   1166        1.2   matt 	case SIOCGIFMEDIA:
   1167        1.2   matt 		/* Flow control requires full-duplex mode. */
   1168        1.2   matt 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   1169        1.2   matt 		    (ifr->ifr_media & IFM_FDX) == 0)
   1170        1.2   matt 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   1171        1.2   matt 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   1172        1.2   matt 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   1173        1.2   matt 				/* We can do both TXPAUSE and RXPAUSE. */
   1174        1.2   matt 				ifr->ifr_media |=
   1175        1.2   matt 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   1176        1.2   matt 			}
   1177        1.2   matt 		}
   1178        1.2   matt 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   1179        1.2   matt 		break;
   1180        1.2   matt 
   1181        1.2   matt 	default:
   1182        1.2   matt 		error = ether_ioctl(ifp, cmd, data);
   1183        1.2   matt 		if (error != ENETRESET)
   1184        1.2   matt 			break;
   1185        1.2   matt 
   1186        1.2   matt 		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   1187        1.2   matt 			error = 0;
   1188        1.2   matt 			if (ifp->if_flags & IFF_RUNNING)
   1189        1.2   matt 				pq3etsec_mc_setup(sc);
   1190        1.2   matt 			break;
   1191        1.2   matt 		}
   1192        1.2   matt 		error = pq3etsec_ifinit(ifp);
   1193        1.2   matt 		break;
   1194        1.2   matt 	}
   1195        1.2   matt 
   1196        1.2   matt 	splx(s);
   1197        1.2   matt 	return error;
   1198        1.2   matt }
   1199        1.2   matt 
   1200        1.2   matt static void
   1201        1.2   matt pq3etsec_rxq_desc_presync(
   1202        1.2   matt 	struct pq3etsec_softc *sc,
   1203        1.2   matt 	struct pq3etsec_rxqueue *rxq,
   1204        1.2   matt 	volatile struct rxbd *rxbd,
   1205        1.2   matt 	size_t count)
   1206        1.2   matt {
   1207        1.2   matt 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
   1208        1.2   matt 	    (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
   1209        1.2   matt 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1210        1.2   matt }
   1211        1.2   matt 
   1212        1.2   matt static void
   1213        1.2   matt pq3etsec_rxq_desc_postsync(
   1214        1.2   matt 	struct pq3etsec_softc *sc,
   1215        1.2   matt 	struct pq3etsec_rxqueue *rxq,
   1216        1.2   matt 	volatile struct rxbd *rxbd,
   1217        1.2   matt 	size_t count)
   1218        1.2   matt {
   1219        1.2   matt 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
   1220        1.2   matt 	    (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
   1221        1.2   matt 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   1222        1.2   matt }
   1223        1.2   matt 
   1224        1.2   matt static void
   1225        1.2   matt pq3etsec_txq_desc_presync(
   1226        1.2   matt 	struct pq3etsec_softc *sc,
   1227        1.2   matt 	struct pq3etsec_txqueue *txq,
   1228        1.2   matt 	volatile struct txbd *txbd,
   1229        1.2   matt 	size_t count)
   1230        1.2   matt {
   1231        1.2   matt 	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
   1232        1.2   matt 	    (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
   1233        1.2   matt 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1234        1.2   matt }
   1235        1.2   matt 
   1236        1.2   matt static void
   1237        1.2   matt pq3etsec_txq_desc_postsync(
   1238        1.2   matt 	struct pq3etsec_softc *sc,
   1239        1.2   matt 	struct pq3etsec_txqueue *txq,
   1240        1.2   matt 	volatile struct txbd *txbd,
   1241        1.2   matt 	size_t count)
   1242        1.2   matt {
   1243        1.2   matt 	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
   1244        1.2   matt 	    (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
   1245        1.2   matt 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   1246        1.2   matt }
   1247        1.2   matt 
   1248        1.2   matt static bus_dmamap_t
   1249        1.2   matt pq3etsec_mapcache_get(
   1250        1.2   matt 	struct pq3etsec_softc *sc,
   1251        1.2   matt 	struct pq3etsec_mapcache *dmc)
   1252        1.2   matt {
   1253       1.10   matt 	KASSERT(dmc->dmc_nmaps > 0);
   1254        1.2   matt 	KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
   1255        1.2   matt 	return dmc->dmc_maps[--dmc->dmc_nmaps];
   1256        1.2   matt }
   1257        1.2   matt 
   1258        1.2   matt static void
   1259        1.2   matt pq3etsec_mapcache_put(
   1260        1.2   matt 	struct pq3etsec_softc *sc,
   1261        1.2   matt 	struct pq3etsec_mapcache *dmc,
   1262        1.2   matt 	bus_dmamap_t map)
   1263        1.2   matt {
   1264        1.2   matt 	KASSERT(map != NULL);
   1265        1.2   matt 	KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
   1266        1.2   matt 	dmc->dmc_maps[dmc->dmc_nmaps++] = map;
   1267        1.2   matt }
   1268        1.2   matt 
   1269        1.2   matt static void
   1270        1.2   matt pq3etsec_mapcache_destroy(
   1271        1.2   matt 	struct pq3etsec_softc *sc,
   1272        1.2   matt 	struct pq3etsec_mapcache *dmc)
   1273        1.2   matt {
   1274        1.2   matt 	const size_t dmc_size =
   1275        1.2   matt 	    offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]);
   1276        1.2   matt 
   1277        1.2   matt 	for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
   1278        1.2   matt 		bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
   1279        1.2   matt 	}
   1280       1.14   matt 	kmem_intr_free(dmc, dmc_size);
   1281        1.2   matt }
   1282        1.2   matt 
   1283        1.2   matt static int
   1284        1.2   matt pq3etsec_mapcache_create(
   1285        1.2   matt 	struct pq3etsec_softc *sc,
   1286        1.2   matt 	struct pq3etsec_mapcache **dmc_p,
   1287        1.2   matt 	size_t maxmaps,
   1288        1.2   matt 	size_t maxmapsize,
   1289        1.2   matt 	size_t maxseg)
   1290        1.2   matt {
   1291        1.2   matt 	const size_t dmc_size =
   1292        1.2   matt 	    offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]);
   1293       1.14   matt 	struct pq3etsec_mapcache * const dmc =
   1294       1.14   matt 		kmem_intr_zalloc(dmc_size, KM_NOSLEEP);
   1295        1.2   matt 
   1296        1.2   matt 	dmc->dmc_maxmaps = maxmaps;
   1297       1.10   matt 	dmc->dmc_nmaps = maxmaps;
   1298        1.2   matt 	dmc->dmc_maxmapsize = maxmapsize;
   1299        1.2   matt 	dmc->dmc_maxseg = maxseg;
   1300        1.2   matt 
   1301       1.10   matt 	for (u_int i = 0; i < maxmaps; i++) {
   1302        1.2   matt 		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
   1303        1.2   matt 		     dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
   1304        1.2   matt 		     BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
   1305        1.2   matt 		if (error) {
   1306        1.2   matt 			aprint_error_dev(sc->sc_dev,
   1307        1.2   matt 			    "failed to creat dma map cache "
   1308       1.10   matt 			    "entry %u of %zu: %d\n",
   1309       1.10   matt 			    i, maxmaps, error);
   1310        1.2   matt 			while (i-- > 0) {
   1311        1.2   matt 				bus_dmamap_destroy(sc->sc_dmat,
   1312        1.2   matt 				    dmc->dmc_maps[i]);
   1313        1.2   matt 			}
   1314       1.14   matt 			kmem_intr_free(dmc, dmc_size);
   1315        1.2   matt 			return error;
   1316        1.2   matt 		}
   1317        1.2   matt 		KASSERT(dmc->dmc_maps[i] != NULL);
   1318        1.2   matt 	}
   1319        1.2   matt 
   1320        1.2   matt 	*dmc_p = dmc;
   1321        1.2   matt 
   1322        1.2   matt 	return 0;
   1323        1.2   matt }
   1324        1.2   matt 
   1325        1.2   matt #if 0
   1326        1.2   matt static void
   1327        1.2   matt pq3etsec_dmamem_free(
   1328        1.2   matt 	bus_dma_tag_t dmat,
   1329        1.2   matt 	size_t map_size,
   1330        1.2   matt 	bus_dma_segment_t *seg,
   1331        1.2   matt 	bus_dmamap_t map,
   1332        1.2   matt 	void *kvap)
   1333        1.2   matt {
   1334        1.2   matt 	bus_dmamap_destroy(dmat, map);
   1335        1.2   matt 	bus_dmamem_unmap(dmat, kvap, map_size);
   1336        1.2   matt 	bus_dmamem_free(dmat, seg, 1);
   1337        1.2   matt }
   1338        1.2   matt #endif
   1339        1.2   matt 
   1340        1.2   matt static int
   1341        1.2   matt pq3etsec_dmamem_alloc(
   1342        1.2   matt 	bus_dma_tag_t dmat,
   1343        1.2   matt 	size_t map_size,
   1344        1.2   matt 	bus_dma_segment_t *seg,
   1345        1.2   matt 	bus_dmamap_t *map,
   1346        1.2   matt 	void **kvap)
   1347        1.2   matt {
   1348        1.2   matt 	int error;
   1349        1.2   matt 	int nseg;
   1350        1.2   matt 
   1351        1.2   matt 	*kvap = NULL;
   1352        1.2   matt 	*map = NULL;
   1353        1.2   matt 
   1354        1.2   matt 	error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
   1355        1.2   matt 	   seg, 1, &nseg, 0);
   1356        1.2   matt 	if (error)
   1357        1.2   matt 		return error;
   1358        1.2   matt 
   1359        1.2   matt 	KASSERT(nseg == 1);
   1360        1.2   matt 
   1361        1.2   matt 	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
   1362        1.2   matt 	    BUS_DMA_COHERENT);
   1363        1.2   matt 	if (error == 0) {
   1364        1.2   matt 		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
   1365        1.2   matt 		    map);
   1366        1.2   matt 		if (error == 0) {
   1367        1.2   matt 			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
   1368        1.2   matt 			    NULL, 0);
   1369        1.2   matt 			if (error == 0)
   1370        1.2   matt 				return 0;
   1371        1.2   matt 			bus_dmamap_destroy(dmat, *map);
   1372        1.2   matt 			*map = NULL;
   1373        1.2   matt 		}
   1374        1.2   matt 		bus_dmamem_unmap(dmat, *kvap, map_size);
   1375        1.2   matt 		*kvap = NULL;
   1376        1.2   matt 	}
   1377        1.2   matt 	bus_dmamem_free(dmat, seg, nseg);
   1378        1.2   matt 	return 0;
   1379        1.2   matt }
   1380        1.2   matt 
   1381        1.2   matt static struct mbuf *
   1382        1.2   matt pq3etsec_rx_buf_alloc(
   1383        1.2   matt 	struct pq3etsec_softc *sc)
   1384        1.2   matt {
   1385        1.2   matt 	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
   1386        1.2   matt 	if (m == NULL) {
   1387        1.2   matt 		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
   1388        1.2   matt 		return NULL;
   1389        1.2   matt 	}
   1390        1.2   matt 	MCLGET(m, M_DONTWAIT);
   1391        1.2   matt 	if ((m->m_flags & M_EXT) == 0) {
   1392        1.2   matt 		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
   1393        1.2   matt 		m_freem(m);
   1394        1.2   matt 		return NULL;
   1395        1.2   matt 	}
   1396        1.2   matt 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   1397        1.2   matt 
   1398        1.2   matt 	bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
   1399        1.2   matt 	if (map == NULL) {
   1400        1.2   matt 		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
   1401        1.2   matt 		m_freem(m);
   1402        1.2   matt 		return NULL;
   1403        1.2   matt 	}
   1404        1.2   matt 	M_SETCTX(m, map);
   1405        1.2   matt 	m->m_len = m->m_pkthdr.len = MCLBYTES;
   1406        1.2   matt 	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
   1407        1.2   matt 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   1408        1.2   matt 	if (error) {
   1409        1.2   matt 		aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n",
   1410        1.2   matt 		    error);
   1411        1.2   matt 		M_SETCTX(m, NULL);
   1412        1.2   matt 		m_freem(m);
   1413        1.2   matt 		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
   1414        1.2   matt 		return NULL;
   1415        1.2   matt 	}
   1416        1.2   matt 	KASSERT(map->dm_mapsize == MCLBYTES);
   1417        1.2   matt 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
   1418        1.2   matt 	    BUS_DMASYNC_PREREAD);
   1419        1.2   matt 
   1420        1.2   matt 	return m;
   1421        1.2   matt }
   1422        1.2   matt 
   1423        1.2   matt static void
   1424        1.2   matt pq3etsec_rx_map_unload(
   1425        1.2   matt 	struct pq3etsec_softc *sc,
   1426        1.2   matt 	struct mbuf *m)
   1427        1.2   matt {
   1428        1.2   matt 	KASSERT(m);
   1429        1.2   matt 	for (; m != NULL; m = m->m_next) {
   1430        1.2   matt 		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
   1431        1.2   matt 		KASSERT(map);
   1432        1.2   matt 		KASSERT(map->dm_mapsize == MCLBYTES);
   1433        1.2   matt 		bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
   1434        1.2   matt 		    BUS_DMASYNC_POSTREAD);
   1435        1.2   matt 		bus_dmamap_unload(sc->sc_dmat, map);
   1436        1.2   matt 		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
   1437        1.2   matt 		M_SETCTX(m, NULL);
   1438        1.2   matt 	}
   1439        1.2   matt }
   1440        1.2   matt 
   1441        1.2   matt static bool
   1442        1.2   matt pq3etsec_rxq_produce(
   1443        1.2   matt 	struct pq3etsec_softc *sc,
   1444        1.2   matt 	struct pq3etsec_rxqueue *rxq)
   1445        1.2   matt {
   1446        1.2   matt 	volatile struct rxbd *producer = rxq->rxq_producer;
   1447        1.2   matt #if 0
   1448        1.2   matt 	size_t inuse = rxq->rxq_inuse;
   1449        1.2   matt #endif
   1450        1.2   matt 	while (rxq->rxq_inuse < rxq->rxq_threshold) {
   1451        1.2   matt 		struct mbuf *m;
   1452        1.2   matt 		IF_DEQUEUE(&sc->sc_rx_bufcache, m);
   1453        1.2   matt 		if (m == NULL) {
   1454        1.2   matt 			m = pq3etsec_rx_buf_alloc(sc);
   1455        1.2   matt 			if (m == NULL) {
   1456        1.2   matt 				printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__);
   1457        1.2   matt 				break;
   1458        1.2   matt 			}
   1459        1.2   matt 		}
   1460        1.2   matt 		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
   1461        1.2   matt 		KASSERT(map);
   1462        1.2   matt 
   1463        1.2   matt #ifdef ETSEC_DEBUG
   1464        1.2   matt 		KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL);
   1465        1.2   matt 		rxq->rxq_mbufs[producer-rxq->rxq_first] = m;
   1466        1.2   matt #endif
   1467        1.2   matt 
   1468        1.2   matt 		/* rxbd_len is write-only by the ETSEC */
   1469        1.2   matt 		producer->rxbd_bufptr = map->dm_segs[0].ds_addr;
   1470        1.2   matt 		membar_producer();
   1471        1.2   matt 		producer->rxbd_flags |= RXBD_E;
   1472        1.2   matt 		if (__predict_false(rxq->rxq_mhead == NULL)) {
   1473        1.2   matt 			KASSERT(producer == rxq->rxq_consumer);
   1474        1.2   matt 			rxq->rxq_mconsumer = m;
   1475        1.2   matt 		}
   1476        1.2   matt 		*rxq->rxq_mtail = m;
   1477        1.2   matt 		rxq->rxq_mtail = &m->m_next;
   1478        1.2   matt 		m->m_len = MCLBYTES;
   1479        1.2   matt 		m->m_next = NULL;
   1480        1.2   matt 		rxq->rxq_inuse++;
   1481        1.2   matt 		if (++producer == rxq->rxq_last) {
   1482        1.2   matt 			membar_producer();
   1483        1.2   matt 			pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
   1484        1.2   matt 			    rxq->rxq_last - rxq->rxq_producer);
   1485        1.2   matt 			producer = rxq->rxq_producer = rxq->rxq_first;
   1486        1.2   matt 		}
   1487        1.2   matt 	}
   1488        1.2   matt 	if (producer != rxq->rxq_producer) {
   1489        1.2   matt 		membar_producer();
   1490        1.2   matt 		pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
   1491        1.2   matt 		    producer - rxq->rxq_producer);
   1492        1.2   matt 		rxq->rxq_producer = producer;
   1493        1.2   matt 	}
   1494        1.2   matt 	uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT;
   1495        1.2   matt 	if (qhlt) {
   1496        1.2   matt 		KASSERT(qhlt & rxq->rxq_qmask);
   1497        1.2   matt 		sc->sc_ev_rx_stall.ev_count++;
   1498        1.2   matt 		etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask);
   1499        1.2   matt 	}
   1500        1.2   matt #if 0
   1501        1.2   matt 	aprint_normal_dev(sc->sc_dev,
   1502        1.2   matt 	    "%s: buffers inuse went from %zu to %zu\n",
   1503        1.2   matt 	    __func__, inuse, rxq->rxq_inuse);
   1504        1.2   matt #endif
   1505        1.2   matt 	return true;
   1506        1.2   matt }
   1507        1.2   matt 
   1508        1.2   matt static bool
   1509        1.2   matt pq3etsec_rx_offload(
   1510        1.2   matt 	struct pq3etsec_softc *sc,
   1511        1.2   matt 	struct mbuf *m,
   1512        1.2   matt 	const struct rxfcb *fcb)
   1513        1.2   matt {
   1514        1.2   matt 	if (fcb->rxfcb_flags & RXFCB_VLN) {
   1515        1.2   matt 		VLAN_INPUT_TAG(&sc->sc_if, m, fcb->rxfcb_vlctl,
   1516        1.2   matt 		    m_freem(m); return false);
   1517        1.2   matt 	}
   1518        1.2   matt 	if ((fcb->rxfcb_flags & RXFCB_IP) == 0
   1519        1.2   matt 	    || (fcb->rxfcb_flags & (RXFCB_CIP|RXFCB_CTU)) == 0)
   1520        1.2   matt 		return true;
   1521        1.2   matt 	int csum_flags = 0;
   1522        1.2   matt 	if ((fcb->rxfcb_flags & (RXFCB_IP6|RXFCB_CIP)) == RXFCB_CIP) {
   1523        1.2   matt 		csum_flags |= M_CSUM_IPv4;
   1524        1.2   matt 		if (fcb->rxfcb_flags & RXFCB_EIP)
   1525        1.2   matt 			csum_flags |= M_CSUM_IPv4_BAD;
   1526        1.2   matt 	}
   1527        1.2   matt 	if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) {
   1528        1.2   matt 		int ipv_flags;
   1529        1.2   matt 		if (fcb->rxfcb_flags & RXFCB_IP6)
   1530        1.2   matt 			ipv_flags = M_CSUM_TCPv6|M_CSUM_UDPv6;
   1531        1.2   matt 		else
   1532        1.2   matt 			ipv_flags = M_CSUM_TCPv4|M_CSUM_UDPv4;
   1533        1.2   matt 		if (fcb->rxfcb_pro == IPPROTO_TCP) {
   1534        1.2   matt 			csum_flags |= (M_CSUM_TCPv4|M_CSUM_TCPv6) & ipv_flags;
   1535        1.2   matt 		} else {
   1536        1.2   matt 			csum_flags |= (M_CSUM_UDPv4|M_CSUM_UDPv6) & ipv_flags;
   1537        1.2   matt 		}
   1538        1.2   matt 		if (fcb->rxfcb_flags & RXFCB_ETU)
   1539        1.2   matt 			csum_flags |= M_CSUM_TCP_UDP_BAD;
   1540        1.2   matt 	}
   1541        1.2   matt 
   1542        1.2   matt 	m->m_pkthdr.csum_flags = csum_flags;
   1543        1.2   matt 	return true;
   1544        1.2   matt }
   1545        1.2   matt 
   1546        1.2   matt static void
   1547        1.2   matt pq3etsec_rx_input(
   1548        1.2   matt 	struct pq3etsec_softc *sc,
   1549        1.2   matt 	struct mbuf *m,
   1550        1.2   matt 	uint16_t rxbd_flags)
   1551        1.2   matt {
   1552        1.2   matt 	struct ifnet * const ifp = &sc->sc_if;
   1553        1.2   matt 
   1554        1.2   matt 	pq3etsec_rx_map_unload(sc, m);
   1555        1.2   matt 
   1556        1.2   matt 	if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) {
   1557        1.2   matt 		struct rxfcb fcb = *mtod(m, struct rxfcb *);
   1558        1.2   matt 		if (!pq3etsec_rx_offload(sc, m, &fcb))
   1559        1.2   matt 			return;
   1560        1.2   matt 	}
   1561        1.2   matt 	m_adj(m, sc->sc_rx_adjlen);
   1562        1.2   matt 
   1563        1.2   matt 	if (rxbd_flags & RXBD_M)
   1564        1.2   matt 		m->m_flags |= M_PROMISC;
   1565        1.2   matt 	if (rxbd_flags & RXBD_BC)
   1566        1.2   matt 		m->m_flags |= M_BCAST;
   1567        1.2   matt 	if (rxbd_flags & RXBD_MC)
   1568        1.2   matt 		m->m_flags |= M_MCAST;
   1569        1.2   matt 	m->m_flags |= M_HASFCS;
   1570  1.16.16.3  skrll 	m_set_rcvif(m, &sc->sc_if);
   1571        1.2   matt 
   1572        1.2   matt 	ifp->if_ipackets++;
   1573        1.2   matt 	ifp->if_ibytes += m->m_pkthdr.len;
   1574        1.2   matt 
   1575        1.2   matt 	/*
   1576        1.2   matt 	 * Let's give it to the network subsystm to deal with.
   1577        1.2   matt 	 */
   1578        1.2   matt 	int s = splnet();
   1579        1.2   matt 	bpf_mtap(ifp, m);
   1580  1.16.16.2  skrll 	if_input(ifp, m);
   1581        1.2   matt 	splx(s);
   1582        1.2   matt }
   1583        1.2   matt 
   1584        1.2   matt static void
   1585        1.2   matt pq3etsec_rxq_consume(
   1586        1.2   matt 	struct pq3etsec_softc *sc,
   1587        1.2   matt 	struct pq3etsec_rxqueue *rxq)
   1588        1.2   matt {
   1589        1.2   matt 	struct ifnet * const ifp = &sc->sc_if;
   1590        1.2   matt 	volatile struct rxbd *consumer = rxq->rxq_consumer;
   1591        1.2   matt 	size_t rxconsumed = 0;
   1592        1.2   matt 
   1593        1.2   matt 	etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask);
   1594        1.2   matt 
   1595        1.2   matt 	for (;;) {
   1596        1.2   matt 		if (consumer == rxq->rxq_producer) {
   1597        1.2   matt 			rxq->rxq_consumer = consumer;
   1598        1.2   matt 			rxq->rxq_inuse -= rxconsumed;
   1599        1.4   matt 			KASSERT(rxq->rxq_inuse == 0);
   1600        1.2   matt 			return;
   1601        1.2   matt 		}
   1602        1.2   matt 		pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1);
   1603        1.2   matt 		const uint16_t rxbd_flags = consumer->rxbd_flags;
   1604        1.2   matt 		if (rxbd_flags & RXBD_E) {
   1605        1.2   matt 			rxq->rxq_consumer = consumer;
   1606        1.2   matt 			rxq->rxq_inuse -= rxconsumed;
   1607        1.2   matt 			return;
   1608        1.2   matt 		}
   1609        1.2   matt 		KASSERT(rxq->rxq_mconsumer != NULL);
   1610        1.2   matt #ifdef ETSEC_DEBUG
   1611        1.2   matt 		KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
   1612        1.2   matt #endif
   1613        1.2   matt #if 0
   1614        1.2   matt 		printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n",
   1615        1.2   matt 		    __func__,
   1616        1.2   matt 		    consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len,
   1617        1.2   matt 		    mtod(rxq->rxq_mconsumer, int *)[0],
   1618        1.2   matt 		    mtod(rxq->rxq_mconsumer, int *)[1],
   1619        1.2   matt 		    mtod(rxq->rxq_mconsumer, int *)[2],
   1620        1.2   matt 		    mtod(rxq->rxq_mconsumer, int *)[3]);
   1621        1.2   matt #endif
   1622        1.2   matt 		/*
   1623        1.2   matt 		 * We own this packet again.  Clear all flags except wrap.
   1624        1.2   matt 		 */
   1625        1.2   matt 		rxconsumed++;
   1626        1.2   matt 		consumer->rxbd_flags = rxbd_flags & (RXBD_W|RXBD_I);
   1627        1.2   matt 
   1628        1.2   matt 		/*
   1629        1.2   matt 		 * If this descriptor has the LAST bit set and no errors,
   1630        1.2   matt 		 * it's a valid input packet.
   1631        1.2   matt 		 */
   1632        1.2   matt 		if ((rxbd_flags & (RXBD_L|RXBD_ERRORS)) == RXBD_L) {
   1633        1.2   matt 			size_t rxbd_len = consumer->rxbd_len;
   1634        1.2   matt 			struct mbuf *m = rxq->rxq_mhead;
   1635        1.2   matt 			struct mbuf *m_last = rxq->rxq_mconsumer;
   1636        1.2   matt 			if ((rxq->rxq_mhead = m_last->m_next) == NULL)
   1637        1.2   matt 				rxq->rxq_mtail = &rxq->rxq_mhead;
   1638        1.2   matt 			rxq->rxq_mconsumer = rxq->rxq_mhead;
   1639        1.2   matt 			m_last->m_next = NULL;
   1640        1.2   matt 			m_last->m_len = rxbd_len & (MCLBYTES - 1);
   1641        1.2   matt 			m->m_pkthdr.len = rxbd_len;
   1642        1.2   matt 			pq3etsec_rx_input(sc, m, rxbd_flags);
   1643        1.2   matt 		} else if (rxbd_flags & RXBD_L) {
   1644        1.2   matt 			KASSERT(rxbd_flags & RXBD_ERRORS);
   1645        1.2   matt 			struct mbuf *m;
   1646        1.2   matt 			/*
   1647        1.2   matt 			 * We encountered an error, take the mbufs and add
   1648        1.2   matt 			 * then to the rx bufcache so we can reuse them.
   1649        1.2   matt 			 */
   1650        1.2   matt 			ifp->if_ierrors++;
   1651        1.2   matt 			for (m = rxq->rxq_mhead;
   1652        1.2   matt 			     m != rxq->rxq_mconsumer;
   1653        1.2   matt 			     m = m->m_next) {
   1654        1.2   matt 				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
   1655        1.2   matt 			}
   1656        1.2   matt 			m = rxq->rxq_mconsumer;
   1657        1.2   matt 			if ((rxq->rxq_mhead = m->m_next) == NULL)
   1658        1.2   matt 				rxq->rxq_mtail = &rxq->rxq_mhead;
   1659        1.2   matt 			rxq->rxq_mconsumer = m->m_next;
   1660        1.2   matt 			IF_ENQUEUE(&sc->sc_rx_bufcache, m);
   1661        1.2   matt 		} else {
   1662        1.2   matt 			rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next;
   1663        1.2   matt 		}
   1664        1.2   matt #ifdef ETSEC_DEBUG
   1665        1.2   matt 		rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL;
   1666        1.2   matt #endif
   1667        1.2   matt 
   1668        1.2   matt 		/*
   1669        1.2   matt 		 * Wrap at the last entry!
   1670        1.2   matt 		 */
   1671        1.2   matt 		if (rxbd_flags & RXBD_W) {
   1672        1.2   matt 			KASSERT(consumer + 1 == rxq->rxq_last);
   1673        1.2   matt 			consumer = rxq->rxq_first;
   1674        1.2   matt 		} else {
   1675        1.2   matt 			consumer++;
   1676        1.2   matt 		}
   1677        1.2   matt #ifdef ETSEC_DEBUG
   1678        1.2   matt 		KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
   1679        1.2   matt #endif
   1680        1.2   matt 	}
   1681        1.2   matt }
   1682        1.2   matt 
   1683        1.2   matt static void
   1684        1.2   matt pq3etsec_rxq_purge(
   1685        1.2   matt 	struct pq3etsec_softc *sc,
   1686        1.2   matt 	struct pq3etsec_rxqueue *rxq,
   1687        1.2   matt 	bool discard)
   1688        1.2   matt {
   1689        1.2   matt 	struct mbuf *m;
   1690        1.2   matt 
   1691        1.2   matt 	if ((m = rxq->rxq_mhead) != NULL) {
   1692        1.2   matt #ifdef ETSEC_DEBUG
   1693        1.2   matt 		memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs));
   1694        1.2   matt #endif
   1695        1.2   matt 
   1696        1.2   matt 		if (discard) {
   1697        1.2   matt 			pq3etsec_rx_map_unload(sc, m);
   1698        1.2   matt 			m_freem(m);
   1699        1.2   matt 		} else {
   1700        1.2   matt 			while (m != NULL) {
   1701        1.2   matt 				struct mbuf *m0 = m->m_next;
   1702        1.2   matt 				m->m_next = NULL;
   1703        1.2   matt 				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
   1704        1.2   matt 				m = m0;
   1705        1.2   matt 			}
   1706        1.2   matt 		}
   1707        1.2   matt 
   1708        1.2   matt 	}
   1709        1.2   matt 
   1710        1.2   matt 	rxq->rxq_mconsumer = NULL;
   1711        1.2   matt 	rxq->rxq_mhead = NULL;
   1712        1.2   matt 	rxq->rxq_mtail = &rxq->rxq_mhead;
   1713        1.2   matt 	rxq->rxq_inuse = 0;
   1714        1.2   matt }
   1715        1.2   matt 
   1716        1.2   matt static void
   1717        1.2   matt pq3etsec_rxq_reset(
   1718        1.2   matt 	struct pq3etsec_softc *sc,
   1719        1.2   matt 	struct pq3etsec_rxqueue *rxq)
   1720        1.2   matt {
   1721        1.2   matt 	/*
   1722        1.2   matt 	 * sync all the descriptors
   1723        1.2   matt 	 */
   1724        1.2   matt 	pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
   1725        1.2   matt 	    rxq->rxq_last - rxq->rxq_first);
   1726        1.2   matt 
   1727        1.2   matt 	/*
   1728        1.2   matt 	 * Make sure we own all descriptors in the ring.
   1729        1.2   matt 	 */
   1730        1.2   matt 	volatile struct rxbd *rxbd;
   1731        1.2   matt 	for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) {
   1732        1.2   matt 		rxbd->rxbd_flags = RXBD_I;
   1733        1.2   matt 	}
   1734        1.2   matt 
   1735        1.2   matt 	/*
   1736        1.2   matt 	 * Last descriptor has the wrap flag.
   1737        1.2   matt 	 */
   1738        1.2   matt 	rxbd->rxbd_flags = RXBD_W|RXBD_I;
   1739        1.2   matt 
   1740        1.2   matt 	/*
   1741        1.2   matt 	 * Reset the producer consumer indexes.
   1742        1.2   matt 	 */
   1743        1.2   matt 	rxq->rxq_consumer = rxq->rxq_first;
   1744        1.2   matt 	rxq->rxq_producer = rxq->rxq_first;
   1745        1.2   matt 	rxq->rxq_inuse = 0;
   1746        1.2   matt 	if (rxq->rxq_threshold < ETSEC_MINRXMBUFS)
   1747        1.2   matt 		rxq->rxq_threshold = ETSEC_MINRXMBUFS;
   1748        1.2   matt 
   1749        1.2   matt 	sc->sc_imask |= IEVENT_RXF|IEVENT_BSY;
   1750        1.2   matt 
   1751        1.2   matt 	/*
   1752        1.2   matt 	 * Restart the transmit at the first descriptor
   1753        1.2   matt 	 */
   1754        1.2   matt 	etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr);
   1755        1.2   matt }
   1756        1.2   matt 
   1757        1.2   matt static int
   1758        1.2   matt pq3etsec_rxq_attach(
   1759        1.2   matt 	struct pq3etsec_softc *sc,
   1760        1.2   matt 	struct pq3etsec_rxqueue *rxq,
   1761        1.2   matt 	u_int qno)
   1762        1.2   matt {
   1763        1.2   matt 	size_t map_size = PAGE_SIZE;
   1764        1.2   matt 	size_t desc_count = map_size / sizeof(struct rxbd);
   1765        1.2   matt 	int error;
   1766        1.2   matt 	void *descs;
   1767        1.2   matt 
   1768        1.2   matt 	error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
   1769        1.2   matt 	   &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
   1770        1.2   matt 	if (error)
   1771        1.2   matt 		return error;
   1772        1.2   matt 
   1773        1.2   matt 	memset(descs, 0, map_size);
   1774        1.2   matt 	rxq->rxq_first = descs;
   1775        1.2   matt 	rxq->rxq_last = rxq->rxq_first + desc_count;
   1776        1.2   matt 	rxq->rxq_consumer = descs;
   1777        1.2   matt 	rxq->rxq_producer = descs;
   1778        1.2   matt 
   1779        1.2   matt 	pq3etsec_rxq_purge(sc, rxq, true);
   1780        1.2   matt 	pq3etsec_rxq_reset(sc, rxq);
   1781        1.2   matt 
   1782        1.2   matt 	rxq->rxq_reg_rbase = RBASEn(qno);
   1783        1.2   matt 	rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno);
   1784        1.2   matt 
   1785        1.2   matt 	return 0;
   1786        1.2   matt }
   1787        1.2   matt 
   1788        1.2   matt static bool
   1789        1.2   matt pq3etsec_txq_active_p(
   1790        1.2   matt 	struct pq3etsec_softc * const sc,
   1791        1.2   matt 	struct pq3etsec_txqueue *txq)
   1792        1.2   matt {
   1793        1.2   matt 	return !IF_IS_EMPTY(&txq->txq_mbufs);
   1794        1.2   matt }
   1795        1.2   matt 
   1796        1.2   matt static bool
   1797        1.2   matt pq3etsec_txq_fillable_p(
   1798        1.2   matt 	struct pq3etsec_softc * const sc,
   1799        1.2   matt 	struct pq3etsec_txqueue *txq)
   1800        1.2   matt {
   1801        1.2   matt 	return txq->txq_free >= txq->txq_threshold;
   1802        1.2   matt }
   1803        1.2   matt 
   1804        1.2   matt static int
   1805        1.2   matt pq3etsec_txq_attach(
   1806        1.2   matt 	struct pq3etsec_softc *sc,
   1807        1.2   matt 	struct pq3etsec_txqueue *txq,
   1808        1.2   matt 	u_int qno)
   1809        1.2   matt {
   1810        1.2   matt 	size_t map_size = PAGE_SIZE;
   1811        1.2   matt 	size_t desc_count = map_size / sizeof(struct txbd);
   1812        1.2   matt 	int error;
   1813        1.2   matt 	void *descs;
   1814        1.2   matt 
   1815        1.2   matt 	error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
   1816        1.2   matt 	   &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
   1817        1.2   matt 	if (error)
   1818        1.2   matt 		return error;
   1819        1.2   matt 
   1820        1.2   matt 	memset(descs, 0, map_size);
   1821        1.2   matt 	txq->txq_first = descs;
   1822        1.2   matt 	txq->txq_last = txq->txq_first + desc_count;
   1823        1.2   matt 	txq->txq_consumer = descs;
   1824        1.2   matt 	txq->txq_producer = descs;
   1825        1.2   matt 
   1826        1.2   matt 	IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS);
   1827        1.2   matt 
   1828        1.2   matt 	txq->txq_reg_tbase = TBASEn(qno);
   1829        1.2   matt 	txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno);
   1830        1.2   matt 
   1831        1.2   matt 	pq3etsec_txq_reset(sc, txq);
   1832        1.2   matt 
   1833        1.2   matt 	return 0;
   1834        1.2   matt }
   1835        1.2   matt 
   1836        1.2   matt static int
   1837        1.2   matt pq3etsec_txq_map_load(
   1838        1.2   matt 	struct pq3etsec_softc *sc,
   1839        1.2   matt 	struct pq3etsec_txqueue *txq,
   1840        1.2   matt 	struct mbuf *m)
   1841        1.2   matt {
   1842        1.2   matt 	bus_dmamap_t map;
   1843        1.2   matt 	int error;
   1844        1.2   matt 
   1845        1.2   matt 	map = M_GETCTX(m, bus_dmamap_t);
   1846        1.2   matt 	if (map != NULL)
   1847        1.2   matt 		return 0;
   1848        1.2   matt 
   1849        1.2   matt 	map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache);
   1850        1.2   matt 	if (map == NULL)
   1851        1.2   matt 		return ENOMEM;
   1852        1.2   matt 
   1853        1.2   matt 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
   1854        1.2   matt 	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   1855        1.2   matt 	if (error)
   1856        1.2   matt 		return error;
   1857        1.2   matt 
   1858        1.2   matt 	bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
   1859        1.2   matt 	    BUS_DMASYNC_PREWRITE);
   1860        1.2   matt 	M_SETCTX(m, map);
   1861        1.2   matt 	return 0;
   1862        1.2   matt }
   1863        1.2   matt 
   1864        1.2   matt static void
   1865        1.2   matt pq3etsec_txq_map_unload(
   1866        1.2   matt 	struct pq3etsec_softc *sc,
   1867        1.2   matt 	struct pq3etsec_txqueue *txq,
   1868        1.2   matt 	struct mbuf *m)
   1869        1.2   matt {
   1870        1.2   matt 	KASSERT(m);
   1871        1.2   matt 	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
   1872        1.2   matt 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
   1873        1.2   matt 	    BUS_DMASYNC_POSTWRITE);
   1874        1.2   matt 	bus_dmamap_unload(sc->sc_dmat, map);
   1875        1.2   matt 	pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map);
   1876        1.2   matt }
   1877        1.2   matt 
   1878        1.2   matt static bool
   1879        1.2   matt pq3etsec_txq_produce(
   1880        1.2   matt 	struct pq3etsec_softc *sc,
   1881        1.2   matt 	struct pq3etsec_txqueue *txq,
   1882        1.2   matt 	struct mbuf *m)
   1883        1.2   matt {
   1884        1.2   matt 	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
   1885        1.2   matt 
   1886        1.2   matt 	if (map->dm_nsegs > txq->txq_free)
   1887        1.2   matt 		return false;
   1888        1.2   matt 
   1889        1.2   matt 	/*
   1890        1.2   matt 	 * TCP Offload flag must be set in the first descriptor.
   1891        1.2   matt 	 */
   1892        1.2   matt 	volatile struct txbd *producer = txq->txq_producer;
   1893        1.2   matt 	uint16_t last_flags = TXBD_L;
   1894        1.2   matt 	uint16_t first_flags = TXBD_R
   1895        1.2   matt 	    | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0);
   1896        1.2   matt 
   1897        1.2   matt 	/*
   1898        1.2   matt 	 * If we've produced enough descriptors without consuming any
   1899        1.2   matt 	 * we need to ask for an interrupt to reclaim some.
   1900        1.2   matt 	 */
   1901        1.2   matt 	txq->txq_lastintr += map->dm_nsegs;
   1902  1.16.16.1  skrll 	if (ETSEC_IC_TX_ENABLED(sc)
   1903  1.16.16.1  skrll 	    || txq->txq_lastintr >= txq->txq_threshold
   1904        1.2   matt 	    || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
   1905        1.2   matt 		txq->txq_lastintr = 0;
   1906        1.2   matt 		last_flags |= TXBD_I;
   1907        1.2   matt 	}
   1908        1.2   matt 
   1909        1.2   matt #ifdef ETSEC_DEBUG
   1910        1.2   matt 	KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
   1911        1.2   matt #endif
   1912        1.2   matt 	KASSERT(producer != txq->txq_last);
   1913        1.2   matt 	producer->txbd_bufptr = map->dm_segs[0].ds_addr;
   1914        1.2   matt 	producer->txbd_len = map->dm_segs[0].ds_len;
   1915        1.2   matt 
   1916        1.2   matt 	if (map->dm_nsegs > 1) {
   1917        1.2   matt 		volatile struct txbd *start = producer + 1;
   1918        1.2   matt 		size_t count = map->dm_nsegs - 1;
   1919        1.2   matt 		for (u_int i = 1; i < map->dm_nsegs; i++) {
   1920        1.2   matt 			if (__predict_false(++producer == txq->txq_last)) {
   1921        1.2   matt 				producer = txq->txq_first;
   1922        1.2   matt 				if (start < txq->txq_last) {
   1923        1.2   matt 					pq3etsec_txq_desc_presync(sc, txq,
   1924        1.2   matt 					    start, txq->txq_last - start);
   1925        1.2   matt 					count -= txq->txq_last - start;
   1926        1.2   matt 				}
   1927        1.2   matt 				start = txq->txq_first;
   1928        1.2   matt 			}
   1929        1.2   matt #ifdef ETSEC_DEBUG
   1930        1.2   matt 			KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
   1931        1.2   matt #endif
   1932        1.2   matt 			producer->txbd_bufptr = map->dm_segs[i].ds_addr;
   1933        1.2   matt 			producer->txbd_len = map->dm_segs[i].ds_len;
   1934        1.2   matt 			producer->txbd_flags = TXBD_R
   1935        1.2   matt 			    | (producer->txbd_flags & TXBD_W)
   1936        1.2   matt 			    | (i == map->dm_nsegs - 1 ? last_flags : 0);
   1937        1.2   matt #if 0
   1938        1.2   matt 			printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first,
   1939        1.2   matt 			    producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr);
   1940        1.2   matt #endif
   1941        1.2   matt 		}
   1942        1.2   matt 		pq3etsec_txq_desc_presync(sc, txq, start, count);
   1943        1.2   matt 	} else {
   1944        1.2   matt 		first_flags |= last_flags;
   1945        1.2   matt 	}
   1946        1.2   matt 
   1947        1.2   matt 	membar_producer();
   1948        1.2   matt 	txq->txq_producer->txbd_flags =
   1949        1.2   matt 	    first_flags | (txq->txq_producer->txbd_flags & TXBD_W);
   1950        1.2   matt #if 0
   1951        1.2   matt 	printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__,
   1952        1.2   matt 	    txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags,
   1953        1.2   matt 	    txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr);
   1954        1.2   matt #endif
   1955        1.2   matt 	pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1);
   1956        1.2   matt 
   1957        1.2   matt 	/*
   1958        1.2   matt 	 * Reduce free count by the number of segments we consumed.
   1959        1.2   matt 	 */
   1960        1.2   matt 	txq->txq_free -= map->dm_nsegs;
   1961        1.2   matt 	KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
   1962        1.2   matt 	KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0);
   1963        1.2   matt 	KASSERT(producer->txbd_flags & TXBD_L);
   1964        1.2   matt #ifdef ETSEC_DEBUG
   1965        1.2   matt 	txq->txq_lmbufs[producer - txq->txq_first] = m;
   1966        1.2   matt #endif
   1967        1.2   matt 
   1968        1.2   matt #if 0
   1969        1.2   matt 	printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n",
   1970        1.2   matt 	    __func__, m, m->m_pkthdr.len, map->dm_nsegs,
   1971        1.2   matt 	    txq->txq_producer - txq->txq_first, producer - txq->txq_first);
   1972        1.2   matt #endif
   1973        1.2   matt 
   1974        1.2   matt 	if (++producer == txq->txq_last)
   1975        1.2   matt 		txq->txq_producer = txq->txq_first;
   1976        1.2   matt 	else
   1977        1.2   matt 		txq->txq_producer = producer;
   1978        1.2   matt 	IF_ENQUEUE(&txq->txq_mbufs, m);
   1979        1.2   matt 
   1980        1.2   matt 	/*
   1981        1.2   matt 	 * Restart the transmitter.
   1982        1.2   matt 	 */
   1983        1.2   matt 	etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT);	/* W1C */
   1984        1.2   matt 
   1985        1.2   matt 	return true;
   1986        1.2   matt }
   1987        1.2   matt 
   1988        1.2   matt static void
   1989        1.2   matt pq3etsec_tx_offload(
   1990        1.2   matt 	struct pq3etsec_softc *sc,
   1991        1.2   matt 	struct pq3etsec_txqueue *txq,
   1992        1.2   matt 	struct mbuf **mp)
   1993        1.2   matt {
   1994        1.2   matt 	struct mbuf *m = *mp;
   1995        1.2   matt 	u_int csum_flags = m->m_pkthdr.csum_flags;
   1996        1.2   matt 	struct m_tag *vtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m);
   1997        1.2   matt 
   1998        1.2   matt 	KASSERT(m->m_flags & M_PKTHDR);
   1999        1.2   matt 
   2000        1.2   matt 	/*
   2001        1.2   matt 	 * Let see if we are doing any offload first.
   2002        1.2   matt 	 */
   2003        1.2   matt 	if (csum_flags == 0 && vtag == 0) {
   2004        1.2   matt 		m->m_flags &= ~M_HASFCB;
   2005        1.2   matt 		return;
   2006        1.2   matt 	}
   2007        1.2   matt 
   2008        1.2   matt 	uint16_t flags = 0;
   2009        1.2   matt 	if (csum_flags & M_CSUM_IP) {
   2010        1.2   matt 		flags |= TXFCB_IP
   2011        1.2   matt 		    | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0)
   2012        1.2   matt 		    | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0)
   2013        1.2   matt 		    | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0)
   2014        1.2   matt 		    | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0)
   2015        1.2   matt 		    | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0);
   2016        1.2   matt 	}
   2017        1.2   matt 	if (vtag) {
   2018        1.2   matt 		flags |= TXFCB_VLN;
   2019        1.2   matt 	}
   2020        1.2   matt 	if (flags == 0) {
   2021        1.2   matt 		m->m_flags &= ~M_HASFCB;
   2022        1.2   matt 		return;
   2023        1.2   matt 	}
   2024        1.2   matt 
   2025        1.2   matt 	struct txfcb fcb;
   2026        1.2   matt 	fcb.txfcb_flags = flags;
   2027        1.2   matt 	if (csum_flags & M_CSUM_IPv4)
   2028        1.2   matt 		fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
   2029        1.2   matt 	else
   2030        1.2   matt 		fcb.txfcb_l4os = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data);
   2031        1.2   matt 	fcb.txfcb_l3os = ETHER_HDR_LEN;
   2032        1.2   matt 	fcb.txfcb_phcs = 0;
   2033        1.2   matt 	fcb.txfcb_vlctl = vtag ? VLAN_TAG_VALUE(vtag) & 0xffff : 0;
   2034        1.2   matt 
   2035        1.2   matt #if 0
   2036        1.2   matt 	printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n",
   2037        1.2   matt 	    __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os,
   2038        1.2   matt 	    fcb.txfcb_phcs, fcb.txfcb_vlctl);
   2039        1.2   matt #endif
   2040        1.2   matt 
   2041        1.2   matt 	if (M_LEADINGSPACE(m) >= sizeof(fcb)) {
   2042        1.2   matt 		m->m_data -= sizeof(fcb);
   2043        1.2   matt 		m->m_len += sizeof(fcb);
   2044        1.2   matt 	} else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) {
   2045        1.2   matt 		memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len);
   2046        1.2   matt 		m->m_data = m->m_pktdat;
   2047        1.2   matt 		m->m_len += sizeof(fcb);
   2048        1.2   matt 	} else {
   2049        1.2   matt 		struct mbuf *mn;
   2050        1.2   matt 		MGET(mn, M_DONTWAIT, m->m_type);
   2051        1.2   matt 		if (mn == NULL) {
   2052        1.2   matt 			if (csum_flags & M_CSUM_IP4) {
   2053        1.2   matt #ifdef INET
   2054        1.2   matt 				ip_undefer_csum(m, ETHER_HDR_LEN,
   2055        1.2   matt 				    csum_flags & M_CSUM_IP4);
   2056        1.2   matt #else
   2057        1.2   matt 				panic("%s: impossible M_CSUM flags %#x",
   2058        1.2   matt 				    device_xname(sc->sc_dev), csum_flags);
   2059        1.2   matt #endif
   2060        1.2   matt 			} else if (csum_flags & M_CSUM_IP6) {
   2061        1.2   matt #ifdef INET6
   2062        1.2   matt 				ip6_undefer_csum(m, ETHER_HDR_LEN,
   2063        1.2   matt 				    csum_flags & M_CSUM_IP6);
   2064        1.2   matt #else
   2065        1.2   matt 				panic("%s: impossible M_CSUM flags %#x",
   2066        1.2   matt 				    device_xname(sc->sc_dev), csum_flags);
   2067        1.2   matt #endif
   2068        1.2   matt 			} else if (vtag) {
   2069        1.2   matt 			}
   2070        1.2   matt 
   2071        1.2   matt 			m->m_flags &= ~M_HASFCB;
   2072        1.2   matt 			return;
   2073        1.2   matt 		}
   2074        1.2   matt 
   2075        1.2   matt 		M_MOVE_PKTHDR(mn, m);
   2076        1.2   matt 		mn->m_next = m;
   2077        1.2   matt 		m = mn;
   2078        1.2   matt 		MH_ALIGN(m, sizeof(fcb));
   2079        1.2   matt 		m->m_len = sizeof(fcb);
   2080        1.2   matt 		*mp = m;
   2081        1.2   matt 	}
   2082        1.2   matt 	m->m_pkthdr.len += sizeof(fcb);
   2083        1.2   matt 	m->m_flags |= M_HASFCB;
   2084        1.2   matt 	*mtod(m, struct txfcb *) = fcb;
   2085        1.2   matt 	return;
   2086        1.2   matt }
   2087        1.2   matt 
   2088        1.2   matt static bool
   2089        1.2   matt pq3etsec_txq_enqueue(
   2090        1.2   matt 	struct pq3etsec_softc *sc,
   2091        1.2   matt 	struct pq3etsec_txqueue *txq)
   2092        1.2   matt {
   2093        1.2   matt 	for (;;) {
   2094        1.2   matt 		if (IF_QFULL(&txq->txq_mbufs))
   2095        1.2   matt 			return false;
   2096        1.2   matt 		struct mbuf *m = txq->txq_next;
   2097        1.2   matt 		if (m == NULL) {
   2098        1.2   matt 			int s = splnet();
   2099  1.16.16.1  skrll 			IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
   2100        1.2   matt 			splx(s);
   2101        1.2   matt 			if (m == NULL)
   2102        1.2   matt 				return true;
   2103        1.2   matt 			M_SETCTX(m, NULL);
   2104        1.2   matt 			pq3etsec_tx_offload(sc, txq, &m);
   2105        1.2   matt 		} else {
   2106        1.2   matt 			txq->txq_next = NULL;
   2107        1.2   matt 		}
   2108        1.2   matt 		int error = pq3etsec_txq_map_load(sc, txq, m);
   2109        1.2   matt 		if (error) {
   2110        1.2   matt 			aprint_error_dev(sc->sc_dev,
   2111        1.2   matt 			    "discarded packet due to "
   2112        1.2   matt 			    "dmamap load failure: %d\n", error);
   2113        1.2   matt 			m_freem(m);
   2114        1.2   matt 			continue;
   2115        1.2   matt 		}
   2116        1.2   matt 		KASSERT(txq->txq_next == NULL);
   2117        1.2   matt 		if (!pq3etsec_txq_produce(sc, txq, m)) {
   2118        1.2   matt 			txq->txq_next = m;
   2119        1.2   matt 			return false;
   2120        1.2   matt 		}
   2121        1.2   matt 		KASSERT(txq->txq_next == NULL);
   2122        1.2   matt 	}
   2123        1.2   matt }
   2124        1.2   matt 
   2125        1.2   matt static bool
   2126        1.2   matt pq3etsec_txq_consume(
   2127        1.2   matt 	struct pq3etsec_softc *sc,
   2128        1.2   matt 	struct pq3etsec_txqueue *txq)
   2129        1.2   matt {
   2130        1.2   matt 	struct ifnet * const ifp = &sc->sc_if;
   2131        1.2   matt 	volatile struct txbd *consumer = txq->txq_consumer;
   2132        1.2   matt 	size_t txfree = 0;
   2133        1.2   matt 
   2134        1.2   matt #if 0
   2135        1.2   matt 	printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
   2136        1.2   matt #endif
   2137        1.2   matt 	etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask);
   2138        1.2   matt 
   2139        1.2   matt 	for (;;) {
   2140        1.2   matt 		if (consumer == txq->txq_producer) {
   2141        1.2   matt 			txq->txq_consumer = consumer;
   2142        1.2   matt 			txq->txq_free += txfree;
   2143        1.2   matt 			txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
   2144        1.2   matt #if 0
   2145        1.2   matt 			printf("%s: empty: freed %zu descriptors going form %zu to %zu\n",
   2146        1.2   matt 			    __func__, txfree, txq->txq_free - txfree, txq->txq_free);
   2147        1.2   matt #endif
   2148        1.2   matt 			KASSERT(txq->txq_lastintr == 0);
   2149        1.2   matt 			KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
   2150        1.2   matt 			return true;
   2151        1.2   matt 		}
   2152        1.2   matt 		pq3etsec_txq_desc_postsync(sc, txq, consumer, 1);
   2153        1.2   matt 		const uint16_t txbd_flags = consumer->txbd_flags;
   2154        1.2   matt 		if (txbd_flags & TXBD_R) {
   2155        1.2   matt 			txq->txq_consumer = consumer;
   2156        1.2   matt 			txq->txq_free += txfree;
   2157        1.2   matt 			txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
   2158        1.2   matt #if 0
   2159        1.2   matt 			printf("%s: freed %zu descriptors\n",
   2160        1.2   matt 			    __func__, txfree);
   2161        1.2   matt #endif
   2162        1.2   matt 			return pq3etsec_txq_fillable_p(sc, txq);
   2163        1.2   matt 		}
   2164        1.2   matt 
   2165        1.2   matt 		/*
   2166        1.2   matt 		 * If this is the last descriptor in the chain, get the
   2167        1.2   matt 		 * mbuf, free its dmamap, and free the mbuf chain itself.
   2168        1.2   matt 		 */
   2169        1.2   matt 		if (txbd_flags & TXBD_L) {
   2170        1.2   matt 			struct mbuf *m;
   2171        1.2   matt 
   2172        1.2   matt 			IF_DEQUEUE(&txq->txq_mbufs, m);
   2173        1.2   matt #ifdef ETSEC_DEBUG
   2174        1.8    jym 			KASSERTMSG(
   2175        1.8    jym 			    m == txq->txq_lmbufs[consumer-txq->txq_first],
   2176        1.8    jym 			    "%s: %p [%u]: flags %#x m (%p) != %p (%p)",
   2177        1.8    jym 			    __func__, consumer, consumer - txq->txq_first,
   2178        1.8    jym 			    txbd_flags, m,
   2179        1.8    jym 			    &txq->txq_lmbufs[consumer-txq->txq_first],
   2180        1.8    jym 			    txq->txq_lmbufs[consumer-txq->txq_first]);
   2181        1.2   matt #endif
   2182        1.2   matt 			KASSERT(m);
   2183        1.2   matt 			pq3etsec_txq_map_unload(sc, txq, m);
   2184        1.2   matt #if 0
   2185        1.2   matt 			printf("%s: mbuf %p: consumed a %u byte packet\n",
   2186        1.2   matt 			    __func__, m, m->m_pkthdr.len);
   2187        1.2   matt #endif
   2188        1.2   matt 			if (m->m_flags & M_HASFCB)
   2189        1.2   matt 				m_adj(m, sizeof(struct txfcb));
   2190  1.16.16.1  skrll 			bpf_mtap(ifp, m);
   2191        1.2   matt 			ifp->if_opackets++;
   2192        1.2   matt 			ifp->if_obytes += m->m_pkthdr.len;
   2193        1.2   matt 			if (m->m_flags & M_MCAST)
   2194        1.2   matt 				ifp->if_omcasts++;
   2195        1.2   matt 			if (txbd_flags & TXBD_ERRORS)
   2196        1.2   matt 				ifp->if_oerrors++;
   2197        1.2   matt 			m_freem(m);
   2198        1.2   matt #ifdef ETSEC_DEBUG
   2199        1.2   matt 			txq->txq_lmbufs[consumer - txq->txq_first] = NULL;
   2200        1.2   matt #endif
   2201        1.2   matt 		} else {
   2202        1.2   matt #ifdef ETSEC_DEBUG
   2203        1.2   matt 			KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL);
   2204        1.2   matt #endif
   2205        1.2   matt 		}
   2206        1.2   matt 
   2207        1.2   matt 		/*
   2208        1.2   matt 		 * We own this packet again.  Clear all flags except wrap.
   2209        1.2   matt 		 */
   2210        1.2   matt 		txfree++;
   2211        1.2   matt 		//consumer->txbd_flags = txbd_flags & TXBD_W;
   2212        1.2   matt 
   2213        1.2   matt 		/*
   2214        1.2   matt 		 * Wrap at the last entry!
   2215        1.2   matt 		 */
   2216        1.2   matt 		if (txbd_flags & TXBD_W) {
   2217        1.2   matt 			KASSERT(consumer + 1 == txq->txq_last);
   2218        1.2   matt 			consumer = txq->txq_first;
   2219        1.2   matt 		} else {
   2220        1.2   matt 			consumer++;
   2221        1.2   matt 			KASSERT(consumer < txq->txq_last);
   2222        1.2   matt 		}
   2223        1.2   matt 	}
   2224        1.2   matt }
   2225        1.2   matt 
   2226        1.2   matt static void
   2227        1.2   matt pq3etsec_txq_purge(
   2228        1.2   matt 	struct pq3etsec_softc *sc,
   2229        1.2   matt 	struct pq3etsec_txqueue *txq)
   2230        1.2   matt {
   2231        1.2   matt 	struct mbuf *m;
   2232        1.2   matt 	KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0);
   2233        1.2   matt 
   2234        1.2   matt 	for (;;) {
   2235        1.2   matt 		IF_DEQUEUE(&txq->txq_mbufs, m);
   2236        1.2   matt 		if (m == NULL)
   2237        1.2   matt 			break;
   2238        1.2   matt 		pq3etsec_txq_map_unload(sc, txq, m);
   2239        1.2   matt 		m_freem(m);
   2240        1.2   matt 	}
   2241        1.2   matt 	if ((m = txq->txq_next) != NULL) {
   2242        1.2   matt 		txq->txq_next = NULL;
   2243        1.2   matt 		pq3etsec_txq_map_unload(sc, txq, m);
   2244        1.2   matt 		m_freem(m);
   2245        1.2   matt 	}
   2246        1.2   matt #ifdef ETSEC_DEBUG
   2247        1.2   matt 	memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs));
   2248        1.2   matt #endif
   2249        1.2   matt }
   2250        1.2   matt 
   2251        1.2   matt static void
   2252        1.2   matt pq3etsec_txq_reset(
   2253        1.2   matt 	struct pq3etsec_softc *sc,
   2254        1.2   matt 	struct pq3etsec_txqueue *txq)
   2255        1.2   matt {
   2256        1.2   matt 	/*
   2257        1.2   matt 	 * sync all the descriptors
   2258        1.2   matt 	 */
   2259        1.2   matt 	pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first,
   2260        1.2   matt 	    txq->txq_last - txq->txq_first);
   2261        1.2   matt 
   2262        1.2   matt 	/*
   2263        1.2   matt 	 * Make sure we own all descriptors in the ring.
   2264        1.2   matt 	 */
   2265        1.2   matt 	volatile struct txbd *txbd;
   2266        1.2   matt 	for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) {
   2267        1.2   matt 		txbd->txbd_flags = 0;
   2268        1.2   matt 	}
   2269        1.2   matt 
   2270        1.2   matt 	/*
   2271        1.2   matt 	 * Last descriptor has the wrap flag.
   2272        1.2   matt 	 */
   2273        1.2   matt 	txbd->txbd_flags = TXBD_W;
   2274        1.2   matt 
   2275        1.2   matt 	/*
   2276        1.2   matt 	 * Reset the producer consumer indexes.
   2277        1.2   matt 	 */
   2278        1.2   matt 	txq->txq_consumer = txq->txq_first;
   2279        1.2   matt 	txq->txq_producer = txq->txq_first;
   2280        1.2   matt 	txq->txq_free = txq->txq_last - txq->txq_first - 1;
   2281        1.2   matt 	txq->txq_threshold = txq->txq_free / 2;
   2282        1.2   matt 	txq->txq_lastintr = 0;
   2283        1.2   matt 
   2284        1.2   matt 	/*
   2285        1.2   matt 	 * What do we want to get interrupted on?
   2286        1.2   matt 	 */
   2287        1.2   matt 	sc->sc_imask |= IEVENT_TXF|IEVENT_TXE;
   2288        1.2   matt 
   2289        1.2   matt 	/*
   2290        1.2   matt 	 * Restart the transmit at the first descriptor
   2291        1.2   matt 	 */
   2292        1.2   matt 	etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr);
   2293        1.2   matt }
   2294        1.2   matt 
   2295        1.2   matt static void
   2296        1.2   matt pq3etsec_ifstart(struct ifnet *ifp)
   2297        1.2   matt {
   2298        1.2   matt 	struct pq3etsec_softc * const sc = ifp->if_softc;
   2299        1.2   matt 
   2300  1.16.16.1  skrll 	if (__predict_false((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) {
   2301  1.16.16.1  skrll 		return;
   2302  1.16.16.1  skrll 	}
   2303  1.16.16.1  skrll 
   2304        1.2   matt 	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
   2305        1.2   matt 	softint_schedule(sc->sc_soft_ih);
   2306        1.2   matt }
   2307        1.2   matt 
   2308        1.2   matt static void
   2309        1.2   matt pq3etsec_tx_error(
   2310        1.2   matt 	struct pq3etsec_softc * const sc)
   2311        1.2   matt {
   2312        1.2   matt 	struct pq3etsec_txqueue * const txq = &sc->sc_txq;
   2313        1.2   matt 
   2314        1.2   matt 	pq3etsec_txq_consume(sc, txq);
   2315        1.2   matt 
   2316        1.2   matt 	if (pq3etsec_txq_fillable_p(sc, txq))
   2317        1.2   matt 		sc->sc_if.if_flags &= ~IFF_OACTIVE;
   2318        1.2   matt 	if (sc->sc_txerrors & (IEVENT_LC|IEVENT_CRL|IEVENT_XFUN|IEVENT_BABT)) {
   2319        1.2   matt 	} else if (sc->sc_txerrors & IEVENT_EBERR) {
   2320        1.2   matt 	}
   2321        1.2   matt 
   2322        1.2   matt 	if (pq3etsec_txq_active_p(sc, txq))
   2323        1.2   matt 		etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask);
   2324        1.2   matt 	if (!pq3etsec_txq_enqueue(sc, txq)) {
   2325        1.2   matt 		sc->sc_ev_tx_stall.ev_count++;
   2326        1.2   matt 		sc->sc_if.if_flags |= IFF_OACTIVE;
   2327        1.2   matt 	}
   2328        1.2   matt 
   2329        1.2   matt 	sc->sc_txerrors = 0;
   2330        1.2   matt }
   2331        1.2   matt 
   2332        1.2   matt int
   2333        1.2   matt pq3etsec_tx_intr(void *arg)
   2334        1.2   matt {
   2335        1.2   matt 	struct pq3etsec_softc * const sc = arg;
   2336        1.2   matt 
   2337  1.16.16.1  skrll 	mutex_enter(sc->sc_hwlock);
   2338  1.16.16.1  skrll 
   2339        1.2   matt 	sc->sc_ev_tx_intr.ev_count++;
   2340        1.2   matt 
   2341        1.2   matt 	uint32_t ievent = etsec_read(sc, IEVENT);
   2342        1.2   matt 	ievent &= IEVENT_TXF|IEVENT_TXB;
   2343        1.2   matt 	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
   2344        1.2   matt 
   2345        1.2   matt #if 0
   2346        1.2   matt 	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
   2347        1.2   matt 	    __func__, ievent, etsec_read(sc, IMASK));
   2348        1.2   matt #endif
   2349        1.2   matt 
   2350  1.16.16.1  skrll 	if (ievent == 0) {
   2351  1.16.16.1  skrll 		mutex_exit(sc->sc_hwlock);
   2352        1.2   matt 		return 0;
   2353  1.16.16.1  skrll 	}
   2354        1.2   matt 
   2355        1.2   matt 	sc->sc_imask &= ~(IEVENT_TXF|IEVENT_TXB);
   2356        1.2   matt 	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
   2357        1.2   matt 	etsec_write(sc, IMASK, sc->sc_imask);
   2358        1.2   matt 	softint_schedule(sc->sc_soft_ih);
   2359  1.16.16.1  skrll 
   2360  1.16.16.1  skrll 	mutex_exit(sc->sc_hwlock);
   2361  1.16.16.1  skrll 
   2362        1.2   matt 	return 1;
   2363        1.2   matt }
   2364        1.2   matt 
   2365        1.2   matt int
   2366        1.2   matt pq3etsec_rx_intr(void *arg)
   2367        1.2   matt {
   2368        1.2   matt 	struct pq3etsec_softc * const sc = arg;
   2369        1.2   matt 
   2370  1.16.16.1  skrll 	mutex_enter(sc->sc_hwlock);
   2371  1.16.16.1  skrll 
   2372        1.2   matt 	sc->sc_ev_rx_intr.ev_count++;
   2373        1.2   matt 
   2374        1.2   matt 	uint32_t ievent = etsec_read(sc, IEVENT);
   2375        1.2   matt 	ievent &= IEVENT_RXF|IEVENT_RXB;
   2376        1.2   matt 	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
   2377  1.16.16.1  skrll 	if (ievent == 0) {
   2378  1.16.16.1  skrll 		mutex_exit(sc->sc_hwlock);
   2379        1.2   matt 		return 0;
   2380  1.16.16.1  skrll 	}
   2381        1.2   matt 
   2382        1.2   matt #if 0
   2383        1.2   matt 	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent);
   2384        1.2   matt #endif
   2385        1.2   matt 
   2386        1.2   matt 	sc->sc_imask &= ~(IEVENT_RXF|IEVENT_RXB);
   2387        1.2   matt 	atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR);
   2388        1.2   matt 	etsec_write(sc, IMASK, sc->sc_imask);
   2389        1.2   matt 	softint_schedule(sc->sc_soft_ih);
   2390  1.16.16.1  skrll 
   2391  1.16.16.1  skrll 	mutex_exit(sc->sc_hwlock);
   2392  1.16.16.1  skrll 
   2393        1.2   matt 	return 1;
   2394        1.2   matt }
   2395        1.2   matt 
   2396        1.2   matt int
   2397        1.2   matt pq3etsec_error_intr(void *arg)
   2398        1.2   matt {
   2399        1.2   matt 	struct pq3etsec_softc * const sc = arg;
   2400        1.2   matt 
   2401  1.16.16.1  skrll 	mutex_enter(sc->sc_hwlock);
   2402  1.16.16.1  skrll 
   2403        1.2   matt 	sc->sc_ev_error_intr.ev_count++;
   2404        1.2   matt 
   2405        1.2   matt 	for (int rv = 0, soft_flags = 0;; rv = 1) {
   2406        1.2   matt 		uint32_t ievent = etsec_read(sc, IEVENT);
   2407        1.2   matt 		ievent &= ~(IEVENT_RXF|IEVENT_RXB|IEVENT_TXF|IEVENT_TXB);
   2408        1.2   matt 		etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
   2409        1.2   matt 		if (ievent == 0) {
   2410        1.2   matt 			if (soft_flags) {
   2411        1.2   matt 				atomic_or_uint(&sc->sc_soft_flags, soft_flags);
   2412        1.2   matt 				softint_schedule(sc->sc_soft_ih);
   2413        1.2   matt 			}
   2414  1.16.16.1  skrll 			mutex_exit(sc->sc_hwlock);
   2415        1.2   matt 			return rv;
   2416        1.2   matt 		}
   2417        1.2   matt #if 0
   2418        1.2   matt 		aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
   2419        1.2   matt 		    __func__, ievent, etsec_read(sc, IMASK));
   2420        1.2   matt #endif
   2421        1.2   matt 
   2422        1.2   matt 		if (ievent & (IEVENT_GRSC|IEVENT_GTSC)) {
   2423        1.2   matt 			sc->sc_imask &= ~(IEVENT_GRSC|IEVENT_GTSC);
   2424        1.2   matt 			etsec_write(sc, IMASK, sc->sc_imask);
   2425        1.2   matt 			wakeup(sc);
   2426        1.2   matt 		}
   2427        1.2   matt 		if (ievent & (IEVENT_MMRD|IEVENT_MMWR)) {
   2428        1.2   matt 			sc->sc_imask &= ~(IEVENT_MMRD|IEVENT_MMWR);
   2429        1.2   matt 			etsec_write(sc, IMASK, sc->sc_imask);
   2430        1.2   matt 			wakeup(&sc->sc_mii);
   2431        1.2   matt 		}
   2432        1.2   matt 		if (ievent & IEVENT_BSY) {
   2433        1.2   matt 			soft_flags |= SOFT_RXBSY;
   2434        1.2   matt 			sc->sc_imask &= ~IEVENT_BSY;
   2435        1.2   matt 			etsec_write(sc, IMASK, sc->sc_imask);
   2436        1.2   matt 		}
   2437        1.2   matt 		if (ievent & IEVENT_TXE) {
   2438        1.2   matt 			soft_flags |= SOFT_TXERROR;
   2439        1.2   matt 			sc->sc_imask &= ~IEVENT_TXE;
   2440        1.2   matt 			sc->sc_txerrors |= ievent;
   2441        1.2   matt 		}
   2442        1.2   matt 		if (ievent & IEVENT_TXC) {
   2443        1.2   matt 			sc->sc_ev_tx_pause.ev_count++;
   2444        1.2   matt 		}
   2445        1.2   matt 		if (ievent & IEVENT_RXC) {
   2446        1.2   matt 			sc->sc_ev_rx_pause.ev_count++;
   2447        1.2   matt 		}
   2448        1.2   matt 		if (ievent & IEVENT_DPE) {
   2449        1.2   matt 			soft_flags |= SOFT_RESET;
   2450        1.2   matt 			sc->sc_imask &= ~IEVENT_DPE;
   2451        1.2   matt 			etsec_write(sc, IMASK, sc->sc_imask);
   2452        1.2   matt 		}
   2453        1.2   matt 	}
   2454        1.2   matt }
   2455        1.2   matt 
   2456        1.2   matt void
   2457        1.2   matt pq3etsec_soft_intr(void *arg)
   2458        1.2   matt {
   2459        1.2   matt 	struct pq3etsec_softc * const sc = arg;
   2460        1.2   matt 	struct ifnet * const ifp = &sc->sc_if;
   2461  1.16.16.1  skrll 	uint32_t imask = 0;
   2462        1.2   matt 
   2463        1.2   matt 	mutex_enter(sc->sc_lock);
   2464        1.2   matt 
   2465        1.2   matt 	u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
   2466        1.2   matt 
   2467        1.2   matt 	sc->sc_ev_soft_intr.ev_count++;
   2468        1.2   matt 
   2469        1.2   matt 	if (soft_flags & SOFT_RESET) {
   2470        1.2   matt 		int s = splnet();
   2471        1.2   matt 		pq3etsec_ifinit(ifp);
   2472        1.2   matt 		splx(s);
   2473        1.2   matt 		soft_flags = 0;
   2474        1.2   matt 	}
   2475        1.2   matt 
   2476        1.2   matt 	if (soft_flags & SOFT_RXBSY) {
   2477        1.2   matt 		struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq;
   2478        1.2   matt 		size_t threshold = 5 * rxq->rxq_threshold / 4;
   2479        1.2   matt 		if (threshold >= rxq->rxq_last - rxq->rxq_first) {
   2480        1.2   matt 			threshold = rxq->rxq_last - rxq->rxq_first - 1;
   2481        1.2   matt 		} else {
   2482  1.16.16.1  skrll 			imask |= IEVENT_BSY;
   2483        1.2   matt 		}
   2484        1.2   matt 		aprint_normal_dev(sc->sc_dev,
   2485        1.2   matt 		    "increasing receive buffers from %zu to %zu\n",
   2486        1.2   matt 		    rxq->rxq_threshold, threshold);
   2487        1.2   matt 		rxq->rxq_threshold = threshold;
   2488        1.2   matt 	}
   2489        1.2   matt 
   2490        1.2   matt 	if ((soft_flags & SOFT_TXINTR)
   2491        1.2   matt 	    || pq3etsec_txq_active_p(sc, &sc->sc_txq)) {
   2492        1.2   matt 		/*
   2493        1.2   matt 		 * Let's do what we came here for.  Consume transmitted
   2494        1.2   matt 		 * packets off the the transmit ring.
   2495        1.2   matt 		 */
   2496        1.2   matt 		if (!pq3etsec_txq_consume(sc, &sc->sc_txq)
   2497        1.2   matt 		    || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) {
   2498        1.2   matt 			sc->sc_ev_tx_stall.ev_count++;
   2499        1.2   matt 			ifp->if_flags |= IFF_OACTIVE;
   2500        1.2   matt 		} else {
   2501        1.2   matt 			ifp->if_flags &= ~IFF_OACTIVE;
   2502        1.2   matt 		}
   2503  1.16.16.1  skrll 		imask |= IEVENT_TXF;
   2504        1.2   matt 	}
   2505        1.2   matt 
   2506        1.2   matt 	if (soft_flags & (SOFT_RXINTR|SOFT_RXBSY)) {
   2507        1.2   matt 		/*
   2508        1.2   matt 		 * Let's consume
   2509        1.2   matt 		 */
   2510        1.2   matt 		pq3etsec_rxq_consume(sc, &sc->sc_rxq);
   2511  1.16.16.1  skrll 		imask |= IEVENT_RXF;
   2512        1.2   matt 	}
   2513        1.2   matt 
   2514        1.2   matt 	if (soft_flags & SOFT_TXERROR) {
   2515        1.2   matt 		pq3etsec_tx_error(sc);
   2516  1.16.16.1  skrll 		imask |= IEVENT_TXE;
   2517        1.2   matt 	}
   2518        1.2   matt 
   2519        1.2   matt 	if (ifp->if_flags & IFF_RUNNING) {
   2520        1.2   matt 		pq3etsec_rxq_produce(sc, &sc->sc_rxq);
   2521  1.16.16.1  skrll 		mutex_spin_enter(sc->sc_hwlock);
   2522  1.16.16.1  skrll 		sc->sc_imask |= imask;
   2523        1.2   matt 		etsec_write(sc, IMASK, sc->sc_imask);
   2524  1.16.16.1  skrll 		mutex_spin_exit(sc->sc_hwlock);
   2525        1.2   matt 	} else {
   2526        1.2   matt 		KASSERT((soft_flags & SOFT_RXBSY) == 0);
   2527        1.2   matt 	}
   2528        1.2   matt 
   2529        1.2   matt 	mutex_exit(sc->sc_lock);
   2530        1.2   matt }
   2531        1.2   matt 
   2532        1.2   matt static void
   2533        1.2   matt pq3etsec_mii_tick(void *arg)
   2534        1.2   matt {
   2535        1.2   matt 	struct pq3etsec_softc * const sc = arg;
   2536        1.2   matt 	mutex_enter(sc->sc_lock);
   2537        1.2   matt 	callout_ack(&sc->sc_mii_callout);
   2538        1.2   matt 	sc->sc_ev_mii_ticks.ev_count++;
   2539        1.2   matt #ifdef DEBUG
   2540        1.2   matt 	uint64_t now = mftb();
   2541        1.2   matt 	if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) {
   2542        1.2   matt 		aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n",
   2543        1.2   matt 		    __func__, now - sc->sc_mii_last_tick);
   2544        1.2   matt 		callout_stop(&sc->sc_mii_callout);
   2545        1.2   matt 	}
   2546        1.2   matt #endif
   2547        1.2   matt 	mii_tick(&sc->sc_mii);
   2548        1.2   matt 	int s = splnet();
   2549        1.2   matt 	if (sc->sc_soft_flags & SOFT_RESET)
   2550        1.2   matt 		softint_schedule(sc->sc_soft_ih);
   2551        1.2   matt 	splx(s);
   2552        1.2   matt 	callout_schedule(&sc->sc_mii_callout, hz);
   2553        1.6   matt #ifdef DEBUG
   2554        1.2   matt 	sc->sc_mii_last_tick = now;
   2555        1.6   matt #endif
   2556        1.2   matt 	mutex_exit(sc->sc_lock);
   2557        1.2   matt }
   2558  1.16.16.1  skrll 
   2559  1.16.16.1  skrll static void
   2560  1.16.16.1  skrll pq3etsec_set_ic_rx(struct pq3etsec_softc *sc)
   2561  1.16.16.1  skrll {
   2562  1.16.16.1  skrll 	uint32_t reg;
   2563  1.16.16.1  skrll 
   2564  1.16.16.1  skrll 	if (ETSEC_IC_RX_ENABLED(sc)) {
   2565  1.16.16.1  skrll 		reg = RXIC_ICEN;
   2566  1.16.16.1  skrll 		reg |= RXIC_ICFT_SET(sc->sc_ic_rx_count);
   2567  1.16.16.1  skrll 		reg |= RXIC_ICTT_SET(sc->sc_ic_rx_time);
   2568  1.16.16.1  skrll 	} else {
   2569  1.16.16.1  skrll 		/* Disable RX interrupt coalescing */
   2570  1.16.16.1  skrll 		reg = 0;
   2571  1.16.16.1  skrll 	}
   2572  1.16.16.1  skrll 
   2573  1.16.16.1  skrll 	etsec_write(sc, RXIC, reg);
   2574  1.16.16.1  skrll }
   2575  1.16.16.1  skrll 
   2576  1.16.16.1  skrll static void
   2577  1.16.16.1  skrll pq3etsec_set_ic_tx(struct pq3etsec_softc *sc)
   2578  1.16.16.1  skrll {
   2579  1.16.16.1  skrll 	uint32_t reg;
   2580  1.16.16.1  skrll 
   2581  1.16.16.1  skrll 	if (ETSEC_IC_TX_ENABLED(sc)) {
   2582  1.16.16.1  skrll 		reg = TXIC_ICEN;
   2583  1.16.16.1  skrll 		reg |= TXIC_ICFT_SET(sc->sc_ic_tx_count);
   2584  1.16.16.1  skrll 		reg |= TXIC_ICTT_SET(sc->sc_ic_tx_time);
   2585  1.16.16.1  skrll 	} else {
   2586  1.16.16.1  skrll 		/* Disable TX interrupt coalescing */
   2587  1.16.16.1  skrll 		reg = 0;
   2588  1.16.16.1  skrll 	}
   2589  1.16.16.1  skrll 
   2590  1.16.16.1  skrll 	etsec_write(sc, TXIC, reg);
   2591  1.16.16.1  skrll }
   2592  1.16.16.1  skrll 
   2593  1.16.16.1  skrll /*
   2594  1.16.16.1  skrll  * sysctl
   2595  1.16.16.1  skrll  */
   2596  1.16.16.1  skrll static int
   2597  1.16.16.1  skrll pq3etsec_sysctl_ic_time_helper(SYSCTLFN_ARGS, int *valuep)
   2598  1.16.16.1  skrll {
   2599  1.16.16.1  skrll 	struct sysctlnode node = *rnode;
   2600  1.16.16.1  skrll 	struct pq3etsec_softc *sc = rnode->sysctl_data;
   2601  1.16.16.1  skrll 	int value = *valuep;
   2602  1.16.16.1  skrll 	int error;
   2603  1.16.16.1  skrll 
   2604  1.16.16.1  skrll 	node.sysctl_data = &value;
   2605  1.16.16.1  skrll 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2606  1.16.16.1  skrll 	if (error != 0 || newp == NULL)
   2607  1.16.16.1  skrll 		return error;
   2608  1.16.16.1  skrll 
   2609  1.16.16.1  skrll 	if (value < 0 || value > 65535)
   2610  1.16.16.1  skrll 		return EINVAL;
   2611  1.16.16.1  skrll 
   2612  1.16.16.1  skrll 	mutex_enter(sc->sc_lock);
   2613  1.16.16.1  skrll 	*valuep = value;
   2614  1.16.16.1  skrll 	if (valuep == &sc->sc_ic_rx_time)
   2615  1.16.16.1  skrll 		pq3etsec_set_ic_rx(sc);
   2616  1.16.16.1  skrll 	else
   2617  1.16.16.1  skrll 		pq3etsec_set_ic_tx(sc);
   2618  1.16.16.1  skrll 	mutex_exit(sc->sc_lock);
   2619  1.16.16.1  skrll 
   2620  1.16.16.1  skrll 	return 0;
   2621  1.16.16.1  skrll }
   2622  1.16.16.1  skrll 
   2623  1.16.16.1  skrll static int
   2624  1.16.16.1  skrll pq3etsec_sysctl_ic_count_helper(SYSCTLFN_ARGS, int *valuep)
   2625  1.16.16.1  skrll {
   2626  1.16.16.1  skrll 	struct sysctlnode node = *rnode;
   2627  1.16.16.1  skrll 	struct pq3etsec_softc *sc = rnode->sysctl_data;
   2628  1.16.16.1  skrll 	int value = *valuep;
   2629  1.16.16.1  skrll 	int error;
   2630  1.16.16.1  skrll 
   2631  1.16.16.1  skrll 	node.sysctl_data = &value;
   2632  1.16.16.1  skrll 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2633  1.16.16.1  skrll 	if (error != 0 || newp == NULL)
   2634  1.16.16.1  skrll 		return error;
   2635  1.16.16.1  skrll 
   2636  1.16.16.1  skrll 	if (value < 0 || value > 255)
   2637  1.16.16.1  skrll 		return EINVAL;
   2638  1.16.16.1  skrll 
   2639  1.16.16.1  skrll 	mutex_enter(sc->sc_lock);
   2640  1.16.16.1  skrll 	*valuep = value;
   2641  1.16.16.1  skrll 	if (valuep == &sc->sc_ic_rx_count)
   2642  1.16.16.1  skrll 		pq3etsec_set_ic_rx(sc);
   2643  1.16.16.1  skrll 	else
   2644  1.16.16.1  skrll 		pq3etsec_set_ic_tx(sc);
   2645  1.16.16.1  skrll 	mutex_exit(sc->sc_lock);
   2646  1.16.16.1  skrll 
   2647  1.16.16.1  skrll 	return 0;
   2648  1.16.16.1  skrll }
   2649  1.16.16.1  skrll 
   2650  1.16.16.1  skrll static int
   2651  1.16.16.1  skrll pq3etsec_sysctl_ic_rx_time_helper(SYSCTLFN_ARGS)
   2652  1.16.16.1  skrll {
   2653  1.16.16.1  skrll 	struct pq3etsec_softc *sc = rnode->sysctl_data;
   2654  1.16.16.1  skrll 
   2655  1.16.16.1  skrll 	return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode),
   2656  1.16.16.1  skrll 	    &sc->sc_ic_rx_time);
   2657  1.16.16.1  skrll }
   2658  1.16.16.1  skrll 
   2659  1.16.16.1  skrll static int
   2660  1.16.16.1  skrll pq3etsec_sysctl_ic_rx_count_helper(SYSCTLFN_ARGS)
   2661  1.16.16.1  skrll {
   2662  1.16.16.1  skrll 	struct pq3etsec_softc *sc = rnode->sysctl_data;
   2663  1.16.16.1  skrll 
   2664  1.16.16.1  skrll 	return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode),
   2665  1.16.16.1  skrll 	    &sc->sc_ic_rx_count);
   2666  1.16.16.1  skrll }
   2667  1.16.16.1  skrll 
   2668  1.16.16.1  skrll static int
   2669  1.16.16.1  skrll pq3etsec_sysctl_ic_tx_time_helper(SYSCTLFN_ARGS)
   2670  1.16.16.1  skrll {
   2671  1.16.16.1  skrll 	struct pq3etsec_softc *sc = rnode->sysctl_data;
   2672  1.16.16.1  skrll 
   2673  1.16.16.1  skrll 	return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode),
   2674  1.16.16.1  skrll 	    &sc->sc_ic_tx_time);
   2675  1.16.16.1  skrll }
   2676  1.16.16.1  skrll 
   2677  1.16.16.1  skrll static int
   2678  1.16.16.1  skrll pq3etsec_sysctl_ic_tx_count_helper(SYSCTLFN_ARGS)
   2679  1.16.16.1  skrll {
   2680  1.16.16.1  skrll 	struct pq3etsec_softc *sc = rnode->sysctl_data;
   2681  1.16.16.1  skrll 
   2682  1.16.16.1  skrll 	return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode),
   2683  1.16.16.1  skrll 	    &sc->sc_ic_tx_count);
   2684  1.16.16.1  skrll }
   2685  1.16.16.1  skrll 
   2686  1.16.16.1  skrll static void pq3etsec_sysctl_setup(struct sysctllog **clog,
   2687  1.16.16.1  skrll     struct pq3etsec_softc *sc)
   2688  1.16.16.1  skrll {
   2689  1.16.16.1  skrll 	const struct sysctlnode *cnode, *rnode;
   2690  1.16.16.1  skrll 
   2691  1.16.16.1  skrll 	if (sysctl_createv(clog, 0, NULL, &rnode,
   2692  1.16.16.1  skrll 	    CTLFLAG_PERMANENT,
   2693  1.16.16.1  skrll 	    CTLTYPE_NODE, device_xname(sc->sc_dev),
   2694  1.16.16.1  skrll 	    SYSCTL_DESCR("TSEC interface"),
   2695  1.16.16.1  skrll 	    NULL, 0, NULL, 0,
   2696  1.16.16.1  skrll 	    CTL_HW, CTL_CREATE, CTL_EOL) != 0)
   2697  1.16.16.1  skrll 		goto bad;
   2698  1.16.16.1  skrll 
   2699  1.16.16.1  skrll 	if (sysctl_createv(clog, 0, &rnode, &rnode,
   2700  1.16.16.1  skrll 	    CTLFLAG_PERMANENT,
   2701  1.16.16.1  skrll 	    CTLTYPE_NODE, "int_coal",
   2702  1.16.16.1  skrll 	    SYSCTL_DESCR("Interrupts coalescing"),
   2703  1.16.16.1  skrll 	    NULL, 0, NULL, 0,
   2704  1.16.16.1  skrll 	    CTL_CREATE, CTL_EOL) != 0)
   2705  1.16.16.1  skrll 		goto bad;
   2706  1.16.16.1  skrll 
   2707  1.16.16.1  skrll 	if (sysctl_createv(clog, 0, &rnode, &cnode,
   2708  1.16.16.1  skrll 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   2709  1.16.16.1  skrll 	    CTLTYPE_INT, "rx_time",
   2710  1.16.16.1  skrll 	    SYSCTL_DESCR("RX time threshold (0-65535)"),
   2711  1.16.16.1  skrll 	    pq3etsec_sysctl_ic_rx_time_helper, 0, (void *)sc, 0,
   2712  1.16.16.1  skrll 	    CTL_CREATE, CTL_EOL) != 0)
   2713  1.16.16.1  skrll 		goto bad;
   2714  1.16.16.1  skrll 
   2715  1.16.16.1  skrll 	if (sysctl_createv(clog, 0, &rnode, &cnode,
   2716  1.16.16.1  skrll 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   2717  1.16.16.1  skrll 	    CTLTYPE_INT, "rx_count",
   2718  1.16.16.1  skrll 	    SYSCTL_DESCR("RX frame count threshold (0-255)"),
   2719  1.16.16.1  skrll 	    pq3etsec_sysctl_ic_rx_count_helper, 0, (void *)sc, 0,
   2720  1.16.16.1  skrll 	    CTL_CREATE, CTL_EOL) != 0)
   2721  1.16.16.1  skrll 		goto bad;
   2722  1.16.16.1  skrll 
   2723  1.16.16.1  skrll 	if (sysctl_createv(clog, 0, &rnode, &cnode,
   2724  1.16.16.1  skrll 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   2725  1.16.16.1  skrll 	    CTLTYPE_INT, "tx_time",
   2726  1.16.16.1  skrll 	    SYSCTL_DESCR("TX time threshold (0-65535)"),
   2727  1.16.16.1  skrll 	    pq3etsec_sysctl_ic_tx_time_helper, 0, (void *)sc, 0,
   2728  1.16.16.1  skrll 	    CTL_CREATE, CTL_EOL) != 0)
   2729  1.16.16.1  skrll 		goto bad;
   2730  1.16.16.1  skrll 
   2731  1.16.16.1  skrll 	if (sysctl_createv(clog, 0, &rnode, &cnode,
   2732  1.16.16.1  skrll 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   2733  1.16.16.1  skrll 	    CTLTYPE_INT, "tx_count",
   2734  1.16.16.1  skrll 	    SYSCTL_DESCR("TX frame count threshold (0-255)"),
   2735  1.16.16.1  skrll 	    pq3etsec_sysctl_ic_tx_count_helper, 0, (void *)sc, 0,
   2736  1.16.16.1  skrll 	    CTL_CREATE, CTL_EOL) != 0)
   2737  1.16.16.1  skrll 		goto bad;
   2738  1.16.16.1  skrll 
   2739  1.16.16.1  skrll 	return;
   2740  1.16.16.1  skrll 
   2741  1.16.16.1  skrll  bad:
   2742  1.16.16.1  skrll 	aprint_error_dev(sc->sc_dev, "could not attach sysctl nodes\n");
   2743  1.16.16.1  skrll }
   2744