/* $NetBSD: ix_txrx.c,v 1.117 2024/06/29 12:11:12 riastradh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ix_txrx.c,v 1.117 2024/06/29 12:11:12 riastradh Exp $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include "ixgbe.h"

#ifdef RSC
/*
 * HW RSC control:
 *  This feature only works with IPv4, and only on 82599 and later.
 *  It also causes IP forwarding to fail, and unlike LRO that cannot
 *  be controlled by the stack. For all these reasons it is best left
 *  off, with no tunable interface; enabling it requires recompiling
 *  with this set to TRUE.
 */
static bool ixgbe_rsc_enable = FALSE;
#endif

#ifdef IXGBE_FDIR
/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; with the default of 20, every 20th packet
 * is probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;
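/*
 * The counter that implements this lives in ixgbe_xmit(): atr_count
 * is incremented per transmitted packet and, once it reaches
 * atr_sample_rate, ixgbe_atr() samples the frame and the counter
 * is reset.
 */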
#endif

#define IXGBE_M_ADJ(sc, rxr, mp)					\
	if (sc->max_frame_size <= (rxr->mbuf_sz - ETHER_ALIGN))	\
		m_adj(mp, ETHER_ALIGN)
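
/*
 * A note on IXGBE_M_ADJ: ETHER_ALIGN is 2 bytes, so when even the
 * largest possible frame still fits in the receive cluster after a
 * two-byte shift, the payload is moved forward with m_adj().  With
 * the 14-byte ethernet header this places the IP header on a 4-byte
 * boundary.
 */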

/************************************************************************
 *  Local Function prototypes
 ************************************************************************/
static void          ixgbe_setup_transmit_ring(struct tx_ring *);
static void          ixgbe_free_transmit_buffers(struct tx_ring *);
static int           ixgbe_setup_receive_ring(struct rx_ring *);
static void          ixgbe_free_receive_buffers(struct rx_ring *);
static void          ixgbe_rx_checksum(u32, struct mbuf *, u32,
                                       struct ixgbe_hw_stats *);
static void          ixgbe_refresh_mbufs(struct rx_ring *, int);
static void          ixgbe_drain(struct ifnet *, struct tx_ring *);
static int           ixgbe_xmit(struct tx_ring *, struct mbuf *);
static int           ixgbe_tx_ctx_setup(struct tx_ring *,
                                        struct mbuf *, u32 *, u32 *);
static int           ixgbe_tso_setup(struct tx_ring *,
                                     struct mbuf *, u32 *, u32 *);
static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
                                    struct mbuf *, u32);
static int           ixgbe_dma_malloc(struct ixgbe_softc *, bus_size_t,
                                      struct ixgbe_dma_alloc *, int);
static void          ixgbe_dma_free(struct ixgbe_softc *, struct ixgbe_dma_alloc *);
#ifdef RSC
static void	     ixgbe_setup_hw_rsc(struct rx_ring *);
#endif

/************************************************************************
 * ixgbe_legacy_start_locked - Transmit entry point
 *
 *   Called by the stack to initiate a transmit.
 *   The driver will remain in this routine as long as there are
 *   packets to transmit and transmit resources are available.
 *   In case resources are not available, the stack is notified
 *   and the packet is requeued.
 ************************************************************************/
int
ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	int rc;
	struct mbuf    *m_head;
	struct ixgbe_softc *sc = txr->sc;

	IXGBE_TX_LOCK_ASSERT(txr);

	if (sc->link_active != LINK_STATE_UP) {
		/*
		 * Discard all packets buffered in the IFQ to avoid
		 * sending stale packets when the link next comes up.
		 */
		ixgbe_drain(ifp, txr);
		return (ENETDOWN);
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (ENETDOWN);
	if (txr->txr_no_space)
		return (ENETDOWN);

	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
			break;

		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (rc != 0) {
			m_freem(m_head);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, m_head, BPF_D_OUT);
	}

	return IXGBE_SUCCESS;
} /* ixgbe_legacy_start_locked */

/************************************************************************
 * ixgbe_legacy_start
 *
 *   Called by the stack, this always uses the first tx ring,
 *   and should not be used with multiqueue tx enabled.
 ************************************************************************/
void
ixgbe_legacy_start(struct ifnet *ifp)
{
	struct ixgbe_softc *sc = ifp->if_softc;
	struct tx_ring *txr = sc->tx_rings;

	if (ifp->if_flags & IFF_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}
} /* ixgbe_legacy_start */

/************************************************************************
 * ixgbe_mq_start - Multiqueue Transmit Entry Point
 *
 *   (if_transmit function)
 ************************************************************************/
int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct ixgbe_softc *sc = ifp->if_softc;
	struct tx_ring	*txr;
	int		i;
#ifdef RSS
	uint32_t bucket_id;
#endif

	/*
	 * When doing RSS, map it to the same outbound queue
	 * as the incoming flow would be mapped to.
	 *
	 * If everything is set up correctly, it should be the
	 * same bucket as the one for the CPU we are currently on.
	 */
#ifdef RSS
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		if ((sc->feat_en & IXGBE_FEATURE_RSS) &&
		    (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
		    &bucket_id) == 0)) {
			i = bucket_id % sc->num_queues;
#ifdef IXGBE_DEBUG
			if (bucket_id > sc->num_queues)
				if_printf(ifp,
				    "bucket_id (%d) > num_queues (%d)\n",
				    bucket_id, sc->num_queues);
#endif
		} else
			i = m->m_pkthdr.flowid % sc->num_queues;
	} else
#endif /* RSS */
		i = (cpu_index(curcpu()) % ncpu) % sc->num_queues;

	/* Check for a hung queue and pick alternative */
	if (((1ULL << i) & sc->active_queues) == 0)
		i = ffs64(sc->active_queues);
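	/*
	 * Worked example (hypothetical numbers): with num_queues = 4 on
	 * an 8-CPU machine, a thread on CPU 6 selects i = (6 % 8) % 4 = 2.
	 * If bit 2 of active_queues is clear, the ffs64() fallback above
	 * redirects the packet to an active queue instead.
	 */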

	txr = &sc->tx_rings[i];

	if (__predict_false(!pcq_put(txr->txr_interq, m))) {
		m_freem(m);
		IXGBE_EVC_ADD(&txr->pcq_drops, 1);
		return ENOBUFS;
	}
#ifdef IXGBE_ALWAYS_TXDEFER
	kpreempt_disable();
	softint_schedule(txr->txr_si);
	kpreempt_enable();
#else
	if (IXGBE_TX_TRYLOCK(txr)) {
		ixgbe_mq_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	} else {
		if (sc->txrx_use_workqueue) {
			u_int *enqueued;

			/*
			 * This function itself is not called in interrupt
			 * context, but it can be called in fast softint
			 * context right after receiving forwarding packets.
			 * We therefore have to protect the workqueue from
			 * being enqueued twice when the machine handles both
			 * locally generated and forwarded packets.  The flag
			 * is cleared in ixgbe_deferred_mq_start_work().
			 */
			enqueued = percpu_getref(sc->txr_wq_enqueued);
			if (*enqueued == 0) {
				*enqueued = 1;
				percpu_putref(sc->txr_wq_enqueued);
				workqueue_enqueue(sc->txr_wq,
				    &txr->wq_cookie, curcpu());
			} else
				percpu_putref(sc->txr_wq_enqueued);
		} else {
			kpreempt_disable();
			softint_schedule(txr->txr_si);
			kpreempt_enable();
		}
	}
#endif

	return (0);
} /* ixgbe_mq_start */
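
/*
 * For reference, a minimal sketch (illustrative only; the real
 * hook-up is done by the attach code in ixgbe.c and may differ in
 * detail) of how the two transmit entry points above are wired:
 */
#if 0
	/* Hypothetical attach-time wiring, for illustration: */
	struct ifnet *ifp = sc->ifp;

	ifp->if_transmit = ixgbe_mq_start;	/* multiqueue entry point */
	/* ... or, for a single-queue legacy configuration: */
	ifp->if_start = ixgbe_legacy_start;
#endif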

/************************************************************************
 * ixgbe_mq_start_locked
 ************************************************************************/
int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf    *next;
	int            enqueued = 0, err = 0;

	if (txr->sc->link_active != LINK_STATE_UP) {
		/*
		 * Discard all packets buffered in txr_interq to avoid
		 * sending stale packets when the link next comes up.
		 */
		ixgbe_drain(ifp, txr);
		return (ENETDOWN);
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (ENETDOWN);
	if (txr->txr_no_space)
		return (ENETDOWN);

	/* Process the queue */
	while ((next = pcq_get(txr->txr_interq)) != NULL) {
		if ((err = ixgbe_xmit(txr, next)) != 0) {
			m_freem(next);
			/* All errors are counted in ixgbe_xmit() */
			break;
		}
		enqueued++;
#if __FreeBSD_version >= 1100036
		/*
		 * Since we're looking at the tx ring, we can check
		 * to see if we're a VF by examining our tail register
		 * address.
		 */
		if ((txr->sc->feat_en & IXGBE_FEATURE_VF) &&
		    (next->m_flags & M_MCAST))
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
#endif
		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, next, BPF_D_OUT);
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->sc))
		ixgbe_txeof(txr);

	return (err);
} /* ixgbe_mq_start_locked */

/************************************************************************
 * ixgbe_deferred_mq_start
 *
 *   Called from a softint and workqueue (indirectly) to drain queued
 *   transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start(void *arg)
{
	struct tx_ring *txr = arg;
	struct ixgbe_softc *sc = txr->sc;
	struct ifnet   *ifp = sc->ifp;

	IXGBE_TX_LOCK(txr);
	if (pcq_peek(txr->txr_interq) != NULL)
		ixgbe_mq_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_deferred_mq_start */

/************************************************************************
 * ixgbe_deferred_mq_start_work
 *
 *   Called from a workqueue to drain queued transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start_work(struct work *wk, void *arg)
{
	struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);
	struct ixgbe_softc *sc = txr->sc;
	u_int *enqueued = percpu_getref(sc->txr_wq_enqueued);
	*enqueued = 0;
	percpu_putref(sc->txr_wq_enqueued);

	ixgbe_deferred_mq_start(txr);
} /* ixgbe_deferred_mq_start_work */

/************************************************************************
 * ixgbe_drain_all
 ************************************************************************/
void
ixgbe_drain_all(struct ixgbe_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct ix_queue *que = sc->queues;

	for (int i = 0; i < sc->num_queues; i++, que++) {
		struct tx_ring  *txr = que->txr;

		IXGBE_TX_LOCK(txr);
		ixgbe_drain(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}
}

/************************************************************************
 * ixgbe_xmit
 *
 *   Maps the mbufs to tx descriptors, allowing the
 *   TX engine to transmit the packets.
 *
 *   Return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
{
	struct ixgbe_softc      *sc = txr->sc;
	struct ixgbe_tx_buf     *txbuf;
	union ixgbe_adv_tx_desc *txd = NULL;
	struct ifnet	        *ifp = sc->ifp;
	int                     i, j, error;
	int                     first;
	u32                     olinfo_status = 0, cmd_type_len;
	bool                    remap = TRUE;
	bus_dmamap_t            map;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (vlan_has_tag(m_head))
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * It is important to capture the first descriptor used,
	 * because it will contain the index of the descriptor we
	 * tell the hardware to report back on completion.
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
retry:
	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head,
	    BUS_DMA_NOWAIT);

	if (__predict_false(error)) {
		struct mbuf *m;

		switch (error) {
		case EAGAIN:
			txr->q_eagain_tx_dma_setup++;
			return EAGAIN;
		case ENOMEM:
			txr->q_enomem_tx_dma_setup++;
			return EAGAIN;
		case EFBIG:
			/* Try it again? - one try */
			if (remap == TRUE) {
				remap = FALSE;
				/*
				 * XXX: m_defrag will choke on
				 * non-MCLBYTES-sized clusters
				 */
				txr->q_efbig_tx_dma_setup++;
				m = m_defrag(m_head, M_NOWAIT);
				if (m == NULL) {
					txr->q_mbuf_defrag_failed++;
					return ENOBUFS;
				}
				m_head = m;
				goto retry;
			} else {
				txr->q_efbig2_tx_dma_setup++;
				return error;
			}
		case EINVAL:
			txr->q_einval_tx_dma_setup++;
			return error;
		default:
			txr->q_other_tx_dma_setup++;
			return error;
		}
	}

	/* Make certain there are enough descriptors */
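	/*
	 * The "+ 2" appears to reserve one slot for the offload context
	 * descriptor that ixgbe_tx_ctx_setup() may consume below, plus
	 * one descriptor of headroom.
	 */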
	if (txr->tx_avail < (map->dm_nsegs + 2)) {
		txr->txr_no_space = true;
		IXGBE_EVC_ADD(&txr->no_desc_avail, 1);
		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
		return EAGAIN;
	}

	/*
	 * Set up the appropriate offload context if requested;
	 * this may consume one TX descriptor.
	 */
	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
	if (__predict_false(error)) {
		return (error);
	}

#ifdef IXGBE_FDIR
	/* Do the flow director magic */
	if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
	    (txr->atr_sample) && (!sc->fdir_reinit)) {
		++txr->atr_count;
		if (txr->atr_count >= atr_sample_rate) {
			ixgbe_atr(txr, m_head);
			txr->atr_count = 0;
		}
	}
#endif

	olinfo_status |= IXGBE_ADVTXD_CC;
	i = txr->next_avail_desc;
	for (j = 0; j < map->dm_nsegs; j++) {
		bus_size_t seglen;
		uint64_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = map->dm_segs[j].ds_len;
		segaddr = htole64(map->dm_segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		if (++i == txr->num_desc)
			i = 0;
	}

	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= map->dm_nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	/*
	 * Here we swap the maps so the last descriptor,
	 * which gets the completion interrupt, has the
	 * real map, and the first descriptor gets the
	 * unused map from this descriptor.
	 */
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);

	/* Set the EOP descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop = txd;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	IXGBE_EVC_ADD(&txr->total_packets, 1);
	IXGBE_WRITE_REG(&sc->hw, txr->tail, i);

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if_statadd_ref(ifp, nsr, if_obytes, m_head->m_pkthdr.len);
	if (m_head->m_flags & M_MCAST)
		if_statinc_ref(ifp, nsr, if_omcasts);
	IF_STAT_PUTREF(ifp);

	/* Mark queue as having work */
	if (txr->busy == 0)
		txr->busy = 1;

	return (0);
} /* ixgbe_xmit */

/************************************************************************
 * ixgbe_drain
 ************************************************************************/
static void
ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf *m;

	IXGBE_TX_LOCK_ASSERT(txr);

	if (txr->me == 0) {
		while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			IF_DROP(&ifp->if_snd);
		}
	}

	while ((m = pcq_get(txr->txr_interq)) != NULL) {
		m_freem(m);
		IXGBE_EVC_ADD(&txr->pcq_drops, 1);
	}
}

/************************************************************************
 * ixgbe_allocate_transmit_buffers
 *
 *   Allocate memory for tx_buffer structures. The tx_buffer stores all
 *   the information needed to transmit a packet on the wire. This is
 *   called only once at attach; setup is done on every reset.
 ************************************************************************/
static int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct ixgbe_softc  *sc = txr->sc;
	device_t            dev = sc->dev;
	struct ixgbe_tx_buf *txbuf;
	int                 error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	error = ixgbe_dma_tag_create(
	         /*      parent */ sc->osdep.dmat,
	         /*   alignment */ 1,
	         /*      bounds */ 0,
	         /*     maxsize */ IXGBE_TSO_SIZE,
	         /*   nsegments */ sc->num_segs,
	         /*  maxsegsize */ PAGE_SIZE,
	         /*       flags */ 0,
	                           &txr->txtag);
	if (error != 0) {
		aprint_error_dev(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	txr->tx_buffers = kmem_zalloc(sizeof(struct ixgbe_tx_buf) *
	    sc->num_tx_desc, KM_SLEEP);

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < sc->num_tx_desc; i++, txbuf++) {
		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			aprint_error_dev(dev,
			    "Unable to create TX DMA map (%d)\n", error);
			goto fail;
		}
	}

	return 0;
fail:
	/* We free all; this handles the case where we stopped in the middle */
#if 0 /* XXX was FreeBSD */
	ixgbe_free_transmit_structures(sc);
#else
	ixgbe_free_transmit_buffers(txr);
#endif
	return (error);
} /* ixgbe_allocate_transmit_buffers */

/************************************************************************
 * ixgbe_setup_transmit_ring - Initialize a transmit ring.
 ************************************************************************/
static void
ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
	struct ixgbe_softc    *sc = txr->sc;
	struct ixgbe_tx_buf   *txbuf;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(sc->ifp);
	struct netmap_slot    *slot;
#endif /* DEV_NETMAP */

	/* Clear the old ring contents */
	IXGBE_TX_LOCK(txr);

#ifdef DEV_NETMAP
	if (sc->feat_en & IXGBE_FEATURE_NETMAP) {
		/*
		 * (under lock): if in netmap mode, do some consistency
		 * checks and set slot to entry 0 of the netmap ring.
		 */
		slot = netmap_reset(na, NR_TX, txr->me, 0);
	}
#endif /* DEV_NETMAP */

	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (int i = 0; i < txr->num_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
			    0, txbuf->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}

#ifdef DEV_NETMAP
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * Slots in the netmap ring (indexed by "si") are
		 * kring->nkr_hwofs positions "ahead" wrt the
		 * corresponding slot in the NIC ring. In some drivers
		 * (not here) nkr_hwofs can be negative. Function
		 * netmap_idx_n2k() handles wraparounds properly.
		 */
		if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
			int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
			netmap_load_map(na, txr->txtag,
			    txbuf->map, NMB(na, slot + si));
		}
#endif /* DEV_NETMAP */

		/* Clear the EOP descriptor pointer */
		txbuf->eop = NULL;
	}

#ifdef IXGBE_FDIR
	/* Set the rate at which we sample packets */
	if (sc->feat_en & IXGBE_FEATURE_FDIR)
		txr->atr_sample = atr_sample_rate;
#endif

	/* Set number of descriptors available */
	txr->tx_avail = sc->num_tx_desc;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_setup_transmit_ring */

/************************************************************************
 * ixgbe_setup_transmit_structures - Initialize all transmit rings.
 ************************************************************************/
int
ixgbe_setup_transmit_structures(struct ixgbe_softc *sc)
{
	struct tx_ring *txr = sc->tx_rings;

	for (int i = 0; i < sc->num_queues; i++, txr++)
		ixgbe_setup_transmit_ring(txr);

	return (0);
} /* ixgbe_setup_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_structures - Free all transmit rings.
 ************************************************************************/
void
ixgbe_free_transmit_structures(struct ixgbe_softc *sc)
{
	struct tx_ring *txr = sc->tx_rings;

	for (int i = 0; i < sc->num_queues; i++, txr++) {
		ixgbe_free_transmit_buffers(txr);
		ixgbe_dma_free(sc, &txr->txdma);
		IXGBE_TX_LOCK_DESTROY(txr);
	}
	kmem_free(sc->tx_rings, sizeof(struct tx_ring) * sc->num_queues);
} /* ixgbe_free_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_buffers
 *
 *   Free transmit ring related data structures.
 ************************************************************************/
static void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct ixgbe_softc  *sc = txr->sc;
	struct ixgbe_tx_buf *tx_buffer;
	int                 i;

	INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
			    0, tx_buffer->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				ixgbe_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
	if (txr->txr_interq != NULL) {
		struct mbuf *m;

		while ((m = pcq_get(txr->txr_interq)) != NULL)
			m_freem(m);
		pcq_destroy(txr->txr_interq);
	}
	if (txr->tx_buffers != NULL) {
		kmem_free(txr->tx_buffers,
		    sizeof(struct ixgbe_tx_buf) * sc->num_tx_desc);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		ixgbe_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
} /* ixgbe_free_transmit_buffers */

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct ixgbe_softc               *sc = txr->sc;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header         *eh;
#ifdef INET
	struct ip                        *ip;
#endif
#ifdef INET6
	struct ip6_hdr                   *ip6;
#endif
	int                              ehdrlen, ip_hlen = 0;
	int                              offload = TRUE;
	int                              ctxd = txr->next_avail_desc;
	u32                              vlan_macip_lens = 0;
	u32                              type_tucmd_mlhl = 0;
	u16                              vtag = 0;
	u16                              etype;
	u8                               ipproto = 0;
	char                             *l3d;

	/* First check if TSO is to be used */
	if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
		int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);

		if (rv != 0)
			IXGBE_EVC_ADD(&sc->tso_err, 1);
		return rv;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
		offload = FALSE;

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor. Hence
	 * we need to make one even if not doing offloads.
	 */
	if (vlan_has_tag(mp)) {
		vtag = htole16(vlan_get_tag(mp));
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (!(txr->sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
	           (offload == FALSE))
		return (0);

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	if (offload == FALSE)
		goto no_offloads;

	/*
	 * If the first mbuf only includes the ethernet header,
	 * jump to the next one
	 * XXX: This assumes the stack splits mbufs containing headers
	 *      on header boundaries
	 * XXX: And assumes the entire IP header is contained in one mbuf
	 */
	if (mp->m_len == ehdrlen && mp->m_next)
		l3d = mtod(mp->m_next, char *);
	else
		l3d = mtod(mp, char *) + ehdrlen;

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(l3d);
		ip_hlen = ip->ip_hl << 2;
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
		    ip->ip_sum == 0);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(l3d);
		ip_hlen = sizeof(struct ip6_hdr);
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
	default:
		offload = false;
		break;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;

	vlan_macip_lens |= ip_hlen;

	/* No support for offloads for non-L4 next headers */
	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags &
		    (M_CSUM_TCPv4 | M_CSUM_TCPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = false;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags &
		    (M_CSUM_UDPv4 | M_CSUM_UDPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = false;
		break;
	default:
		offload = false;
		break;
	}

	if (offload) /* Insert L4 checksum into data descriptors */
		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

no_offloads:
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now ready a context descriptor */
	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (0);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_tso_setup
 *
 *   Setup work for hardware segmentation offload (TSO) on
 *   adapters using advanced tx descriptors
 ************************************************************************/
static int
ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
    u32 *olinfo_status)
{
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header         *eh;
#ifdef INET6
	struct ip6_hdr                   *ip6;
#endif
#ifdef INET
	struct ip                        *ip;
#endif
	struct tcphdr                    *th;
	int                              ctxd, ehdrlen, ip_hlen, tcp_hlen;
	u32                              vlan_macip_lens = 0;
	u32                              type_tucmd_mlhl = 0;
	u32                              mss_l4len_idx = 0, paylen;
	u16                              vtag = 0, eh_type;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		eh_type = eh->evl_proto;
	} else {
		ehdrlen = ETHER_HDR_LEN;
		eh_type = eh->evl_encap_proto;
	}

	switch (ntohs(eh_type)) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (ENXIO);
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		th = (struct tcphdr *)((char *)ip + ip_hlen);
		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (ENXIO);
   1045   1.28   msaitoh 		ip_hlen = sizeof(struct ip6_hdr);
   1047   1.28   msaitoh 		th = (struct tcphdr *)((char *)ip6 + ip_hlen);
   1048   1.28   msaitoh 		th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   1049   1.28   msaitoh 		    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   1050   1.28   msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   1051   1.28   msaitoh 		break;
   1052   1.28   msaitoh #endif
   1053    1.1   msaitoh 	default:
   1054    1.1   msaitoh 		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
   1055    1.1   msaitoh 		    __func__, ntohs(eh_type));
   1056    1.1   msaitoh 		break;
   1057    1.1   msaitoh 	}
   1058    1.1   msaitoh 
   1059    1.1   msaitoh 	ctxd = txr->next_avail_desc;
   1060   1.28   msaitoh 	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
   1061    1.1   msaitoh 
   1062    1.1   msaitoh 	tcp_hlen = th->th_off << 2;
   1063    1.1   msaitoh 
   1064    1.1   msaitoh 	/* This is used in the transmit desc in encap */
   1065    1.1   msaitoh 	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
   1066    1.1   msaitoh 
   1067    1.1   msaitoh 	/* VLAN MACLEN IPLEN */
   1068   1.29  knakahar 	if (vlan_has_tag(mp)) {
   1069   1.29  knakahar 		vtag = htole16(vlan_get_tag(mp));
   1070   1.28   msaitoh 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   1071    1.1   msaitoh 	}
   1072    1.1   msaitoh 
   1073    1.1   msaitoh 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   1074    1.1   msaitoh 	vlan_macip_lens |= ip_hlen;
   1075    1.1   msaitoh 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
   1076    1.1   msaitoh 
   1077    1.1   msaitoh 	/* ADV DTYPE TUCMD */
   1078    1.1   msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   1079    1.1   msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   1080    1.1   msaitoh 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
   1081    1.1   msaitoh 
   1082    1.1   msaitoh 	/* MSS L4LEN IDX */
   1083    1.1   msaitoh 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   1084    1.1   msaitoh 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   1085    1.1   msaitoh 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   1086    1.1   msaitoh 
   1087    1.1   msaitoh 	TXD->seqnum_seed = htole32(0);
   1088    1.1   msaitoh 
   1089    1.1   msaitoh 	if (++ctxd == txr->num_desc)
   1090    1.1   msaitoh 		ctxd = 0;
   1091    1.1   msaitoh 
   1092    1.1   msaitoh 	txr->tx_avail--;
   1093    1.1   msaitoh 	txr->next_avail_desc = ctxd;
   1094    1.1   msaitoh 	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1095    1.1   msaitoh 	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1096    1.1   msaitoh 	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1097   1.95   msaitoh 	IXGBE_EVC_ADD(&txr->tso_tx, 1);
   1098   1.28   msaitoh 
   1099    1.1   msaitoh 	return (0);
   1100   1.28   msaitoh } /* ixgbe_tso_setup */
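                          
                          /*
                           * Worked example of the field packing above (a sketch; the shift
                           * values assumed here are the usual ones from ixgbe_type.h:
                           * MACLEN 9, VLAN 16, MSS 16, L4LEN 8).  For an untagged IPv4/TCP
                           * frame with ehdrlen = 14, ip_hlen = 20, tcp_hlen = 20 and
                           * segsz = 1448:
                           *
                           *	vlan_macip_lens = (14 << 9) | 20           = 0x00001c14
                           *	mss_l4len_idx   = (1448 << 16) | (20 << 8) = 0x05a81400
                           */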
   1101    1.1   msaitoh 
   1102    1.3   msaitoh 
   1103   1.28   msaitoh /************************************************************************
   1104   1.28   msaitoh  * ixgbe_txeof
   1105    1.1   msaitoh  *
   1106   1.28   msaitoh  *   Examine each tx_buffer in the used queue. If the hardware is done
   1107   1.28   msaitoh  *   processing the packet then free associated resources. The
   1108   1.28   msaitoh  *   tx_buffer is put back on the free queue.
   1109   1.28   msaitoh  ************************************************************************/
   1110   1.32   msaitoh bool
   1111    1.1   msaitoh ixgbe_txeof(struct tx_ring *txr)
   1112    1.1   msaitoh {
   1113  1.102   msaitoh 	struct ixgbe_softc	*sc = txr->sc;
   1114  1.102   msaitoh 	struct ifnet		*ifp = sc->ifp;
   1115   1.28   msaitoh 	struct ixgbe_tx_buf	*buf;
   1116   1.28   msaitoh 	union ixgbe_adv_tx_desc *txd;
   1117    1.1   msaitoh 	u32			work, processed = 0;
   1118  1.102   msaitoh 	u32			limit = sc->tx_process_limit;
   1119  1.109   msaitoh 	u16			avail;
   1120    1.1   msaitoh 
   1121    1.1   msaitoh 	KASSERT(mutex_owned(&txr->tx_mtx));
   1122    1.1   msaitoh 
   1123    1.1   msaitoh #ifdef DEV_NETMAP
   1124  1.102   msaitoh 	if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
   1125  1.102   msaitoh 	    (sc->ifp->if_capenable & IFCAP_NETMAP)) {
    1126  1.102   msaitoh 		struct netmap_adapter *na = NA(sc->ifp);
   1127   1.53   msaitoh 		struct netmap_kring *kring = na->tx_rings[txr->me];
   1128    1.1   msaitoh 		txd = txr->tx_base;
   1129    1.1   msaitoh 		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1130    1.1   msaitoh 		    BUS_DMASYNC_POSTREAD);
   1131    1.1   msaitoh 		/*
   1132    1.1   msaitoh 		 * In netmap mode, all the work is done in the context
   1133    1.1   msaitoh 		 * of the client thread. Interrupt handlers only wake up
   1134    1.1   msaitoh 		 * clients, which may be sleeping on individual rings
   1135    1.1   msaitoh 		 * or on a global resource for all rings.
   1136    1.1   msaitoh 		 * To implement tx interrupt mitigation, we wake up the client
   1137    1.1   msaitoh 		 * thread roughly every half ring, even if the NIC interrupts
   1138    1.1   msaitoh 		 * more frequently. This is implemented as follows:
   1139    1.1   msaitoh 		 * - ixgbe_txsync() sets kring->nr_kflags with the index of
   1140    1.1   msaitoh 		 *   the slot that should wake up the thread (nkr_num_slots
   1141    1.1   msaitoh 		 *   means the user thread should not be woken up);
   1142    1.1   msaitoh 		 * - the driver ignores tx interrupts unless netmap_mitigate=0
   1143    1.1   msaitoh 		 *   or the slot has the DD bit set.
   1144    1.1   msaitoh 		 */
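                          		/*
                          		 * Hypothetical sketch of the arming step described
                          		 * above (names as in the comment; not the actual
                          		 * ixgbe_txsync() code): with a 1024-slot ring, the
                          		 * sync routine could set
                          		 *
                          		 *	kring->nr_kflags = (head +
                          		 *	    kring->nkr_num_slots / 2) %
                          		 *	    kring->nkr_num_slots;
                          		 *
                          		 * so the DD test below would wake the client about
                          		 * every 512 completions.
                          		 */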
   1145   1.53   msaitoh 		if (kring->nr_kflags < kring->nkr_num_slots &&
   1146   1.78       ryo 		    le32toh(txd[kring->nr_kflags].wb.status) & IXGBE_TXD_STAT_DD) {
   1147    1.1   msaitoh 			netmap_tx_irq(ifp, txr->me);
   1148    1.1   msaitoh 		}
   1149   1.32   msaitoh 		return false;
   1150    1.1   msaitoh 	}
   1151    1.1   msaitoh #endif /* DEV_NETMAP */
   1152    1.1   msaitoh 
   1153    1.1   msaitoh 	if (txr->tx_avail == txr->num_desc) {
   1154   1.45   msaitoh 		txr->busy = 0;
   1155   1.32   msaitoh 		return false;
   1156    1.1   msaitoh 	}
   1157    1.1   msaitoh 
   1158    1.1   msaitoh 	/* Get work starting point */
   1159    1.1   msaitoh 	work = txr->next_to_clean;
   1160    1.1   msaitoh 	buf = &txr->tx_buffers[work];
   1161    1.1   msaitoh 	txd = &txr->tx_base[work];
   1162    1.1   msaitoh 	work -= txr->num_desc; /* The distance to ring end */
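                          	/*
                          	 * Example: with num_desc = 512 and next_to_clean = 500, work
                          	 * is now -12; the per-descriptor increments below drive it to
                          	 * zero exactly at the ring boundary, which the "!work" tests
                          	 * catch in order to wrap back to the start of the ring.
                          	 */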
   1163  1.109   msaitoh 	avail = txr->tx_avail;
   1164   1.28   msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1165    1.1   msaitoh 	    BUS_DMASYNC_POSTREAD);
   1166    1.8   msaitoh 
   1167    1.1   msaitoh 	do {
   1168    1.8   msaitoh 		union ixgbe_adv_tx_desc *eop = buf->eop;
   1169    1.1   msaitoh 		if (eop == NULL) /* No work */
   1170    1.1   msaitoh 			break;
   1171    1.1   msaitoh 
   1172   1.78       ryo 		if ((le32toh(eop->wb.status) & IXGBE_TXD_STAT_DD) == 0)
   1173    1.1   msaitoh 			break;	/* I/O not complete */
   1174    1.1   msaitoh 
   1175    1.1   msaitoh 		if (buf->m_head) {
   1176   1.28   msaitoh 			txr->bytes += buf->m_head->m_pkthdr.len;
   1177   1.28   msaitoh 			bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
   1178    1.1   msaitoh 			    0, buf->m_head->m_pkthdr.len,
   1179    1.1   msaitoh 			    BUS_DMASYNC_POSTWRITE);
   1180   1.28   msaitoh 			ixgbe_dmamap_unload(txr->txtag, buf->map);
   1181    1.1   msaitoh 			m_freem(buf->m_head);
   1182    1.1   msaitoh 			buf->m_head = NULL;
   1183    1.1   msaitoh 		}
   1184    1.1   msaitoh 		buf->eop = NULL;
   1185  1.109   msaitoh 		++avail;
   1186    1.1   msaitoh 
   1187    1.1   msaitoh 		/* We clean the range if multi segment */
   1188    1.1   msaitoh 		while (txd != eop) {
   1189    1.1   msaitoh 			++txd;
   1190    1.1   msaitoh 			++buf;
   1191    1.1   msaitoh 			++work;
   1192    1.1   msaitoh 			/* wrap the ring? */
   1193    1.1   msaitoh 			if (__predict_false(!work)) {
   1194    1.1   msaitoh 				work -= txr->num_desc;
   1195    1.1   msaitoh 				buf = txr->tx_buffers;
   1196    1.1   msaitoh 				txd = txr->tx_base;
   1197    1.1   msaitoh 			}
   1198    1.1   msaitoh 			if (buf->m_head) {
   1199    1.1   msaitoh 				txr->bytes +=
   1200    1.1   msaitoh 				    buf->m_head->m_pkthdr.len;
   1201    1.1   msaitoh 				bus_dmamap_sync(txr->txtag->dt_dmat,
   1202    1.1   msaitoh 				    buf->map,
   1203    1.1   msaitoh 				    0, buf->m_head->m_pkthdr.len,
   1204    1.1   msaitoh 				    BUS_DMASYNC_POSTWRITE);
   1205    1.1   msaitoh 				ixgbe_dmamap_unload(txr->txtag,
   1206    1.1   msaitoh 				    buf->map);
   1207    1.1   msaitoh 				m_freem(buf->m_head);
   1208    1.1   msaitoh 				buf->m_head = NULL;
   1209    1.1   msaitoh 			}
   1210  1.109   msaitoh 			++avail;
   1211    1.1   msaitoh 			buf->eop = NULL;
   1212    1.1   msaitoh 
   1214    1.1   msaitoh 		++processed;
   1215    1.1   msaitoh 
   1216    1.1   msaitoh 		/* Try the next packet */
   1217    1.1   msaitoh 		++txd;
   1218    1.1   msaitoh 		++buf;
   1219    1.1   msaitoh 		++work;
   1220    1.1   msaitoh 		/* reset with a wrap */
   1221    1.1   msaitoh 		if (__predict_false(!work)) {
   1222    1.1   msaitoh 			work -= txr->num_desc;
   1223    1.1   msaitoh 			buf = txr->tx_buffers;
   1224    1.1   msaitoh 			txd = txr->tx_base;
   1225    1.1   msaitoh 		}
   1226    1.1   msaitoh 		prefetch(txd);
   1227    1.1   msaitoh 	} while (__predict_true(--limit));
   1228    1.1   msaitoh 
   1229    1.1   msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1230    1.1   msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1231    1.1   msaitoh 
   1232    1.1   msaitoh 	work += txr->num_desc;
   1233    1.1   msaitoh 	txr->next_to_clean = work;
   1234  1.109   msaitoh 	if (processed) {
   1235  1.109   msaitoh 		txr->tx_avail = avail;
   1236  1.109   msaitoh 		txr->txr_no_space = false;
   1237  1.111   msaitoh 		txr->packets += processed;
   1238  1.110   msaitoh 		if_statadd(ifp, if_opackets, processed);
   1239  1.109   msaitoh 	}
   1240    1.1   msaitoh 
   1241   1.45   msaitoh 	/*
    1242   1.45   msaitoh 	 * Queue hang detection: we know there's work
    1243   1.45   msaitoh 	 * outstanding or the first return above would have
    1244   1.45   msaitoh 	 * been taken, so increment busy if nothing managed
    1245   1.45   msaitoh 	 * to get cleaned; the local timer will then check
    1246   1.45   msaitoh 	 * this count and mark the queue HUNG if it exceeds
    1247   1.45   msaitoh 	 * the maximum number of attempts.
   1248   1.45   msaitoh 	 */
   1249   1.45   msaitoh 	if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
   1250   1.45   msaitoh 		++txr->busy;
   1251   1.45   msaitoh 	/*
    1252   1.45   msaitoh 	 * If anything got cleaned we reset the state to 1;
    1253   1.45   msaitoh 	 * note this will turn off HUNG if it's set.
   1254   1.45   msaitoh 	 */
   1255   1.45   msaitoh 	if (processed)
   1256   1.45   msaitoh 		txr->busy = 1;
   1257   1.45   msaitoh 
   1258   1.43   msaitoh 	if (txr->tx_avail == txr->num_desc)
   1259   1.45   msaitoh 		txr->busy = 0;
   1260   1.43   msaitoh 
    1261   1.32   msaitoh 	return (limit == 0);
   1262   1.28   msaitoh } /* ixgbe_txeof */
   1263    1.1   msaitoh 
   1264  1.116   msaitoh #ifdef RSC
   1265   1.28   msaitoh /************************************************************************
   1266   1.28   msaitoh  * ixgbe_rsc_count
   1267   1.28   msaitoh  *
   1268   1.28   msaitoh  *   Used to detect a descriptor that has been merged by Hardware RSC.
   1269   1.28   msaitoh  ************************************************************************/
   1270    1.1   msaitoh static inline u32
   1271    1.1   msaitoh ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
   1272    1.1   msaitoh {
   1273    1.1   msaitoh 	return (le32toh(rx->wb.lower.lo_dword.data) &
   1274    1.1   msaitoh 	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
   1275   1.28   msaitoh } /* ixgbe_rsc_count */
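                          
                          /*
                           * Example (a sketch; the mask/shift assumed here are the usual
                           * 0x001e0000/17 from ixgbe_type.h): a writeback lo_dword of
                           * 0x000a0000 yields (0x000a0000 & 0x001e0000) >> 17 = 5 merged
                           * descriptors.
                           */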
   1276    1.1   msaitoh 
   1277   1.28   msaitoh /************************************************************************
   1278   1.28   msaitoh  * ixgbe_setup_hw_rsc
   1279    1.1   msaitoh  *
    1280   1.28   msaitoh  *   Initialize the Hardware RSC (LRO) feature on 82599
    1281   1.28   msaitoh  *   for an RX ring; this is toggled by the LRO capability
    1282   1.28   msaitoh  *   even though it is transparent to the stack.
   1283   1.28   msaitoh  *
   1284   1.28   msaitoh  *   NOTE: Since this HW feature only works with IPv4 and
   1285   1.28   msaitoh  *         testing has shown soft LRO to be as effective,
   1286   1.28   msaitoh  *         this feature will be disabled by default.
   1287   1.28   msaitoh  ************************************************************************/
   1288    1.1   msaitoh static void
   1289    1.1   msaitoh ixgbe_setup_hw_rsc(struct rx_ring *rxr)
   1290    1.1   msaitoh {
   1291  1.104   msaitoh 	struct ixgbe_softc *sc = rxr->sc;
   1292  1.104   msaitoh 	struct ixgbe_hw	*hw = &sc->hw;
   1293  1.104   msaitoh 	u32		rscctrl, rdrxctl;
   1294    1.1   msaitoh 
   1295    1.1   msaitoh 	/* If turning LRO/RSC off we need to disable it */
   1296  1.102   msaitoh 	if ((sc->ifp->if_capenable & IFCAP_LRO) == 0) {
   1297    1.1   msaitoh 		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
    1298    1.1   msaitoh 		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
                          		/* Write the cleared enable bit back to the register */
                          		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
    1299    1.1   msaitoh 		return;
   1300    1.1   msaitoh 	}
   1301    1.1   msaitoh 
   1302    1.1   msaitoh 	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
   1303    1.1   msaitoh 	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
   1304   1.28   msaitoh #ifdef DEV_NETMAP
   1305   1.28   msaitoh 	/* Always strip CRC unless Netmap disabled it */
   1306  1.102   msaitoh 	if (!(sc->feat_en & IXGBE_FEATURE_NETMAP) ||
   1307  1.102   msaitoh 	    !(sc->ifp->if_capenable & IFCAP_NETMAP) ||
   1308   1.28   msaitoh 	    ix_crcstrip)
   1309    1.1   msaitoh #endif /* DEV_NETMAP */
   1310   1.28   msaitoh 		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
   1311    1.1   msaitoh 	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
   1312    1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
   1313    1.1   msaitoh 
   1314    1.1   msaitoh 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
   1315    1.1   msaitoh 	rscctrl |= IXGBE_RSCCTL_RSCEN;
   1316    1.1   msaitoh 	/*
   1317   1.28   msaitoh 	 * Limit the total number of descriptors that
   1318   1.28   msaitoh 	 * can be combined, so it does not exceed 64K
   1319   1.28   msaitoh 	 */
   1320    1.1   msaitoh 	if (rxr->mbuf_sz == MCLBYTES)
   1321    1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
   1322    1.1   msaitoh 	else if (rxr->mbuf_sz == MJUMPAGESIZE)
   1323    1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
   1324    1.1   msaitoh 	else if (rxr->mbuf_sz == MJUM9BYTES)
   1325    1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
   1326    1.1   msaitoh 	else  /* Using 16K cluster */
   1327    1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
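                          	/*
                          	 * Sanity check on the caps above (assuming the usual cluster
                          	 * sizes): 16 * 2KB = 32KB, 8 * 4KB = 32KB, 4 * 9KB = 36KB and
                          	 * 1 * 16KB = 16KB, all safely under the 64KB limit.
                          	 */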
   1328    1.1   msaitoh 
   1329    1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
   1330    1.1   msaitoh 
   1331    1.1   msaitoh 	/* Enable TCP header recognition */
   1332    1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
   1333   1.28   msaitoh 	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
   1334    1.1   msaitoh 
   1335    1.1   msaitoh 	/* Disable RSC for ACK packets */
   1336    1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
   1337    1.1   msaitoh 	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
   1338    1.1   msaitoh 
   1339    1.1   msaitoh 	rxr->hw_rsc = TRUE;
   1340   1.28   msaitoh } /* ixgbe_setup_hw_rsc */
   1341  1.116   msaitoh #endif
   1342    1.8   msaitoh 
   1343   1.28   msaitoh /************************************************************************
   1344   1.28   msaitoh  * ixgbe_refresh_mbufs
   1345    1.1   msaitoh  *
   1346   1.28   msaitoh  *   Refresh mbuf buffers for RX descriptor rings
    1347   1.28   msaitoh  *    - now keeps its own state, so discards due to resource
    1348   1.28   msaitoh  *      exhaustion are unnecessary; if an mbuf cannot be obtained
    1349   1.28   msaitoh  *      it just returns, keeping its placeholder, so it can simply
    1350   1.28   msaitoh  *      be called again later to retry.
   1351   1.28   msaitoh  ************************************************************************/
   1352    1.1   msaitoh static void
   1353    1.1   msaitoh ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
   1354    1.1   msaitoh {
   1355  1.102   msaitoh 	struct ixgbe_softc  *sc = rxr->sc;
   1356   1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1357   1.28   msaitoh 	struct mbuf         *mp;
   1358   1.82   msaitoh 	int                 i, error;
   1359   1.28   msaitoh 	bool                refreshed = false;
   1360    1.1   msaitoh 
   1361   1.82   msaitoh 	i = rxr->next_to_refresh;
   1362   1.82   msaitoh 	/* next_to_refresh points to the previous one */
   1363   1.82   msaitoh 	if (++i == rxr->num_desc)
   1364   1.82   msaitoh 		i = 0;
   1365    1.1   msaitoh 
   1366   1.82   msaitoh 	while (i != limit) {
   1367    1.1   msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1368   1.91   msaitoh 		if (__predict_false(rxbuf->buf == NULL)) {
   1369   1.87   msaitoh 			mp = ixgbe_getcl();
   1370    1.1   msaitoh 			if (mp == NULL) {
   1371   1.95   msaitoh 				IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
   1372    1.1   msaitoh 				goto update;
   1373    1.1   msaitoh 			}
   1374   1.86   msaitoh 			mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1375  1.102   msaitoh 			IXGBE_M_ADJ(sc, rxr, mp);
   1376    1.1   msaitoh 		} else
   1377    1.1   msaitoh 			mp = rxbuf->buf;
   1378    1.1   msaitoh 
   1379    1.1   msaitoh 		/* If we're dealing with an mbuf that was copied rather
   1380    1.1   msaitoh 		 * than replaced, there's no need to go through busdma.
   1381    1.1   msaitoh 		 */
   1382    1.1   msaitoh 		if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
   1383    1.1   msaitoh 			/* Get the memory mapping */
   1384    1.4   msaitoh 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   1385    1.1   msaitoh 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   1386    1.1   msaitoh 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   1387   1.91   msaitoh 			if (__predict_false(error != 0)) {
   1388  1.102   msaitoh 				device_printf(sc->dev, "Refresh mbufs: "
   1389   1.55   msaitoh 				    "payload dmamap load failure - %d\n",
   1390   1.55   msaitoh 				    error);
   1391    1.1   msaitoh 				m_free(mp);
   1392    1.1   msaitoh 				rxbuf->buf = NULL;
   1393    1.1   msaitoh 				goto update;
   1394    1.1   msaitoh 			}
   1395    1.1   msaitoh 			rxbuf->buf = mp;
   1396    1.1   msaitoh 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1397    1.1   msaitoh 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   1398    1.1   msaitoh 			rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
   1399    1.1   msaitoh 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1400    1.1   msaitoh 		} else {
   1401    1.1   msaitoh 			rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
   1402    1.1   msaitoh 			rxbuf->flags &= ~IXGBE_RX_COPY;
   1403    1.1   msaitoh 		}
   1404    1.1   msaitoh 
   1405    1.1   msaitoh 		refreshed = true;
   1406   1.82   msaitoh 		/* next_to_refresh points to the previous one */
   1407    1.1   msaitoh 		rxr->next_to_refresh = i;
   1408   1.82   msaitoh 		if (++i == rxr->num_desc)
   1409   1.82   msaitoh 			i = 0;
   1410    1.1   msaitoh 	}
   1411   1.28   msaitoh 
   1412    1.1   msaitoh update:
   1413    1.1   msaitoh 	if (refreshed) /* Update hardware tail index */
   1414  1.102   msaitoh 		IXGBE_WRITE_REG(&sc->hw, rxr->tail, rxr->next_to_refresh);
   1415   1.28   msaitoh 
   1416    1.1   msaitoh 	return;
   1417   1.28   msaitoh } /* ixgbe_refresh_mbufs */
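                          
                          /*
                           * Usage sketch (a hypothetical caller; the real call sites are in
                           * the RX cleanup path): after consuming descriptors up to index i,
                           * a caller can simply do
                           *
                           *	ixgbe_refresh_mbufs(rxr, i);
                           *
                           * and, because the function keeps its own next_to_refresh
                           * placeholder, an allocation failure is recovered by just calling
                           * it again later.
                           */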
   1418    1.1   msaitoh 
   1419   1.28   msaitoh /************************************************************************
   1420   1.28   msaitoh  * ixgbe_allocate_receive_buffers
   1421    1.1   msaitoh  *
   1422   1.28   msaitoh  *   Allocate memory for rx_buffer structures. Since we use one
    1423   1.28   msaitoh  *   rx_buffer per received packet, the maximum number of rx_buffers
   1424   1.28   msaitoh  *   that we'll need is equal to the number of receive descriptors
   1425   1.28   msaitoh  *   that we've allocated.
   1426   1.28   msaitoh  ************************************************************************/
   1427   1.28   msaitoh static int
   1428    1.1   msaitoh ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
   1429    1.1   msaitoh {
   1430  1.102   msaitoh 	struct ixgbe_softc  *sc = rxr->sc;
   1431  1.102   msaitoh 	device_t            dev = sc->dev;
   1432   1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1433   1.28   msaitoh 	int                 bsize, error;
   1434    1.1   msaitoh 
   1435    1.1   msaitoh 	bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
   1436  1.114   msaitoh 	rxr->rx_buffers = kmem_zalloc(bsize, KM_SLEEP);
   1437    1.1   msaitoh 
   1438   1.28   msaitoh 	error = ixgbe_dma_tag_create(
   1439  1.102   msaitoh 	         /*      parent */ sc->osdep.dmat,
   1440   1.28   msaitoh 	         /*   alignment */ 1,
   1441   1.28   msaitoh 	         /*      bounds */ 0,
   1442   1.28   msaitoh 	         /*     maxsize */ MJUM16BYTES,
   1443   1.28   msaitoh 	         /*   nsegments */ 1,
   1444   1.28   msaitoh 	         /*  maxsegsize */ MJUM16BYTES,
   1445   1.28   msaitoh 	         /*       flags */ 0,
   1446   1.28   msaitoh 	                           &rxr->ptag);
   1447   1.28   msaitoh 	if (error != 0) {
   1448    1.1   msaitoh 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   1449    1.1   msaitoh 		goto fail;
   1450    1.1   msaitoh 	}
   1451    1.1   msaitoh 
    1452    1.5   msaitoh 	for (int i = 0; i < rxr->num_desc; i++) {
   1453    1.1   msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1454    1.4   msaitoh 		error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
   1455    1.1   msaitoh 		if (error) {
   1456    1.1   msaitoh 			aprint_error_dev(dev, "Unable to create RX dma map\n");
   1457    1.1   msaitoh 			goto fail;
   1458    1.1   msaitoh 		}
   1459    1.1   msaitoh 	}
   1460    1.1   msaitoh 
   1461    1.1   msaitoh 	return (0);
   1462    1.1   msaitoh 
   1463    1.1   msaitoh fail:
   1464    1.1   msaitoh 	/* Frees all, but can handle partial completion */
   1465  1.102   msaitoh 	ixgbe_free_receive_structures(sc);
   1466   1.28   msaitoh 
   1467    1.1   msaitoh 	return (error);
   1468   1.28   msaitoh } /* ixgbe_allocate_receive_buffers */
   1469    1.1   msaitoh 
   1470   1.28   msaitoh /************************************************************************
   1471   1.30   msaitoh  * ixgbe_free_receive_ring
   1472   1.28   msaitoh  ************************************************************************/
   1473   1.28   msaitoh static void
   1474    1.1   msaitoh ixgbe_free_receive_ring(struct rx_ring *rxr)
   1475   1.27   msaitoh {
   1476    1.5   msaitoh 	for (int i = 0; i < rxr->num_desc; i++) {
   1477   1.27   msaitoh 		ixgbe_rx_discard(rxr, i);
   1478    1.1   msaitoh 	}
   1479   1.28   msaitoh } /* ixgbe_free_receive_ring */
   1480    1.1   msaitoh 
   1481   1.28   msaitoh /************************************************************************
   1482   1.28   msaitoh  * ixgbe_setup_receive_ring
   1483    1.1   msaitoh  *
   1484   1.28   msaitoh  *   Initialize a receive ring and its buffers.
   1485   1.28   msaitoh  ************************************************************************/
   1486    1.1   msaitoh static int
   1487    1.1   msaitoh ixgbe_setup_receive_ring(struct rx_ring *rxr)
   1488    1.1   msaitoh {
   1489  1.102   msaitoh 	struct ixgbe_softc    *sc;
   1490   1.28   msaitoh 	struct ixgbe_rx_buf   *rxbuf;
   1491    1.1   msaitoh #ifdef LRO
   1492   1.28   msaitoh 	struct ifnet          *ifp;
   1493   1.28   msaitoh 	struct lro_ctrl       *lro = &rxr->lro;
   1494    1.1   msaitoh #endif /* LRO */
   1495    1.1   msaitoh #ifdef DEV_NETMAP
    1496  1.102   msaitoh 	struct netmap_adapter *na = NA(rxr->sc->ifp);
   1497   1.28   msaitoh 	struct netmap_slot    *slot;
   1498    1.1   msaitoh #endif /* DEV_NETMAP */
   1499   1.28   msaitoh 	int                   rsize, error = 0;
   1500    1.1   msaitoh 
   1501  1.102   msaitoh 	sc = rxr->sc;
   1502    1.1   msaitoh #ifdef LRO
   1503  1.102   msaitoh 	ifp = sc->ifp;
   1504    1.1   msaitoh #endif /* LRO */
   1505    1.1   msaitoh 
   1506    1.1   msaitoh 	/* Clear the ring contents */
   1507    1.1   msaitoh 	IXGBE_RX_LOCK(rxr);
   1508   1.28   msaitoh 
   1509    1.1   msaitoh #ifdef DEV_NETMAP
   1510  1.102   msaitoh 	if (sc->feat_en & IXGBE_FEATURE_NETMAP)
   1511   1.28   msaitoh 		slot = netmap_reset(na, NR_RX, rxr->me, 0);
   1512    1.1   msaitoh #endif /* DEV_NETMAP */
   1513   1.28   msaitoh 
   1514  1.113   msaitoh 	rsize = sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc);
   1515  1.113   msaitoh 	KASSERT((rsize % DBA_ALIGN) == 0);
   1516    1.1   msaitoh 	bzero((void *)rxr->rx_base, rsize);
   1517    1.1   msaitoh 	/* Cache the size */
   1518  1.102   msaitoh 	rxr->mbuf_sz = sc->rx_mbuf_sz;
   1519    1.1   msaitoh 
   1520    1.1   msaitoh 	/* Free current RX buffer structs and their mbufs */
   1521    1.1   msaitoh 	ixgbe_free_receive_ring(rxr);
   1522    1.1   msaitoh 
   1523    1.1   msaitoh 	/* Now replenish the mbufs */
   1524  1.108   msaitoh 	for (int i = 0; i < rxr->num_desc; i++) {
   1525   1.28   msaitoh 		struct mbuf *mp;
   1526    1.1   msaitoh 
   1527  1.108   msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1528   1.28   msaitoh 
   1529    1.1   msaitoh #ifdef DEV_NETMAP
   1530    1.1   msaitoh 		/*
   1531    1.1   msaitoh 		 * In netmap mode, fill the map and set the buffer
   1532    1.1   msaitoh 		 * address in the NIC ring, considering the offset
   1533    1.1   msaitoh 		 * between the netmap and NIC rings (see comment in
    1534    1.1   msaitoh 		 * ixgbe_setup_transmit_ring()). No need to allocate
   1535    1.1   msaitoh 		 * an mbuf, so end the block with a continue;
   1536    1.1   msaitoh 		 */
   1537  1.102   msaitoh 		if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
   1538  1.108   msaitoh 			int sj = netmap_idx_n2k(na->rx_rings[rxr->me], i);
   1539    1.1   msaitoh 			uint64_t paddr;
   1540    1.1   msaitoh 			void *addr;
   1541    1.1   msaitoh 
   1542    1.1   msaitoh 			addr = PNMB(na, slot + sj, &paddr);
   1543    1.1   msaitoh 			netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
   1544    1.1   msaitoh 			/* Update descriptor and the cached value */
   1545  1.108   msaitoh 			rxr->rx_base[i].read.pkt_addr = htole64(paddr);
   1546    1.1   msaitoh 			rxbuf->addr = htole64(paddr);
   1547    1.1   msaitoh 			continue;
   1548    1.1   msaitoh 		}
   1549    1.1   msaitoh #endif /* DEV_NETMAP */
   1550   1.28   msaitoh 
   1551   1.28   msaitoh 		rxbuf->flags = 0;
   1552   1.87   msaitoh 		rxbuf->buf = ixgbe_getcl();
   1553    1.1   msaitoh 		if (rxbuf->buf == NULL) {
   1554   1.95   msaitoh 			IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
   1555    1.1   msaitoh 			error = ENOBUFS;
   1556   1.28   msaitoh 			goto fail;
   1557    1.1   msaitoh 		}
   1558    1.1   msaitoh 		mp = rxbuf->buf;
   1559    1.1   msaitoh 		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1560  1.102   msaitoh 		IXGBE_M_ADJ(sc, rxr, mp);
   1561    1.1   msaitoh 		/* Get the memory mapping */
   1562   1.28   msaitoh 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap,
   1563   1.28   msaitoh 		    mp, BUS_DMA_NOWAIT);
   1564   1.75   msaitoh 		if (error != 0) {
   1565   1.75   msaitoh 			/*
   1566   1.75   msaitoh 			 * Clear this entry for later cleanup in
   1567   1.75   msaitoh 			 * ixgbe_discard() which is called via
   1568   1.75   msaitoh 			 * ixgbe_free_receive_ring().
   1569   1.75   msaitoh 			 */
   1570   1.75   msaitoh 			m_freem(mp);
   1571   1.75   msaitoh 			rxbuf->buf = NULL;
   1572   1.85   msaitoh 			goto fail;
   1573   1.75   msaitoh 		}
   1574    1.1   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1575   1.83   msaitoh 		    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   1576    1.1   msaitoh 		/* Update the descriptor and the cached value */
   1577  1.108   msaitoh 		rxr->rx_base[i].read.pkt_addr =
   1578    1.1   msaitoh 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1579    1.1   msaitoh 		rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1580    1.1   msaitoh 	}
   1581    1.1   msaitoh 
   1582    1.1   msaitoh 	/* Setup our descriptor indices */
   1583    1.1   msaitoh 	rxr->next_to_check = 0;
   1584  1.102   msaitoh 	rxr->next_to_refresh = sc->num_rx_desc - 1; /* Fully allocated */
   1585  1.115   msaitoh #ifdef LRO
   1586    1.1   msaitoh 	rxr->lro_enabled = FALSE;
   1587  1.115   msaitoh #endif
   1588   1.90   msaitoh 	rxr->discard_multidesc = false;
   1589   1.95   msaitoh 	IXGBE_EVC_STORE(&rxr->rx_copies, 0);
   1590   1.13   msaitoh #if 0 /* NetBSD */
   1591   1.95   msaitoh 	IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
   1592   1.13   msaitoh #if 1	/* Fix inconsistency */
   1593   1.95   msaitoh 	IXGBE_EVC_STORE(&rxr->rx_packets, 0);
   1594   1.13   msaitoh #endif
   1595   1.13   msaitoh #endif
   1596    1.1   msaitoh 	rxr->vtag_strip = FALSE;
   1597    1.1   msaitoh 
   1598    1.1   msaitoh 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1599    1.1   msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1600    1.1   msaitoh 
   1601    1.1   msaitoh 	/*
   1602   1.28   msaitoh 	 * Now set up the LRO interface
   1603   1.28   msaitoh 	 */
   1604  1.116   msaitoh #ifdef RSC
   1605    1.1   msaitoh 	if (ixgbe_rsc_enable)
   1606    1.1   msaitoh 		ixgbe_setup_hw_rsc(rxr);
   1607  1.116   msaitoh #endif
   1608    1.1   msaitoh #ifdef LRO
   1609  1.116   msaitoh #ifdef RSC
   1610  1.116   msaitoh 	else
   1611  1.116   msaitoh #endif
   1612  1.116   msaitoh 	if (ifp->if_capenable & IFCAP_LRO) {
   1613  1.102   msaitoh 		device_t dev = sc->dev;
   1614    1.1   msaitoh 		int err = tcp_lro_init(lro);
   1615    1.1   msaitoh 		if (err) {
   1616    1.1   msaitoh 			device_printf(dev, "LRO Initialization failed!\n");
   1617    1.1   msaitoh 			goto fail;
   1618    1.1   msaitoh 		}
   1619    1.1   msaitoh 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   1620    1.1   msaitoh 		rxr->lro_enabled = TRUE;
   1621  1.102   msaitoh 		lro->ifp = sc->ifp;
   1622    1.1   msaitoh 	}
   1623    1.1   msaitoh #endif /* LRO */
   1624    1.1   msaitoh 
   1625    1.1   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1626   1.28   msaitoh 
   1627    1.1   msaitoh 	return (0);
   1628    1.1   msaitoh 
   1629    1.1   msaitoh fail:
   1630    1.1   msaitoh 	ixgbe_free_receive_ring(rxr);
   1631    1.1   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1632   1.28   msaitoh 
   1633    1.1   msaitoh 	return (error);
   1634   1.28   msaitoh } /* ixgbe_setup_receive_ring */
   1635    1.1   msaitoh 
   1636   1.28   msaitoh /************************************************************************
   1637   1.28   msaitoh  * ixgbe_setup_receive_structures - Initialize all receive rings.
   1638   1.28   msaitoh  ************************************************************************/
   1639    1.1   msaitoh int
   1640  1.102   msaitoh ixgbe_setup_receive_structures(struct ixgbe_softc *sc)
   1641    1.1   msaitoh {
   1642  1.102   msaitoh 	struct rx_ring *rxr = sc->rx_rings;
   1643   1.28   msaitoh 	int            j;
   1644    1.1   msaitoh 
   1645   1.62   msaitoh 	INIT_DEBUGOUT("ixgbe_setup_receive_structures");
   1646  1.102   msaitoh 	for (j = 0; j < sc->num_queues; j++, rxr++)
   1647    1.1   msaitoh 		if (ixgbe_setup_receive_ring(rxr))
   1648    1.1   msaitoh 			goto fail;
   1649    1.1   msaitoh 
   1650    1.1   msaitoh 	return (0);
   1651    1.1   msaitoh fail:
   1652    1.1   msaitoh 	/*
    1653    1.1   msaitoh 	 * Free RX buffers allocated so far; we will only handle
    1654    1.1   msaitoh 	 * the rings that completed, since the failing case will have
    1655    1.1   msaitoh 	 * cleaned up for itself. 'j' failed, so it's the terminus.
   1656    1.1   msaitoh 	 */
   1657    1.1   msaitoh 	for (int i = 0; i < j; ++i) {
   1658  1.102   msaitoh 		rxr = &sc->rx_rings[i];
   1659   1.27   msaitoh 		IXGBE_RX_LOCK(rxr);
   1660    1.1   msaitoh 		ixgbe_free_receive_ring(rxr);
   1661   1.27   msaitoh 		IXGBE_RX_UNLOCK(rxr);
   1662    1.1   msaitoh 	}
   1663    1.1   msaitoh 
   1664    1.1   msaitoh 	return (ENOBUFS);
   1665   1.28   msaitoh } /* ixgbe_setup_receive_structures */
   1666    1.1   msaitoh 
   1667    1.3   msaitoh 
   1668   1.28   msaitoh /************************************************************************
   1669   1.28   msaitoh  * ixgbe_free_receive_structures - Free all receive rings.
   1670   1.28   msaitoh  ************************************************************************/
   1671    1.1   msaitoh void
   1672  1.102   msaitoh ixgbe_free_receive_structures(struct ixgbe_softc *sc)
   1673    1.1   msaitoh {
   1674  1.102   msaitoh 	struct rx_ring *rxr = sc->rx_rings;
   1675    1.1   msaitoh 
   1676    1.1   msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
   1677    1.1   msaitoh 
   1678  1.102   msaitoh 	for (int i = 0; i < sc->num_queues; i++, rxr++) {
   1679    1.1   msaitoh 		ixgbe_free_receive_buffers(rxr);
   1680    1.1   msaitoh #ifdef LRO
   1681    1.1   msaitoh 		/* Free LRO memory */
   1682   1.28   msaitoh 		tcp_lro_free(&rxr->lro);
   1683    1.1   msaitoh #endif /* LRO */
   1684    1.1   msaitoh 		/* Free the ring memory as well */
   1685  1.102   msaitoh 		ixgbe_dma_free(sc, &rxr->rxdma);
   1686    1.1   msaitoh 		IXGBE_RX_LOCK_DESTROY(rxr);
   1687    1.1   msaitoh 	}
   1688    1.1   msaitoh 
   1689  1.114   msaitoh 	kmem_free(sc->rx_rings, sizeof(struct rx_ring) * sc->num_queues);
   1690   1.28   msaitoh } /* ixgbe_free_receive_structures */
   1691    1.1   msaitoh 
   1692    1.1   msaitoh 
   1693   1.28   msaitoh /************************************************************************
   1694   1.28   msaitoh  * ixgbe_free_receive_buffers - Free receive ring data structures
   1695   1.28   msaitoh  ************************************************************************/
   1696    1.1   msaitoh static void
   1697    1.1   msaitoh ixgbe_free_receive_buffers(struct rx_ring *rxr)
   1698    1.1   msaitoh {
   1699  1.102   msaitoh 	struct ixgbe_softc  *sc = rxr->sc;
   1700   1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1701    1.1   msaitoh 
   1702    1.1   msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
   1703    1.1   msaitoh 
   1704    1.1   msaitoh 	/* Cleanup any existing buffers */
   1705    1.1   msaitoh 	if (rxr->rx_buffers != NULL) {
   1706  1.102   msaitoh 		for (int i = 0; i < sc->num_rx_desc; i++) {
   1707    1.1   msaitoh 			rxbuf = &rxr->rx_buffers[i];
   1708   1.27   msaitoh 			ixgbe_rx_discard(rxr, i);
   1709    1.1   msaitoh 			if (rxbuf->pmap != NULL) {
   1710    1.1   msaitoh 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   1711    1.1   msaitoh 				rxbuf->pmap = NULL;
   1712    1.1   msaitoh 			}
   1713    1.1   msaitoh 		}
   1714   1.59   msaitoh 
    1716  1.114   msaitoh 		kmem_free(rxr->rx_buffers,
    1717  1.114   msaitoh 		    sizeof(struct ixgbe_rx_buf) * rxr->num_desc);
    1718    1.1   msaitoh 		rxr->rx_buffers = NULL;
   1720    1.1   msaitoh 	}
   1721    1.1   msaitoh 
   1722    1.1   msaitoh 	if (rxr->ptag != NULL) {
   1723    1.1   msaitoh 		ixgbe_dma_tag_destroy(rxr->ptag);
   1724    1.1   msaitoh 		rxr->ptag = NULL;
   1725    1.1   msaitoh 	}
   1726    1.1   msaitoh 
   1727    1.1   msaitoh 	return;
   1728   1.28   msaitoh } /* ixgbe_free_receive_buffers */
   1729    1.1   msaitoh 
   1730   1.28   msaitoh /************************************************************************
   1731   1.28   msaitoh  * ixgbe_rx_input
   1732   1.28   msaitoh  ************************************************************************/
   1733    1.1   msaitoh static __inline void
   1734   1.28   msaitoh ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
   1735   1.28   msaitoh     u32 ptype)
   1736    1.1   msaitoh {
   1737  1.102   msaitoh 	struct ixgbe_softc *sc = ifp->if_softc;
   1738    1.1   msaitoh 
   1739    1.1   msaitoh #ifdef LRO
   1740  1.102   msaitoh 	struct ethercom *ec = &sc->osdep.ec;
   1741    1.1   msaitoh 
   1742   1.28   msaitoh 	/*
    1743   1.28   msaitoh 	 * At the moment, LRO is only for IP/TCP packets whose TCP checksum
    1744   1.28   msaitoh 	 * has been computed by hardware. Also, the packet must not have a
    1745   1.28   msaitoh 	 * VLAN tag in its Ethernet header.  In the case of IPv6 we do not
                          	 * yet support extension headers.
   1746   1.28   msaitoh 	 */
   1747    1.1   msaitoh         if (rxr->lro_enabled &&
   1748    1.1   msaitoh             (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
   1749    1.1   msaitoh             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   1750    1.1   msaitoh             ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   1751    1.1   msaitoh             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
   1752    1.1   msaitoh             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   1753    1.1   msaitoh             (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
   1754    1.1   msaitoh             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
   1755    1.1   msaitoh             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
   1756    1.1   msaitoh                 /*
   1757    1.1   msaitoh                  * Send to the stack if:
   1758  1.103   msaitoh                  *  - LRO not enabled, or
   1759  1.103   msaitoh                  *  - no LRO resources, or
   1760  1.103   msaitoh                  *  - lro enqueue fails
   1761    1.1   msaitoh                  */
   1762    1.1   msaitoh                 if (rxr->lro.lro_cnt != 0)
   1763    1.1   msaitoh                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
   1764    1.1   msaitoh                                 return;
   1765    1.1   msaitoh         }
   1766    1.1   msaitoh #endif /* LRO */
   1767    1.1   msaitoh 
   1768  1.102   msaitoh 	if_percpuq_enqueue(sc->ipq, m);
   1769   1.28   msaitoh } /* ixgbe_rx_input */
   1770    1.1   msaitoh 
   1771   1.28   msaitoh /************************************************************************
   1772   1.28   msaitoh  * ixgbe_rx_discard
   1773   1.28   msaitoh  ************************************************************************/
   1774    1.1   msaitoh static __inline void
   1775    1.1   msaitoh ixgbe_rx_discard(struct rx_ring *rxr, int i)
   1776    1.1   msaitoh {
   1777   1.28   msaitoh 	struct ixgbe_rx_buf *rbuf;
   1778    1.1   msaitoh 
   1779    1.1   msaitoh 	rbuf = &rxr->rx_buffers[i];
   1780    1.1   msaitoh 
   1781    1.1   msaitoh 	/*
   1782   1.70   msaitoh 	 * With advanced descriptors the writeback clobbers the buffer addrs,
    1783   1.70   msaitoh 	 * so it's easier to just free the existing mbufs and take the normal
   1784   1.70   msaitoh 	 * refresh path to get new buffers and mapping.
   1785   1.28   msaitoh 	 */
   1786    1.1   msaitoh 
   1787   1.26   msaitoh 	if (rbuf->fmp != NULL) {/* Partial chain ? */
   1788   1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1789   1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1790   1.72       rin 		ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
   1791    1.1   msaitoh 		m_freem(rbuf->fmp);
   1792    1.1   msaitoh 		rbuf->fmp = NULL;
   1793    1.1   msaitoh 		rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
   1794    1.1   msaitoh 	} else if (rbuf->buf) {
   1795   1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1796   1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1797   1.72       rin 		ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
   1798    1.1   msaitoh 		m_free(rbuf->buf);
   1799    1.1   msaitoh 		rbuf->buf = NULL;
   1800    1.1   msaitoh 	}
   1801    1.1   msaitoh 
   1802    1.1   msaitoh 	rbuf->flags = 0;
   1803    1.1   msaitoh 
   1804    1.1   msaitoh 	return;
   1805   1.28   msaitoh } /* ixgbe_rx_discard */
   1806    1.1   msaitoh 
   1807    1.1   msaitoh 
   1808   1.28   msaitoh /************************************************************************
   1809   1.28   msaitoh  * ixgbe_rxeof
   1810    1.1   msaitoh  *
    1811   1.28   msaitoh  *   Executes in interrupt context. It replenishes the
    1812   1.28   msaitoh  *   mbufs in the descriptor ring and sends data which has
    1813   1.28   msaitoh  *   been DMA'ed into host memory to the upper layer.
   1814    1.1   msaitoh  *
   1815   1.28   msaitoh  *   Return TRUE for more work, FALSE for all clean.
   1816   1.28   msaitoh  ************************************************************************/
   1817    1.1   msaitoh bool
   1818    1.1   msaitoh ixgbe_rxeof(struct ix_queue *que)
   1819    1.1   msaitoh {
   1820  1.102   msaitoh 	struct ixgbe_softc	*sc = que->sc;
   1821    1.1   msaitoh 	struct rx_ring		*rxr = que->rxr;
   1822  1.102   msaitoh 	struct ifnet		*ifp = sc->ifp;
   1823    1.1   msaitoh #ifdef LRO
   1824    1.1   msaitoh 	struct lro_ctrl		*lro = &rxr->lro;
   1825    1.1   msaitoh #endif /* LRO */
   1826   1.28   msaitoh 	union ixgbe_adv_rx_desc	*cur;
   1827   1.28   msaitoh 	struct ixgbe_rx_buf	*rbuf, *nbuf;
   1828    1.1   msaitoh 	int			i, nextp, processed = 0;
   1829    1.1   msaitoh 	u32			staterr = 0;
   1830   1.94   msaitoh 	u32			loopcount = 0, numdesc;
   1831  1.102   msaitoh 	u32			limit = sc->rx_process_limit;
   1832  1.102   msaitoh 	u32			rx_copy_len = sc->rx_copy_len;
   1833   1.90   msaitoh 	bool			discard_multidesc = rxr->discard_multidesc;
   1834   1.94   msaitoh 	bool			wraparound = false;
   1835   1.94   msaitoh 	unsigned int		syncremain;
   1836    1.1   msaitoh #ifdef RSS
   1837    1.1   msaitoh 	u16			pkt_info;
   1838    1.1   msaitoh #endif
   1839    1.1   msaitoh 
   1840    1.1   msaitoh 	IXGBE_RX_LOCK(rxr);
   1841    1.1   msaitoh 
   1842    1.1   msaitoh #ifdef DEV_NETMAP
   1843  1.102   msaitoh 	if (sc->feat_en & IXGBE_FEATURE_NETMAP) {
   1844   1.28   msaitoh 		/* Same as the txeof routine: wakeup clients on intr. */
   1845   1.28   msaitoh 		if (netmap_rx_irq(ifp, rxr->me, &processed)) {
   1846   1.28   msaitoh 			IXGBE_RX_UNLOCK(rxr);
   1847   1.28   msaitoh 			return (FALSE);
   1848   1.28   msaitoh 		}
   1849    1.1   msaitoh 	}
   1850    1.1   msaitoh #endif /* DEV_NETMAP */
   1851    1.1   msaitoh 
    1852   1.94   msaitoh 	/*
                          	 * Sync the ring. The size is rx_process_limit or, on
                          	 * wraparound, just the first part up to the end of the ring.
                          	 */
   1853   1.94   msaitoh 	if ((rxr->next_to_check + limit) <= rxr->num_desc) {
   1854   1.94   msaitoh 		/* Non-wraparound */
   1855   1.94   msaitoh 		numdesc = limit;
   1856   1.94   msaitoh 		syncremain = 0;
   1857   1.94   msaitoh 	} else {
   1858   1.94   msaitoh 		/* Wraparound. Sync the first half. */
   1859   1.94   msaitoh 		numdesc = rxr->num_desc - rxr->next_to_check;
   1860   1.94   msaitoh 
   1861   1.94   msaitoh 		/* Set the size of the last half */
   1862   1.94   msaitoh 		syncremain = limit - numdesc;
   1863   1.94   msaitoh 	}
   1864   1.94   msaitoh 	bus_dmamap_sync(rxr->rxdma.dma_tag->dt_dmat,
   1865   1.94   msaitoh 	    rxr->rxdma.dma_map,
   1866   1.94   msaitoh 	    sizeof(union ixgbe_adv_rx_desc) * rxr->next_to_check,
   1867   1.94   msaitoh 	    sizeof(union ixgbe_adv_rx_desc) * numdesc,
   1868   1.94   msaitoh 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
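                          	/*
                          	 * Example of the split above: with num_desc = 1024,
                          	 * next_to_check = 1000 and limit = 64, the sync just issued
                          	 * covers descriptors 1000..1023 (numdesc = 24) and leaves
                          	 * syncremain = 40 for the wraparound branch inside the loop.
                          	 */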
   1869   1.94   msaitoh 
   1870   1.65   msaitoh 	/*
    1871   1.65   msaitoh 	 * The max number of loop iterations is rx_process_limit. If
    1872   1.65   msaitoh 	 * discard_multidesc is true, continue processing so that a broken
    1873   1.65   msaitoh 	 * packet is not sent to the upper layer.
   1874   1.65   msaitoh 	 */
   1875   1.65   msaitoh 	for (i = rxr->next_to_check;
   1876   1.89   msaitoh 	     (loopcount < limit) || (discard_multidesc == true);) {
   1877   1.65   msaitoh 
   1878   1.28   msaitoh 		struct mbuf *sendmp, *mp;
   1879   1.64  knakahar 		struct mbuf *newmp;
   1880  1.116   msaitoh #ifdef RSC
   1881  1.116   msaitoh 		u32         rsc;
   1882  1.116   msaitoh #endif
   1883  1.116   msaitoh 		u32         ptype;
   1884   1.28   msaitoh 		u16         len;
   1885   1.28   msaitoh 		u16         vtag = 0;
   1886   1.28   msaitoh 		bool        eop;
   1887   1.93   msaitoh 		bool        discard = false;
   1888   1.53   msaitoh 
   1889   1.94   msaitoh 		if (wraparound) {
   1890   1.94   msaitoh 			/* Sync the last half. */
   1891   1.94   msaitoh 			KASSERT(syncremain != 0);
   1892   1.94   msaitoh 			numdesc = syncremain;
   1893   1.94   msaitoh 			wraparound = false;
   1894   1.94   msaitoh 		} else if (__predict_false(loopcount >= limit)) {
   1895   1.94   msaitoh 			KASSERT(discard_multidesc == true);
   1896   1.94   msaitoh 			numdesc = 1;
   1897   1.94   msaitoh 		} else
   1898   1.94   msaitoh 			numdesc = 0;
   1899   1.94   msaitoh 
   1900   1.94   msaitoh 		if (numdesc != 0)
   1901   1.94   msaitoh 			bus_dmamap_sync(rxr->rxdma.dma_tag->dt_dmat,
   1902   1.94   msaitoh 			    rxr->rxdma.dma_map, 0,
   1903   1.94   msaitoh 			    sizeof(union ixgbe_adv_rx_desc) * numdesc,
   1904   1.94   msaitoh 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1905    1.1   msaitoh 
   1906    1.1   msaitoh 		cur = &rxr->rx_base[i];
   1907    1.1   msaitoh 		staterr = le32toh(cur->wb.upper.status_error);
   1908    1.1   msaitoh #ifdef RSS
   1909    1.1   msaitoh 		pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
   1910    1.1   msaitoh #endif
   1911    1.1   msaitoh 
   1912    1.1   msaitoh 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   1913    1.1   msaitoh 			break;
   1914    1.1   msaitoh 
   1915   1.89   msaitoh 		loopcount++;
   1916   1.93   msaitoh 		sendmp = newmp = NULL;
   1917    1.1   msaitoh 		nbuf = NULL;
   1918  1.116   msaitoh #ifdef RSC
   1919    1.1   msaitoh 		rsc = 0;
   1920  1.116   msaitoh #endif
   1921    1.1   msaitoh 		cur->wb.upper.status_error = 0;
   1922    1.1   msaitoh 		rbuf = &rxr->rx_buffers[i];
   1923    1.1   msaitoh 		mp = rbuf->buf;
   1924    1.1   msaitoh 
   1925    1.1   msaitoh 		len = le16toh(cur->wb.upper.length);
   1926    1.1   msaitoh 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   1927    1.1   msaitoh 		    IXGBE_RXDADV_PKTTYPE_MASK;
   1928    1.1   msaitoh 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   1929    1.1   msaitoh 
   1930    1.1   msaitoh 		/* Make sure bad packets are discarded */
   1931    1.1   msaitoh 		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
   1932    1.3   msaitoh #if __FreeBSD_version >= 1100036
   1933  1.102   msaitoh 			if (sc->feat_en & IXGBE_FEATURE_VF)
   1934    1.4   msaitoh 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
   1935    1.3   msaitoh #endif
   1936   1.95   msaitoh 			IXGBE_EVC_ADD(&rxr->rx_discarded, 1);
   1937    1.1   msaitoh 			ixgbe_rx_discard(rxr, i);
   1938   1.65   msaitoh 			discard_multidesc = false;
   1939    1.1   msaitoh 			goto next_desc;
   1940    1.1   msaitoh 		}
   1941    1.1   msaitoh 
   1942   1.93   msaitoh 		if (__predict_false(discard_multidesc))
   1943   1.93   msaitoh 			discard = true;
   1944   1.93   msaitoh 		else {
   1945   1.93   msaitoh 			/* Pre-alloc new mbuf. */
   1946   1.93   msaitoh 
   1947   1.93   msaitoh 			if ((rbuf->fmp == NULL) &&
   1948   1.97   msaitoh 			    eop && (len <= rx_copy_len)) {
   1949   1.93   msaitoh 				/* For short packet. See below. */
   1950   1.93   msaitoh 				sendmp = m_gethdr(M_NOWAIT, MT_DATA);
   1951   1.93   msaitoh 				if (__predict_false(sendmp == NULL)) {
   1952   1.95   msaitoh 					IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
   1953   1.93   msaitoh 					discard = true;
   1954   1.93   msaitoh 				}
   1955   1.93   msaitoh 			} else {
   1956   1.93   msaitoh 				/* For long packet. */
   1957   1.93   msaitoh 				newmp = ixgbe_getcl();
   1958   1.93   msaitoh 				if (__predict_false(newmp == NULL)) {
   1959   1.95   msaitoh 					IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
   1960   1.93   msaitoh 					discard = true;
   1961   1.93   msaitoh 				}
   1962   1.93   msaitoh 			}
   1963   1.93   msaitoh 		}
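                          		/*
                          		 * In short: a frame that ends in this descriptor and
                          		 * fits rx_copy_len (say, a bare TCP ACK) gets a small
                          		 * header mbuf so its cluster can be recycled in place;
                          		 * anything longer gets a replacement cluster allocated
                          		 * up front.
                          		 */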
   1964   1.92   msaitoh 
   1965   1.93   msaitoh 		if (__predict_false(discard)) {
   1966   1.65   msaitoh 			/*
   1967   1.65   msaitoh 			 * Descriptor initialization is already done by the
   1968   1.65   msaitoh 			 * above code (cur->wb.upper.status_error = 0).
   1969   1.65   msaitoh 			 * So, we can reuse current rbuf->buf for new packet.
   1970   1.65   msaitoh 			 *
   1971   1.65   msaitoh 			 * Rewrite the buffer addr, see comment in
   1972   1.65   msaitoh 			 * ixgbe_rx_discard().
   1973   1.65   msaitoh 			 */
   1974   1.65   msaitoh 			cur->read.pkt_addr = rbuf->addr;
   1975   1.65   msaitoh 			m_freem(rbuf->fmp);
   1976   1.65   msaitoh 			rbuf->fmp = NULL;
   1977   1.65   msaitoh 			if (!eop) {
   1978   1.65   msaitoh 				/* Discard the entire packet. */
   1979   1.65   msaitoh 				discard_multidesc = true;
   1980   1.65   msaitoh 			} else
   1981   1.65   msaitoh 				discard_multidesc = false;
   1982   1.64  knakahar 			goto next_desc;
   1983   1.64  knakahar 		}
   1984   1.65   msaitoh 		discard_multidesc = false;
   1985   1.64  knakahar 
   1986   1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1987   1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1988   1.27   msaitoh 
    1989   1.28   msaitoh 		 * On the 82599, which supports a hardware
    1990   1.28   msaitoh 		 * LRO (called HW RSC), packets need not be
    1991   1.28   msaitoh 		 * fragmented across sequential descriptors;
    1992   1.28   msaitoh 		 * rather, the next descriptor is indicated
    1993   1.28   msaitoh 		 * in bits of the current descriptor. This
    1994   1.28   msaitoh 		 * also means that we might process more
    1995   1.99    andvar 		 * than one packet at a time, something that
    1996   1.28   msaitoh 		 * has never been true before; it required
    1997   1.28   msaitoh 		 * eliminating global chain pointers in favor
    1998   1.28   msaitoh 		 * of what we are doing here.  -jfv
   2000   1.28   msaitoh 		 */
   2001    1.1   msaitoh 		if (!eop) {
   2002    1.1   msaitoh 			/*
   2003   1.28   msaitoh 			 * Figure out the next descriptor
   2004   1.28   msaitoh 			 * of this frame.
   2005   1.28   msaitoh 			 */
   2006  1.116   msaitoh #ifdef RSC
   2007    1.1   msaitoh 			if (rxr->hw_rsc == TRUE) {
   2008    1.1   msaitoh 				rsc = ixgbe_rsc_count(cur);
   2009    1.1   msaitoh 				rxr->rsc_num += (rsc - 1);
   2010    1.1   msaitoh 			}
   2011    1.1   msaitoh 			if (rsc) { /* Get hardware index */
   2012   1.28   msaitoh 				nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
   2013    1.1   msaitoh 				    IXGBE_RXDADV_NEXTP_SHIFT);
   2014  1.116   msaitoh 			} else
   2015  1.116   msaitoh #endif
   2016  1.116   msaitoh 			{ /* Just sequential */
   2017    1.1   msaitoh 				nextp = i + 1;
   2018  1.102   msaitoh 				if (nextp == sc->num_rx_desc)
   2019    1.1   msaitoh 					nextp = 0;
   2020    1.1   msaitoh 			}
   2021    1.1   msaitoh 			nbuf = &rxr->rx_buffers[nextp];
   2022    1.1   msaitoh 			prefetch(nbuf);
   2023    1.1   msaitoh 		}
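                          		/*
                          		 * Example of the RSC NEXTP extraction above (a sketch;
                          		 * the mask/shift assumed are the usual 0x000ffff0/4):
                          		 * staterr = 0x00000351 chains to descriptor
                          		 * (0x351 & 0xffff0) >> 4 = 53.
                          		 */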
   2024    1.1   msaitoh 		/*
   2025   1.28   msaitoh 		 * Rather than using the fmp/lmp global pointers
   2026   1.28   msaitoh 		 * we now keep the head of a packet chain in the
   2027   1.28   msaitoh 		 * buffer struct and pass this along from one
   2028   1.28   msaitoh 		 * descriptor to the next, until we get EOP.
   2029   1.28   msaitoh 		 */
   2030    1.1   msaitoh 		/*
    2031   1.28   msaitoh 		 * See if there is a stored head
    2032   1.28   msaitoh 		 * that determines what we are dealing with.
   2033   1.28   msaitoh 		 */
   2034   1.93   msaitoh 		if (rbuf->fmp != NULL) {
   2035   1.93   msaitoh 			/* Secondary frag */
   2036   1.93   msaitoh 			sendmp = rbuf->fmp;
   2037   1.93   msaitoh 
   2038   1.86   msaitoh 			/* Update new (used in future) mbuf */
   2039   1.86   msaitoh 			newmp->m_pkthdr.len = newmp->m_len = rxr->mbuf_sz;
   2040  1.102   msaitoh 			IXGBE_M_ADJ(sc, rxr, newmp);
   2041   1.64  knakahar 			rbuf->buf = newmp;
   2042   1.64  knakahar 			rbuf->fmp = NULL;
   2043   1.86   msaitoh 
   2044   1.86   msaitoh 			/* For secondary frag */
   2045   1.74   msaitoh 			mp->m_len = len;
   2046    1.1   msaitoh 			mp->m_flags &= ~M_PKTHDR;
   2047   1.86   msaitoh 
   2048   1.86   msaitoh 			/* For sendmp */
   2049    1.1   msaitoh 			sendmp->m_pkthdr.len += mp->m_len;
   2050    1.1   msaitoh 		} else {
   2051    1.1   msaitoh 			/*
    2052   1.86   msaitoh 			 * It's the first segment of a multi-descriptor
   2053   1.86   msaitoh 			 * packet or a single segment which contains a full
   2054   1.86   msaitoh 			 * packet.
   2055   1.86   msaitoh 			 */
   2056   1.86   msaitoh 
   2057   1.97   msaitoh 			if (eop && (len <= rx_copy_len)) {
   2058   1.93   msaitoh 				/*
    2059   1.93   msaitoh 				 * Optimize.  This might be a small packet,
    2060   1.93   msaitoh 				 * maybe just a TCP ACK.  Copy it into a
    2061   1.93   msaitoh 				 * new mbuf and leave the old mbuf+cluster
                          				 * for re-use.
   2062   1.93   msaitoh 				 */
   2063   1.93   msaitoh 				sendmp->m_data += ETHER_ALIGN;
   2064   1.93   msaitoh 				memcpy(mtod(sendmp, void *),
   2065   1.93   msaitoh 				    mtod(mp, void *), len);
   2066   1.95   msaitoh 				IXGBE_EVC_ADD(&rxr->rx_copies, 1);
   2067   1.93   msaitoh 				rbuf->flags |= IXGBE_RX_COPY;
   2068   1.93   msaitoh 			} else {
   2069   1.96   msaitoh 				/* For long packet */
   2070   1.86   msaitoh 
   2071   1.86   msaitoh 				/* Update new (used in future) mbuf */
   2072   1.86   msaitoh 				newmp->m_pkthdr.len = newmp->m_len
   2073   1.86   msaitoh 				    = rxr->mbuf_sz;
   2074  1.102   msaitoh 				IXGBE_M_ADJ(sc, rxr, newmp);
   2075   1.64  knakahar 				rbuf->buf = newmp;
   2076   1.64  knakahar 				rbuf->fmp = NULL;
   2077   1.86   msaitoh 
   2078   1.86   msaitoh 				/* For sendmp */
   2079    1.1   msaitoh 				sendmp = mp;
   2080    1.1   msaitoh 			}
   2081    1.1   msaitoh 
   2082    1.1   msaitoh 			/* first desc of a non-ps chain */
   2083   1.86   msaitoh 			sendmp->m_pkthdr.len = sendmp->m_len = len;
   2084    1.1   msaitoh 		}
   2085    1.1   msaitoh 		++processed;
   2086    1.1   msaitoh 
   2087    1.1   msaitoh 		/* Pass the head pointer on */
   2088    1.1   msaitoh 		if (eop == 0) {
   2089    1.1   msaitoh 			nbuf->fmp = sendmp;
   2090    1.1   msaitoh 			sendmp = NULL;
   2091    1.1   msaitoh 			mp->m_next = nbuf->buf;
   2092    1.1   msaitoh 		} else { /* Sending this frame */
   2093    1.1   msaitoh 			m_set_rcvif(sendmp, ifp);
   2094   1.31   msaitoh 			++rxr->packets;
   2095   1.95   msaitoh 			IXGBE_EVC_ADD(&rxr->rx_packets, 1);
   2096    1.1   msaitoh 			/* capture data for AIM */
   2097    1.1   msaitoh 			rxr->bytes += sendmp->m_pkthdr.len;
   2098   1.95   msaitoh 			IXGBE_EVC_ADD(&rxr->rx_bytes, sendmp->m_pkthdr.len);
   2099    1.1   msaitoh 			/* Process vlan info */
   2100   1.28   msaitoh 			if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
   2101    1.1   msaitoh 				vtag = le16toh(cur->wb.upper.vlan);
   2102    1.1   msaitoh 			if (vtag) {
   2103   1.29  knakahar 				vlan_set_tag(sendmp, vtag);
   2104    1.1   msaitoh 			}
   2105    1.1   msaitoh 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   2106    1.1   msaitoh 				ixgbe_rx_checksum(staterr, sendmp, ptype,
   2107  1.102   msaitoh 				   &sc->stats.pf);
   2108    1.1   msaitoh 			}
   2109    1.8   msaitoh 
   2110    1.6   msaitoh #if 0 /* FreeBSD */
   2111   1.28   msaitoh 			/*
   2112   1.28   msaitoh 			 * In case of multiqueue, we have RXCSUM.PCSD bit set
   2113   1.28   msaitoh 			 * and never cleared. This means we have RSS hash
   2114   1.28   msaitoh 			 * available to be used.
   2115   1.28   msaitoh 			 */
   2116  1.102   msaitoh 			if (sc->num_queues > 1) {
   2117   1.28   msaitoh 				sendmp->m_pkthdr.flowid =
   2118   1.28   msaitoh 				    le32toh(cur->wb.lower.hi_dword.rss);
   2119   1.44   msaitoh 				switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
   2120   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV4:
   2121   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2122   1.28   msaitoh 					    M_HASHTYPE_RSS_IPV4);
   2123   1.28   msaitoh 					break;
   2124   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
   2125   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2126   1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV4);
   2127   1.28   msaitoh 					break;
   2128   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6:
   2129   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2130   1.28   msaitoh 					    M_HASHTYPE_RSS_IPV6);
   2131   1.28   msaitoh 					break;
   2132   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
   2133   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2134   1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV6);
   2135   1.28   msaitoh 					break;
   2136   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
   2137   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2138   1.28   msaitoh 					    M_HASHTYPE_RSS_IPV6_EX);
   2139   1.28   msaitoh 					break;
   2140   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
   2141   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2142   1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV6_EX);
   2143   1.28   msaitoh 					break;
   2144    1.6   msaitoh #if __FreeBSD_version > 1100000
   2145   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
   2146   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2147   1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV4);
   2148   1.28   msaitoh 					break;
   2149   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
   2150   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2151   1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV6);
   2152   1.28   msaitoh 					break;
   2153   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
   2154   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2155   1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV6_EX);
   2156   1.28   msaitoh 					break;
   2157   1.28   msaitoh #endif
   2158   1.44   msaitoh 				default:
   2159   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2160   1.28   msaitoh 					    M_HASHTYPE_OPAQUE_HASH);
   2161   1.28   msaitoh 				}
   2162   1.28   msaitoh 			} else {
   2163   1.28   msaitoh 				sendmp->m_pkthdr.flowid = que->msix;
   2164    1.1   msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
   2165    1.1   msaitoh 			}
   2166    1.8   msaitoh #endif
   2167    1.1   msaitoh 		}
   2168    1.1   msaitoh next_desc:
   2169    1.1   msaitoh 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   2170    1.1   msaitoh 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2171    1.1   msaitoh 
   2172    1.1   msaitoh 		/* Advance our pointers to the next descriptor. */
   2173   1.94   msaitoh 		if (++i == rxr->num_desc) {
   2174   1.94   msaitoh 			wraparound = true;
   2175    1.1   msaitoh 			i = 0;
   2176   1.94   msaitoh 		}
   2177   1.82   msaitoh 		rxr->next_to_check = i;
   2178    1.1   msaitoh 
   2179    1.1   msaitoh 		/* Now send to the stack or do LRO */
   2180   1.85   msaitoh 		if (sendmp != NULL)
   2181    1.1   msaitoh 			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
   2182    1.1   msaitoh 
    2183   1.28   msaitoh 		/* Refresh mbufs every 8 descriptors */
   2184    1.1   msaitoh 		if (processed == 8) {
   2185    1.1   msaitoh 			ixgbe_refresh_mbufs(rxr, i);
   2186    1.1   msaitoh 			processed = 0;
   2187    1.1   msaitoh 		}
   2188    1.1   msaitoh 	}
   2189    1.1   msaitoh 
   2190   1.90   msaitoh 	/* Save the current status */
   2191   1.90   msaitoh 	rxr->discard_multidesc = discard_multidesc;
   2192   1.90   msaitoh 
   2193    1.1   msaitoh 	/* Refresh any remaining buf structs */
   2194    1.1   msaitoh 	if (ixgbe_rx_unrefreshed(rxr))
   2195    1.1   msaitoh 		ixgbe_refresh_mbufs(rxr, i);
   2196    1.1   msaitoh 
   2197   1.28   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   2198   1.28   msaitoh 
   2199    1.1   msaitoh #ifdef LRO
   2200    1.1   msaitoh 	/*
   2201    1.1   msaitoh 	 * Flush any outstanding LRO work
   2202    1.1   msaitoh 	 */
   2203   1.10   msaitoh 	tcp_lro_flush_all(lro);
   2204    1.1   msaitoh #endif /* LRO */
   2205    1.1   msaitoh 
   2206    1.1   msaitoh 	/*
   2207   1.28   msaitoh 	 * Still have cleaning to do?
   2208   1.28   msaitoh 	 */
   2209    1.1   msaitoh 	if ((staterr & IXGBE_RXD_STAT_DD) != 0)
   2210   1.28   msaitoh 		return (TRUE);
   2211   1.28   msaitoh 
   2212   1.28   msaitoh 	return (FALSE);
   2213   1.28   msaitoh } /* ixgbe_rxeof */
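
                          /*
                           * Illustrative sketch, not part of the driver: one way a caller
                           * could consume ixgbe_rxeof()'s return value, which stays TRUE
                           * while completed (DD) descriptors remain.  This assumes the
                           * static bool ixgbe_rxeof(struct ix_queue *) signature used in
                           * this file; the function name below is hypothetical.
                           */
                          #if 0	/* example only */
                          static void
                          example_rxeof_loop(struct ix_queue *que)
                          {
                          	/* Keep cleaning until the ring is drained. */
                          	while (ixgbe_rxeof(que))
                          		continue;
                          }
                          #endif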
   2214    1.1   msaitoh 
   2215    1.1   msaitoh 
   2216   1.28   msaitoh /************************************************************************
   2217   1.28   msaitoh  * ixgbe_rx_checksum
   2218    1.1   msaitoh  *
   2219   1.28   msaitoh  *   Verify that the hardware indicated that the checksum is valid.
    2220   1.28   msaitoh  *   Inform the stack about the status of the checksum so that the
    2221   1.28   msaitoh  *   stack doesn't spend time verifying it.
   2222   1.28   msaitoh  ************************************************************************/
   2223    1.1   msaitoh static void
    2224    1.1   msaitoh ixgbe_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype,
   2225    1.1   msaitoh     struct ixgbe_hw_stats *stats)
   2226    1.1   msaitoh {
   2227   1.28   msaitoh 	u16  status = (u16)staterr;
   2228   1.28   msaitoh 	u8   errors = (u8)(staterr >> 24);
   2229    1.1   msaitoh #if 0
   2230   1.28   msaitoh 	bool sctp = false;
   2231    1.1   msaitoh 
   2232    1.1   msaitoh 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   2233    1.1   msaitoh 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   2234    1.8   msaitoh 		sctp = true;
   2235    1.1   msaitoh #endif
   2236    1.1   msaitoh 
   2237    1.8   msaitoh 	/* IPv4 checksum */
   2238    1.1   msaitoh 	if (status & IXGBE_RXD_STAT_IPCS) {
   2239   1.95   msaitoh 		IXGBE_EVC_ADD(&stats->ipcs, 1);
   2240    1.1   msaitoh 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   2241    1.1   msaitoh 			/* IP Checksum Good */
   2242    1.1   msaitoh 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
   2243    1.1   msaitoh 		} else {
   2244   1.95   msaitoh 			IXGBE_EVC_ADD(&stats->ipcs_bad, 1);
   2245    1.1   msaitoh 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   2246    1.1   msaitoh 		}
   2247    1.1   msaitoh 	}
   2248    1.8   msaitoh 	/* TCP/UDP/SCTP checksum */
   2249    1.1   msaitoh 	if (status & IXGBE_RXD_STAT_L4CS) {
    2250    1.1   msaitoh 		int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
    2251   1.95   msaitoh 		IXGBE_EVC_ADD(&stats->l4cs, 1);
   2252    1.1   msaitoh 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   2253    1.1   msaitoh 			mp->m_pkthdr.csum_flags |= type;
   2254    1.1   msaitoh 		} else {
   2255   1.95   msaitoh 			IXGBE_EVC_ADD(&stats->l4cs_bad, 1);
   2256    1.1   msaitoh 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   2257    1.1   msaitoh 		}
   2258    1.1   msaitoh 	}
   2259   1.28   msaitoh } /* ixgbe_rx_checksum */
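
                          /*
                           * Illustrative sketch, not part of the driver: how upper-layer
                           * code typically consumes the csum_flags set above.  The *_BAD
                           * bits mark frames whose hardware-verified checksum failed, so
                           * the stack can drop them without recomputing the sum.
                           */
                          #if 0	/* example only */
                          	if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
                          		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4_BAD) {
                          			/* Hardware flagged a bad IP header checksum. */
                          			m_freem(m);
                          			return;
                          		}
                          		/* Good checksum: skip the software in_cksum() pass. */
                          	}
                          #endif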
   2260    1.1   msaitoh 
   2261   1.28   msaitoh /************************************************************************
   2262   1.28   msaitoh  * ixgbe_dma_malloc
   2263   1.28   msaitoh  ************************************************************************/
   2264    1.1   msaitoh int
   2265  1.102   msaitoh ixgbe_dma_malloc(struct ixgbe_softc *sc, const bus_size_t size,
   2266    1.1   msaitoh 		struct ixgbe_dma_alloc *dma, const int mapflags)
   2267    1.1   msaitoh {
   2268  1.102   msaitoh 	device_t dev = sc->dev;
   2269   1.28   msaitoh 	int      r, rsegs;
   2270    1.1   msaitoh 
   2271   1.28   msaitoh 	r = ixgbe_dma_tag_create(
   2272  1.102   msaitoh 	     /*      parent */ sc->osdep.dmat,
   2273   1.28   msaitoh 	     /*   alignment */ DBA_ALIGN,
   2274   1.28   msaitoh 	     /*      bounds */ 0,
   2275   1.28   msaitoh 	     /*     maxsize */ size,
   2276   1.28   msaitoh 	     /*   nsegments */ 1,
   2277   1.28   msaitoh 	     /*  maxsegsize */ size,
   2278   1.28   msaitoh 	     /*       flags */ BUS_DMA_ALLOCNOW,
   2279    1.1   msaitoh 			       &dma->dma_tag);
   2280    1.1   msaitoh 	if (r != 0) {
   2281    1.1   msaitoh 		aprint_error_dev(dev,
   2282   1.44   msaitoh 		    "%s: ixgbe_dma_tag_create failed; error %d\n", __func__,
   2283   1.44   msaitoh 		    r);
   2284    1.1   msaitoh 		goto fail_0;
   2285    1.1   msaitoh 	}
   2286    1.1   msaitoh 
   2287   1.28   msaitoh 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size,
   2288   1.28   msaitoh 	    dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary,
   2289   1.28   msaitoh 	    &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2290    1.1   msaitoh 	if (r != 0) {
   2291    1.1   msaitoh 		aprint_error_dev(dev,
   2292    1.1   msaitoh 		    "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
   2293    1.1   msaitoh 		goto fail_1;
   2294    1.1   msaitoh 	}
   2295    1.1   msaitoh 
   2296    1.1   msaitoh 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2297   1.76       ryo 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   2298    1.1   msaitoh 	if (r != 0) {
   2299    1.1   msaitoh 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2300    1.1   msaitoh 		    __func__, r);
   2301    1.1   msaitoh 		goto fail_2;
   2302    1.1   msaitoh 	}
   2303    1.1   msaitoh 
   2304    1.1   msaitoh 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2305    1.1   msaitoh 	if (r != 0) {
    2306    1.1   msaitoh 		aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n",
   2307    1.1   msaitoh 		    __func__, r);
   2308    1.1   msaitoh 		goto fail_3;
   2309    1.1   msaitoh 	}
   2310    1.1   msaitoh 
   2311   1.28   msaitoh 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map,
   2312   1.28   msaitoh 	    dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT);
   2313    1.1   msaitoh 	if (r != 0) {
   2314    1.1   msaitoh 		aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
   2315    1.1   msaitoh 		    __func__, r);
   2316    1.1   msaitoh 		goto fail_4;
   2317    1.1   msaitoh 	}
   2318    1.1   msaitoh 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2319    1.1   msaitoh 	dma->dma_size = size;
   2320    1.1   msaitoh 	return 0;
   2321    1.1   msaitoh fail_4:
   2322    1.1   msaitoh 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2323    1.1   msaitoh fail_3:
   2324    1.1   msaitoh 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2325    1.1   msaitoh fail_2:
   2326    1.1   msaitoh 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2327    1.1   msaitoh fail_1:
   2328    1.1   msaitoh 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2329    1.1   msaitoh fail_0:
   2330    1.1   msaitoh 
   2331   1.28   msaitoh 	return (r);
   2332   1.28   msaitoh } /* ixgbe_dma_malloc */
   2333   1.28   msaitoh 
   2334   1.28   msaitoh /************************************************************************
   2335   1.28   msaitoh  * ixgbe_dma_free
   2336   1.28   msaitoh  ************************************************************************/
   2337    1.3   msaitoh void
   2338  1.102   msaitoh ixgbe_dma_free(struct ixgbe_softc *sc, struct ixgbe_dma_alloc *dma)
   2339    1.1   msaitoh {
   2340    1.1   msaitoh 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2341    1.1   msaitoh 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2342    1.1   msaitoh 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2343   1.98    bouyer 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, dma->dma_size);
   2344    1.1   msaitoh 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2345    1.1   msaitoh 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2346   1.28   msaitoh } /* ixgbe_dma_free */
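
                          /*
                           * Illustrative sketch, not part of the driver: the usual pairing
                           * of ixgbe_dma_malloc() and ixgbe_dma_free() for a descriptor
                           * area.  The 4096-byte size is an arbitrary example value.
                           */
                          #if 0	/* example only */
                          	struct ixgbe_dma_alloc dma;
                          
                          	if (ixgbe_dma_malloc(sc, 4096, &dma, BUS_DMA_NOWAIT) == 0) {
                          		/*
                          		 * dma.dma_vaddr is the CPU mapping; dma.dma_paddr is
                          		 * the bus address to program into the device.
                          		 */
                          		ixgbe_dma_free(sc, &dma);
                          	}
                          #endif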
   2347    1.1   msaitoh 
   2348    1.1   msaitoh 
   2349   1.28   msaitoh /************************************************************************
   2350   1.28   msaitoh  * ixgbe_allocate_queues
   2351    1.1   msaitoh  *
   2352   1.28   msaitoh  *   Allocate memory for the transmit and receive rings, and then
    2353   1.28   msaitoh  *   the descriptors associated with each.  Called only once at attach.
   2354   1.28   msaitoh  ************************************************************************/
   2355    1.1   msaitoh int
   2356  1.102   msaitoh ixgbe_allocate_queues(struct ixgbe_softc *sc)
   2357    1.1   msaitoh {
   2358  1.102   msaitoh 	device_t	dev = sc->dev;
   2359    1.1   msaitoh 	struct ix_queue	*que;
   2360    1.1   msaitoh 	struct tx_ring	*txr;
   2361    1.1   msaitoh 	struct rx_ring	*rxr;
   2362   1.28   msaitoh 	int             rsize, tsize, error = IXGBE_SUCCESS;
   2363   1.28   msaitoh 	int             txconf = 0, rxconf = 0;
   2364    1.1   msaitoh 
   2365   1.28   msaitoh 	/* First, allocate the top level queue structs */
   2366  1.114   msaitoh 	sc->queues = kmem_zalloc(sizeof(struct ix_queue) * sc->num_queues,
   2367  1.114   msaitoh 	    KM_SLEEP);
   2368    1.1   msaitoh 
   2369   1.28   msaitoh 	/* Second, allocate the TX ring struct memory */
   2370  1.114   msaitoh 	sc->tx_rings = kmem_zalloc(sizeof(struct tx_ring) * sc->num_queues,
   2371  1.114   msaitoh 	    KM_SLEEP);
   2372    1.1   msaitoh 
   2373   1.28   msaitoh 	/* Third, allocate the RX ring */
   2374  1.114   msaitoh 	sc->rx_rings = kmem_zalloc(sizeof(struct rx_ring) * sc->num_queues,
   2375  1.114   msaitoh 	    KM_SLEEP);
   2376    1.1   msaitoh 
   2377    1.1   msaitoh 	/* For the ring itself */
   2378  1.113   msaitoh 	tsize = sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc);
   2379  1.113   msaitoh 	KASSERT((tsize % DBA_ALIGN) == 0);
   2380    1.1   msaitoh 
    2381    1.1   msaitoh 	/*
    2382    1.1   msaitoh 	 * Now set up the TX queues.  txconf is needed to handle the
    2383    1.1   msaitoh 	 * possibility that things fail midcourse and we need to undo
    2384    1.1   msaitoh 	 * the allocations gracefully (see the sketch after this function).
    2385   1.28   msaitoh 	 */
   2386  1.102   msaitoh 	for (int i = 0; i < sc->num_queues; i++, txconf++) {
   2387    1.1   msaitoh 		/* Set up some basics */
   2388  1.102   msaitoh 		txr = &sc->tx_rings[i];
   2389  1.102   msaitoh 		txr->sc = sc;
   2390   1.28   msaitoh 		txr->txr_interq = NULL;
   2391   1.28   msaitoh 		/* In case SR-IOV is enabled, align the index properly */
   2392    1.5   msaitoh #ifdef PCI_IOV
   2393  1.102   msaitoh 		txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
   2394   1.28   msaitoh 		    i);
   2395    1.5   msaitoh #else
   2396    1.1   msaitoh 		txr->me = i;
   2397    1.5   msaitoh #endif
   2398  1.102   msaitoh 		txr->num_desc = sc->num_tx_desc;
   2399    1.1   msaitoh 
   2400    1.1   msaitoh 		/* Initialize the TX side lock */
   2401    1.1   msaitoh 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2402    1.1   msaitoh 
   2403  1.102   msaitoh 		if (ixgbe_dma_malloc(sc, tsize, &txr->txdma,
   2404   1.28   msaitoh 		    BUS_DMA_NOWAIT)) {
   2405    1.1   msaitoh 			aprint_error_dev(dev,
   2406    1.1   msaitoh 			    "Unable to allocate TX Descriptor memory\n");
   2407    1.1   msaitoh 			error = ENOMEM;
   2408    1.1   msaitoh 			goto err_tx_desc;
   2409    1.1   msaitoh 		}
   2410    1.1   msaitoh 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2411    1.1   msaitoh 		bzero((void *)txr->tx_base, tsize);
   2412    1.1   msaitoh 
   2413   1.28   msaitoh 		/* Now allocate transmit buffers for the ring */
   2414   1.28   msaitoh 		if (ixgbe_allocate_transmit_buffers(txr)) {
   2415    1.1   msaitoh 			aprint_error_dev(dev,
   2416    1.1   msaitoh 			    "Critical Failure setting up transmit buffers\n");
   2417    1.1   msaitoh 			error = ENOMEM;
   2418    1.1   msaitoh 			goto err_tx_desc;
   2419   1.63   msaitoh 		}
   2420  1.102   msaitoh 		if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   2421   1.28   msaitoh 			/* Allocate a buf ring */
   2422   1.28   msaitoh 			txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP);
   2423   1.28   msaitoh 			if (txr->txr_interq == NULL) {
   2424   1.28   msaitoh 				aprint_error_dev(dev,
   2425   1.28   msaitoh 				    "Critical Failure setting up buf ring\n");
   2426   1.28   msaitoh 				error = ENOMEM;
   2427   1.28   msaitoh 				goto err_tx_desc;
   2428   1.28   msaitoh 			}
   2429   1.28   msaitoh 		}
   2430    1.1   msaitoh 	}
   2431    1.1   msaitoh 
   2432    1.1   msaitoh 	/*
   2433    1.1   msaitoh 	 * Next the RX queues...
   2434   1.53   msaitoh 	 */
   2435  1.113   msaitoh 	rsize = sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc);
   2436  1.113   msaitoh 	KASSERT((rsize % DBA_ALIGN) == 0);
   2437  1.102   msaitoh 	for (int i = 0; i < sc->num_queues; i++, rxconf++) {
   2438  1.102   msaitoh 		rxr = &sc->rx_rings[i];
   2439    1.1   msaitoh 		/* Set up some basics */
   2440  1.102   msaitoh 		rxr->sc = sc;
   2441    1.5   msaitoh #ifdef PCI_IOV
   2442   1.28   msaitoh 		/* In case SR-IOV is enabled, align the index properly */
   2443  1.102   msaitoh 		rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
   2444   1.28   msaitoh 		    i);
   2445    1.5   msaitoh #else
   2446    1.1   msaitoh 		rxr->me = i;
   2447    1.5   msaitoh #endif
   2448  1.102   msaitoh 		rxr->num_desc = sc->num_rx_desc;
   2449    1.1   msaitoh 
   2450    1.1   msaitoh 		/* Initialize the RX side lock */
   2451    1.1   msaitoh 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2452    1.1   msaitoh 
   2453  1.102   msaitoh 		if (ixgbe_dma_malloc(sc, rsize, &rxr->rxdma,
   2454   1.28   msaitoh 		    BUS_DMA_NOWAIT)) {
   2455    1.1   msaitoh 			aprint_error_dev(dev,
    2456    1.1   msaitoh 			    "Unable to allocate RX Descriptor memory\n");
   2457    1.1   msaitoh 			error = ENOMEM;
   2458    1.1   msaitoh 			goto err_rx_desc;
   2459    1.1   msaitoh 		}
   2460    1.1   msaitoh 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2461    1.1   msaitoh 		bzero((void *)rxr->rx_base, rsize);
   2462    1.1   msaitoh 
   2463   1.28   msaitoh 		/* Allocate receive buffers for the ring */
   2464    1.1   msaitoh 		if (ixgbe_allocate_receive_buffers(rxr)) {
   2465    1.1   msaitoh 			aprint_error_dev(dev,
   2466    1.1   msaitoh 			    "Critical Failure setting up receive buffers\n");
   2467    1.1   msaitoh 			error = ENOMEM;
   2468    1.1   msaitoh 			goto err_rx_desc;
   2469    1.1   msaitoh 		}
   2470    1.1   msaitoh 	}
   2471    1.1   msaitoh 
   2472    1.1   msaitoh 	/*
   2473   1.28   msaitoh 	 * Finally set up the queue holding structs
   2474   1.28   msaitoh 	 */
   2475  1.102   msaitoh 	for (int i = 0; i < sc->num_queues; i++) {
   2476  1.102   msaitoh 		que = &sc->queues[i];
   2477  1.102   msaitoh 		que->sc = sc;
   2478    1.3   msaitoh 		que->me = i;
   2479  1.102   msaitoh 		que->txr = &sc->tx_rings[i];
   2480  1.102   msaitoh 		que->rxr = &sc->rx_rings[i];
   2481   1.33  knakahar 
   2482   1.37  knakahar 		mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET);
   2483   1.37  knakahar 		que->disabled_count = 0;
   2484    1.1   msaitoh 	}
   2485    1.1   msaitoh 
   2486    1.1   msaitoh 	return (0);
   2487    1.1   msaitoh 
   2488    1.1   msaitoh err_rx_desc:
   2489  1.102   msaitoh 	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
   2490  1.102   msaitoh 		ixgbe_dma_free(sc, &rxr->rxdma);
   2491    1.1   msaitoh err_tx_desc:
   2492  1.102   msaitoh 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
   2493  1.102   msaitoh 		ixgbe_dma_free(sc, &txr->txdma);
   2494  1.114   msaitoh 	kmem_free(sc->rx_rings, sizeof(struct rx_ring) * sc->num_queues);
   2495  1.114   msaitoh 	kmem_free(sc->tx_rings, sizeof(struct tx_ring) * sc->num_queues);
   2496  1.114   msaitoh 	kmem_free(sc->queues, sizeof(struct ix_queue) * sc->num_queues);
   2497    1.1   msaitoh 	return (error);
   2498   1.28   msaitoh } /* ixgbe_allocate_queues */
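
                          /*
                           * Illustrative sketch, not part of the driver: the txconf/rxconf
                           * unwind pattern used above, in miniature.  Each counter records
                           * how many rings were fully set up, so the error path frees
                           * exactly those and no more.  setup_ring() and teardown_ring()
                           * are hypothetical placeholders.
                           */
                          #if 0	/* example only */
                          	int txconf = 0;
                          
                          	for (int i = 0; i < sc->num_queues; i++, txconf++) {
                          		if (setup_ring(&sc->tx_rings[i]) != 0)
                          			goto err;	/* only txconf rings are live */
                          	}
                          	return (0);
                          err:
                          	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
                          		teardown_ring(txr);
                          	return (ENOMEM);
                          #endif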
   2499   1.60   msaitoh 
   2500   1.60   msaitoh /************************************************************************
   2501   1.60   msaitoh  * ixgbe_free_queues
   2502   1.60   msaitoh  *
   2503   1.60   msaitoh  *   Free descriptors for the transmit and receive rings, and then
   2504   1.60   msaitoh  *   the memory associated with each.
   2505   1.60   msaitoh  ************************************************************************/
   2506   1.60   msaitoh void
   2507  1.102   msaitoh ixgbe_free_queues(struct ixgbe_softc *sc)
   2508   1.60   msaitoh {
   2509   1.60   msaitoh 	struct ix_queue *que;
   2510   1.60   msaitoh 	int i;
   2511   1.60   msaitoh 
   2512  1.102   msaitoh 	ixgbe_free_transmit_structures(sc);
   2513  1.102   msaitoh 	ixgbe_free_receive_structures(sc);
   2514  1.102   msaitoh 	for (i = 0; i < sc->num_queues; i++) {
   2515  1.102   msaitoh 		que = &sc->queues[i];
   2516   1.60   msaitoh 		mutex_destroy(&que->dc_mtx);
   2517   1.60   msaitoh 	}
   2518  1.114   msaitoh 	kmem_free(sc->queues, sizeof(struct ix_queue) * sc->num_queues);
   2519   1.60   msaitoh } /* ixgbe_free_queues */
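
                          /*
                           * Illustrative sketch, not part of the driver: lifetime pairing
                           * of the two routines above.  ixgbe_allocate_queues() runs once
                           * at attach and unwinds itself on failure, so a failed attach
                           * needs no call to ixgbe_free_queues(); that routine is the
                           * detach-time counterpart.
                           */
                          #if 0	/* example only */
                          	if (ixgbe_allocate_queues(sc) != 0)
                          		return (ENOMEM);
                          
                          	/* Later, from the detach path: */
                          	ixgbe_free_queues(sc);
                          #endif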
   2520