/* $NetBSD: ix_txrx.c,v 1.107 2023/11/14 03:03:18 msaitoh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ix_txrx.c,v 1.107 2023/11/14 03:03:18 msaitoh Exp $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include "ixgbe.h"

/*
 * HW RSC control:
 *  This feature only works with IPv4 and only on 82599 and later
 *  devices.  It also causes IP forwarding to fail, and unlike LRO
 *  that cannot be controlled by the stack.  For all these reasons
 *  it is left off by default with no tunable interface; enabling
 *  it requires recompiling.
 */
static bool ixgbe_rsc_enable = FALSE;

/*
 * For Flow Director: this is the number of TX packets between
 * samples taken for the filter pool; with the default of 20,
 * every 20th packet is probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

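/*
 * Note (illustrative): ixgbe_xmit() increments txr->atr_count once per
 * transmitted packet and, when the count reaches atr_sample_rate,
 * calls ixgbe_atr() and resets the count, so the default of 20 probes
 * one packet in twenty.
 */
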
#define IXGBE_M_ADJ(sc, rxr, mp)					\
	if (sc->max_frame_size <= (rxr->mbuf_sz - ETHER_ALIGN))	\
		m_adj(mp, ETHER_ALIGN)

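/*
 * Note: IXGBE_M_ADJ() expands to a bare if statement, so it may only
 * be used where a statement is expected.  When the configured maximum
 * frame size still fits in the cluster after the adjustment, it
 * advances the mbuf data pointer by ETHER_ALIGN (2) bytes so that the
 * IP header following the 14-byte Ethernet header lands on a 4-byte
 * boundary.
 */
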
/************************************************************************
 *  Local Function prototypes
 ************************************************************************/
static void          ixgbe_setup_transmit_ring(struct tx_ring *);
static void          ixgbe_free_transmit_buffers(struct tx_ring *);
static int           ixgbe_setup_receive_ring(struct rx_ring *);
static void          ixgbe_free_receive_buffers(struct rx_ring *);
static void          ixgbe_rx_checksum(u32, struct mbuf *, u32,
                                       struct ixgbe_hw_stats *);
static void          ixgbe_refresh_mbufs(struct rx_ring *, int);
static void          ixgbe_drain(struct ifnet *, struct tx_ring *);
static int           ixgbe_xmit(struct tx_ring *, struct mbuf *);
static int           ixgbe_tx_ctx_setup(struct tx_ring *,
                                        struct mbuf *, u32 *, u32 *);
static int           ixgbe_tso_setup(struct tx_ring *,
                                     struct mbuf *, u32 *, u32 *);
static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
                                    struct mbuf *, u32);
static int           ixgbe_dma_malloc(struct ixgbe_softc *, bus_size_t,
                                      struct ixgbe_dma_alloc *, int);
static void          ixgbe_dma_free(struct ixgbe_softc *,
                                    struct ixgbe_dma_alloc *);

static void          ixgbe_setup_hw_rsc(struct rx_ring *);

/************************************************************************
 * ixgbe_legacy_start_locked - Transmit entry point
 *
 *   Called by the stack to initiate a transmit.
 *   The driver will remain in this routine as long as there are
 *   packets to transmit and transmit resources are available.
 *   In case resources are not available, the stack is notified
 *   and the packet is requeued.
 ************************************************************************/
int
ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	int rc;
	struct mbuf    *m_head;
	struct ixgbe_softc *sc = txr->sc;

	IXGBE_TX_LOCK_ASSERT(txr);

	if (sc->link_active != LINK_STATE_UP) {
		/*
		 * Discard all packets buffered in the IFQ to avoid
		 * sending stale packets at the next link up.
		 */
		ixgbe_drain(ifp, txr);
		return (ENETDOWN);
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (ENETDOWN);
	if (txr->txr_no_space)
		return (ENETDOWN);

	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
			break;

		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (rc != 0) {
			m_freem(m_head);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, m_head, BPF_D_OUT);
	}

	return IXGBE_SUCCESS;
} /* ixgbe_legacy_start_locked */

/************************************************************************
 * ixgbe_legacy_start
 *
 *   Called by the stack, this always uses the first tx ring,
 *   and should not be used with multiqueue tx enabled.
 ************************************************************************/
void
ixgbe_legacy_start(struct ifnet *ifp)
{
	struct ixgbe_softc *sc = ifp->if_softc;
	struct tx_ring *txr = sc->tx_rings;

	if (ifp->if_flags & IFF_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}
} /* ixgbe_legacy_start */

/************************************************************************
 * ixgbe_mq_start - Multiqueue Transmit Entry Point
 *
 *   (if_transmit function)
 ************************************************************************/
int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct ixgbe_softc *sc = ifp->if_softc;
	struct tx_ring	*txr;
	int		i;
#ifdef RSS
	uint32_t bucket_id;
#endif

	/*
	 * When doing RSS, map it to the same outbound queue
	 * as the incoming flow would be mapped to.
	 *
	 * If everything is set up correctly, it should be the
	 * same bucket as the one the current CPU is in.
	 */
#ifdef RSS
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		if ((sc->feat_en & IXGBE_FEATURE_RSS) &&
		    (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
		    &bucket_id) == 0)) {
			i = bucket_id % sc->num_queues;
#ifdef IXGBE_DEBUG
			if (bucket_id > sc->num_queues)
				if_printf(ifp,
				    "bucket_id (%d) > num_queues (%d)\n",
				    bucket_id, sc->num_queues);
#endif
		} else
			i = m->m_pkthdr.flowid % sc->num_queues;
	} else
#endif /* RSS */
		i = (cpu_index(curcpu()) % ncpu) % sc->num_queues;

	/* Check for a hung queue and pick alternative */
	if (((1ULL << i) & sc->active_queues) == 0)
		i = ffs64(sc->active_queues);

	txr = &sc->tx_rings[i];

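	/*
	 * Illustrative example: with num_queues = 8 and no RSS, a
	 * thread on CPU 10 of a 12-CPU machine selects queue
	 * (10 % 12) % 8 = 2.  If that queue's bit is clear in
	 * active_queues (the queue is hung), ffs64() redirects the
	 * packet to the first active queue instead.
	 */
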
	if (__predict_false(!pcq_put(txr->txr_interq, m))) {
		m_freem(m);
		IXGBE_EVC_ADD(&txr->pcq_drops, 1);
		return ENOBUFS;
	}
#ifdef IXGBE_ALWAYS_TXDEFER
	kpreempt_disable();
	softint_schedule(txr->txr_si);
	kpreempt_enable();
#else
	if (IXGBE_TX_TRYLOCK(txr)) {
		ixgbe_mq_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	} else {
		if (sc->txrx_use_workqueue) {
			u_int *enqueued;

			/*
			 * This function itself is not called in interrupt
			 * context, but it can be called in fast softint
			 * context right after receiving forwarded packets.
			 * The workqueue must therefore be protected against
			 * double enqueueing when the machine handles both
			 * locally generated and forwarded packets.
			 */
			enqueued = percpu_getref(sc->txr_wq_enqueued);
			if (*enqueued == 0) {
				*enqueued = 1;
				percpu_putref(sc->txr_wq_enqueued);
				workqueue_enqueue(sc->txr_wq,
				    &txr->wq_cookie, curcpu());
			} else
				percpu_putref(sc->txr_wq_enqueued);
		} else {
			kpreempt_disable();
			softint_schedule(txr->txr_si);
			kpreempt_enable();
		}
	}
#endif

	return (0);
} /* ixgbe_mq_start */

/************************************************************************
 * ixgbe_mq_start_locked
 ************************************************************************/
int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf    *next;
	int            enqueued = 0, err = 0;

	if (txr->sc->link_active != LINK_STATE_UP) {
		/*
		 * Discard all packets buffered in txr_interq to avoid
		 * sending stale packets at the next link up.
		 */
		ixgbe_drain(ifp, txr);
		return (ENETDOWN);
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (ENETDOWN);
	if (txr->txr_no_space)
		return (ENETDOWN);

	/* Process the queue */
	while ((next = pcq_get(txr->txr_interq)) != NULL) {
		if ((err = ixgbe_xmit(txr, next)) != 0) {
			m_freem(next);
			/* All errors are counted in ixgbe_xmit() */
			break;
		}
		enqueued++;
#if __FreeBSD_version >= 1100036
		/*
		 * Since we're looking at the tx ring, we can check
		 * to see if we're a VF by examining our tail register
		 * address.
		 */
		if ((txr->sc->feat_en & IXGBE_FEATURE_VF) &&
		    (next->m_flags & M_MCAST))
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
#endif
		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, next, BPF_D_OUT);
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->sc))
		ixgbe_txeof(txr);

	return (err);
} /* ixgbe_mq_start_locked */

/************************************************************************
 * ixgbe_deferred_mq_start
 *
 *   Called from a softint and workqueue (indirectly) to drain queued
 *   transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start(void *arg)
{
	struct tx_ring *txr = arg;
	struct ixgbe_softc *sc = txr->sc;
	struct ifnet   *ifp = sc->ifp;

	IXGBE_TX_LOCK(txr);
	if (pcq_peek(txr->txr_interq) != NULL)
		ixgbe_mq_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_deferred_mq_start */

/************************************************************************
 * ixgbe_deferred_mq_start_work
 *
 *   Called from a workqueue to drain queued transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start_work(struct work *wk, void *arg)
{
	struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);
	struct ixgbe_softc *sc = txr->sc;
	u_int *enqueued = percpu_getref(sc->txr_wq_enqueued);
	*enqueued = 0;
	percpu_putref(sc->txr_wq_enqueued);

	ixgbe_deferred_mq_start(txr);
} /* ixgbe_deferred_mq_start_work */

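/*
 * Note: the worker clears txr_wq_enqueued *before* draining.  Any
 * packet placed in txr_interq before a producer observes the flag set
 * is therefore either picked up by the pending drain or, once the
 * flag reads 0 again, triggers a fresh workqueue_enqueue(), so no
 * packet is left stranded.
 */
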
/************************************************************************
 * ixgbe_drain_all
 ************************************************************************/
void
ixgbe_drain_all(struct ixgbe_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct ix_queue *que = sc->queues;

	for (int i = 0; i < sc->num_queues; i++, que++) {
		struct tx_ring  *txr = que->txr;

		IXGBE_TX_LOCK(txr);
		ixgbe_drain(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}
}

/************************************************************************
 * ixgbe_xmit
 *
 *   Maps the mbufs to tx descriptors, allowing the
 *   TX engine to transmit the packets.
 *
 *   Return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
{
	struct ixgbe_softc      *sc = txr->sc;
	struct ixgbe_tx_buf     *txbuf;
	union ixgbe_adv_tx_desc *txd = NULL;
	struct ifnet            *ifp = sc->ifp;
	int                     i, j, error;
	int                     first;
	u32                     olinfo_status = 0, cmd_type_len;
	bool                    remap = TRUE;
	bus_dmamap_t            map;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (vlan_has_tag(m_head))
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
retry:
	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head,
	    BUS_DMA_NOWAIT);

	if (__predict_false(error)) {
		struct mbuf *m;

		switch (error) {
		case EAGAIN:
			txr->q_eagain_tx_dma_setup++;
			return EAGAIN;
		case ENOMEM:
			txr->q_enomem_tx_dma_setup++;
			return EAGAIN;
		case EFBIG:
			/* Try it again? - one try */
			if (remap == TRUE) {
				remap = FALSE;
				/*
				 * XXX: m_defrag will choke on
				 * non-MCLBYTES-sized clusters
				 */
				txr->q_efbig_tx_dma_setup++;
				m = m_defrag(m_head, M_NOWAIT);
				if (m == NULL) {
					txr->q_mbuf_defrag_failed++;
					return ENOBUFS;
				}
				m_head = m;
				goto retry;
			} else {
				txr->q_efbig2_tx_dma_setup++;
				return error;
			}
		case EINVAL:
			txr->q_einval_tx_dma_setup++;
			return error;
		default:
			txr->q_other_tx_dma_setup++;
			return error;
		}
	}

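	/*
	 * Note: the EFBIG path above retries at most once.  m_defrag()
	 * copies the chain into as few clusters as possible; if the
	 * compacted chain still cannot be loaded, the second EFBIG is
	 * returned to the caller rather than retried again.
	 */
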
	/* Make certain there are enough descriptors */
	if (txr->tx_avail < (map->dm_nsegs + 2)) {
		txr->txr_no_space = true;
		IXGBE_EVC_ADD(&txr->no_desc_avail, 1);
		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
		return EAGAIN;
	}

	/*
	 * Set up the appropriate offload context if requested;
	 * this may consume one TX descriptor.
	 */
	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
	if (__predict_false(error)) {
		return (error);
	}

#ifdef IXGBE_FDIR
	/* Do the flow director magic */
	if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
	    (txr->atr_sample) && (!sc->fdir_reinit)) {
		++txr->atr_count;
		if (txr->atr_count >= atr_sample_rate) {
			ixgbe_atr(txr, m_head);
			txr->atr_count = 0;
		}
	}
#endif

	olinfo_status |= IXGBE_ADVTXD_CC;
	i = txr->next_avail_desc;
	for (j = 0; j < map->dm_nsegs; j++) {
		bus_size_t seglen;
		uint64_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = map->dm_segs[j].ds_len;
		segaddr = htole64(map->dm_segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		if (++i == txr->num_desc)
			i = 0;
	}

	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= map->dm_nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	/*
	 * Here we swap the maps so that the last descriptor,
	 * which gets the completion interrupt, has the real
	 * map, and the first descriptor gets the unused map
	 * from this descriptor.
	 */
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);

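	/*
	 * Note on the swap above: the loaded map now travels with the
	 * last buffer, the one that holds m_head and that ixgbe_txeof()
	 * will sync and unload when the EOP completion arrives, while
	 * the first buffer inherits the spare map.  Every tx_buffer
	 * therefore always owns exactly one map.
	 */
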
	/* Set the EOP descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop = txd;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt); this tells the
	 * hardware that this frame is available to transmit.
	 */
	IXGBE_EVC_ADD(&txr->total_packets, 1);
	IXGBE_WRITE_REG(&sc->hw, txr->tail, i);

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if_statadd_ref(nsr, if_obytes, m_head->m_pkthdr.len);
	if (m_head->m_flags & M_MCAST)
		if_statinc_ref(nsr, if_omcasts);
	IF_STAT_PUTREF(ifp);

	/* Mark queue as having work */
	if (txr->busy == 0)
		txr->busy = 1;

	return (0);
} /* ixgbe_xmit */

/************************************************************************
 * ixgbe_drain
 ************************************************************************/
static void
ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf *m;

	IXGBE_TX_LOCK_ASSERT(txr);

	if (txr->me == 0) {
		while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			IF_DROP(&ifp->if_snd);
		}
	}

	while ((m = pcq_get(txr->txr_interq)) != NULL) {
		m_freem(m);
		IXGBE_EVC_ADD(&txr->pcq_drops, 1);
	}
}

/************************************************************************
 * ixgbe_allocate_transmit_buffers
 *
 *   Allocate memory for tx_buffer structures. The tx_buffer stores all
 *   the information needed to transmit a packet on the wire. This is
 *   called only once at attach; setup is done on every reset.
 ************************************************************************/
static int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct ixgbe_softc  *sc = txr->sc;
	device_t            dev = sc->dev;
	struct ixgbe_tx_buf *txbuf;
	int                 error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	error = ixgbe_dma_tag_create(
	         /*      parent */ sc->osdep.dmat,
	         /*   alignment */ 1,
	         /*      bounds */ 0,
	         /*     maxsize */ IXGBE_TSO_SIZE,
	         /*   nsegments */ sc->num_segs,
	         /*  maxsegsize */ PAGE_SIZE,
	         /*       flags */ 0,
	                           &txr->txtag);
	if (error != 0) {
		aprint_error_dev(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	txr->tx_buffers = malloc(sizeof(struct ixgbe_tx_buf) *
	    sc->num_tx_desc, M_DEVBUF, M_WAITOK | M_ZERO);

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < sc->num_tx_desc; i++, txbuf++) {
		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			aprint_error_dev(dev,
			    "Unable to create TX DMA map (%d)\n", error);
			goto fail;
		}
	}

	return 0;
fail:
	/* Free everything; this handles the case where we fail in the middle */
#if 0 /* XXX was FreeBSD */
	ixgbe_free_transmit_structures(sc);
#else
	ixgbe_free_transmit_buffers(txr);
#endif
	return (error);
} /* ixgbe_allocate_transmit_buffers */

/************************************************************************
 * ixgbe_setup_transmit_ring - Initialize a transmit ring.
 ************************************************************************/
static void
ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
	struct ixgbe_softc    *sc = txr->sc;
	struct ixgbe_tx_buf   *txbuf;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(sc->ifp);
	struct netmap_slot    *slot;
#endif /* DEV_NETMAP */

	/* Clear the old ring contents */
	IXGBE_TX_LOCK(txr);

#ifdef DEV_NETMAP
	if (sc->feat_en & IXGBE_FEATURE_NETMAP) {
		/*
		 * (under lock): if in netmap mode, do some consistency
		 * checks and set slot to entry 0 of the netmap ring.
		 */
		slot = netmap_reset(na, NR_TX, txr->me, 0);
	}
#endif /* DEV_NETMAP */

	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (int i = 0; i < txr->num_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
			    0, txbuf->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}

#ifdef DEV_NETMAP
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * Slots in the netmap ring (indexed by "si") are
		 * kring->nkr_hwofs positions "ahead" wrt the
		 * corresponding slot in the NIC ring. In some drivers
		 * (not here) nkr_hwofs can be negative. Function
		 * netmap_idx_n2k() handles wraparounds properly.
		 */
		if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
			int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
			netmap_load_map(na, txr->txtag,
			    txbuf->map, NMB(na, slot + si));
		}
#endif /* DEV_NETMAP */

		/* Clear the EOP descriptor pointer */
		txbuf->eop = NULL;
	}

	/* Set the rate at which we sample packets */
	if (sc->feat_en & IXGBE_FEATURE_FDIR)
		txr->atr_sample = atr_sample_rate;

	/* Set number of descriptors available */
	txr->tx_avail = sc->num_tx_desc;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_setup_transmit_ring */

/************************************************************************
 * ixgbe_setup_transmit_structures - Initialize all transmit rings.
 ************************************************************************/
int
ixgbe_setup_transmit_structures(struct ixgbe_softc *sc)
{
	struct tx_ring *txr = sc->tx_rings;

	for (int i = 0; i < sc->num_queues; i++, txr++)
		ixgbe_setup_transmit_ring(txr);

	return (0);
} /* ixgbe_setup_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_structures - Free all transmit rings.
 ************************************************************************/
void
ixgbe_free_transmit_structures(struct ixgbe_softc *sc)
{
	struct tx_ring *txr = sc->tx_rings;

	for (int i = 0; i < sc->num_queues; i++, txr++) {
		ixgbe_free_transmit_buffers(txr);
		ixgbe_dma_free(sc, &txr->txdma);
		IXGBE_TX_LOCK_DESTROY(txr);
	}
	free(sc->tx_rings, M_DEVBUF);
} /* ixgbe_free_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_buffers
 *
 *   Free transmit ring related data structures.
 ************************************************************************/
static void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct ixgbe_softc  *sc = txr->sc;
	struct ixgbe_tx_buf *tx_buffer;
	int                 i;

	INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
			    0, tx_buffer->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				ixgbe_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
	if (txr->txr_interq != NULL) {
		struct mbuf *m;

		while ((m = pcq_get(txr->txr_interq)) != NULL)
			m_freem(m);
		pcq_destroy(txr->txr_interq);
	}
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		ixgbe_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
} /* ixgbe_free_transmit_buffers */

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct ixgbe_softc               *sc = txr->sc;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header         *eh;
#ifdef INET
	struct ip                        *ip;
#endif
#ifdef INET6
	struct ip6_hdr                   *ip6;
#endif
	int                              ehdrlen, ip_hlen = 0;
	int                              offload = TRUE;
	int                              ctxd = txr->next_avail_desc;
	u32                              vlan_macip_lens = 0;
	u32                              type_tucmd_mlhl = 0;
	u16                              vtag = 0;
	u16                              etype;
	u8                               ipproto = 0;
	char                             *l3d;

	/* First check if TSO is to be used */
	if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
		int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);

		if (rv != 0)
			IXGBE_EVC_ADD(&sc->tso_err, 1);
		return rv;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
		offload = FALSE;

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor. Hence
	 * we need to make one even if not doing offloads.
	 */
	if (vlan_has_tag(mp)) {
		vtag = htole16(vlan_get_tag(mp));
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (!(txr->sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
	           (offload == FALSE))
		return (0);

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	if (offload == FALSE)
		goto no_offloads;

	/*
	 * If the first mbuf only includes the ethernet header,
	 * jump to the next one
	 * XXX: This assumes the stack splits mbufs containing headers
	 *      on header boundaries
	 * XXX: And assumes the entire IP header is contained in one mbuf
	 */
	if (mp->m_len == ehdrlen && mp->m_next)
		l3d = mtod(mp->m_next, char *);
	else
		l3d = mtod(mp, char *) + ehdrlen;

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(l3d);
		ip_hlen = ip->ip_hl << 2;
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
		    ip->ip_sum == 0);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(l3d);
		ip_hlen = sizeof(struct ip6_hdr);
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
	default:
		offload = false;
		break;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;

	vlan_macip_lens |= ip_hlen;

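	/*
	 * Example (illustrative): for an untagged IPv4/TCP frame,
	 * ehdrlen is 14 and ip_hlen is 20, so vlan_macip_lens now
	 * holds (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20; an in-frame
	 * VLAN header raises the MAC-length part to 18.
	 */
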
	/* No support for offloads for non-L4 next headers */
	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags &
		    (M_CSUM_TCPv4 | M_CSUM_TCPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = false;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags &
		    (M_CSUM_UDPv4 | M_CSUM_UDPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = false;
		break;
	default:
		offload = false;
		break;
	}

	if (offload) /* Insert L4 checksum into data descriptors */
		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

no_offloads:
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now ready a context descriptor */
	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (0);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_tso_setup
 *
 *   Setup work for hardware segmentation offload (TSO) on
 *   adapters using advanced tx descriptors
 ************************************************************************/
static int
ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
    u32 *olinfo_status)
{
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header         *eh;
#ifdef INET6
	struct ip6_hdr                   *ip6;
#endif
#ifdef INET
	struct ip                        *ip;
#endif
	struct tcphdr                    *th;
	int                              ctxd, ehdrlen, ip_hlen, tcp_hlen;
	u32                              vlan_macip_lens = 0;
	u32                              type_tucmd_mlhl = 0;
	u32                              mss_l4len_idx = 0, paylen;
	u16                              vtag = 0, eh_type;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		eh_type = eh->evl_proto;
	} else {
		ehdrlen = ETHER_HDR_LEN;
		eh_type = eh->evl_encap_proto;
	}

	switch (ntohs(eh_type)) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (ENXIO);
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		th = (struct tcphdr *)((char *)ip + ip_hlen);
		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (ENXIO);
   1037   1.28   msaitoh 		ip_hlen = sizeof(struct ip6_hdr);
   1038   1.28   msaitoh 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
   1039   1.28   msaitoh 		th = (struct tcphdr *)((char *)ip6 + ip_hlen);
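                          		/* Likewise, seed with the zero-length IPv6 pseudo-header sum */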
   1040   1.28   msaitoh 		th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   1041   1.28   msaitoh 		    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   1042   1.28   msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   1043   1.28   msaitoh 		break;
   1044   1.28   msaitoh #endif
   1045    1.1   msaitoh 	default:
   1046    1.1   msaitoh 		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
   1047    1.1   msaitoh 		    __func__, ntohs(eh_type));
   1048    1.1   msaitoh 		break;
   1049    1.1   msaitoh 	}
   1050    1.1   msaitoh 
   1051    1.1   msaitoh 	ctxd = txr->next_avail_desc;
   1052   1.28   msaitoh 	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
   1053    1.1   msaitoh 
   1054    1.1   msaitoh 	tcp_hlen = th->th_off << 2;
   1055    1.1   msaitoh 
   1056    1.1   msaitoh 	/* This is used in the transmit desc in encap */
   1057    1.1   msaitoh 	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
   1058    1.1   msaitoh 
   1059    1.1   msaitoh 	/* VLAN MACLEN IPLEN */
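                          	/* Field layout: VLAN tag [31:16] | MACLEN [15:9] | IPLEN [8:0] */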
   1060   1.29  knakahar 	if (vlan_has_tag(mp)) {
   1061   1.29  knakahar 		vtag = htole16(vlan_get_tag(mp));
   1062   1.28   msaitoh 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   1063    1.1   msaitoh 	}
   1064    1.1   msaitoh 
   1065    1.1   msaitoh 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   1066    1.1   msaitoh 	vlan_macip_lens |= ip_hlen;
   1067    1.1   msaitoh 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
   1068    1.1   msaitoh 
   1069    1.1   msaitoh 	/* ADV DTYPE TUCMD */
   1070    1.1   msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   1071    1.1   msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   1072    1.1   msaitoh 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
   1073    1.1   msaitoh 
   1074    1.1   msaitoh 	/* MSS L4LEN IDX */
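                          	/* Field layout: MSS [31:16] | L4LEN [15:8]; the IDX field stays 0 */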
   1075    1.1   msaitoh 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   1076    1.1   msaitoh 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   1077    1.1   msaitoh 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   1078    1.1   msaitoh 
   1079    1.1   msaitoh 	TXD->seqnum_seed = htole32(0);
   1080    1.1   msaitoh 
   1081    1.1   msaitoh 	if (++ctxd == txr->num_desc)
   1082    1.1   msaitoh 		ctxd = 0;
   1083    1.1   msaitoh 
   1084    1.1   msaitoh 	txr->tx_avail--;
   1085    1.1   msaitoh 	txr->next_avail_desc = ctxd;
   1086    1.1   msaitoh 	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1087    1.1   msaitoh 	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1088    1.1   msaitoh 	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1089   1.95   msaitoh 	IXGBE_EVC_ADD(&txr->tso_tx, 1);
   1090   1.28   msaitoh 
   1091    1.1   msaitoh 	return (0);
   1092   1.28   msaitoh } /* ixgbe_tso_setup */
   1093    1.1   msaitoh 
   1094    1.3   msaitoh 
   1095   1.28   msaitoh /************************************************************************
   1096   1.28   msaitoh  * ixgbe_txeof
   1097    1.1   msaitoh  *
   1098   1.28   msaitoh  *   Examine each tx_buffer in the used queue. If the hardware is done
   1099   1.28   msaitoh  *   processing the packet then free associated resources. The
   1100   1.28   msaitoh  *   tx_buffer is put back on the free queue.
   1101   1.28   msaitoh  ************************************************************************/
   1102   1.32   msaitoh bool
   1103    1.1   msaitoh ixgbe_txeof(struct tx_ring *txr)
   1104    1.1   msaitoh {
   1105  1.102   msaitoh 	struct ixgbe_softc	*sc = txr->sc;
   1106  1.102   msaitoh 	struct ifnet		*ifp = sc->ifp;
   1107   1.28   msaitoh 	struct ixgbe_tx_buf	*buf;
   1108   1.28   msaitoh 	union ixgbe_adv_tx_desc *txd;
   1109    1.1   msaitoh 	u32			work, processed = 0;
   1110  1.102   msaitoh 	u32			limit = sc->tx_process_limit;
   1111    1.1   msaitoh 
   1112    1.1   msaitoh 	KASSERT(mutex_owned(&txr->tx_mtx));
   1113    1.1   msaitoh 
   1114    1.1   msaitoh #ifdef DEV_NETMAP
   1115  1.102   msaitoh 	if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
   1116  1.102   msaitoh 	    (sc->ifp->if_capenable & IFCAP_NETMAP)) {
    1117  1.102   msaitoh 		struct netmap_adapter *na = NA(sc->ifp);
   1118   1.53   msaitoh 		struct netmap_kring *kring = na->tx_rings[txr->me];
   1119    1.1   msaitoh 		txd = txr->tx_base;
   1120    1.1   msaitoh 		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1121    1.1   msaitoh 		    BUS_DMASYNC_POSTREAD);
   1122    1.1   msaitoh 		/*
   1123    1.1   msaitoh 		 * In netmap mode, all the work is done in the context
   1124    1.1   msaitoh 		 * of the client thread. Interrupt handlers only wake up
   1125    1.1   msaitoh 		 * clients, which may be sleeping on individual rings
   1126    1.1   msaitoh 		 * or on a global resource for all rings.
   1127    1.1   msaitoh 		 * To implement tx interrupt mitigation, we wake up the client
   1128    1.1   msaitoh 		 * thread roughly every half ring, even if the NIC interrupts
   1129    1.1   msaitoh 		 * more frequently. This is implemented as follows:
   1130    1.1   msaitoh 		 * - ixgbe_txsync() sets kring->nr_kflags with the index of
   1131    1.1   msaitoh 		 *   the slot that should wake up the thread (nkr_num_slots
   1132    1.1   msaitoh 		 *   means the user thread should not be woken up);
   1133    1.1   msaitoh 		 * - the driver ignores tx interrupts unless netmap_mitigate=0
   1134    1.1   msaitoh 		 *   or the slot has the DD bit set.
   1135    1.1   msaitoh 		 */
   1136   1.53   msaitoh 		if (kring->nr_kflags < kring->nkr_num_slots &&
   1137   1.78       ryo 		    le32toh(txd[kring->nr_kflags].wb.status) & IXGBE_TXD_STAT_DD) {
   1138    1.1   msaitoh 			netmap_tx_irq(ifp, txr->me);
   1139    1.1   msaitoh 		}
   1140   1.32   msaitoh 		return false;
   1141    1.1   msaitoh 	}
   1142    1.1   msaitoh #endif /* DEV_NETMAP */
   1143    1.1   msaitoh 
   1144    1.1   msaitoh 	if (txr->tx_avail == txr->num_desc) {
   1145   1.45   msaitoh 		txr->busy = 0;
   1146   1.32   msaitoh 		return false;
   1147    1.1   msaitoh 	}
   1148    1.1   msaitoh 
   1149    1.1   msaitoh 	/* Get work starting point */
   1150    1.1   msaitoh 	work = txr->next_to_clean;
   1151    1.1   msaitoh 	buf = &txr->tx_buffers[work];
   1152    1.1   msaitoh 	txd = &txr->tx_base[work];
   1153    1.1   msaitoh 	work -= txr->num_desc; /* The distance to ring end */
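                          	/*
                          	 * 'work' now counts from -num_desc up toward zero, so the
                          	 * !work tests below detect the wrap past the last descriptor.
                          	 */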
   1154   1.28   msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1155    1.1   msaitoh 	    BUS_DMASYNC_POSTREAD);
   1156    1.8   msaitoh 
   1157    1.1   msaitoh 	do {
   1158    1.8   msaitoh 		union ixgbe_adv_tx_desc *eop = buf->eop;
   1159    1.1   msaitoh 		if (eop == NULL) /* No work */
   1160    1.1   msaitoh 			break;
   1161    1.1   msaitoh 
   1162   1.78       ryo 		if ((le32toh(eop->wb.status) & IXGBE_TXD_STAT_DD) == 0)
   1163    1.1   msaitoh 			break;	/* I/O not complete */
   1164    1.1   msaitoh 
   1165    1.1   msaitoh 		if (buf->m_head) {
   1166   1.28   msaitoh 			txr->bytes += buf->m_head->m_pkthdr.len;
   1167   1.28   msaitoh 			bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
   1168    1.1   msaitoh 			    0, buf->m_head->m_pkthdr.len,
   1169    1.1   msaitoh 			    BUS_DMASYNC_POSTWRITE);
   1170   1.28   msaitoh 			ixgbe_dmamap_unload(txr->txtag, buf->map);
   1171    1.1   msaitoh 			m_freem(buf->m_head);
   1172    1.1   msaitoh 			buf->m_head = NULL;
   1173    1.1   msaitoh 		}
   1174    1.1   msaitoh 		buf->eop = NULL;
   1175   1.47   msaitoh 		txr->txr_no_space = false;
   1176    1.1   msaitoh 		++txr->tx_avail;
   1177    1.1   msaitoh 
   1178    1.1   msaitoh 		/* We clean the range if multi segment */
   1179    1.1   msaitoh 		while (txd != eop) {
   1180    1.1   msaitoh 			++txd;
   1181    1.1   msaitoh 			++buf;
   1182    1.1   msaitoh 			++work;
   1183    1.1   msaitoh 			/* wrap the ring? */
   1184    1.1   msaitoh 			if (__predict_false(!work)) {
   1185    1.1   msaitoh 				work -= txr->num_desc;
   1186    1.1   msaitoh 				buf = txr->tx_buffers;
   1187    1.1   msaitoh 				txd = txr->tx_base;
   1188    1.1   msaitoh 			}
   1189    1.1   msaitoh 			if (buf->m_head) {
   1190    1.1   msaitoh 				txr->bytes +=
   1191    1.1   msaitoh 				    buf->m_head->m_pkthdr.len;
   1192    1.1   msaitoh 				bus_dmamap_sync(txr->txtag->dt_dmat,
   1193    1.1   msaitoh 				    buf->map,
   1194    1.1   msaitoh 				    0, buf->m_head->m_pkthdr.len,
   1195    1.1   msaitoh 				    BUS_DMASYNC_POSTWRITE);
   1196    1.1   msaitoh 				ixgbe_dmamap_unload(txr->txtag,
   1197    1.1   msaitoh 				    buf->map);
   1198    1.1   msaitoh 				m_freem(buf->m_head);
   1199    1.1   msaitoh 				buf->m_head = NULL;
   1200    1.1   msaitoh 			}
   1201    1.1   msaitoh 			++txr->tx_avail;
   1202    1.1   msaitoh 			buf->eop = NULL;
   1203    1.1   msaitoh 
   1204    1.1   msaitoh 		}
   1205    1.1   msaitoh 		++txr->packets;
   1206    1.1   msaitoh 		++processed;
   1207   1.61   thorpej 		if_statinc(ifp, if_opackets);
   1208    1.1   msaitoh 
   1209    1.1   msaitoh 		/* Try the next packet */
   1210    1.1   msaitoh 		++txd;
   1211    1.1   msaitoh 		++buf;
   1212    1.1   msaitoh 		++work;
   1213    1.1   msaitoh 		/* reset with a wrap */
   1214    1.1   msaitoh 		if (__predict_false(!work)) {
   1215    1.1   msaitoh 			work -= txr->num_desc;
   1216    1.1   msaitoh 			buf = txr->tx_buffers;
   1217    1.1   msaitoh 			txd = txr->tx_base;
   1218    1.1   msaitoh 		}
   1219    1.1   msaitoh 		prefetch(txd);
   1220    1.1   msaitoh 	} while (__predict_true(--limit));
   1221    1.1   msaitoh 
   1222    1.1   msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1223    1.1   msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1224    1.1   msaitoh 
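                          	/* Convert the negative offset back into a real ring index */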
   1225    1.1   msaitoh 	work += txr->num_desc;
   1226    1.1   msaitoh 	txr->next_to_clean = work;
   1227    1.1   msaitoh 
    1228   1.45   msaitoh 	/*
    1229   1.45   msaitoh 	 * Queue hang detection: we know there's
    1230   1.45   msaitoh 	 * work outstanding or the first return
    1231   1.45   msaitoh 	 * would have been taken, so increment busy
    1232   1.45   msaitoh 	 * if nothing managed to get cleaned; then
    1233   1.45   msaitoh 	 * local_timer will check it and mark the
    1234   1.45   msaitoh 	 * queue HUNG if it exceeds a MAX attempt.
    1235   1.45   msaitoh 	 */
   1236   1.45   msaitoh 	if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
   1237   1.45   msaitoh 		++txr->busy;
   1238   1.45   msaitoh 	/*
   1239   1.45   msaitoh 	 * If anything gets cleaned we reset state to 1,
    1240   1.45   msaitoh 	 * note this will turn off HUNG if it's set.
   1241   1.45   msaitoh 	 */
   1242   1.45   msaitoh 	if (processed)
   1243   1.45   msaitoh 		txr->busy = 1;
   1244   1.45   msaitoh 
   1245   1.43   msaitoh 	if (txr->tx_avail == txr->num_desc)
   1246   1.45   msaitoh 		txr->busy = 0;
   1247   1.43   msaitoh 
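                          	/* Return true if the limit was exhausted and work may remain */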
   1248   1.32   msaitoh 	return ((limit > 0) ? false : true);
   1249   1.28   msaitoh } /* ixgbe_txeof */
   1250    1.1   msaitoh 
   1251   1.28   msaitoh /************************************************************************
   1252   1.28   msaitoh  * ixgbe_rsc_count
   1253   1.28   msaitoh  *
   1254   1.28   msaitoh  *   Used to detect a descriptor that has been merged by Hardware RSC.
   1255   1.28   msaitoh  ************************************************************************/
   1256    1.1   msaitoh static inline u32
   1257    1.1   msaitoh ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
   1258    1.1   msaitoh {
   1259    1.1   msaitoh 	return (le32toh(rx->wb.lower.lo_dword.data) &
   1260    1.1   msaitoh 	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
   1261   1.28   msaitoh } /* ixgbe_rsc_count */
   1262    1.1   msaitoh 
   1263   1.28   msaitoh /************************************************************************
   1264   1.28   msaitoh  * ixgbe_setup_hw_rsc
   1265    1.1   msaitoh  *
   1266   1.28   msaitoh  *   Initialize Hardware RSC (LRO) feature on 82599
    1267   1.28   msaitoh  *   for an RX ring; this is toggled by the LRO capability
   1268   1.28   msaitoh  *   even though it is transparent to the stack.
   1269   1.28   msaitoh  *
   1270   1.28   msaitoh  *   NOTE: Since this HW feature only works with IPv4 and
   1271   1.28   msaitoh  *         testing has shown soft LRO to be as effective,
   1272   1.28   msaitoh  *         this feature will be disabled by default.
   1273   1.28   msaitoh  ************************************************************************/
   1274    1.1   msaitoh static void
   1275    1.1   msaitoh ixgbe_setup_hw_rsc(struct rx_ring *rxr)
   1276    1.1   msaitoh {
   1277  1.104   msaitoh 	struct ixgbe_softc *sc = rxr->sc;
   1278  1.104   msaitoh 	struct ixgbe_hw	*hw = &sc->hw;
   1279  1.104   msaitoh 	u32		rscctrl, rdrxctl;
   1280    1.1   msaitoh 
   1281    1.1   msaitoh 	/* If turning LRO/RSC off we need to disable it */
   1282  1.102   msaitoh 	if ((sc->ifp->if_capenable & IFCAP_LRO) == 0) {
   1283    1.1   msaitoh 		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
    1284    1.1   msaitoh 		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
                          		/* Write the cleared enable bit back to the chip */
                          		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
    1285    1.1   msaitoh 		return;
   1286    1.1   msaitoh 	}
   1287    1.1   msaitoh 
   1288    1.1   msaitoh 	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
   1289    1.1   msaitoh 	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
   1290   1.28   msaitoh #ifdef DEV_NETMAP
   1291   1.28   msaitoh 	/* Always strip CRC unless Netmap disabled it */
   1292  1.102   msaitoh 	if (!(sc->feat_en & IXGBE_FEATURE_NETMAP) ||
   1293  1.102   msaitoh 	    !(sc->ifp->if_capenable & IFCAP_NETMAP) ||
   1294   1.28   msaitoh 	    ix_crcstrip)
   1295    1.1   msaitoh #endif /* DEV_NETMAP */
   1296   1.28   msaitoh 		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
   1297    1.1   msaitoh 	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
   1298    1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
   1299    1.1   msaitoh 
   1300    1.1   msaitoh 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
   1301    1.1   msaitoh 	rscctrl |= IXGBE_RSCCTL_RSCEN;
   1302    1.1   msaitoh 	/*
   1303   1.28   msaitoh 	 * Limit the total number of descriptors that
   1304   1.28   msaitoh 	 * can be combined, so it does not exceed 64K
   1305   1.28   msaitoh 	 */
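                          	/* e.g. 16 x 2KB clusters or 8 x 4KB clusters cap a merge at 32KB */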
   1306    1.1   msaitoh 	if (rxr->mbuf_sz == MCLBYTES)
   1307    1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
   1308    1.1   msaitoh 	else if (rxr->mbuf_sz == MJUMPAGESIZE)
   1309    1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
   1310    1.1   msaitoh 	else if (rxr->mbuf_sz == MJUM9BYTES)
   1311    1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
   1312    1.1   msaitoh 	else  /* Using 16K cluster */
   1313    1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
   1314    1.1   msaitoh 
   1315    1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
   1316    1.1   msaitoh 
   1317    1.1   msaitoh 	/* Enable TCP header recognition */
   1318    1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
   1319   1.28   msaitoh 	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
   1320    1.1   msaitoh 
   1321    1.1   msaitoh 	/* Disable RSC for ACK packets */
   1322    1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
   1323    1.1   msaitoh 	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
   1324    1.1   msaitoh 
   1325    1.1   msaitoh 	rxr->hw_rsc = TRUE;
   1326   1.28   msaitoh } /* ixgbe_setup_hw_rsc */
   1327    1.8   msaitoh 
   1328   1.28   msaitoh /************************************************************************
   1329   1.28   msaitoh  * ixgbe_refresh_mbufs
   1330    1.1   msaitoh  *
   1331   1.28   msaitoh  *   Refresh mbuf buffers for RX descriptor rings
   1332   1.28   msaitoh  *    - now keeps its own state so discards due to resource
    1333   1.28   msaitoh  *      exhaustion are unnecessary; if an mbuf cannot be obtained
    1334   1.28   msaitoh  *      it just returns, keeping its placeholder, so it can simply
    1335   1.28   msaitoh  *      be called again later to retry.
   1336   1.28   msaitoh  ************************************************************************/
   1337    1.1   msaitoh static void
   1338    1.1   msaitoh ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
   1339    1.1   msaitoh {
   1340  1.102   msaitoh 	struct ixgbe_softc  *sc = rxr->sc;
   1341   1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1342   1.28   msaitoh 	struct mbuf         *mp;
   1343   1.82   msaitoh 	int                 i, error;
   1344   1.28   msaitoh 	bool                refreshed = false;
   1345    1.1   msaitoh 
   1346   1.82   msaitoh 	i = rxr->next_to_refresh;
   1347   1.82   msaitoh 	/* next_to_refresh points to the previous one */
   1348   1.82   msaitoh 	if (++i == rxr->num_desc)
   1349   1.82   msaitoh 		i = 0;
   1350    1.1   msaitoh 
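                          	/* Note: 'limit' is the ring index at which to stop, not a count */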
   1351   1.82   msaitoh 	while (i != limit) {
   1352    1.1   msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1353   1.91   msaitoh 		if (__predict_false(rxbuf->buf == NULL)) {
   1354   1.87   msaitoh 			mp = ixgbe_getcl();
   1355    1.1   msaitoh 			if (mp == NULL) {
   1356   1.95   msaitoh 				IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
   1357    1.1   msaitoh 				goto update;
   1358    1.1   msaitoh 			}
   1359   1.86   msaitoh 			mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1360  1.102   msaitoh 			IXGBE_M_ADJ(sc, rxr, mp);
   1361    1.1   msaitoh 		} else
   1362    1.1   msaitoh 			mp = rxbuf->buf;
   1363    1.1   msaitoh 
   1364    1.1   msaitoh 		/* If we're dealing with an mbuf that was copied rather
   1365    1.1   msaitoh 		 * than replaced, there's no need to go through busdma.
   1366    1.1   msaitoh 		 */
   1367    1.1   msaitoh 		if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
   1368    1.1   msaitoh 			/* Get the memory mapping */
   1369    1.4   msaitoh 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   1370    1.1   msaitoh 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   1371    1.1   msaitoh 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   1372   1.91   msaitoh 			if (__predict_false(error != 0)) {
   1373  1.102   msaitoh 				device_printf(sc->dev, "Refresh mbufs: "
   1374   1.55   msaitoh 				    "payload dmamap load failure - %d\n",
   1375   1.55   msaitoh 				    error);
   1376    1.1   msaitoh 				m_free(mp);
   1377    1.1   msaitoh 				rxbuf->buf = NULL;
   1378    1.1   msaitoh 				goto update;
   1379    1.1   msaitoh 			}
   1380    1.1   msaitoh 			rxbuf->buf = mp;
   1381    1.1   msaitoh 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1382    1.1   msaitoh 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   1383    1.1   msaitoh 			rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
   1384    1.1   msaitoh 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1385    1.1   msaitoh 		} else {
   1386    1.1   msaitoh 			rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
   1387    1.1   msaitoh 			rxbuf->flags &= ~IXGBE_RX_COPY;
   1388    1.1   msaitoh 		}
   1389    1.1   msaitoh 
   1390    1.1   msaitoh 		refreshed = true;
   1391   1.82   msaitoh 		/* next_to_refresh points to the previous one */
   1392    1.1   msaitoh 		rxr->next_to_refresh = i;
   1393   1.82   msaitoh 		if (++i == rxr->num_desc)
   1394   1.82   msaitoh 			i = 0;
   1395    1.1   msaitoh 	}
   1396   1.28   msaitoh 
   1397    1.1   msaitoh update:
   1398    1.1   msaitoh 	if (refreshed) /* Update hardware tail index */
   1399  1.102   msaitoh 		IXGBE_WRITE_REG(&sc->hw, rxr->tail, rxr->next_to_refresh);
   1400   1.28   msaitoh 
   1401    1.1   msaitoh 	return;
   1402   1.28   msaitoh } /* ixgbe_refresh_mbufs */
   1403    1.1   msaitoh 
   1404   1.28   msaitoh /************************************************************************
   1405   1.28   msaitoh  * ixgbe_allocate_receive_buffers
   1406    1.1   msaitoh  *
   1407   1.28   msaitoh  *   Allocate memory for rx_buffer structures. Since we use one
    1408   1.28   msaitoh  *   rx_buffer per received packet, the maximum number of rx_buffers
   1409   1.28   msaitoh  *   that we'll need is equal to the number of receive descriptors
   1410   1.28   msaitoh  *   that we've allocated.
   1411   1.28   msaitoh  ************************************************************************/
   1412   1.28   msaitoh static int
   1413    1.1   msaitoh ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
   1414    1.1   msaitoh {
   1415  1.102   msaitoh 	struct ixgbe_softc  *sc = rxr->sc;
   1416  1.102   msaitoh 	device_t            dev = sc->dev;
   1417   1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1418   1.28   msaitoh 	int                 bsize, error;
   1419    1.1   msaitoh 
   1420    1.1   msaitoh 	bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
   1421   1.57       chs 	rxr->rx_buffers = malloc(bsize, M_DEVBUF, M_WAITOK | M_ZERO);
   1422    1.1   msaitoh 
   1423   1.28   msaitoh 	error = ixgbe_dma_tag_create(
   1424  1.102   msaitoh 	         /*      parent */ sc->osdep.dmat,
   1425   1.28   msaitoh 	         /*   alignment */ 1,
   1426   1.28   msaitoh 	         /*      bounds */ 0,
   1427   1.28   msaitoh 	         /*     maxsize */ MJUM16BYTES,
   1428   1.28   msaitoh 	         /*   nsegments */ 1,
   1429   1.28   msaitoh 	         /*  maxsegsize */ MJUM16BYTES,
   1430   1.28   msaitoh 	         /*       flags */ 0,
   1431   1.28   msaitoh 	                           &rxr->ptag);
   1432   1.28   msaitoh 	if (error != 0) {
   1433    1.1   msaitoh 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   1434    1.1   msaitoh 		goto fail;
   1435    1.1   msaitoh 	}
   1436    1.1   msaitoh 
    1437    1.5   msaitoh 	for (int i = 0; i < rxr->num_desc; i++) {
   1438    1.1   msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1439    1.4   msaitoh 		error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
   1440    1.1   msaitoh 		if (error) {
   1441    1.1   msaitoh 			aprint_error_dev(dev, "Unable to create RX dma map\n");
   1442    1.1   msaitoh 			goto fail;
   1443    1.1   msaitoh 		}
   1444    1.1   msaitoh 	}
   1445    1.1   msaitoh 
   1446    1.1   msaitoh 	return (0);
   1447    1.1   msaitoh 
   1448    1.1   msaitoh fail:
   1449    1.1   msaitoh 	/* Frees all, but can handle partial completion */
   1450  1.102   msaitoh 	ixgbe_free_receive_structures(sc);
   1451   1.28   msaitoh 
   1452    1.1   msaitoh 	return (error);
   1453   1.28   msaitoh } /* ixgbe_allocate_receive_buffers */
   1454    1.1   msaitoh 
   1455   1.28   msaitoh /************************************************************************
   1456   1.30   msaitoh  * ixgbe_free_receive_ring
   1457   1.28   msaitoh  ************************************************************************/
   1458   1.28   msaitoh static void
   1459    1.1   msaitoh ixgbe_free_receive_ring(struct rx_ring *rxr)
   1460   1.27   msaitoh {
   1461    1.5   msaitoh 	for (int i = 0; i < rxr->num_desc; i++) {
   1462   1.27   msaitoh 		ixgbe_rx_discard(rxr, i);
   1463    1.1   msaitoh 	}
   1464   1.28   msaitoh } /* ixgbe_free_receive_ring */
   1465    1.1   msaitoh 
   1466   1.28   msaitoh /************************************************************************
   1467   1.28   msaitoh  * ixgbe_setup_receive_ring
   1468    1.1   msaitoh  *
   1469   1.28   msaitoh  *   Initialize a receive ring and its buffers.
   1470   1.28   msaitoh  ************************************************************************/
   1471    1.1   msaitoh static int
   1472    1.1   msaitoh ixgbe_setup_receive_ring(struct rx_ring *rxr)
   1473    1.1   msaitoh {
   1474  1.102   msaitoh 	struct ixgbe_softc    *sc;
   1475   1.28   msaitoh 	struct ixgbe_rx_buf   *rxbuf;
   1476    1.1   msaitoh #ifdef LRO
   1477   1.28   msaitoh 	struct ifnet          *ifp;
   1478   1.28   msaitoh 	struct lro_ctrl       *lro = &rxr->lro;
   1479    1.1   msaitoh #endif /* LRO */
   1480    1.1   msaitoh #ifdef DEV_NETMAP
    1481  1.102   msaitoh 	struct netmap_adapter *na = NA(rxr->sc->ifp);
   1482   1.28   msaitoh 	struct netmap_slot    *slot;
   1483    1.1   msaitoh #endif /* DEV_NETMAP */
   1484   1.28   msaitoh 	int                   rsize, error = 0;
   1485    1.1   msaitoh 
   1486  1.102   msaitoh 	sc = rxr->sc;
   1487    1.1   msaitoh #ifdef LRO
   1488  1.102   msaitoh 	ifp = sc->ifp;
   1489    1.1   msaitoh #endif /* LRO */
   1490    1.1   msaitoh 
   1491    1.1   msaitoh 	/* Clear the ring contents */
   1492    1.1   msaitoh 	IXGBE_RX_LOCK(rxr);
   1493   1.28   msaitoh 
   1494    1.1   msaitoh #ifdef DEV_NETMAP
   1495  1.102   msaitoh 	if (sc->feat_en & IXGBE_FEATURE_NETMAP)
   1496   1.28   msaitoh 		slot = netmap_reset(na, NR_RX, rxr->me, 0);
   1497    1.1   msaitoh #endif /* DEV_NETMAP */
   1498   1.28   msaitoh 
   1499  1.102   msaitoh 	rsize = roundup2(sc->num_rx_desc *
   1500    1.1   msaitoh 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   1501    1.1   msaitoh 	bzero((void *)rxr->rx_base, rsize);
   1502    1.1   msaitoh 	/* Cache the size */
   1503  1.102   msaitoh 	rxr->mbuf_sz = sc->rx_mbuf_sz;
   1504    1.1   msaitoh 
   1505    1.1   msaitoh 	/* Free current RX buffer structs and their mbufs */
   1506    1.1   msaitoh 	ixgbe_free_receive_ring(rxr);
   1507    1.1   msaitoh 
   1508    1.1   msaitoh 	/* Now replenish the mbufs */
   1509    1.1   msaitoh 	for (int j = 0; j != rxr->num_desc; ++j) {
   1510   1.28   msaitoh 		struct mbuf *mp;
   1511    1.1   msaitoh 
   1512    1.1   msaitoh 		rxbuf = &rxr->rx_buffers[j];
   1513   1.28   msaitoh 
   1514    1.1   msaitoh #ifdef DEV_NETMAP
   1515    1.1   msaitoh 		/*
   1516    1.1   msaitoh 		 * In netmap mode, fill the map and set the buffer
   1517    1.1   msaitoh 		 * address in the NIC ring, considering the offset
   1518    1.1   msaitoh 		 * between the netmap and NIC rings (see comment in
   1519    1.1   msaitoh 		 * ixgbe_setup_transmit_ring() ). No need to allocate
   1520    1.1   msaitoh 		 * an mbuf, so end the block with a continue;
   1521    1.1   msaitoh 		 */
   1522  1.102   msaitoh 		if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
   1523   1.53   msaitoh 			int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j);
   1524    1.1   msaitoh 			uint64_t paddr;
   1525    1.1   msaitoh 			void *addr;
   1526    1.1   msaitoh 
   1527    1.1   msaitoh 			addr = PNMB(na, slot + sj, &paddr);
   1528    1.1   msaitoh 			netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
   1529    1.1   msaitoh 			/* Update descriptor and the cached value */
   1530    1.1   msaitoh 			rxr->rx_base[j].read.pkt_addr = htole64(paddr);
   1531    1.1   msaitoh 			rxbuf->addr = htole64(paddr);
   1532    1.1   msaitoh 			continue;
   1533    1.1   msaitoh 		}
   1534    1.1   msaitoh #endif /* DEV_NETMAP */
   1535   1.28   msaitoh 
   1536   1.28   msaitoh 		rxbuf->flags = 0;
   1537   1.87   msaitoh 		rxbuf->buf = ixgbe_getcl();
   1538    1.1   msaitoh 		if (rxbuf->buf == NULL) {
   1539   1.95   msaitoh 			IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
   1540    1.1   msaitoh 			error = ENOBUFS;
   1541   1.28   msaitoh 			goto fail;
   1542    1.1   msaitoh 		}
   1543    1.1   msaitoh 		mp = rxbuf->buf;
   1544    1.1   msaitoh 		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1545  1.102   msaitoh 		IXGBE_M_ADJ(sc, rxr, mp);
   1546    1.1   msaitoh 		/* Get the memory mapping */
   1547   1.28   msaitoh 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap,
   1548   1.28   msaitoh 		    mp, BUS_DMA_NOWAIT);
   1549   1.75   msaitoh 		if (error != 0) {
   1550   1.75   msaitoh 			/*
   1551   1.75   msaitoh 			 * Clear this entry for later cleanup in
   1552   1.75   msaitoh 			 * ixgbe_discard() which is called via
   1553   1.75   msaitoh 			 * ixgbe_free_receive_ring().
   1554   1.75   msaitoh 			 */
   1555   1.75   msaitoh 			m_freem(mp);
   1556   1.75   msaitoh 			rxbuf->buf = NULL;
   1557   1.85   msaitoh 			goto fail;
   1558   1.75   msaitoh 		}
   1559    1.1   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1560   1.83   msaitoh 		    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   1561    1.1   msaitoh 		/* Update the descriptor and the cached value */
   1562    1.1   msaitoh 		rxr->rx_base[j].read.pkt_addr =
   1563    1.1   msaitoh 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1564    1.1   msaitoh 		rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1565    1.1   msaitoh 	}
   1566    1.1   msaitoh 
   1567    1.1   msaitoh 	/* Setup our descriptor indices */
   1568    1.1   msaitoh 	rxr->next_to_check = 0;
   1569  1.102   msaitoh 	rxr->next_to_refresh = sc->num_rx_desc - 1; /* Fully allocated */
   1570    1.1   msaitoh 	rxr->lro_enabled = FALSE;
   1571   1.90   msaitoh 	rxr->discard_multidesc = false;
   1572   1.95   msaitoh 	IXGBE_EVC_STORE(&rxr->rx_copies, 0);
   1573   1.13   msaitoh #if 0 /* NetBSD */
   1574   1.95   msaitoh 	IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
   1575   1.13   msaitoh #if 1	/* Fix inconsistency */
   1576   1.95   msaitoh 	IXGBE_EVC_STORE(&rxr->rx_packets, 0);
   1577   1.13   msaitoh #endif
   1578   1.13   msaitoh #endif
   1579    1.1   msaitoh 	rxr->vtag_strip = FALSE;
   1580    1.1   msaitoh 
   1581    1.1   msaitoh 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1582    1.1   msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1583    1.1   msaitoh 
   1584    1.1   msaitoh 	/*
   1585   1.28   msaitoh 	 * Now set up the LRO interface
   1586   1.28   msaitoh 	 */
   1587    1.1   msaitoh 	if (ixgbe_rsc_enable)
   1588    1.1   msaitoh 		ixgbe_setup_hw_rsc(rxr);
   1589    1.1   msaitoh #ifdef LRO
   1590    1.1   msaitoh 	else if (ifp->if_capenable & IFCAP_LRO) {
   1591  1.102   msaitoh 		device_t dev = sc->dev;
   1592    1.1   msaitoh 		int err = tcp_lro_init(lro);
   1593    1.1   msaitoh 		if (err) {
   1594    1.1   msaitoh 			device_printf(dev, "LRO Initialization failed!\n");
   1595    1.1   msaitoh 			goto fail;
   1596    1.1   msaitoh 		}
   1597    1.1   msaitoh 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   1598    1.1   msaitoh 		rxr->lro_enabled = TRUE;
   1599  1.102   msaitoh 		lro->ifp = sc->ifp;
   1600    1.1   msaitoh 	}
   1601    1.1   msaitoh #endif /* LRO */
   1602    1.1   msaitoh 
   1603    1.1   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1604   1.28   msaitoh 
   1605    1.1   msaitoh 	return (0);
   1606    1.1   msaitoh 
   1607    1.1   msaitoh fail:
   1608    1.1   msaitoh 	ixgbe_free_receive_ring(rxr);
   1609    1.1   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1610   1.28   msaitoh 
   1611    1.1   msaitoh 	return (error);
   1612   1.28   msaitoh } /* ixgbe_setup_receive_ring */
   1613    1.1   msaitoh 
   1614   1.28   msaitoh /************************************************************************
   1615   1.28   msaitoh  * ixgbe_setup_receive_structures - Initialize all receive rings.
   1616   1.28   msaitoh  ************************************************************************/
   1617    1.1   msaitoh int
   1618  1.102   msaitoh ixgbe_setup_receive_structures(struct ixgbe_softc *sc)
   1619    1.1   msaitoh {
   1620  1.102   msaitoh 	struct rx_ring *rxr = sc->rx_rings;
   1621   1.28   msaitoh 	int            j;
   1622    1.1   msaitoh 
   1623   1.62   msaitoh 	INIT_DEBUGOUT("ixgbe_setup_receive_structures");
   1624  1.102   msaitoh 	for (j = 0; j < sc->num_queues; j++, rxr++)
   1625    1.1   msaitoh 		if (ixgbe_setup_receive_ring(rxr))
   1626    1.1   msaitoh 			goto fail;
   1627    1.1   msaitoh 
   1628    1.1   msaitoh 	return (0);
   1629    1.1   msaitoh fail:
   1630    1.1   msaitoh 	/*
   1631    1.1   msaitoh 	 * Free RX buffers allocated so far, we will only handle
   1632    1.1   msaitoh 	 * the rings that completed, the failing case will have
    1633    1.1   msaitoh 	 * cleaned up for itself. 'j' failed, so it's the terminus.
   1634    1.1   msaitoh 	 */
   1635    1.1   msaitoh 	for (int i = 0; i < j; ++i) {
   1636  1.102   msaitoh 		rxr = &sc->rx_rings[i];
   1637   1.27   msaitoh 		IXGBE_RX_LOCK(rxr);
   1638    1.1   msaitoh 		ixgbe_free_receive_ring(rxr);
   1639   1.27   msaitoh 		IXGBE_RX_UNLOCK(rxr);
   1640    1.1   msaitoh 	}
   1641    1.1   msaitoh 
   1642    1.1   msaitoh 	return (ENOBUFS);
   1643   1.28   msaitoh } /* ixgbe_setup_receive_structures */
   1644    1.1   msaitoh 
   1645    1.3   msaitoh 
   1646   1.28   msaitoh /************************************************************************
   1647   1.28   msaitoh  * ixgbe_free_receive_structures - Free all receive rings.
   1648   1.28   msaitoh  ************************************************************************/
   1649    1.1   msaitoh void
   1650  1.102   msaitoh ixgbe_free_receive_structures(struct ixgbe_softc *sc)
   1651    1.1   msaitoh {
   1652  1.102   msaitoh 	struct rx_ring *rxr = sc->rx_rings;
   1653    1.1   msaitoh 
   1654    1.1   msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
   1655    1.1   msaitoh 
   1656  1.102   msaitoh 	for (int i = 0; i < sc->num_queues; i++, rxr++) {
   1657    1.1   msaitoh 		ixgbe_free_receive_buffers(rxr);
   1658    1.1   msaitoh #ifdef LRO
   1659    1.1   msaitoh 		/* Free LRO memory */
   1660   1.28   msaitoh 		tcp_lro_free(&rxr->lro);
   1661    1.1   msaitoh #endif /* LRO */
   1662    1.1   msaitoh 		/* Free the ring memory as well */
   1663  1.102   msaitoh 		ixgbe_dma_free(sc, &rxr->rxdma);
   1664    1.1   msaitoh 		IXGBE_RX_LOCK_DESTROY(rxr);
   1665    1.1   msaitoh 	}
   1666    1.1   msaitoh 
   1667  1.102   msaitoh 	free(sc->rx_rings, M_DEVBUF);
   1668   1.28   msaitoh } /* ixgbe_free_receive_structures */
   1669    1.1   msaitoh 
   1670    1.1   msaitoh 
   1671   1.28   msaitoh /************************************************************************
   1672   1.28   msaitoh  * ixgbe_free_receive_buffers - Free receive ring data structures
   1673   1.28   msaitoh  ************************************************************************/
   1674    1.1   msaitoh static void
   1675    1.1   msaitoh ixgbe_free_receive_buffers(struct rx_ring *rxr)
   1676    1.1   msaitoh {
   1677  1.102   msaitoh 	struct ixgbe_softc  *sc = rxr->sc;
   1678   1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1679    1.1   msaitoh 
   1680    1.1   msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
   1681    1.1   msaitoh 
   1682    1.1   msaitoh 	/* Cleanup any existing buffers */
   1683    1.1   msaitoh 	if (rxr->rx_buffers != NULL) {
   1684  1.102   msaitoh 		for (int i = 0; i < sc->num_rx_desc; i++) {
   1685    1.1   msaitoh 			rxbuf = &rxr->rx_buffers[i];
   1686   1.27   msaitoh 			ixgbe_rx_discard(rxr, i);
   1687    1.1   msaitoh 			if (rxbuf->pmap != NULL) {
   1688    1.1   msaitoh 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   1689    1.1   msaitoh 				rxbuf->pmap = NULL;
   1690    1.1   msaitoh 			}
   1691    1.1   msaitoh 		}
   1692   1.59   msaitoh 
   1693    1.1   msaitoh 		if (rxr->rx_buffers != NULL) {
   1694    1.1   msaitoh 			free(rxr->rx_buffers, M_DEVBUF);
   1695    1.1   msaitoh 			rxr->rx_buffers = NULL;
   1696    1.1   msaitoh 		}
   1697    1.1   msaitoh 	}
   1698    1.1   msaitoh 
   1699    1.1   msaitoh 	if (rxr->ptag != NULL) {
   1700    1.1   msaitoh 		ixgbe_dma_tag_destroy(rxr->ptag);
   1701    1.1   msaitoh 		rxr->ptag = NULL;
   1702    1.1   msaitoh 	}
   1703    1.1   msaitoh 
   1704    1.1   msaitoh 	return;
   1705   1.28   msaitoh } /* ixgbe_free_receive_buffers */
   1706    1.1   msaitoh 
   1707   1.28   msaitoh /************************************************************************
   1708   1.28   msaitoh  * ixgbe_rx_input
   1709   1.28   msaitoh  ************************************************************************/
   1710    1.1   msaitoh static __inline void
   1711   1.28   msaitoh ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
   1712   1.28   msaitoh     u32 ptype)
   1713    1.1   msaitoh {
   1714  1.102   msaitoh 	struct ixgbe_softc *sc = ifp->if_softc;
   1715    1.1   msaitoh 
   1716    1.1   msaitoh #ifdef LRO
   1717  1.102   msaitoh 	struct ethercom *ec = &sc->osdep.ec;
   1718    1.1   msaitoh 
   1719   1.28   msaitoh 	/*
    1720   1.28   msaitoh 	 * At the moment LRO is only for IP/TCP packets whose TCP checksum
    1721   1.28   msaitoh 	 * has been computed by hardware, and which carry no VLAN tag in the
    1722   1.28   msaitoh 	 * ethernet header.  In case of IPv6 we do not yet support ext. hdrs.
   1723   1.28   msaitoh 	 */
   1724    1.1   msaitoh         if (rxr->lro_enabled &&
   1725    1.1   msaitoh             (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
   1726    1.1   msaitoh             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   1727    1.1   msaitoh             ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   1728    1.1   msaitoh             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
   1729    1.1   msaitoh             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   1730    1.1   msaitoh             (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
   1731    1.1   msaitoh             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
   1732    1.1   msaitoh             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
   1733    1.1   msaitoh                 /*
   1734    1.1   msaitoh                  * Send to the stack if:
   1735  1.103   msaitoh                  *  - LRO not enabled, or
   1736  1.103   msaitoh                  *  - no LRO resources, or
   1737  1.103   msaitoh                  *  - lro enqueue fails
   1738    1.1   msaitoh                  */
   1739    1.1   msaitoh                 if (rxr->lro.lro_cnt != 0)
   1740    1.1   msaitoh                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
   1741    1.1   msaitoh                                 return;
   1742    1.1   msaitoh         }
   1743    1.1   msaitoh #endif /* LRO */
   1744    1.1   msaitoh 
   1745  1.102   msaitoh 	if_percpuq_enqueue(sc->ipq, m);
   1746   1.28   msaitoh } /* ixgbe_rx_input */
   1747    1.1   msaitoh 
   1748   1.28   msaitoh /************************************************************************
   1749   1.28   msaitoh  * ixgbe_rx_discard
   1750   1.28   msaitoh  ************************************************************************/
   1751    1.1   msaitoh static __inline void
   1752    1.1   msaitoh ixgbe_rx_discard(struct rx_ring *rxr, int i)
   1753    1.1   msaitoh {
   1754   1.28   msaitoh 	struct ixgbe_rx_buf *rbuf;
   1755    1.1   msaitoh 
   1756    1.1   msaitoh 	rbuf = &rxr->rx_buffers[i];
   1757    1.1   msaitoh 
   1758    1.1   msaitoh 	/*
   1759   1.70   msaitoh 	 * With advanced descriptors the writeback clobbers the buffer addrs,
    1760   1.70   msaitoh 	 * so it's easier to just free the existing mbufs and take the normal
   1761   1.70   msaitoh 	 * refresh path to get new buffers and mapping.
   1762   1.28   msaitoh 	 */
   1763    1.1   msaitoh 
    1764   1.26   msaitoh 	if (rbuf->fmp != NULL) { /* Partial chain? */
   1765   1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1766   1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1767   1.72       rin 		ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
   1768    1.1   msaitoh 		m_freem(rbuf->fmp);
   1769    1.1   msaitoh 		rbuf->fmp = NULL;
   1770    1.1   msaitoh 		rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
   1771    1.1   msaitoh 	} else if (rbuf->buf) {
   1772   1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1773   1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1774   1.72       rin 		ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
   1775    1.1   msaitoh 		m_free(rbuf->buf);
   1776    1.1   msaitoh 		rbuf->buf = NULL;
   1777    1.1   msaitoh 	}
   1778    1.1   msaitoh 
   1779    1.1   msaitoh 	rbuf->flags = 0;
   1780    1.1   msaitoh 
   1781    1.1   msaitoh 	return;
   1782   1.28   msaitoh } /* ixgbe_rx_discard */
   1783    1.1   msaitoh 
   1784    1.1   msaitoh 
   1785   1.28   msaitoh /************************************************************************
   1786   1.28   msaitoh  * ixgbe_rxeof
   1787    1.1   msaitoh  *
   1788   1.28   msaitoh  *   Executes in interrupt context. It replenishes the
    1789   1.28   msaitoh  *   mbufs in the descriptor ring and sends data which has
    1790   1.28   msaitoh  *   been DMA'ed into host memory to the upper layer.
   1791    1.1   msaitoh  *
   1792   1.28   msaitoh  *   Return TRUE for more work, FALSE for all clean.
   1793   1.28   msaitoh  ************************************************************************/
   1794    1.1   msaitoh bool
   1795    1.1   msaitoh ixgbe_rxeof(struct ix_queue *que)
   1796    1.1   msaitoh {
   1797  1.102   msaitoh 	struct ixgbe_softc	*sc = que->sc;
   1798    1.1   msaitoh 	struct rx_ring		*rxr = que->rxr;
   1799  1.102   msaitoh 	struct ifnet		*ifp = sc->ifp;
   1800    1.1   msaitoh #ifdef LRO
   1801    1.1   msaitoh 	struct lro_ctrl		*lro = &rxr->lro;
   1802    1.1   msaitoh #endif /* LRO */
   1803   1.28   msaitoh 	union ixgbe_adv_rx_desc	*cur;
   1804   1.28   msaitoh 	struct ixgbe_rx_buf	*rbuf, *nbuf;
   1805    1.1   msaitoh 	int			i, nextp, processed = 0;
   1806    1.1   msaitoh 	u32			staterr = 0;
   1807   1.94   msaitoh 	u32			loopcount = 0, numdesc;
   1808  1.102   msaitoh 	u32			limit = sc->rx_process_limit;
   1809  1.102   msaitoh 	u32			rx_copy_len = sc->rx_copy_len;
   1810   1.90   msaitoh 	bool			discard_multidesc = rxr->discard_multidesc;
   1811   1.94   msaitoh 	bool			wraparound = false;
   1812   1.94   msaitoh 	unsigned int		syncremain;
   1813    1.1   msaitoh #ifdef RSS
   1814    1.1   msaitoh 	u16			pkt_info;
   1815    1.1   msaitoh #endif
   1816    1.1   msaitoh 
   1817    1.1   msaitoh 	IXGBE_RX_LOCK(rxr);
   1818    1.1   msaitoh 
   1819    1.1   msaitoh #ifdef DEV_NETMAP
   1820  1.102   msaitoh 	if (sc->feat_en & IXGBE_FEATURE_NETMAP) {
   1821   1.28   msaitoh 		/* Same as the txeof routine: wakeup clients on intr. */
   1822   1.28   msaitoh 		if (netmap_rx_irq(ifp, rxr->me, &processed)) {
   1823   1.28   msaitoh 			IXGBE_RX_UNLOCK(rxr);
   1824   1.28   msaitoh 			return (FALSE);
   1825   1.28   msaitoh 		}
   1826    1.1   msaitoh 	}
   1827    1.1   msaitoh #endif /* DEV_NETMAP */
   1828    1.1   msaitoh 
   1829   1.94   msaitoh 	/* Sync the ring. The size is rx_process_limit or the first half */
   1830   1.94   msaitoh 	if ((rxr->next_to_check + limit) <= rxr->num_desc) {
   1831   1.94   msaitoh 		/* Non-wraparound */
   1832   1.94   msaitoh 		numdesc = limit;
   1833   1.94   msaitoh 		syncremain = 0;
   1834   1.94   msaitoh 	} else {
   1835   1.94   msaitoh 		/* Wraparound. Sync the first half. */
   1836   1.94   msaitoh 		numdesc = rxr->num_desc - rxr->next_to_check;
   1837   1.94   msaitoh 
   1838   1.94   msaitoh 		/* Set the size of the last half */
   1839   1.94   msaitoh 		syncremain = limit - numdesc;
   1840   1.94   msaitoh 	}
   1841   1.94   msaitoh 	bus_dmamap_sync(rxr->rxdma.dma_tag->dt_dmat,
   1842   1.94   msaitoh 	    rxr->rxdma.dma_map,
   1843   1.94   msaitoh 	    sizeof(union ixgbe_adv_rx_desc) * rxr->next_to_check,
   1844   1.94   msaitoh 	    sizeof(union ixgbe_adv_rx_desc) * numdesc,
   1845   1.94   msaitoh 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1846   1.94   msaitoh 
   1847   1.65   msaitoh 	/*
    1848   1.65   msaitoh 	 * The maximum number of loop iterations is rx_process_limit. If
    1849   1.65   msaitoh 	 * discard_multidesc is true, keep processing so that a broken
    1850   1.65   msaitoh 	 * packet is not sent to the upper layer.
   1851   1.65   msaitoh 	 */
   1852   1.65   msaitoh 	for (i = rxr->next_to_check;
   1853   1.89   msaitoh 	     (loopcount < limit) || (discard_multidesc == true);) {
   1854   1.65   msaitoh 
   1855   1.28   msaitoh 		struct mbuf *sendmp, *mp;
   1856   1.64  knakahar 		struct mbuf *newmp;
   1857   1.28   msaitoh 		u32         rsc, ptype;
   1858   1.28   msaitoh 		u16         len;
   1859   1.28   msaitoh 		u16         vtag = 0;
   1860   1.28   msaitoh 		bool        eop;
   1861   1.93   msaitoh 		bool        discard = false;
   1862   1.53   msaitoh 
   1863   1.94   msaitoh 		if (wraparound) {
   1864   1.94   msaitoh 			/* Sync the last half. */
   1865   1.94   msaitoh 			KASSERT(syncremain != 0);
   1866   1.94   msaitoh 			numdesc = syncremain;
   1867   1.94   msaitoh 			wraparound = false;
   1868   1.94   msaitoh 		} else if (__predict_false(loopcount >= limit)) {
   1869   1.94   msaitoh 			KASSERT(discard_multidesc == true);
   1870   1.94   msaitoh 			numdesc = 1;
   1871   1.94   msaitoh 		} else
   1872   1.94   msaitoh 			numdesc = 0;
   1873   1.94   msaitoh 
   1874   1.94   msaitoh 		if (numdesc != 0)
   1875   1.94   msaitoh 			bus_dmamap_sync(rxr->rxdma.dma_tag->dt_dmat,
   1876   1.94   msaitoh 			    rxr->rxdma.dma_map, 0,
   1877   1.94   msaitoh 			    sizeof(union ixgbe_adv_rx_desc) * numdesc,
   1878   1.94   msaitoh 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1879    1.1   msaitoh 
   1880    1.1   msaitoh 		cur = &rxr->rx_base[i];
   1881    1.1   msaitoh 		staterr = le32toh(cur->wb.upper.status_error);
   1882    1.1   msaitoh #ifdef RSS
   1883    1.1   msaitoh 		pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
   1884    1.1   msaitoh #endif
   1885    1.1   msaitoh 
   1886    1.1   msaitoh 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   1887    1.1   msaitoh 			break;
   1888    1.1   msaitoh 
   1889   1.89   msaitoh 		loopcount++;
   1890   1.93   msaitoh 		sendmp = newmp = NULL;
   1891    1.1   msaitoh 		nbuf = NULL;
   1892    1.1   msaitoh 		rsc = 0;
   1893    1.1   msaitoh 		cur->wb.upper.status_error = 0;
   1894    1.1   msaitoh 		rbuf = &rxr->rx_buffers[i];
   1895    1.1   msaitoh 		mp = rbuf->buf;
   1896    1.1   msaitoh 
   1897    1.1   msaitoh 		len = le16toh(cur->wb.upper.length);
   1898    1.1   msaitoh 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   1899    1.1   msaitoh 		    IXGBE_RXDADV_PKTTYPE_MASK;
   1900    1.1   msaitoh 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   1901    1.1   msaitoh 
   1902    1.1   msaitoh 		/* Make sure bad packets are discarded */
   1903    1.1   msaitoh 		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
   1904    1.3   msaitoh #if __FreeBSD_version >= 1100036
   1905  1.102   msaitoh 			if (sc->feat_en & IXGBE_FEATURE_VF)
   1906    1.4   msaitoh 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
   1907    1.3   msaitoh #endif
   1908   1.95   msaitoh 			IXGBE_EVC_ADD(&rxr->rx_discarded, 1);
   1909    1.1   msaitoh 			ixgbe_rx_discard(rxr, i);
   1910   1.65   msaitoh 			discard_multidesc = false;
   1911    1.1   msaitoh 			goto next_desc;
   1912    1.1   msaitoh 		}
   1913    1.1   msaitoh 
   1914   1.93   msaitoh 		if (__predict_false(discard_multidesc))
   1915   1.93   msaitoh 			discard = true;
   1916   1.93   msaitoh 		else {
   1917   1.93   msaitoh 			/* Pre-alloc new mbuf. */
   1918   1.93   msaitoh 
   1919   1.93   msaitoh 			if ((rbuf->fmp == NULL) &&
   1920   1.97   msaitoh 			    eop && (len <= rx_copy_len)) {
   1921   1.93   msaitoh 				/* For short packet. See below. */
   1922   1.93   msaitoh 				sendmp = m_gethdr(M_NOWAIT, MT_DATA);
   1923   1.93   msaitoh 				if (__predict_false(sendmp == NULL)) {
   1924   1.95   msaitoh 					IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
   1925   1.93   msaitoh 					discard = true;
   1926   1.93   msaitoh 				}
   1927   1.93   msaitoh 			} else {
   1928   1.93   msaitoh 				/* For long packet. */
   1929   1.93   msaitoh 				newmp = ixgbe_getcl();
   1930   1.93   msaitoh 				if (__predict_false(newmp == NULL)) {
   1931   1.95   msaitoh 					IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
   1932   1.93   msaitoh 					discard = true;
   1933   1.93   msaitoh 				}
   1934   1.93   msaitoh 			}
   1935   1.93   msaitoh 		}
   1936   1.92   msaitoh 
   1937   1.93   msaitoh 		if (__predict_false(discard)) {
   1938   1.65   msaitoh 			/*
   1939   1.65   msaitoh 			 * Descriptor initialization is already done by the
   1940   1.65   msaitoh 			 * above code (cur->wb.upper.status_error = 0).
   1941   1.65   msaitoh 			 * So, we can reuse current rbuf->buf for new packet.
   1942   1.65   msaitoh 			 *
   1943   1.65   msaitoh 			 * Rewrite the buffer addr, see comment in
   1944   1.65   msaitoh 			 * ixgbe_rx_discard().
   1945   1.65   msaitoh 			 */
   1946   1.65   msaitoh 			cur->read.pkt_addr = rbuf->addr;
   1947   1.65   msaitoh 			m_freem(rbuf->fmp);
   1948   1.65   msaitoh 			rbuf->fmp = NULL;
   1949   1.65   msaitoh 			if (!eop) {
   1950   1.65   msaitoh 				/* Discard the entire packet. */
   1951   1.65   msaitoh 				discard_multidesc = true;
   1952   1.65   msaitoh 			} else
   1953   1.65   msaitoh 				discard_multidesc = false;
   1954   1.64  knakahar 			goto next_desc;
   1955   1.64  knakahar 		}
   1956   1.65   msaitoh 		discard_multidesc = false;
   1957   1.64  knakahar 
   1958   1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1959   1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1960   1.27   msaitoh 
   1961    1.1   msaitoh 		/*
   1962   1.28   msaitoh 		 * On 82599 which supports a hardware
   1963   1.28   msaitoh 		 * LRO (called HW RSC), packets need
   1964   1.28   msaitoh 		 * not be fragmented across sequential
   1965   1.28   msaitoh 		 * descriptors, rather the next descriptor
   1966   1.28   msaitoh 		 * is indicated in bits of the descriptor.
   1967   1.99    andvar 		 * This also means that we might process
   1968   1.28   msaitoh 		 * more than one packet at a time, something
    1969   1.28   msaitoh 		 * that has never been true before; it
   1970   1.28   msaitoh 		 * required eliminating global chain pointers
   1971   1.28   msaitoh 		 * in favor of what we are doing here.  -jfv
   1972   1.28   msaitoh 		 */
   1973    1.1   msaitoh 		if (!eop) {
   1974    1.1   msaitoh 			/*
   1975   1.28   msaitoh 			 * Figure out the next descriptor
   1976   1.28   msaitoh 			 * of this frame.
   1977   1.28   msaitoh 			 */
   1978    1.1   msaitoh 			if (rxr->hw_rsc == TRUE) {
   1979    1.1   msaitoh 				rsc = ixgbe_rsc_count(cur);
   1980    1.1   msaitoh 				rxr->rsc_num += (rsc - 1);
   1981    1.1   msaitoh 			}
   1982    1.1   msaitoh 			if (rsc) { /* Get hardware index */
   1983   1.28   msaitoh 				nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
   1984    1.1   msaitoh 				    IXGBE_RXDADV_NEXTP_SHIFT);
   1985    1.1   msaitoh 			} else { /* Just sequential */
   1986    1.1   msaitoh 				nextp = i + 1;
   1987  1.102   msaitoh 				if (nextp == sc->num_rx_desc)
   1988    1.1   msaitoh 					nextp = 0;
   1989    1.1   msaitoh 			}
   1990    1.1   msaitoh 			nbuf = &rxr->rx_buffers[nextp];
   1991    1.1   msaitoh 			prefetch(nbuf);
   1992    1.1   msaitoh 		}
   1993    1.1   msaitoh 		/*
   1994   1.28   msaitoh 		 * Rather than using the fmp/lmp global pointers
   1995   1.28   msaitoh 		 * we now keep the head of a packet chain in the
   1996   1.28   msaitoh 		 * buffer struct and pass this along from one
   1997   1.28   msaitoh 		 * descriptor to the next, until we get EOP.
   1998   1.28   msaitoh 		 */
   1999    1.1   msaitoh 		/*
   2000   1.28   msaitoh 		 * See if there is a stored head
   2001   1.28   msaitoh 		 * that determines what we are
   2002   1.28   msaitoh 		 */
   2003   1.93   msaitoh 		if (rbuf->fmp != NULL) {
   2004   1.93   msaitoh 			/* Secondary frag */
   2005   1.93   msaitoh 			sendmp = rbuf->fmp;
   2006   1.93   msaitoh 
   2007   1.86   msaitoh 			/* Update new (used in future) mbuf */
   2008   1.86   msaitoh 			newmp->m_pkthdr.len = newmp->m_len = rxr->mbuf_sz;
   2009  1.102   msaitoh 			IXGBE_M_ADJ(sc, rxr, newmp);
   2010   1.64  knakahar 			rbuf->buf = newmp;
   2011   1.64  knakahar 			rbuf->fmp = NULL;
   2012   1.86   msaitoh 
   2013   1.86   msaitoh 			/* For secondary frag */
   2014   1.74   msaitoh 			mp->m_len = len;
   2015    1.1   msaitoh 			mp->m_flags &= ~M_PKTHDR;
   2016   1.86   msaitoh 
   2017   1.86   msaitoh 			/* For sendmp */
   2018    1.1   msaitoh 			sendmp->m_pkthdr.len += mp->m_len;
   2019    1.1   msaitoh 		} else {
   2020    1.1   msaitoh 			/*
   2021   1.86   msaitoh 			 * It's the first segment of a multi descriptor
   2022   1.86   msaitoh 			 * packet or a single segment which contains a full
   2023   1.86   msaitoh 			 * packet.
   2024   1.86   msaitoh 			 */
   2025   1.86   msaitoh 
   2026   1.97   msaitoh 			if (eop && (len <= rx_copy_len)) {
   2027   1.93   msaitoh 				/*
    2028   1.93   msaitoh 				 * Optimize: this might be a small packet, perhaps
    2029   1.93   msaitoh 				 * just a TCP ACK. Copy it into a new mbuf and
    2030   1.93   msaitoh 				 * leave the old mbuf+cluster for re-use.
   2031   1.93   msaitoh 				 */
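                          				/* ETHER_ALIGN (2 bytes) keeps the IP header 4-byte aligned */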
   2032   1.93   msaitoh 				sendmp->m_data += ETHER_ALIGN;
   2033   1.93   msaitoh 				memcpy(mtod(sendmp, void *),
   2034   1.93   msaitoh 				    mtod(mp, void *), len);
   2035   1.95   msaitoh 				IXGBE_EVC_ADD(&rxr->rx_copies, 1);
   2036   1.93   msaitoh 				rbuf->flags |= IXGBE_RX_COPY;
   2037   1.93   msaitoh 			} else {
   2038   1.96   msaitoh 				/* For long packet */
   2039   1.86   msaitoh 
   2040   1.86   msaitoh 				/* Update new (used in future) mbuf */
   2041   1.86   msaitoh 				newmp->m_pkthdr.len = newmp->m_len
   2042   1.86   msaitoh 				    = rxr->mbuf_sz;
   2043  1.102   msaitoh 				IXGBE_M_ADJ(sc, rxr, newmp);
   2044   1.64  knakahar 				rbuf->buf = newmp;
   2045   1.64  knakahar 				rbuf->fmp = NULL;
   2046   1.86   msaitoh 
   2047   1.86   msaitoh 				/* For sendmp */
   2048    1.1   msaitoh 				sendmp = mp;
   2049    1.1   msaitoh 			}
   2050    1.1   msaitoh 
   2051    1.1   msaitoh 			/* first desc of a non-ps chain */
   2052   1.86   msaitoh 			sendmp->m_pkthdr.len = sendmp->m_len = len;
   2053    1.1   msaitoh 		}
   2054    1.1   msaitoh 		++processed;
   2055    1.1   msaitoh 
   2056    1.1   msaitoh 		/* Pass the head pointer on */
   2057    1.1   msaitoh 		if (eop == 0) {
   2058    1.1   msaitoh 			nbuf->fmp = sendmp;
   2059    1.1   msaitoh 			sendmp = NULL;
   2060    1.1   msaitoh 			mp->m_next = nbuf->buf;
   2061    1.1   msaitoh 		} else { /* Sending this frame */
   2062    1.1   msaitoh 			m_set_rcvif(sendmp, ifp);
   2063   1.31   msaitoh 			++rxr->packets;
   2064   1.95   msaitoh 			IXGBE_EVC_ADD(&rxr->rx_packets, 1);
   2065    1.1   msaitoh 			/* capture data for AIM */
   2066    1.1   msaitoh 			rxr->bytes += sendmp->m_pkthdr.len;
   2067   1.95   msaitoh 			IXGBE_EVC_ADD(&rxr->rx_bytes, sendmp->m_pkthdr.len);
   2068    1.1   msaitoh 			/* Process vlan info */
   2069   1.28   msaitoh 			if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
   2070    1.1   msaitoh 				vtag = le16toh(cur->wb.upper.vlan);
   2071    1.1   msaitoh 			if (vtag) {
   2072   1.29  knakahar 				vlan_set_tag(sendmp, vtag);
   2073    1.1   msaitoh 			}
   2074    1.1   msaitoh 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   2075    1.1   msaitoh 				ixgbe_rx_checksum(staterr, sendmp, ptype,
   2076  1.102   msaitoh 				   &sc->stats.pf);
   2077    1.1   msaitoh 			}
   2078    1.8   msaitoh 
   2079    1.6   msaitoh #if 0 /* FreeBSD */
   2080   1.28   msaitoh 			/*
   2081   1.28   msaitoh 			 * In case of multiqueue, we have RXCSUM.PCSD bit set
   2082   1.28   msaitoh 			 * and never cleared. This means we have RSS hash
   2083   1.28   msaitoh 			 * available to be used.
   2084   1.28   msaitoh 			 */
   2085  1.102   msaitoh 			if (sc->num_queues > 1) {
   2086   1.28   msaitoh 				sendmp->m_pkthdr.flowid =
   2087   1.28   msaitoh 				    le32toh(cur->wb.lower.hi_dword.rss);
   2088   1.44   msaitoh 				switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
   2089   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV4:
   2090   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2091   1.28   msaitoh 					    M_HASHTYPE_RSS_IPV4);
   2092   1.28   msaitoh 					break;
   2093   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
   2094   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2095   1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV4);
   2096   1.28   msaitoh 					break;
   2097   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6:
   2098   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2099   1.28   msaitoh 					    M_HASHTYPE_RSS_IPV6);
   2100   1.28   msaitoh 					break;
   2101   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
   2102   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2103   1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV6);
   2104   1.28   msaitoh 					break;
   2105   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
   2106   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2107   1.28   msaitoh 					    M_HASHTYPE_RSS_IPV6_EX);
   2108   1.28   msaitoh 					break;
   2109   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
   2110   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2111   1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV6_EX);
   2112   1.28   msaitoh 					break;
   2113    1.6   msaitoh #if __FreeBSD_version > 1100000
   2114   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
   2115   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2116   1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV4);
   2117   1.28   msaitoh 					break;
   2118   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
   2119   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2120   1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV6);
   2121   1.28   msaitoh 					break;
   2122   1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
   2123   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2124   1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV6_EX);
   2125   1.28   msaitoh 					break;
   2126   1.28   msaitoh #endif
   2127   1.44   msaitoh 				default:
   2128   1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2129   1.28   msaitoh 					    M_HASHTYPE_OPAQUE_HASH);
   2130   1.28   msaitoh 				}
   2131   1.28   msaitoh 			} else {
   2132   1.28   msaitoh 				sendmp->m_pkthdr.flowid = que->msix;
   2133    1.1   msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
   2134    1.1   msaitoh 			}
   2135    1.8   msaitoh #endif
   2136    1.1   msaitoh 		}
   2137    1.1   msaitoh next_desc:
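                          		/*
                          		 * Sync the ring back for device access before we
                          		 * advance to the next descriptor.
                          		 */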
   2138    1.1   msaitoh 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   2139    1.1   msaitoh 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2140    1.1   msaitoh 
   2141    1.1   msaitoh 		/* Advance our pointers to the next descriptor. */
   2142   1.94   msaitoh 		if (++i == rxr->num_desc) {
   2143   1.94   msaitoh 			wraparound = true;
   2144    1.1   msaitoh 			i = 0;
   2145   1.94   msaitoh 		}
   2146   1.82   msaitoh 		rxr->next_to_check = i;
   2147    1.1   msaitoh 
   2148    1.1   msaitoh 		/* Now send to the stack or do LRO */
   2149   1.85   msaitoh 		if (sendmp != NULL)
   2150    1.1   msaitoh 			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
   2151    1.1   msaitoh 
    2152   1.28   msaitoh 		/* Refresh mbufs every eight descriptors */
   2153    1.1   msaitoh 		if (processed == 8) {
   2154    1.1   msaitoh 			ixgbe_refresh_mbufs(rxr, i);
   2155    1.1   msaitoh 			processed = 0;
   2156    1.1   msaitoh 		}
   2157    1.1   msaitoh 	}
   2158    1.1   msaitoh 
   2159   1.90   msaitoh 	/* Save the current status */
   2160   1.90   msaitoh 	rxr->discard_multidesc = discard_multidesc;
   2161   1.90   msaitoh 
   2162    1.1   msaitoh 	/* Refresh any remaining buf structs */
   2163    1.1   msaitoh 	if (ixgbe_rx_unrefreshed(rxr))
   2164    1.1   msaitoh 		ixgbe_refresh_mbufs(rxr, i);
   2165    1.1   msaitoh 
   2166   1.28   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   2167   1.28   msaitoh 
   2168    1.1   msaitoh #ifdef LRO
   2169    1.1   msaitoh 	/*
   2170    1.1   msaitoh 	 * Flush any outstanding LRO work
   2171    1.1   msaitoh 	 */
   2172   1.10   msaitoh 	tcp_lro_flush_all(lro);
   2173    1.1   msaitoh #endif /* LRO */
   2174    1.1   msaitoh 
   2175    1.1   msaitoh 	/*
   2176   1.28   msaitoh 	 * Still have cleaning to do?
   2177   1.28   msaitoh 	 */
   2178    1.1   msaitoh 	if ((staterr & IXGBE_RXD_STAT_DD) != 0)
   2179   1.28   msaitoh 		return (TRUE);
   2180   1.28   msaitoh 
   2181   1.28   msaitoh 	return (FALSE);
   2182   1.28   msaitoh } /* ixgbe_rxeof */
   2183    1.1   msaitoh 
   2184    1.1   msaitoh 
   2185   1.28   msaitoh /************************************************************************
   2186   1.28   msaitoh  * ixgbe_rx_checksum
   2187    1.1   msaitoh  *
   2188   1.28   msaitoh  *   Verify that the hardware indicated that the checksum is valid.
    2189   1.28   msaitoh  *   Inform the stack about the status of the checksum so that the
    2190   1.28   msaitoh  *   stack doesn't spend time verifying it again.
   2191   1.28   msaitoh  ************************************************************************/
   2192    1.1   msaitoh static void
   2193    1.1   msaitoh ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   2194    1.1   msaitoh     struct ixgbe_hw_stats *stats)
   2195    1.1   msaitoh {
   2196   1.28   msaitoh 	u16  status = (u16)staterr;
   2197   1.28   msaitoh 	u8   errors = (u8)(staterr >> 24);
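                          	/*
                          	 * The status bits tested below sit in the low word of
                          	 * staterr; the error bits sit in the top byte, so the
                          	 * ">> 24" above lines them up with the one-byte
                          	 * IXGBE_RXD_ERR_* definitions.
                          	 */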
   2198    1.1   msaitoh #if 0
   2199   1.28   msaitoh 	bool sctp = false;
   2200    1.1   msaitoh 
   2201    1.1   msaitoh 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   2202    1.1   msaitoh 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   2203    1.8   msaitoh 		sctp = true;
   2204    1.1   msaitoh #endif
   2205    1.1   msaitoh 
   2206    1.8   msaitoh 	/* IPv4 checksum */
   2207    1.1   msaitoh 	if (status & IXGBE_RXD_STAT_IPCS) {
   2208   1.95   msaitoh 		IXGBE_EVC_ADD(&stats->ipcs, 1);
   2209    1.1   msaitoh 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   2210    1.1   msaitoh 			/* IP Checksum Good */
   2211    1.1   msaitoh 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
   2212    1.1   msaitoh 		} else {
   2213   1.95   msaitoh 			IXGBE_EVC_ADD(&stats->ipcs_bad, 1);
   2214    1.1   msaitoh 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   2215    1.1   msaitoh 		}
   2216    1.1   msaitoh 	}
   2217    1.8   msaitoh 	/* TCP/UDP/SCTP checksum */
   2218    1.1   msaitoh 	if (status & IXGBE_RXD_STAT_L4CS) {
   2219   1.95   msaitoh 		IXGBE_EVC_ADD(&stats->l4cs, 1);
   2220    1.1   msaitoh 		int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
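                          		/*
                          		 * Rather than decode ptype here, advertise all four
                          		 * L4 checksum flags; the stack only tests the flag
                          		 * matching the protocol it actually parses.
                          		 */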
   2221    1.1   msaitoh 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   2222    1.1   msaitoh 			mp->m_pkthdr.csum_flags |= type;
   2223    1.1   msaitoh 		} else {
   2224   1.95   msaitoh 			IXGBE_EVC_ADD(&stats->l4cs_bad, 1);
   2225    1.1   msaitoh 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   2226    1.1   msaitoh 		}
   2227    1.1   msaitoh 	}
   2228   1.28   msaitoh } /* ixgbe_rx_checksum */
   2229    1.1   msaitoh 
   2230   1.28   msaitoh /************************************************************************
   2231   1.28   msaitoh  * ixgbe_dma_malloc
   2232   1.28   msaitoh  ************************************************************************/
   2233    1.1   msaitoh int
   2234  1.102   msaitoh ixgbe_dma_malloc(struct ixgbe_softc *sc, const bus_size_t size,
   2235    1.1   msaitoh 		struct ixgbe_dma_alloc *dma, const int mapflags)
   2236    1.1   msaitoh {
   2237  1.102   msaitoh 	device_t dev = sc->dev;
   2238   1.28   msaitoh 	int      r, rsegs;
   2239    1.1   msaitoh 
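                          	/*
                          	 * Standard bus_dma(9) sequence: create a tag, allocate
                          	 * memory, map it into kernel VA, create a DMA map, and
                          	 * load the map.  The fail_* labels below unwind whichever
                          	 * steps have already succeeded.
                          	 */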
   2240   1.28   msaitoh 	r = ixgbe_dma_tag_create(
   2241  1.102   msaitoh 	     /*      parent */ sc->osdep.dmat,
   2242   1.28   msaitoh 	     /*   alignment */ DBA_ALIGN,
   2243   1.28   msaitoh 	     /*      bounds */ 0,
   2244   1.28   msaitoh 	     /*     maxsize */ size,
   2245   1.28   msaitoh 	     /*   nsegments */ 1,
   2246   1.28   msaitoh 	     /*  maxsegsize */ size,
   2247   1.28   msaitoh 	     /*       flags */ BUS_DMA_ALLOCNOW,
   2248    1.1   msaitoh 			       &dma->dma_tag);
   2249    1.1   msaitoh 	if (r != 0) {
   2250    1.1   msaitoh 		aprint_error_dev(dev,
   2251   1.44   msaitoh 		    "%s: ixgbe_dma_tag_create failed; error %d\n", __func__,
   2252   1.44   msaitoh 		    r);
   2253    1.1   msaitoh 		goto fail_0;
   2254    1.1   msaitoh 	}
   2255    1.1   msaitoh 
   2256   1.28   msaitoh 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size,
   2257   1.28   msaitoh 	    dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary,
   2258   1.28   msaitoh 	    &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2259    1.1   msaitoh 	if (r != 0) {
   2260    1.1   msaitoh 		aprint_error_dev(dev,
   2261    1.1   msaitoh 		    "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
   2262    1.1   msaitoh 		goto fail_1;
   2263    1.1   msaitoh 	}
   2264    1.1   msaitoh 
   2265    1.1   msaitoh 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2266   1.76       ryo 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   2267    1.1   msaitoh 	if (r != 0) {
   2268    1.1   msaitoh 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2269    1.1   msaitoh 		    __func__, r);
   2270    1.1   msaitoh 		goto fail_2;
   2271    1.1   msaitoh 	}
   2272    1.1   msaitoh 
   2273    1.1   msaitoh 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2274    1.1   msaitoh 	if (r != 0) {
    2275    1.1   msaitoh 		aprint_error_dev(dev,
    2276    1.1   msaitoh 		    "%s: ixgbe_dmamap_create failed; error %d\n", __func__, r);
   2277    1.1   msaitoh 		goto fail_3;
   2278    1.1   msaitoh 	}
   2279    1.1   msaitoh 
   2280   1.28   msaitoh 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map,
   2281   1.28   msaitoh 	    dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT);
   2282    1.1   msaitoh 	if (r != 0) {
   2283    1.1   msaitoh 		aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
   2284    1.1   msaitoh 		    __func__, r);
   2285    1.1   msaitoh 		goto fail_4;
   2286    1.1   msaitoh 	}
   2287    1.1   msaitoh 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2288    1.1   msaitoh 	dma->dma_size = size;
   2289    1.1   msaitoh 	return 0;
   2290    1.1   msaitoh fail_4:
   2291    1.1   msaitoh 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2292    1.1   msaitoh fail_3:
   2293    1.1   msaitoh 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2294    1.1   msaitoh fail_2:
   2295    1.1   msaitoh 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2296    1.1   msaitoh fail_1:
   2297    1.1   msaitoh 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2298    1.1   msaitoh fail_0:
   2299    1.1   msaitoh 
   2300   1.28   msaitoh 	return (r);
   2301   1.28   msaitoh } /* ixgbe_dma_malloc */
   2302   1.28   msaitoh 
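                          /*
                           * A minimal usage sketch (the real callers are in
                           * ixgbe_allocate_queues() below): allocate a DMA area, use
                           * dma_vaddr from the CPU and dma_paddr for the device, then
                           * release it with ixgbe_dma_free():
                           *
                           *	struct ixgbe_dma_alloc dma;
                           *
                           *	if (ixgbe_dma_malloc(sc, tsize, &dma, BUS_DMA_NOWAIT) == 0) {
                           *		... use dma.dma_vaddr / dma.dma_paddr ...
                           *		ixgbe_dma_free(sc, &dma);
                           *	}
                           */
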
   2303   1.28   msaitoh /************************************************************************
   2304   1.28   msaitoh  * ixgbe_dma_free
   2305   1.28   msaitoh  ************************************************************************/
   2306    1.3   msaitoh void
   2307  1.102   msaitoh ixgbe_dma_free(struct ixgbe_softc *sc, struct ixgbe_dma_alloc *dma)
   2308    1.1   msaitoh {
   2309    1.1   msaitoh 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2310    1.1   msaitoh 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2311    1.1   msaitoh 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2312   1.98    bouyer 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, dma->dma_size);
   2313    1.1   msaitoh 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2314    1.1   msaitoh 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2315   1.28   msaitoh } /* ixgbe_dma_free */
   2316    1.1   msaitoh 
   2317    1.1   msaitoh 
   2318   1.28   msaitoh /************************************************************************
   2319   1.28   msaitoh  * ixgbe_allocate_queues
   2320    1.1   msaitoh  *
   2321   1.28   msaitoh  *   Allocate memory for the transmit and receive rings, and then
    2322   1.28   msaitoh  *   the descriptors associated with each.  Called only once at attach.
   2323   1.28   msaitoh  ************************************************************************/
   2324    1.1   msaitoh int
   2325  1.102   msaitoh ixgbe_allocate_queues(struct ixgbe_softc *sc)
   2326    1.1   msaitoh {
   2327  1.102   msaitoh 	device_t	dev = sc->dev;
   2328    1.1   msaitoh 	struct ix_queue	*que;
   2329    1.1   msaitoh 	struct tx_ring	*txr;
   2330    1.1   msaitoh 	struct rx_ring	*rxr;
   2331   1.28   msaitoh 	int             rsize, tsize, error = IXGBE_SUCCESS;
   2332   1.28   msaitoh 	int             txconf = 0, rxconf = 0;
   2333    1.1   msaitoh 
   2334   1.28   msaitoh 	/* First, allocate the top level queue structs */
   2335  1.102   msaitoh 	sc->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
   2336  1.102   msaitoh 	    sc->num_queues, M_DEVBUF, M_WAITOK | M_ZERO);
   2337    1.1   msaitoh 
   2338   1.28   msaitoh 	/* Second, allocate the TX ring struct memory */
   2339  1.102   msaitoh 	sc->tx_rings = malloc(sizeof(struct tx_ring) *
   2340  1.102   msaitoh 	    sc->num_queues, M_DEVBUF, M_WAITOK | M_ZERO);
   2341    1.1   msaitoh 
   2342   1.28   msaitoh 	/* Third, allocate the RX ring */
   2343  1.102   msaitoh 	sc->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
   2344  1.102   msaitoh 	    sc->num_queues, M_DEVBUF, M_WAITOK | M_ZERO);
   2345    1.1   msaitoh 
   2346    1.1   msaitoh 	/* For the ring itself */
   2347  1.102   msaitoh 	tsize = roundup2(sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
   2348   1.28   msaitoh 	    DBA_ALIGN);
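                          	/*
                          	 * DBA_ALIGN is the alignment the hardware requires for
                          	 * the descriptor base address; the ring size is rounded
                          	 * up to a full multiple of it as well.
                          	 */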
   2349    1.1   msaitoh 
   2350    1.1   msaitoh 	/*
    2351    1.1   msaitoh 	 * Now set up the TX queues.  txconf counts the queues set up so
    2352    1.1   msaitoh 	 * far, so that if anything fails midcourse the memory already
    2353    1.1   msaitoh 	 * allocated can be undone gracefully.
   2354   1.28   msaitoh 	 */
   2355  1.102   msaitoh 	for (int i = 0; i < sc->num_queues; i++, txconf++) {
   2356    1.1   msaitoh 		/* Set up some basics */
   2357  1.102   msaitoh 		txr = &sc->tx_rings[i];
   2358  1.102   msaitoh 		txr->sc = sc;
   2359   1.28   msaitoh 		txr->txr_interq = NULL;
   2360   1.28   msaitoh 		/* In case SR-IOV is enabled, align the index properly */
   2361    1.5   msaitoh #ifdef PCI_IOV
   2362  1.102   msaitoh 		txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
   2363   1.28   msaitoh 		    i);
   2364    1.5   msaitoh #else
   2365    1.1   msaitoh 		txr->me = i;
   2366    1.5   msaitoh #endif
   2367  1.102   msaitoh 		txr->num_desc = sc->num_tx_desc;
   2368    1.1   msaitoh 
   2369    1.1   msaitoh 		/* Initialize the TX side lock */
   2370    1.1   msaitoh 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2371    1.1   msaitoh 
   2372  1.102   msaitoh 		if (ixgbe_dma_malloc(sc, tsize, &txr->txdma,
   2373   1.28   msaitoh 		    BUS_DMA_NOWAIT)) {
   2374    1.1   msaitoh 			aprint_error_dev(dev,
   2375    1.1   msaitoh 			    "Unable to allocate TX Descriptor memory\n");
   2376    1.1   msaitoh 			error = ENOMEM;
   2377    1.1   msaitoh 			goto err_tx_desc;
   2378    1.1   msaitoh 		}
   2379    1.1   msaitoh 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2380    1.1   msaitoh 		bzero((void *)txr->tx_base, tsize);
   2381    1.1   msaitoh 
   2382   1.28   msaitoh 		/* Now allocate transmit buffers for the ring */
   2383   1.28   msaitoh 		if (ixgbe_allocate_transmit_buffers(txr)) {
   2384    1.1   msaitoh 			aprint_error_dev(dev,
   2385    1.1   msaitoh 			    "Critical Failure setting up transmit buffers\n");
   2386    1.1   msaitoh 			error = ENOMEM;
   2387    1.1   msaitoh 			goto err_tx_desc;
   2388   1.63   msaitoh 		}
   2389  1.102   msaitoh 		if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   2390   1.28   msaitoh 			/* Allocate a buf ring */
   2391   1.28   msaitoh 			txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP);
   2392   1.28   msaitoh 			if (txr->txr_interq == NULL) {
   2393   1.28   msaitoh 				aprint_error_dev(dev,
   2394   1.28   msaitoh 				    "Critical Failure setting up buf ring\n");
   2395   1.28   msaitoh 				error = ENOMEM;
   2396   1.28   msaitoh 				goto err_tx_desc;
   2397   1.28   msaitoh 			}
   2398   1.28   msaitoh 		}
   2399    1.1   msaitoh 	}
   2400    1.1   msaitoh 
   2401    1.1   msaitoh 	/*
   2402    1.1   msaitoh 	 * Next the RX queues...
   2403   1.53   msaitoh 	 */
   2404  1.102   msaitoh 	rsize = roundup2(sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
   2405   1.28   msaitoh 	    DBA_ALIGN);
   2406  1.102   msaitoh 	for (int i = 0; i < sc->num_queues; i++, rxconf++) {
   2407  1.102   msaitoh 		rxr = &sc->rx_rings[i];
   2408    1.1   msaitoh 		/* Set up some basics */
   2409  1.102   msaitoh 		rxr->sc = sc;
   2410    1.5   msaitoh #ifdef PCI_IOV
   2411   1.28   msaitoh 		/* In case SR-IOV is enabled, align the index properly */
   2412  1.102   msaitoh 		rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
   2413   1.28   msaitoh 		    i);
   2414    1.5   msaitoh #else
   2415    1.1   msaitoh 		rxr->me = i;
   2416    1.5   msaitoh #endif
   2417  1.102   msaitoh 		rxr->num_desc = sc->num_rx_desc;
   2418    1.1   msaitoh 
   2419    1.1   msaitoh 		/* Initialize the RX side lock */
   2420    1.1   msaitoh 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2421    1.1   msaitoh 
   2422  1.102   msaitoh 		if (ixgbe_dma_malloc(sc, rsize, &rxr->rxdma,
   2423   1.28   msaitoh 		    BUS_DMA_NOWAIT)) {
   2424    1.1   msaitoh 			aprint_error_dev(dev,
    2425    1.1   msaitoh 			    "Unable to allocate RX Descriptor memory\n");
   2426    1.1   msaitoh 			error = ENOMEM;
   2427    1.1   msaitoh 			goto err_rx_desc;
   2428    1.1   msaitoh 		}
   2429    1.1   msaitoh 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2430    1.1   msaitoh 		bzero((void *)rxr->rx_base, rsize);
   2431    1.1   msaitoh 
   2432   1.28   msaitoh 		/* Allocate receive buffers for the ring */
   2433    1.1   msaitoh 		if (ixgbe_allocate_receive_buffers(rxr)) {
   2434    1.1   msaitoh 			aprint_error_dev(dev,
   2435    1.1   msaitoh 			    "Critical Failure setting up receive buffers\n");
   2436    1.1   msaitoh 			error = ENOMEM;
   2437    1.1   msaitoh 			goto err_rx_desc;
   2438    1.1   msaitoh 		}
   2439    1.1   msaitoh 	}
   2440    1.1   msaitoh 
   2441    1.1   msaitoh 	/*
   2442   1.28   msaitoh 	 * Finally set up the queue holding structs
   2443   1.28   msaitoh 	 */
   2444  1.102   msaitoh 	for (int i = 0; i < sc->num_queues; i++) {
   2445  1.102   msaitoh 		que = &sc->queues[i];
   2446  1.102   msaitoh 		que->sc = sc;
   2447    1.3   msaitoh 		que->me = i;
   2448  1.102   msaitoh 		que->txr = &sc->tx_rings[i];
   2449  1.102   msaitoh 		que->rxr = &sc->rx_rings[i];
   2450   1.33  knakahar 
   2451   1.37  knakahar 		mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET);
   2452   1.37  knakahar 		que->disabled_count = 0;
   2453    1.1   msaitoh 	}
   2454    1.1   msaitoh 
   2455    1.1   msaitoh 	return (0);
   2456    1.1   msaitoh 
   2457    1.1   msaitoh err_rx_desc:
   2458  1.102   msaitoh 	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
   2459  1.102   msaitoh 		ixgbe_dma_free(sc, &rxr->rxdma);
   2460    1.1   msaitoh err_tx_desc:
   2461  1.102   msaitoh 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
   2462  1.102   msaitoh 		ixgbe_dma_free(sc, &txr->txdma);
   2463  1.102   msaitoh 	free(sc->rx_rings, M_DEVBUF);
   2464  1.102   msaitoh 	free(sc->tx_rings, M_DEVBUF);
   2465  1.102   msaitoh 	free(sc->queues, M_DEVBUF);
   2466    1.1   msaitoh 	return (error);
   2467   1.28   msaitoh } /* ixgbe_allocate_queues */
   2468   1.60   msaitoh 
   2469   1.60   msaitoh /************************************************************************
   2470   1.60   msaitoh  * ixgbe_free_queues
   2471   1.60   msaitoh  *
   2472   1.60   msaitoh  *   Free descriptors for the transmit and receive rings, and then
   2473   1.60   msaitoh  *   the memory associated with each.
   2474   1.60   msaitoh  ************************************************************************/
   2475   1.60   msaitoh void
   2476  1.102   msaitoh ixgbe_free_queues(struct ixgbe_softc *sc)
   2477   1.60   msaitoh {
   2478   1.60   msaitoh 	struct ix_queue *que;
   2479   1.60   msaitoh 	int i;
   2480   1.60   msaitoh 
   2481  1.102   msaitoh 	ixgbe_free_transmit_structures(sc);
   2482  1.102   msaitoh 	ixgbe_free_receive_structures(sc);
   2483  1.102   msaitoh 	for (i = 0; i < sc->num_queues; i++) {
   2484  1.102   msaitoh 		que = &sc->queues[i];
   2485   1.60   msaitoh 		mutex_destroy(&que->dc_mtx);
   2486   1.60   msaitoh 	}
   2487  1.102   msaitoh 	free(sc->queues, M_DEVBUF);
   2488   1.60   msaitoh } /* ixgbe_free_queues */