/* $NetBSD: ix_txrx.c,v 1.75 2021/05/18 05:29:15 msaitoh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ix_txrx.c,v 1.75 2021/05/18 05:29:15 msaitoh Exp $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include "ixgbe.h"

/*
 * HW RSC control:
 *  This feature only works with IPv4 and only on 82599 and
 *  later adapters. It also causes IP forwarding to fail and,
 *  unlike LRO, that cannot be controlled by the stack. For
 *  all these reasons it is best left off, with no tunable
 *  interface provided; enabling it requires changing this
 *  initializer and recompiling the driver.
 */
static bool ixgbe_rsc_enable = FALSE;

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool. With the default of 20, every 20th packet
 * is probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;
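/*
 * Note: the sampling happens in ixgbe_xmit() below: each TX ring keeps
 * an atr_count that is bumped per transmitted packet and, once it
 * reaches atr_sample_rate, ixgbe_atr() is called on that packet and
 * the counter is reset.
 */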

/************************************************************************
 *  Local Function prototypes
 ************************************************************************/
static void          ixgbe_setup_transmit_ring(struct tx_ring *);
static void          ixgbe_free_transmit_buffers(struct tx_ring *);
static int           ixgbe_setup_receive_ring(struct rx_ring *);
static void          ixgbe_free_receive_buffers(struct rx_ring *);
static void          ixgbe_rx_checksum(u32, struct mbuf *, u32,
                                       struct ixgbe_hw_stats *);
static void          ixgbe_refresh_mbufs(struct rx_ring *, int);
static void          ixgbe_drain(struct ifnet *, struct tx_ring *);
static int           ixgbe_xmit(struct tx_ring *, struct mbuf *);
static int           ixgbe_tx_ctx_setup(struct tx_ring *,
                                        struct mbuf *, u32 *, u32 *);
static int           ixgbe_tso_setup(struct tx_ring *,
                                     struct mbuf *, u32 *, u32 *);
static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
                                    struct mbuf *, u32);
static int           ixgbe_dma_malloc(struct adapter *, bus_size_t,
                                      struct ixgbe_dma_alloc *, int);
static void          ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);

static void	ixgbe_setup_hw_rsc(struct rx_ring *);

/************************************************************************
 * ixgbe_legacy_start_locked - Transmit entry point
 *
 *   Called by the stack to initiate a transmit.
 *   The driver will remain in this routine as long as there are
 *   packets to transmit and transmit resources are available.
 *   In case resources are not available, the stack is notified
 *   and the packet is requeued.
 ************************************************************************/
int
ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	int rc;
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXGBE_TX_LOCK_ASSERT(txr);

	if (adapter->link_active != LINK_STATE_UP) {
		/*
		 * Discard all packets buffered in IFQ to avoid
		 * sending stale packets when the link comes back up.
		 */
		ixgbe_drain(ifp, txr);
		return (ENETDOWN);
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (ENETDOWN);
	if (txr->txr_no_space)
		return (ENETDOWN);

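	/*
	 * Note the IFQ_POLL/IFQ_DEQUEUE pairing below: the packet is
	 * only peeked at first, so that on a transient failure (EAGAIN)
	 * it stays at the head of if_snd and is retried later instead
	 * of being lost.
	 */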
	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
			break;

		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (rc != 0) {
			m_freem(m_head);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, m_head, BPF_D_OUT);
	}

	return IXGBE_SUCCESS;
} /* ixgbe_legacy_start_locked */

/************************************************************************
 * ixgbe_legacy_start
 *
 *   Called by the stack, this always uses the first tx ring,
 *   and should not be used with multiqueue tx enabled.
 ************************************************************************/
void
ixgbe_legacy_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_flags & IFF_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}
} /* ixgbe_legacy_start */

/************************************************************************
 * ixgbe_mq_start - Multiqueue Transmit Entry Point
 *
 *   (if_transmit function)
 ************************************************************************/
int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr;
	int		i;
#ifdef RSS
	uint32_t bucket_id;
#endif

	/*
	 * When doing RSS, map it to the same outbound queue
	 * as the incoming flow would be mapped to.
	 *
	 * If everything is set up correctly, it should be the
	 * same bucket as the one the current CPU is mapped to.
	 */
#ifdef RSS
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
		    (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
		    &bucket_id) == 0)) {
			i = bucket_id % adapter->num_queues;
#ifdef IXGBE_DEBUG
			if (bucket_id > adapter->num_queues)
				if_printf(ifp,
				    "bucket_id (%d) > num_queues (%d)\n",
				    bucket_id, adapter->num_queues);
#endif
		} else
			i = m->m_pkthdr.flowid % adapter->num_queues;
	} else
#endif /* RSS */
		i = (cpu_index(curcpu()) % ncpu) % adapter->num_queues;

	/* Check for a hung queue and pick alternative */
	if (((1ULL << i) & adapter->active_queues) == 0)
		i = ffs64(adapter->active_queues);

	txr = &adapter->tx_rings[i];

	if (__predict_false(!pcq_put(txr->txr_interq, m))) {
		m_freem(m);
		txr->pcq_drops.ev_count++;
		return ENOBUFS;
	}
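	/*
	 * The per-ring txr_interq is a lockless producer/consumer queue
	 * (pcq(9)), so the enqueue above needs no TX lock. Below we only
	 * try-lock the ring: if it is busy, draining is deferred to a
	 * softint or workqueue instead of blocking here.
	 */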
	if (IXGBE_TX_TRYLOCK(txr)) {
		ixgbe_mq_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	} else {
		if (adapter->txrx_use_workqueue) {
			u_int *enqueued;

			/*
			 * This function itself is not called in interrupt
			 * context, however it can be called in fast softint
			 * context right after receiving forwarding packets.
			 * So the workqueue must be protected against being
			 * enqueued twice when the machine handles both
			 * locally generated and forwarded packets.
			 */
			enqueued = percpu_getref(adapter->txr_wq_enqueued);
			if (*enqueued == 0) {
				*enqueued = 1;
				percpu_putref(adapter->txr_wq_enqueued);
				workqueue_enqueue(adapter->txr_wq,
				    &txr->wq_cookie, curcpu());
			} else
				percpu_putref(adapter->txr_wq_enqueued);
		} else {
			kpreempt_disable();
			softint_schedule(txr->txr_si);
			kpreempt_enable();
		}
	}

	return (0);
} /* ixgbe_mq_start */

/************************************************************************
 * ixgbe_mq_start_locked
 ************************************************************************/
int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf    *next;
	int            enqueued = 0, err = 0;

	if (txr->adapter->link_active != LINK_STATE_UP) {
		/*
		 * Discard all packets buffered in txr_interq to avoid
		 * sending stale packets when the link comes back up.
		 */
		ixgbe_drain(ifp, txr);
		return (ENETDOWN);
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (ENETDOWN);
	if (txr->txr_no_space)
		return (ENETDOWN);

	/* Process the queue */
	while ((next = pcq_get(txr->txr_interq)) != NULL) {
		if ((err = ixgbe_xmit(txr, next)) != 0) {
			m_freem(next);
			/* All errors are counted in ixgbe_xmit() */
			break;
		}
		enqueued++;
#if __FreeBSD_version >= 1100036
		/*
		 * Since we're looking at the tx ring, we can check
		 * to see if we're a VF by examining our tail register
		 * address.
		 */
		if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
		    (next->m_flags & M_MCAST))
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
#endif
		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, next, BPF_D_OUT);
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
		ixgbe_txeof(txr);

	return (err);
} /* ixgbe_mq_start_locked */

/************************************************************************
 * ixgbe_deferred_mq_start
 *
 *   Called from a softint and workqueue (indirectly) to drain queued
 *   transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start(void *arg)
{
	struct tx_ring *txr = arg;
	struct adapter *adapter = txr->adapter;
	struct ifnet   *ifp = adapter->ifp;

	IXGBE_TX_LOCK(txr);
	if (pcq_peek(txr->txr_interq) != NULL)
		ixgbe_mq_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_deferred_mq_start */

/************************************************************************
 * ixgbe_deferred_mq_start_work
 *
 *   Called from a workqueue to drain queued transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start_work(struct work *wk, void *arg)
{
	struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);
	struct adapter *adapter = txr->adapter;
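	/*
	 * Clear the per-CPU "work already enqueued" flag before
	 * draining, so packets queued while this work item runs can
	 * schedule a fresh one instead of being stranded in txr_interq.
	 */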
	u_int *enqueued = percpu_getref(adapter->txr_wq_enqueued);
	*enqueued = 0;
	percpu_putref(adapter->txr_wq_enqueued);

	ixgbe_deferred_mq_start(txr);
} /* ixgbe_deferred_mq_start_work */

/************************************************************************
 * ixgbe_drain_all
 ************************************************************************/
void
ixgbe_drain_all(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring  *txr = que->txr;

		IXGBE_TX_LOCK(txr);
		ixgbe_drain(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}
} /* ixgbe_drain_all */

/************************************************************************
 * ixgbe_xmit
 *
 *   Maps the mbufs to tx descriptors, allowing the
 *   TX engine to transmit the packets.
 *
 *   Return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
{
	struct adapter          *adapter = txr->adapter;
	struct ixgbe_tx_buf     *txbuf;
	union ixgbe_adv_tx_desc *txd = NULL;
	struct ifnet	        *ifp = adapter->ifp;
	int                     i, j, error;
	int                     first;
	u32                     olinfo_status = 0, cmd_type_len;
	bool                    remap = TRUE;
	bus_dmamap_t            map;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (vlan_has_tag(m_head))
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * It is important to capture the first descriptor used,
	 * because it will contain the index of the descriptor that
	 * we tell the hardware to report back on completion.
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
retry:
	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head,
	    BUS_DMA_NOWAIT);

	if (__predict_false(error)) {
		struct mbuf *m;

		switch (error) {
		case EAGAIN:
			txr->q_eagain_tx_dma_setup++;
			return EAGAIN;
		case ENOMEM:
			txr->q_enomem_tx_dma_setup++;
			return EAGAIN;
		case EFBIG:
			/* Try it again? - one try */
			if (remap == TRUE) {
				remap = FALSE;
				/*
				 * XXX: m_defrag will choke on
				 * non-MCLBYTES-sized clusters
				 */
				txr->q_efbig_tx_dma_setup++;
				m = m_defrag(m_head, M_NOWAIT);
				if (m == NULL) {
					txr->q_mbuf_defrag_failed++;
					return ENOBUFS;
				}
				m_head = m;
				goto retry;
			} else {
				txr->q_efbig2_tx_dma_setup++;
				return error;
			}
		case EINVAL:
			txr->q_einval_tx_dma_setup++;
			return error;
		default:
			txr->q_other_tx_dma_setup++;
			return error;
		}
	}

	/* Make certain there are enough descriptors */
	if (txr->tx_avail < (map->dm_nsegs + 2)) {
		txr->txr_no_space = true;
		txr->no_desc_avail.ev_count++;
		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
		return EAGAIN;
	}
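	/*
	 * The "+ 2" slack above is presumably one descriptor for the
	 * offload context that may be consumed below plus one spare so
	 * that the ring never fills completely.
	 */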

	/*
	 * Set up the appropriate offload context;
	 * this will consume the first descriptor.
	 */
	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
	if (__predict_false(error)) {
		return (error);
	}

#ifdef IXGBE_FDIR
	/* Do the flow director magic */
	if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
	    (txr->atr_sample) && (!adapter->fdir_reinit)) {
		++txr->atr_count;
		if (txr->atr_count >= atr_sample_rate) {
			ixgbe_atr(txr, m_head);
			txr->atr_count = 0;
		}
	}
#endif

	olinfo_status |= IXGBE_ADVTXD_CC;
	i = txr->next_avail_desc;
	for (j = 0; j < map->dm_nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = map->dm_segs[j].ds_len;
		segaddr = htole64(map->dm_segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		if (++i == txr->num_desc)
			i = 0;
	}

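	/*
	 * Each DMA segment got one advanced data descriptor above; only
	 * the last one is marked EOP (end of packet) and RS (report
	 * status), so the hardware raises a single completion for the
	 * whole frame.
	 */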
	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= map->dm_nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	/*
	 * Here we swap the map so the last descriptor,
	 * which gets the completion interrupt, has the
	 * real map, and the first descriptor gets the
	 * unused map from this descriptor.
	 */
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);

	/* Set the EOP descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop = txd;
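	/*
	 * The pointer to the EOP descriptor is stored on the *first*
	 * buffer of the frame: ixgbe_txeof() walks the buffers in order
	 * and uses this to find the descriptor whose status field the
	 * hardware writes back when the frame is done.
	 */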

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets.ev_count;
	IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if_statadd_ref(nsr, if_obytes, m_head->m_pkthdr.len);
	if (m_head->m_flags & M_MCAST)
		if_statinc_ref(nsr, if_omcasts);
	IF_STAT_PUTREF(ifp);

	/* Mark queue as having work */
	if (txr->busy == 0)
		txr->busy = 1;

	return (0);
} /* ixgbe_xmit */

/************************************************************************
 * ixgbe_drain
 ************************************************************************/
static void
ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf *m;

	IXGBE_TX_LOCK_ASSERT(txr);

	if (txr->me == 0) {
		while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			IF_DROP(&ifp->if_snd);
		}
	}

	while ((m = pcq_get(txr->txr_interq)) != NULL) {
		m_freem(m);
		txr->pcq_drops.ev_count++;
	}
} /* ixgbe_drain */

/************************************************************************
 * ixgbe_allocate_transmit_buffers
 *
 *   Allocate memory for tx_buffer structures. The tx_buffer stores all
 *   the information needed to transmit a packet on the wire. This is
 *   called only once at attach; setup is done on every reset.
 ************************************************************************/
static int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter      *adapter = txr->adapter;
	device_t            dev = adapter->dev;
	struct ixgbe_tx_buf *txbuf;
	int                 error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	error = ixgbe_dma_tag_create(
	         /*      parent */ adapter->osdep.dmat,
	         /*   alignment */ 1,
	         /*      bounds */ 0,
	         /*     maxsize */ IXGBE_TSO_SIZE,
	         /*   nsegments */ adapter->num_segs,
	         /*  maxsegsize */ PAGE_SIZE,
	         /*       flags */ 0,
	                           &txr->txtag);
	if (error != 0) {
		aprint_error_dev(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	txr->tx_buffers = malloc(sizeof(struct ixgbe_tx_buf) *
	    adapter->num_tx_desc, M_DEVBUF, M_WAITOK | M_ZERO);

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			aprint_error_dev(dev,
			    "Unable to create TX DMA map (%d)\n", error);
			goto fail;
		}
	}

	return 0;
fail:
	/* We free all; this handles the case where we failed midway */
#if 0 /* XXX was FreeBSD */
	ixgbe_free_transmit_structures(adapter);
#else
	ixgbe_free_transmit_buffers(txr);
#endif
	return (error);
} /* ixgbe_allocate_transmit_buffers */

/************************************************************************
 * ixgbe_setup_transmit_ring - Initialize a transmit ring.
 ************************************************************************/
static void
ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter        *adapter = txr->adapter;
	struct ixgbe_tx_buf   *txbuf;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_slot    *slot;
#endif /* DEV_NETMAP */

	/* Clear the old ring contents */
	IXGBE_TX_LOCK(txr);

#ifdef DEV_NETMAP
	if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
		/*
		 * (under lock): if in netmap mode, do some consistency
		 * checks and set slot to entry 0 of the netmap ring.
		 */
		slot = netmap_reset(na, NR_TX, txr->me, 0);
	}
#endif /* DEV_NETMAP */

	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (int i = 0; i < txr->num_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
			    0, txbuf->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}

#ifdef DEV_NETMAP
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * Slots in the netmap ring (indexed by "si") are
		 * kring->nkr_hwofs positions "ahead" wrt the
		 * corresponding slot in the NIC ring. In some drivers
		 * (not here) nkr_hwofs can be negative. Function
		 * netmap_idx_n2k() handles wraparounds properly.
		 */
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
			int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
			netmap_load_map(na, txr->txtag,
			    txbuf->map, NMB(na, slot + si));
		}
#endif /* DEV_NETMAP */

		/* Clear the EOP descriptor pointer */
		txbuf->eop = NULL;
	}

	/* Set the rate at which we sample packets */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		txr->atr_sample = atr_sample_rate;

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_setup_transmit_ring */

/************************************************************************
 * ixgbe_setup_transmit_structures - Initialize all transmit rings.
 ************************************************************************/
int
ixgbe_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixgbe_setup_transmit_ring(txr);

	return (0);
} /* ixgbe_setup_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_structures - Free all transmit rings.
 ************************************************************************/
void
ixgbe_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		ixgbe_free_transmit_buffers(txr);
		ixgbe_dma_free(adapter, &txr->txdma);
		IXGBE_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
} /* ixgbe_free_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_buffers
 *
 *   Free transmit ring related data structures.
 ************************************************************************/
static void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter      *adapter = txr->adapter;
	struct ixgbe_tx_buf *tx_buffer;
	int                 i;

	INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
			    0, tx_buffer->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				ixgbe_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
	if (txr->txr_interq != NULL) {
		struct mbuf *m;

		while ((m = pcq_get(txr->txr_interq)) != NULL)
			m_freem(m);
		pcq_destroy(txr->txr_interq);
	}
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		ixgbe_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
} /* ixgbe_free_transmit_buffers */

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct adapter                   *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header         *eh;
#ifdef INET
	struct ip                        *ip;
#endif
#ifdef INET6
	struct ip6_hdr                   *ip6;
#endif
	int                              ehdrlen, ip_hlen = 0;
	int                              offload = TRUE;
	int                              ctxd = txr->next_avail_desc;
	u32                              vlan_macip_lens = 0;
	u32                              type_tucmd_mlhl = 0;
	u16                              vtag = 0;
	u16                              etype;
	u8                               ipproto = 0;
	char                             *l3d;

	/* First check if TSO is to be used */
	if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
		int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);

		if (rv != 0)
			++adapter->tso_err.ev_count;
		return rv;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
		offload = FALSE;

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor. Hence
	 * we need to make one even if not doing offloads.
	 */
	if (vlan_has_tag(mp)) {
		vtag = htole16(vlan_get_tag(mp));
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
	           (offload == FALSE))
		return (0);

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
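	/*
	 * vlan_macip_lens packs three fields of the context descriptor:
	 * bits [31:16] the VLAN tag, [15:9] the MAC header length and
	 * [8:0] the IP header length (OR'ed in below).
	 */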

	if (offload == FALSE)
		goto no_offloads;

	/*
	 * If the first mbuf only includes the ethernet header,
	 * jump to the next one
	 * XXX: This assumes the stack splits mbufs containing headers
	 *      on header boundaries
	 * XXX: And assumes the entire IP header is contained in one mbuf
	 */
	if (mp->m_len == ehdrlen && mp->m_next)
		l3d = mtod(mp->m_next, char *);
	else
		l3d = mtod(mp, char *) + ehdrlen;

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(l3d);
		ip_hlen = ip->ip_hl << 2;
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
		    ip->ip_sum == 0);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(l3d);
		ip_hlen = sizeof(struct ip6_hdr);
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
	default:
		offload = false;
		break;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;

	vlan_macip_lens |= ip_hlen;

	/* No support for offloads for non-L4 next headers */
	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags &
		    (M_CSUM_TCPv4 | M_CSUM_TCPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = false;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags &
		    (M_CSUM_UDPv4 | M_CSUM_UDPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = false;
		break;
	default:
		offload = false;
		break;
	}

	if (offload) /* Insert L4 checksum into data descriptors */
		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

no_offloads:
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (0);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_tso_setup
 *
 *   Setup work for hardware segmentation offload (TSO) on
 *   adapters using advanced tx descriptors
 ************************************************************************/
static int
ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
    u32 *olinfo_status)
{
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header         *eh;
#ifdef INET6
	struct ip6_hdr                   *ip6;
#endif
#ifdef INET
	struct ip                        *ip;
#endif
	struct tcphdr                    *th;
	int                              ctxd, ehdrlen, ip_hlen, tcp_hlen;
	u32                              vlan_macip_lens = 0;
	u32                              type_tucmd_mlhl = 0;
	u32                              mss_l4len_idx = 0, paylen;
	u16                              vtag = 0, eh_type;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		eh_type = eh->evl_proto;
	} else {
		ehdrlen = ETHER_HDR_LEN;
		eh_type = eh->evl_encap_proto;
	}

	switch (ntohs(eh_type)) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (ENXIO);
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		th = (struct tcphdr *)((char *)ip + ip_hlen);
		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
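		/*
		 * Note the pseudo-header checksum above is computed
		 * without the length field: the hardware fills in the
		 * proper per-segment length when it splits the TSO
		 * burst into MSS-sized frames.
		 */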
   1017   1.1   msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   1018   1.1   msaitoh 		/* Tell transmit desc to also do IPv4 checksum. */
   1019   1.1   msaitoh 		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
   1020   1.1   msaitoh 		break;
   1021   1.1   msaitoh #endif
   1022  1.28   msaitoh #ifdef INET6
   1023  1.28   msaitoh 	case ETHERTYPE_IPV6:
   1024  1.28   msaitoh 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
   1025  1.28   msaitoh 		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
   1026  1.28   msaitoh 		if (ip6->ip6_nxt != IPPROTO_TCP)
   1027  1.28   msaitoh 			return (ENXIO);
   1028  1.28   msaitoh 		ip_hlen = sizeof(struct ip6_hdr);
   1029  1.28   msaitoh 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
   1030  1.28   msaitoh 		th = (struct tcphdr *)((char *)ip6 + ip_hlen);
   1031  1.28   msaitoh 		th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   1032  1.28   msaitoh 		    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   1033  1.28   msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   1034  1.28   msaitoh 		break;
   1035  1.28   msaitoh #endif
   1036   1.1   msaitoh 	default:
   1037   1.1   msaitoh 		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
   1038   1.1   msaitoh 		    __func__, ntohs(eh_type));
   1039   1.1   msaitoh 		break;
   1040   1.1   msaitoh 	}
   1041   1.1   msaitoh 
   1042   1.1   msaitoh 	ctxd = txr->next_avail_desc;
   1043  1.28   msaitoh 	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
   1044   1.1   msaitoh 
   1045   1.1   msaitoh 	tcp_hlen = th->th_off << 2;
   1046   1.1   msaitoh 
    1047   1.1   msaitoh 	/* Total TCP payload across all segments; used in the transmit desc in encap */
   1048   1.1   msaitoh 	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
   1049   1.1   msaitoh 
   1050   1.1   msaitoh 	/* VLAN MACLEN IPLEN */
   1051  1.29  knakahar 	if (vlan_has_tag(mp)) {
   1052  1.29  knakahar 		vtag = htole16(vlan_get_tag(mp));
   1053  1.28   msaitoh 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   1054   1.1   msaitoh 	}
   1055   1.1   msaitoh 
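                         	/*
                         	 * vlan_macip_lens layout: VLAN tag in bits 31:16,
                         	 * MAC header length in 15:9, IP header length in 8:0.
                         	 */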
   1056   1.1   msaitoh 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   1057   1.1   msaitoh 	vlan_macip_lens |= ip_hlen;
   1058   1.1   msaitoh 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
   1059   1.1   msaitoh 
   1060   1.1   msaitoh 	/* ADV DTYPE TUCMD */
   1061   1.1   msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   1062   1.1   msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   1063   1.1   msaitoh 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
   1064   1.1   msaitoh 
   1065   1.1   msaitoh 	/* MSS L4LEN IDX */
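                         	/* (MSS in bits 31:16, L4 header length in bits 15:8) */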
   1066   1.1   msaitoh 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   1067   1.1   msaitoh 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   1068   1.1   msaitoh 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   1069   1.1   msaitoh 
   1070   1.1   msaitoh 	TXD->seqnum_seed = htole32(0);
   1071   1.1   msaitoh 
   1072   1.1   msaitoh 	if (++ctxd == txr->num_desc)
   1073   1.1   msaitoh 		ctxd = 0;
   1074   1.1   msaitoh 
   1075   1.1   msaitoh 	txr->tx_avail--;
   1076   1.1   msaitoh 	txr->next_avail_desc = ctxd;
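                         	/*
                         	 * TSE marks the coming data descriptors for segmentation;
                         	 * TXSM has the hardware insert the L4 checksum in each
                         	 * segment it emits.
                         	 */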
   1077   1.1   msaitoh 	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1078   1.1   msaitoh 	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1079   1.1   msaitoh 	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1080   1.1   msaitoh 	++txr->tso_tx.ev_count;
   1081  1.28   msaitoh 
   1082   1.1   msaitoh 	return (0);
   1083  1.28   msaitoh } /* ixgbe_tso_setup */
   1084   1.1   msaitoh 
   1085   1.3   msaitoh 
   1086  1.28   msaitoh /************************************************************************
   1087  1.28   msaitoh  * ixgbe_txeof
   1088   1.1   msaitoh  *
   1089  1.28   msaitoh  *   Examine each tx_buffer in the used queue. If the hardware is done
    1090  1.28   msaitoh  *   processing the packet then free the associated resources. The
   1091  1.28   msaitoh  *   tx_buffer is put back on the free queue.
   1092  1.28   msaitoh  ************************************************************************/
   1093  1.32   msaitoh bool
   1094   1.1   msaitoh ixgbe_txeof(struct tx_ring *txr)
   1095   1.1   msaitoh {
   1096   1.1   msaitoh 	struct adapter		*adapter = txr->adapter;
   1097   1.1   msaitoh 	struct ifnet		*ifp = adapter->ifp;
   1098  1.28   msaitoh 	struct ixgbe_tx_buf	*buf;
   1099  1.28   msaitoh 	union ixgbe_adv_tx_desc *txd;
   1100   1.1   msaitoh 	u32			work, processed = 0;
   1101   1.7   msaitoh 	u32			limit = adapter->tx_process_limit;
   1102   1.1   msaitoh 
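                         	/*
                         	 * tx_process_limit caps how many descriptors one call
                         	 * cleans; if we stop early because of it, the return
                         	 * value below reports more work so the caller can
                         	 * reschedule us.
                         	 */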
   1103   1.1   msaitoh 	KASSERT(mutex_owned(&txr->tx_mtx));
   1104   1.1   msaitoh 
   1105   1.1   msaitoh #ifdef DEV_NETMAP
   1106  1.28   msaitoh 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1107  1.28   msaitoh 	    (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
   1108  1.28   msaitoh 		struct netmap_adapter *na = NA(adapter->ifp);
   1109  1.53   msaitoh 		struct netmap_kring *kring = na->tx_rings[txr->me];
   1110   1.1   msaitoh 		txd = txr->tx_base;
   1111   1.1   msaitoh 		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1112   1.1   msaitoh 		    BUS_DMASYNC_POSTREAD);
   1113   1.1   msaitoh 		/*
   1114   1.1   msaitoh 		 * In netmap mode, all the work is done in the context
   1115   1.1   msaitoh 		 * of the client thread. Interrupt handlers only wake up
   1116   1.1   msaitoh 		 * clients, which may be sleeping on individual rings
   1117   1.1   msaitoh 		 * or on a global resource for all rings.
   1118   1.1   msaitoh 		 * To implement tx interrupt mitigation, we wake up the client
   1119   1.1   msaitoh 		 * thread roughly every half ring, even if the NIC interrupts
   1120   1.1   msaitoh 		 * more frequently. This is implemented as follows:
   1121   1.1   msaitoh 		 * - ixgbe_txsync() sets kring->nr_kflags with the index of
   1122   1.1   msaitoh 		 *   the slot that should wake up the thread (nkr_num_slots
   1123   1.1   msaitoh 		 *   means the user thread should not be woken up);
   1124   1.1   msaitoh 		 * - the driver ignores tx interrupts unless netmap_mitigate=0
   1125   1.1   msaitoh 		 *   or the slot has the DD bit set.
   1126   1.1   msaitoh 		 */
   1127  1.53   msaitoh 		if (kring->nr_kflags < kring->nkr_num_slots &&
   1128  1.53   msaitoh 		    txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD) {
   1129   1.1   msaitoh 			netmap_tx_irq(ifp, txr->me);
   1130   1.1   msaitoh 		}
   1131  1.32   msaitoh 		return false;
   1132   1.1   msaitoh 	}
   1133   1.1   msaitoh #endif /* DEV_NETMAP */
   1134   1.1   msaitoh 
   1135   1.1   msaitoh 	if (txr->tx_avail == txr->num_desc) {
   1136  1.45   msaitoh 		txr->busy = 0;
   1137  1.32   msaitoh 		return false;
   1138   1.1   msaitoh 	}
   1139   1.1   msaitoh 
   1140   1.1   msaitoh 	/* Get work starting point */
   1141   1.1   msaitoh 	work = txr->next_to_clean;
   1142   1.1   msaitoh 	buf = &txr->tx_buffers[work];
   1143   1.1   msaitoh 	txd = &txr->tx_base[work];
   1144   1.1   msaitoh 	work -= txr->num_desc; /* The distance to ring end */
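                         	/*
                         	 * work is now biased negative (index minus ring size),
                         	 * so it reaches zero exactly when the ring wraps; the
                         	 * "!work" tests below rely on this.
                         	 */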
   1145  1.28   msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1146   1.1   msaitoh 	    BUS_DMASYNC_POSTREAD);
   1147   1.8   msaitoh 
   1148   1.1   msaitoh 	do {
   1149   1.8   msaitoh 		union ixgbe_adv_tx_desc *eop = buf->eop;
   1150   1.1   msaitoh 		if (eop == NULL) /* No work */
   1151   1.1   msaitoh 			break;
   1152   1.1   msaitoh 
   1153   1.1   msaitoh 		if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
   1154   1.1   msaitoh 			break;	/* I/O not complete */
   1155   1.1   msaitoh 
   1156   1.1   msaitoh 		if (buf->m_head) {
   1157  1.28   msaitoh 			txr->bytes += buf->m_head->m_pkthdr.len;
   1158  1.28   msaitoh 			bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
   1159   1.1   msaitoh 			    0, buf->m_head->m_pkthdr.len,
   1160   1.1   msaitoh 			    BUS_DMASYNC_POSTWRITE);
   1161  1.28   msaitoh 			ixgbe_dmamap_unload(txr->txtag, buf->map);
   1162   1.1   msaitoh 			m_freem(buf->m_head);
   1163   1.1   msaitoh 			buf->m_head = NULL;
   1164   1.1   msaitoh 		}
   1165   1.1   msaitoh 		buf->eop = NULL;
   1166  1.47   msaitoh 		txr->txr_no_space = false;
   1167   1.1   msaitoh 		++txr->tx_avail;
   1168   1.1   msaitoh 
   1169   1.1   msaitoh 		/* We clean the range if multi segment */
   1170   1.1   msaitoh 		while (txd != eop) {
   1171   1.1   msaitoh 			++txd;
   1172   1.1   msaitoh 			++buf;
   1173   1.1   msaitoh 			++work;
   1174   1.1   msaitoh 			/* wrap the ring? */
   1175   1.1   msaitoh 			if (__predict_false(!work)) {
   1176   1.1   msaitoh 				work -= txr->num_desc;
   1177   1.1   msaitoh 				buf = txr->tx_buffers;
   1178   1.1   msaitoh 				txd = txr->tx_base;
   1179   1.1   msaitoh 			}
   1180   1.1   msaitoh 			if (buf->m_head) {
   1181   1.1   msaitoh 				txr->bytes +=
   1182   1.1   msaitoh 				    buf->m_head->m_pkthdr.len;
   1183   1.1   msaitoh 				bus_dmamap_sync(txr->txtag->dt_dmat,
   1184   1.1   msaitoh 				    buf->map,
   1185   1.1   msaitoh 				    0, buf->m_head->m_pkthdr.len,
   1186   1.1   msaitoh 				    BUS_DMASYNC_POSTWRITE);
   1187   1.1   msaitoh 				ixgbe_dmamap_unload(txr->txtag,
   1188   1.1   msaitoh 				    buf->map);
   1189   1.1   msaitoh 				m_freem(buf->m_head);
   1190   1.1   msaitoh 				buf->m_head = NULL;
   1191   1.1   msaitoh 			}
   1192   1.1   msaitoh 			++txr->tx_avail;
   1193   1.1   msaitoh 			buf->eop = NULL;
    1195   1.1   msaitoh 		}
   1196   1.1   msaitoh 		++txr->packets;
   1197   1.1   msaitoh 		++processed;
   1198  1.61   thorpej 		if_statinc(ifp, if_opackets);
   1199   1.1   msaitoh 
   1200   1.1   msaitoh 		/* Try the next packet */
   1201   1.1   msaitoh 		++txd;
   1202   1.1   msaitoh 		++buf;
   1203   1.1   msaitoh 		++work;
   1204   1.1   msaitoh 		/* reset with a wrap */
   1205   1.1   msaitoh 		if (__predict_false(!work)) {
   1206   1.1   msaitoh 			work -= txr->num_desc;
   1207   1.1   msaitoh 			buf = txr->tx_buffers;
   1208   1.1   msaitoh 			txd = txr->tx_base;
   1209   1.1   msaitoh 		}
   1210   1.1   msaitoh 		prefetch(txd);
   1211   1.1   msaitoh 	} while (__predict_true(--limit));
   1212   1.1   msaitoh 
   1213   1.1   msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1214   1.1   msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1215   1.1   msaitoh 
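                         	/* Remove the negative bias to recover the real ring index. */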
   1216   1.1   msaitoh 	work += txr->num_desc;
   1217   1.1   msaitoh 	txr->next_to_clean = work;
   1218   1.1   msaitoh 
    1219  1.45   msaitoh 	/*
    1220  1.45   msaitoh 	 * Queue Hang detection: we know there's work
    1221  1.45   msaitoh 	 * outstanding or the first return would have
    1222  1.45   msaitoh 	 * been taken, so increment busy if nothing
    1223  1.45   msaitoh 	 * managed to get cleaned; the local timer will
    1224  1.45   msaitoh 	 * then check it and mark the queue as HUNG if
    1225  1.45   msaitoh 	 * it exceeds the maximum number of attempts.
    1226  1.45   msaitoh 	 */
   1227  1.45   msaitoh 	if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
   1228  1.45   msaitoh 		++txr->busy;
    1229  1.45   msaitoh 	/*
    1230  1.45   msaitoh 	 * If anything was cleaned we reset the state to 1;
    1231  1.45   msaitoh 	 * note this will turn off HUNG if it's set.
    1232  1.45   msaitoh 	 */
   1233  1.45   msaitoh 	if (processed)
   1234  1.45   msaitoh 		txr->busy = 1;
   1235  1.45   msaitoh 
   1236  1.43   msaitoh 	if (txr->tx_avail == txr->num_desc)
   1237  1.45   msaitoh 		txr->busy = 0;
   1238  1.43   msaitoh 
   1239  1.32   msaitoh 	return ((limit > 0) ? false : true);
   1240  1.28   msaitoh } /* ixgbe_txeof */
   1241   1.1   msaitoh 
   1242  1.28   msaitoh /************************************************************************
   1243  1.28   msaitoh  * ixgbe_rsc_count
   1244  1.28   msaitoh  *
   1245  1.28   msaitoh  *   Used to detect a descriptor that has been merged by Hardware RSC.
   1246  1.28   msaitoh  ************************************************************************/
   1247   1.1   msaitoh static inline u32
   1248   1.1   msaitoh ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
   1249   1.1   msaitoh {
   1250   1.1   msaitoh 	return (le32toh(rx->wb.lower.lo_dword.data) &
   1251   1.1   msaitoh 	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
   1252  1.28   msaitoh } /* ixgbe_rsc_count */
   1253   1.1   msaitoh 
   1254  1.28   msaitoh /************************************************************************
   1255  1.28   msaitoh  * ixgbe_setup_hw_rsc
   1256   1.1   msaitoh  *
    1257  1.28   msaitoh  *   Initialize the Hardware RSC (LRO) feature on 82599
    1258  1.28   msaitoh  *   for an RX ring; it is toggled by the LRO capability
    1259  1.28   msaitoh  *   even though it is transparent to the stack.
   1260  1.28   msaitoh  *
   1261  1.28   msaitoh  *   NOTE: Since this HW feature only works with IPv4 and
   1262  1.28   msaitoh  *         testing has shown soft LRO to be as effective,
   1263  1.28   msaitoh  *         this feature will be disabled by default.
   1264  1.28   msaitoh  ************************************************************************/
   1265   1.1   msaitoh static void
   1266   1.1   msaitoh ixgbe_setup_hw_rsc(struct rx_ring *rxr)
   1267   1.1   msaitoh {
   1268  1.28   msaitoh 	struct	adapter  *adapter = rxr->adapter;
   1269  1.28   msaitoh 	struct	ixgbe_hw *hw = &adapter->hw;
   1270  1.28   msaitoh 	u32              rscctrl, rdrxctl;
   1271   1.1   msaitoh 
    1272   1.1   msaitoh 	/* If turning LRO/RSC off we need to disable it */
    1273   1.1   msaitoh 	if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
    1274   1.1   msaitoh 		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
    1275   1.1   msaitoh 		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
                         		/* Write the cleared bit back so the disable takes effect */
                         		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
    1276   1.1   msaitoh 		return;
    1277   1.1   msaitoh 	}
   1278   1.1   msaitoh 
   1279   1.1   msaitoh 	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
   1280   1.1   msaitoh 	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
   1281  1.28   msaitoh #ifdef DEV_NETMAP
   1282  1.28   msaitoh 	/* Always strip CRC unless Netmap disabled it */
   1283  1.28   msaitoh 	if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
   1284  1.28   msaitoh 	    !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
   1285  1.28   msaitoh 	    ix_crcstrip)
   1286   1.1   msaitoh #endif /* DEV_NETMAP */
   1287  1.28   msaitoh 		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
   1288   1.1   msaitoh 	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
   1289   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
   1290   1.1   msaitoh 
   1291   1.1   msaitoh 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
   1292   1.1   msaitoh 	rscctrl |= IXGBE_RSCCTL_RSCEN;
   1293   1.1   msaitoh 	/*
   1294  1.28   msaitoh 	 * Limit the total number of descriptors that
   1295  1.28   msaitoh 	 * can be combined, so it does not exceed 64K
   1296  1.28   msaitoh 	 */
   1297   1.1   msaitoh 	if (rxr->mbuf_sz == MCLBYTES)
   1298   1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
   1299   1.1   msaitoh 	else if (rxr->mbuf_sz == MJUMPAGESIZE)
   1300   1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
   1301   1.1   msaitoh 	else if (rxr->mbuf_sz == MJUM9BYTES)
   1302   1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
   1303   1.1   msaitoh 	else  /* Using 16K cluster */
   1304   1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
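                         	/*
                         	 * e.g. 16 * 2KB = 32KB, 8 * 4KB = 32KB (assuming 4KB
                         	 * pages), 4 * 9KB = 36KB and 1 * 16KB = 16KB, all
                         	 * within the 64K limit above.
                         	 */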
   1305   1.1   msaitoh 
   1306   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
   1307   1.1   msaitoh 
   1308   1.1   msaitoh 	/* Enable TCP header recognition */
   1309   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
   1310  1.28   msaitoh 	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
   1311   1.1   msaitoh 
   1312   1.1   msaitoh 	/* Disable RSC for ACK packets */
   1313   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
   1314   1.1   msaitoh 	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
   1315   1.1   msaitoh 
   1316   1.1   msaitoh 	rxr->hw_rsc = TRUE;
   1317  1.28   msaitoh } /* ixgbe_setup_hw_rsc */
   1318   1.8   msaitoh 
   1319  1.28   msaitoh /************************************************************************
   1320  1.28   msaitoh  * ixgbe_refresh_mbufs
   1321   1.1   msaitoh  *
   1322  1.28   msaitoh  *   Refresh mbuf buffers for RX descriptor rings
    1323  1.28   msaitoh  *    - now keeps its own state so discards due to resource
    1324  1.28   msaitoh  *      exhaustion are unnecessary; if an mbuf cannot be obtained
    1325  1.28   msaitoh  *      it just returns, keeping its placeholder, so it can simply
    1326  1.28   msaitoh  *      be recalled to try again.
   1327  1.65   msaitoh  *
   1328  1.65   msaitoh  *   XXX NetBSD TODO:
   1329  1.65   msaitoh  *    - The ixgbe_rxeof() function always preallocates mbuf cluster (jcl),
   1330  1.65   msaitoh  *      so the ixgbe_refresh_mbufs() function can be simplified.
   1331  1.65   msaitoh  *
   1332  1.28   msaitoh  ************************************************************************/
   1333   1.1   msaitoh static void
   1334   1.1   msaitoh ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
   1335   1.1   msaitoh {
   1336  1.28   msaitoh 	struct adapter      *adapter = rxr->adapter;
   1337  1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1338  1.28   msaitoh 	struct mbuf         *mp;
   1339  1.28   msaitoh 	int                 i, j, error;
   1340  1.28   msaitoh 	bool                refreshed = false;
   1341   1.1   msaitoh 
   1342   1.1   msaitoh 	i = j = rxr->next_to_refresh;
   1343   1.1   msaitoh 	/* Control the loop with one beyond */
   1344   1.1   msaitoh 	if (++j == rxr->num_desc)
   1345   1.1   msaitoh 		j = 0;
   1346   1.1   msaitoh 
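                         	/*
                         	 * j stays one slot ahead of i, so stopping at "limit"
                         	 * leaves the descriptor at "limit" itself untouched.
                         	 */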
   1347   1.1   msaitoh 	while (j != limit) {
   1348   1.1   msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1349   1.1   msaitoh 		if (rxbuf->buf == NULL) {
   1350  1.49   msaitoh 			mp = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT,
   1351   1.1   msaitoh 			    MT_DATA, M_PKTHDR, rxr->mbuf_sz);
   1352   1.1   msaitoh 			if (mp == NULL) {
   1353   1.1   msaitoh 				rxr->no_jmbuf.ev_count++;
   1354   1.1   msaitoh 				goto update;
   1355   1.1   msaitoh 			}
   1356   1.1   msaitoh 			if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
   1357   1.1   msaitoh 				m_adj(mp, ETHER_ALIGN);
   1358   1.1   msaitoh 		} else
   1359   1.1   msaitoh 			mp = rxbuf->buf;
   1360   1.1   msaitoh 
   1361   1.1   msaitoh 		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1362   1.1   msaitoh 
    1363   1.1   msaitoh 		/*
                         		 * If we're dealing with an mbuf that was copied rather
    1364   1.1   msaitoh 		 * than replaced, there's no need to go through busdma.
    1365   1.1   msaitoh 		 */
   1366   1.1   msaitoh 		if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
   1367   1.1   msaitoh 			/* Get the memory mapping */
   1368   1.4   msaitoh 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   1369   1.1   msaitoh 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   1370   1.1   msaitoh 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   1371   1.1   msaitoh 			if (error != 0) {
   1372  1.55   msaitoh 				device_printf(adapter->dev, "Refresh mbufs: "
   1373  1.55   msaitoh 				    "payload dmamap load failure - %d\n",
   1374  1.55   msaitoh 				    error);
   1375   1.1   msaitoh 				m_free(mp);
   1376   1.1   msaitoh 				rxbuf->buf = NULL;
   1377   1.1   msaitoh 				goto update;
   1378   1.1   msaitoh 			}
   1379   1.1   msaitoh 			rxbuf->buf = mp;
   1380   1.1   msaitoh 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1381   1.1   msaitoh 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   1382   1.1   msaitoh 			rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
   1383   1.1   msaitoh 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1384   1.1   msaitoh 		} else {
   1385   1.1   msaitoh 			rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
   1386   1.1   msaitoh 			rxbuf->flags &= ~IXGBE_RX_COPY;
   1387   1.1   msaitoh 		}
   1388   1.1   msaitoh 
   1389   1.1   msaitoh 		refreshed = true;
   1390   1.1   msaitoh 		/* Next is precalculated */
   1391   1.1   msaitoh 		i = j;
   1392   1.1   msaitoh 		rxr->next_to_refresh = i;
   1393   1.1   msaitoh 		if (++j == rxr->num_desc)
   1394   1.1   msaitoh 			j = 0;
   1395   1.1   msaitoh 	}
   1396  1.28   msaitoh 
   1397   1.1   msaitoh update:
   1398   1.1   msaitoh 	if (refreshed) /* Update hardware tail index */
   1399  1.28   msaitoh 		IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);
   1400  1.28   msaitoh 
   1401   1.1   msaitoh 	return;
   1402  1.28   msaitoh } /* ixgbe_refresh_mbufs */
   1403   1.1   msaitoh 
   1404  1.28   msaitoh /************************************************************************
   1405  1.28   msaitoh  * ixgbe_allocate_receive_buffers
   1406   1.1   msaitoh  *
   1407  1.28   msaitoh  *   Allocate memory for rx_buffer structures. Since we use one
    1408  1.28   msaitoh  *   rx_buffer per received packet, the maximum number of rx_buffers
   1409  1.28   msaitoh  *   that we'll need is equal to the number of receive descriptors
   1410  1.28   msaitoh  *   that we've allocated.
   1411  1.28   msaitoh  ************************************************************************/
   1412  1.28   msaitoh static int
   1413   1.1   msaitoh ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
   1414   1.1   msaitoh {
   1415  1.53   msaitoh 	struct adapter      *adapter = rxr->adapter;
   1416  1.28   msaitoh 	device_t            dev = adapter->dev;
   1417  1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1418  1.28   msaitoh 	int                 bsize, error;
   1419   1.1   msaitoh 
   1420   1.1   msaitoh 	bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
   1421  1.57       chs 	rxr->rx_buffers = malloc(bsize, M_DEVBUF, M_WAITOK | M_ZERO);
   1422   1.1   msaitoh 
   1423  1.28   msaitoh 	error = ixgbe_dma_tag_create(
   1424  1.28   msaitoh 	         /*      parent */ adapter->osdep.dmat,
   1425  1.28   msaitoh 	         /*   alignment */ 1,
   1426  1.28   msaitoh 	         /*      bounds */ 0,
   1427  1.28   msaitoh 	         /*     maxsize */ MJUM16BYTES,
   1428  1.28   msaitoh 	         /*   nsegments */ 1,
   1429  1.28   msaitoh 	         /*  maxsegsize */ MJUM16BYTES,
   1430  1.28   msaitoh 	         /*       flags */ 0,
   1431  1.28   msaitoh 	                           &rxr->ptag);
   1432  1.28   msaitoh 	if (error != 0) {
   1433   1.1   msaitoh 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   1434   1.1   msaitoh 		goto fail;
   1435   1.1   msaitoh 	}
   1436   1.1   msaitoh 
    1437   1.5   msaitoh 	for (int i = 0; i < rxr->num_desc; i++) {
   1438   1.1   msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1439   1.4   msaitoh 		error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
   1440   1.1   msaitoh 		if (error) {
    1441   1.1   msaitoh 			aprint_error_dev(dev, "Unable to create RX DMA map\n");
   1442   1.1   msaitoh 			goto fail;
   1443   1.1   msaitoh 		}
   1444   1.1   msaitoh 	}
   1445   1.1   msaitoh 
   1446   1.1   msaitoh 	return (0);
   1447   1.1   msaitoh 
   1448   1.1   msaitoh fail:
   1449   1.1   msaitoh 	/* Frees all, but can handle partial completion */
   1450   1.1   msaitoh 	ixgbe_free_receive_structures(adapter);
   1451  1.28   msaitoh 
   1452   1.1   msaitoh 	return (error);
   1453  1.28   msaitoh } /* ixgbe_allocate_receive_buffers */
   1454   1.1   msaitoh 
   1455  1.28   msaitoh /************************************************************************
   1456  1.30   msaitoh  * ixgbe_free_receive_ring
   1457  1.28   msaitoh  ************************************************************************/
   1458  1.28   msaitoh static void
   1459   1.1   msaitoh ixgbe_free_receive_ring(struct rx_ring *rxr)
   1460  1.27   msaitoh {
   1461   1.5   msaitoh 	for (int i = 0; i < rxr->num_desc; i++) {
   1462  1.27   msaitoh 		ixgbe_rx_discard(rxr, i);
   1463   1.1   msaitoh 	}
   1464  1.28   msaitoh } /* ixgbe_free_receive_ring */
   1465   1.1   msaitoh 
   1466  1.28   msaitoh /************************************************************************
   1467  1.28   msaitoh  * ixgbe_setup_receive_ring
   1468   1.1   msaitoh  *
   1469  1.28   msaitoh  *   Initialize a receive ring and its buffers.
   1470  1.28   msaitoh  ************************************************************************/
   1471   1.1   msaitoh static int
   1472   1.1   msaitoh ixgbe_setup_receive_ring(struct rx_ring *rxr)
   1473   1.1   msaitoh {
   1474  1.28   msaitoh 	struct adapter        *adapter;
   1475  1.28   msaitoh 	struct ixgbe_rx_buf   *rxbuf;
   1476   1.1   msaitoh #ifdef LRO
   1477  1.28   msaitoh 	struct ifnet          *ifp;
   1478  1.28   msaitoh 	struct lro_ctrl       *lro = &rxr->lro;
   1479   1.1   msaitoh #endif /* LRO */
   1480   1.1   msaitoh #ifdef DEV_NETMAP
   1481   1.1   msaitoh 	struct netmap_adapter *na = NA(rxr->adapter->ifp);
   1482  1.28   msaitoh 	struct netmap_slot    *slot;
   1483   1.1   msaitoh #endif /* DEV_NETMAP */
   1484  1.28   msaitoh 	int                   rsize, error = 0;
   1485   1.1   msaitoh 
   1486   1.1   msaitoh 	adapter = rxr->adapter;
   1487   1.1   msaitoh #ifdef LRO
   1488   1.1   msaitoh 	ifp = adapter->ifp;
   1489   1.1   msaitoh #endif /* LRO */
   1490   1.1   msaitoh 
   1491   1.1   msaitoh 	/* Clear the ring contents */
   1492   1.1   msaitoh 	IXGBE_RX_LOCK(rxr);
   1493  1.28   msaitoh 
   1494   1.1   msaitoh #ifdef DEV_NETMAP
   1495  1.28   msaitoh 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1496  1.28   msaitoh 		slot = netmap_reset(na, NR_RX, rxr->me, 0);
   1497   1.1   msaitoh #endif /* DEV_NETMAP */
   1498  1.28   msaitoh 
   1499   1.1   msaitoh 	rsize = roundup2(adapter->num_rx_desc *
   1500   1.1   msaitoh 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   1501   1.1   msaitoh 	bzero((void *)rxr->rx_base, rsize);
   1502   1.1   msaitoh 	/* Cache the size */
   1503   1.1   msaitoh 	rxr->mbuf_sz = adapter->rx_mbuf_sz;
   1504   1.1   msaitoh 
   1505   1.1   msaitoh 	/* Free current RX buffer structs and their mbufs */
   1506   1.1   msaitoh 	ixgbe_free_receive_ring(rxr);
   1507   1.1   msaitoh 
   1508  1.49   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1509  1.49   msaitoh 	/*
   1510  1.49   msaitoh 	 * Now reinitialize our supply of jumbo mbufs.  The number
   1511  1.49   msaitoh 	 * or size of jumbo mbufs may have changed.
   1512  1.49   msaitoh 	 * Assume all of rxr->ptag are the same.
   1513  1.49   msaitoh 	 */
   1514  1.49   msaitoh 	ixgbe_jcl_reinit(adapter, rxr->ptag->dt_dmat, rxr,
   1515  1.67   msaitoh 	    adapter->num_jcl, adapter->rx_mbuf_sz);
   1516  1.49   msaitoh 
   1517  1.49   msaitoh 	IXGBE_RX_LOCK(rxr);
   1518  1.49   msaitoh 
   1519   1.1   msaitoh 	/* Now replenish the mbufs */
   1520   1.1   msaitoh 	for (int j = 0; j != rxr->num_desc; ++j) {
   1521  1.28   msaitoh 		struct mbuf *mp;
   1522   1.1   msaitoh 
   1523   1.1   msaitoh 		rxbuf = &rxr->rx_buffers[j];
   1524  1.28   msaitoh 
   1525   1.1   msaitoh #ifdef DEV_NETMAP
   1526   1.1   msaitoh 		/*
   1527   1.1   msaitoh 		 * In netmap mode, fill the map and set the buffer
   1528   1.1   msaitoh 		 * address in the NIC ring, considering the offset
   1529   1.1   msaitoh 		 * between the netmap and NIC rings (see comment in
   1530   1.1   msaitoh 		 * ixgbe_setup_transmit_ring() ). No need to allocate
   1531   1.1   msaitoh 		 * an mbuf, so end the block with a continue;
   1532   1.1   msaitoh 		 */
   1533  1.28   msaitoh 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
   1534  1.53   msaitoh 			int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j);
   1535   1.1   msaitoh 			uint64_t paddr;
   1536   1.1   msaitoh 			void *addr;
   1537   1.1   msaitoh 
   1538   1.1   msaitoh 			addr = PNMB(na, slot + sj, &paddr);
   1539   1.1   msaitoh 			netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
   1540   1.1   msaitoh 			/* Update descriptor and the cached value */
   1541   1.1   msaitoh 			rxr->rx_base[j].read.pkt_addr = htole64(paddr);
   1542   1.1   msaitoh 			rxbuf->addr = htole64(paddr);
   1543   1.1   msaitoh 			continue;
   1544   1.1   msaitoh 		}
   1545   1.1   msaitoh #endif /* DEV_NETMAP */
   1546  1.28   msaitoh 
   1547  1.28   msaitoh 		rxbuf->flags = 0;
   1548  1.49   msaitoh 		rxbuf->buf = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT,
   1549   1.1   msaitoh 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   1550   1.1   msaitoh 		if (rxbuf->buf == NULL) {
   1551   1.1   msaitoh 			error = ENOBUFS;
   1552  1.28   msaitoh 			goto fail;
   1553   1.1   msaitoh 		}
   1554   1.1   msaitoh 		mp = rxbuf->buf;
   1555   1.1   msaitoh 		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1556   1.1   msaitoh 		/* Get the memory mapping */
   1557  1.28   msaitoh 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap,
   1558  1.28   msaitoh 		    mp, BUS_DMA_NOWAIT);
   1559  1.75   msaitoh 		if (error != 0) {
   1560  1.75   msaitoh 			/*
   1561  1.75   msaitoh 			 * Clear this entry for later cleanup in
    1562  1.75   msaitoh 			 * ixgbe_rx_discard() which is called via
   1563  1.75   msaitoh 			 * ixgbe_free_receive_ring().
   1564  1.75   msaitoh 			 */
   1565  1.75   msaitoh 			m_freem(mp);
   1566  1.75   msaitoh 			rxbuf->buf = NULL;
    1567   1.1   msaitoh 			goto fail;
   1568  1.75   msaitoh 		}
   1569   1.1   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1570   1.1   msaitoh 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   1571   1.1   msaitoh 		/* Update the descriptor and the cached value */
   1572   1.1   msaitoh 		rxr->rx_base[j].read.pkt_addr =
   1573   1.1   msaitoh 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1574   1.1   msaitoh 		rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1575   1.1   msaitoh 	}
   1576   1.1   msaitoh 
   1577   1.1   msaitoh 	/* Setup our descriptor indices */
   1578   1.1   msaitoh 	rxr->next_to_check = 0;
   1579   1.1   msaitoh 	rxr->next_to_refresh = 0;
   1580   1.1   msaitoh 	rxr->lro_enabled = FALSE;
   1581   1.1   msaitoh 	rxr->rx_copies.ev_count = 0;
   1582  1.13   msaitoh #if 0 /* NetBSD */
   1583   1.1   msaitoh 	rxr->rx_bytes.ev_count = 0;
   1584  1.13   msaitoh #if 1	/* Fix inconsistency */
   1585  1.13   msaitoh 	rxr->rx_packets.ev_count = 0;
   1586  1.13   msaitoh #endif
   1587  1.13   msaitoh #endif
   1588   1.1   msaitoh 	rxr->vtag_strip = FALSE;
   1589   1.1   msaitoh 
   1590   1.1   msaitoh 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1591   1.1   msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1592   1.1   msaitoh 
   1593   1.1   msaitoh 	/*
   1594  1.28   msaitoh 	 * Now set up the LRO interface
   1595  1.28   msaitoh 	 */
   1596   1.1   msaitoh 	if (ixgbe_rsc_enable)
   1597   1.1   msaitoh 		ixgbe_setup_hw_rsc(rxr);
   1598   1.1   msaitoh #ifdef LRO
   1599   1.1   msaitoh 	else if (ifp->if_capenable & IFCAP_LRO) {
   1600   1.1   msaitoh 		device_t dev = adapter->dev;
   1601   1.1   msaitoh 		int err = tcp_lro_init(lro);
   1602   1.1   msaitoh 		if (err) {
   1603   1.1   msaitoh 			device_printf(dev, "LRO Initialization failed!\n");
   1604   1.1   msaitoh 			goto fail;
   1605   1.1   msaitoh 		}
   1606   1.1   msaitoh 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   1607   1.1   msaitoh 		rxr->lro_enabled = TRUE;
   1608   1.1   msaitoh 		lro->ifp = adapter->ifp;
   1609   1.1   msaitoh 	}
   1610   1.1   msaitoh #endif /* LRO */
   1611   1.1   msaitoh 
   1612   1.1   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1613  1.28   msaitoh 
   1614   1.1   msaitoh 	return (0);
   1615   1.1   msaitoh 
   1616   1.1   msaitoh fail:
   1617   1.1   msaitoh 	ixgbe_free_receive_ring(rxr);
   1618   1.1   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1619  1.28   msaitoh 
   1620   1.1   msaitoh 	return (error);
   1621  1.28   msaitoh } /* ixgbe_setup_receive_ring */
   1622   1.1   msaitoh 
   1623  1.28   msaitoh /************************************************************************
   1624  1.28   msaitoh  * ixgbe_setup_receive_structures - Initialize all receive rings.
   1625  1.28   msaitoh  ************************************************************************/
   1626   1.1   msaitoh int
   1627   1.1   msaitoh ixgbe_setup_receive_structures(struct adapter *adapter)
   1628   1.1   msaitoh {
   1629   1.1   msaitoh 	struct rx_ring *rxr = adapter->rx_rings;
   1630  1.28   msaitoh 	int            j;
   1631   1.1   msaitoh 
   1632  1.62   msaitoh 	INIT_DEBUGOUT("ixgbe_setup_receive_structures");
   1633   1.1   msaitoh 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   1634   1.1   msaitoh 		if (ixgbe_setup_receive_ring(rxr))
   1635   1.1   msaitoh 			goto fail;
   1636   1.1   msaitoh 
   1637   1.1   msaitoh 	return (0);
   1638   1.1   msaitoh fail:
   1639   1.1   msaitoh 	/*
    1640   1.1   msaitoh 	 * Free RX buffers allocated so far; we will only handle
    1641   1.1   msaitoh 	 * the rings that completed, since the failing case will have
    1642   1.1   msaitoh 	 * cleaned up after itself. 'j' failed, so it's the terminus.
   1643   1.1   msaitoh 	 */
   1644   1.1   msaitoh 	for (int i = 0; i < j; ++i) {
   1645   1.1   msaitoh 		rxr = &adapter->rx_rings[i];
   1646  1.27   msaitoh 		IXGBE_RX_LOCK(rxr);
   1647   1.1   msaitoh 		ixgbe_free_receive_ring(rxr);
   1648  1.27   msaitoh 		IXGBE_RX_UNLOCK(rxr);
   1649   1.1   msaitoh 	}
   1650   1.1   msaitoh 
   1651   1.1   msaitoh 	return (ENOBUFS);
   1652  1.28   msaitoh } /* ixgbe_setup_receive_structures */
   1653   1.1   msaitoh 
   1654   1.3   msaitoh 
   1655  1.28   msaitoh /************************************************************************
   1656  1.28   msaitoh  * ixgbe_free_receive_structures - Free all receive rings.
   1657  1.28   msaitoh  ************************************************************************/
   1658   1.1   msaitoh void
   1659   1.1   msaitoh ixgbe_free_receive_structures(struct adapter *adapter)
   1660   1.1   msaitoh {
   1661   1.1   msaitoh 	struct rx_ring *rxr = adapter->rx_rings;
   1662   1.1   msaitoh 
   1663   1.1   msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
   1664   1.1   msaitoh 
   1665   1.1   msaitoh 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1666   1.1   msaitoh 		ixgbe_free_receive_buffers(rxr);
   1667   1.1   msaitoh #ifdef LRO
   1668   1.1   msaitoh 		/* Free LRO memory */
   1669  1.28   msaitoh 		tcp_lro_free(&rxr->lro);
   1670   1.1   msaitoh #endif /* LRO */
   1671   1.1   msaitoh 		/* Free the ring memory as well */
   1672   1.1   msaitoh 		ixgbe_dma_free(adapter, &rxr->rxdma);
   1673   1.1   msaitoh 		IXGBE_RX_LOCK_DESTROY(rxr);
   1674   1.1   msaitoh 	}
   1675   1.1   msaitoh 
   1676   1.1   msaitoh 	free(adapter->rx_rings, M_DEVBUF);
   1677  1.28   msaitoh } /* ixgbe_free_receive_structures */
   1678   1.1   msaitoh 
   1679   1.1   msaitoh 
   1680  1.28   msaitoh /************************************************************************
   1681  1.28   msaitoh  * ixgbe_free_receive_buffers - Free receive ring data structures
   1682  1.28   msaitoh  ************************************************************************/
   1683   1.1   msaitoh static void
   1684   1.1   msaitoh ixgbe_free_receive_buffers(struct rx_ring *rxr)
   1685   1.1   msaitoh {
   1686  1.28   msaitoh 	struct adapter      *adapter = rxr->adapter;
   1687  1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1688   1.1   msaitoh 
   1689   1.1   msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
   1690   1.1   msaitoh 
   1691   1.1   msaitoh 	/* Cleanup any existing buffers */
   1692   1.1   msaitoh 	if (rxr->rx_buffers != NULL) {
   1693   1.1   msaitoh 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   1694   1.1   msaitoh 			rxbuf = &rxr->rx_buffers[i];
   1695  1.27   msaitoh 			ixgbe_rx_discard(rxr, i);
   1696   1.1   msaitoh 			if (rxbuf->pmap != NULL) {
   1697   1.1   msaitoh 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   1698   1.1   msaitoh 				rxbuf->pmap = NULL;
   1699   1.1   msaitoh 			}
   1700   1.1   msaitoh 		}
   1701  1.59   msaitoh 
   1702  1.59   msaitoh 		/* NetBSD specific. See ixgbe_netbsd.c */
   1703  1.59   msaitoh 		ixgbe_jcl_destroy(adapter, rxr);
   1704  1.59   msaitoh 
   1705   1.1   msaitoh 		if (rxr->rx_buffers != NULL) {
   1706   1.1   msaitoh 			free(rxr->rx_buffers, M_DEVBUF);
   1707   1.1   msaitoh 			rxr->rx_buffers = NULL;
   1708   1.1   msaitoh 		}
   1709   1.1   msaitoh 	}
   1710   1.1   msaitoh 
   1711   1.1   msaitoh 	if (rxr->ptag != NULL) {
   1712   1.1   msaitoh 		ixgbe_dma_tag_destroy(rxr->ptag);
   1713   1.1   msaitoh 		rxr->ptag = NULL;
   1714   1.1   msaitoh 	}
   1715   1.1   msaitoh 
   1716   1.1   msaitoh 	return;
   1717  1.28   msaitoh } /* ixgbe_free_receive_buffers */
   1718   1.1   msaitoh 
   1719  1.28   msaitoh /************************************************************************
   1720  1.28   msaitoh  * ixgbe_rx_input
   1721  1.28   msaitoh  ************************************************************************/
   1722   1.1   msaitoh static __inline void
   1723  1.28   msaitoh ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
   1724  1.28   msaitoh     u32 ptype)
   1725   1.1   msaitoh {
   1726  1.20   msaitoh 	struct adapter	*adapter = ifp->if_softc;
   1727   1.1   msaitoh 
   1728   1.1   msaitoh #ifdef LRO
   1729   1.1   msaitoh 	struct ethercom *ec = &adapter->osdep.ec;
   1730   1.1   msaitoh 
   1731  1.28   msaitoh 	/*
    1732  1.28   msaitoh 	 * At the moment LRO is only for IP/TCP packets whose TCP checksum
    1733  1.28   msaitoh 	 * has been computed by hardware, and the packet must not have a
    1734  1.28   msaitoh 	 * VLAN tag in the Ethernet header. For IPv6 we do not yet support
                         	 * extension headers.
   1735  1.28   msaitoh 	 */
    1736   1.1   msaitoh 	if (rxr->lro_enabled &&
    1737   1.1   msaitoh 	    (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
    1738   1.1   msaitoh 	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
    1739   1.1   msaitoh 	    ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
    1740   1.1   msaitoh 	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
    1741   1.1   msaitoh 	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
    1742   1.1   msaitoh 	    (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
    1743   1.1   msaitoh 	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
    1744   1.1   msaitoh 	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
    1745   1.1   msaitoh 		/*
    1746   1.1   msaitoh 		 * Send to the stack if:
    1747   1.1   msaitoh 		 *  - LRO not enabled, or
    1748   1.1   msaitoh 		 *  - no LRO resources, or
    1749   1.1   msaitoh 		 *  - lro enqueue fails
    1750   1.1   msaitoh 		 */
    1751   1.1   msaitoh 		if (rxr->lro.lro_cnt != 0)
    1752   1.1   msaitoh 			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
    1753   1.1   msaitoh 				return;
    1754   1.1   msaitoh 	}
   1755   1.1   msaitoh #endif /* LRO */
   1756   1.1   msaitoh 
   1757  1.20   msaitoh 	if_percpuq_enqueue(adapter->ipq, m);
   1758  1.28   msaitoh } /* ixgbe_rx_input */
   1759   1.1   msaitoh 
   1760  1.28   msaitoh /************************************************************************
   1761  1.28   msaitoh  * ixgbe_rx_discard
   1762  1.28   msaitoh  ************************************************************************/
   1763   1.1   msaitoh static __inline void
   1764   1.1   msaitoh ixgbe_rx_discard(struct rx_ring *rxr, int i)
   1765   1.1   msaitoh {
   1766  1.28   msaitoh 	struct ixgbe_rx_buf *rbuf;
   1767   1.1   msaitoh 
   1768   1.1   msaitoh 	rbuf = &rxr->rx_buffers[i];
   1769   1.1   msaitoh 
   1770   1.1   msaitoh 	/*
   1771  1.70   msaitoh 	 * With advanced descriptors the writeback clobbers the buffer addrs,
    1772  1.70   msaitoh 	 * so it's easier to just free the existing mbufs and take the normal
   1773  1.70   msaitoh 	 * refresh path to get new buffers and mapping.
   1774  1.28   msaitoh 	 */
   1775   1.1   msaitoh 
    1776  1.26   msaitoh 	if (rbuf->fmp != NULL) { /* Partial chain? */
   1777  1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1778  1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1779  1.72       rin 		ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
   1780   1.1   msaitoh 		m_freem(rbuf->fmp);
   1781   1.1   msaitoh 		rbuf->fmp = NULL;
   1782   1.1   msaitoh 		rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
   1783   1.1   msaitoh 	} else if (rbuf->buf) {
   1784  1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1785  1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1786  1.72       rin 		ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
   1787   1.1   msaitoh 		m_free(rbuf->buf);
   1788   1.1   msaitoh 		rbuf->buf = NULL;
   1789   1.1   msaitoh 	}
   1790   1.1   msaitoh 
   1791   1.1   msaitoh 	rbuf->flags = 0;
   1792   1.1   msaitoh 
   1793   1.1   msaitoh 	return;
   1794  1.28   msaitoh } /* ixgbe_rx_discard */
   1795   1.1   msaitoh 
   1796   1.1   msaitoh 
   1797  1.28   msaitoh /************************************************************************
   1798  1.28   msaitoh  * ixgbe_rxeof
   1799   1.1   msaitoh  *
    1800  1.28   msaitoh  *   Executes in interrupt context. It replenishes the
    1801  1.28   msaitoh  *   mbufs in the descriptor ring and sends data which has
    1802  1.28   msaitoh  *   been DMA'ed into host memory to the upper layer.
   1803   1.1   msaitoh  *
   1804  1.28   msaitoh  *   Return TRUE for more work, FALSE for all clean.
   1805  1.28   msaitoh  ************************************************************************/
   1806   1.1   msaitoh bool
   1807   1.1   msaitoh ixgbe_rxeof(struct ix_queue *que)
   1808   1.1   msaitoh {
   1809   1.1   msaitoh 	struct adapter		*adapter = que->adapter;
   1810   1.1   msaitoh 	struct rx_ring		*rxr = que->rxr;
   1811   1.1   msaitoh 	struct ifnet		*ifp = adapter->ifp;
   1812   1.1   msaitoh #ifdef LRO
   1813   1.1   msaitoh 	struct lro_ctrl		*lro = &rxr->lro;
   1814   1.1   msaitoh #endif /* LRO */
   1815  1.28   msaitoh 	union ixgbe_adv_rx_desc	*cur;
   1816  1.28   msaitoh 	struct ixgbe_rx_buf	*rbuf, *nbuf;
   1817   1.1   msaitoh 	int			i, nextp, processed = 0;
   1818   1.1   msaitoh 	u32			staterr = 0;
   1819  1.65   msaitoh 	u32			count = 0;
   1820  1.65   msaitoh 	u32			limit = adapter->rx_process_limit;
   1821  1.65   msaitoh 	bool			discard_multidesc = false;
   1822   1.1   msaitoh #ifdef RSS
   1823   1.1   msaitoh 	u16			pkt_info;
   1824   1.1   msaitoh #endif
   1825   1.1   msaitoh 
   1826   1.1   msaitoh 	IXGBE_RX_LOCK(rxr);
   1827   1.1   msaitoh 
   1828   1.1   msaitoh #ifdef DEV_NETMAP
   1829  1.28   msaitoh 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
   1830  1.28   msaitoh 		/* Same as the txeof routine: wakeup clients on intr. */
   1831  1.28   msaitoh 		if (netmap_rx_irq(ifp, rxr->me, &processed)) {
   1832  1.28   msaitoh 			IXGBE_RX_UNLOCK(rxr);
   1833  1.28   msaitoh 			return (FALSE);
   1834  1.28   msaitoh 		}
   1835   1.1   msaitoh 	}
   1836   1.1   msaitoh #endif /* DEV_NETMAP */
   1837   1.1   msaitoh 
   1838  1.65   msaitoh 	/*
    1839  1.65   msaitoh 	 * The maximum number of loop iterations is rx_process_limit. If
    1840  1.65   msaitoh 	 * discard_multidesc is true, keep processing so that a broken
    1841  1.65   msaitoh 	 * packet is not passed to the upper layer.
   1842  1.65   msaitoh 	 */
   1843  1.65   msaitoh 	for (i = rxr->next_to_check;
   1844  1.65   msaitoh 	     (count < limit) || (discard_multidesc == true);) {
   1845  1.65   msaitoh 
   1846  1.28   msaitoh 		struct mbuf *sendmp, *mp;
   1847  1.64  knakahar 		struct mbuf *newmp;
   1848  1.28   msaitoh 		u32         rsc, ptype;
   1849  1.28   msaitoh 		u16         len;
   1850  1.28   msaitoh 		u16         vtag = 0;
   1851  1.28   msaitoh 		bool        eop;
   1852  1.53   msaitoh 
   1853   1.1   msaitoh 		/* Sync the ring. */
   1854   1.1   msaitoh 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1855   1.1   msaitoh 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1856   1.1   msaitoh 
   1857   1.1   msaitoh 		cur = &rxr->rx_base[i];
   1858   1.1   msaitoh 		staterr = le32toh(cur->wb.upper.status_error);
   1859   1.1   msaitoh #ifdef RSS
   1860   1.1   msaitoh 		pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
   1861   1.1   msaitoh #endif
   1862   1.1   msaitoh 
   1863   1.1   msaitoh 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   1864   1.1   msaitoh 			break;
   1865   1.1   msaitoh 
   1866  1.65   msaitoh 		count++;
   1867   1.1   msaitoh 		sendmp = NULL;
   1868   1.1   msaitoh 		nbuf = NULL;
   1869   1.1   msaitoh 		rsc = 0;
   1870   1.1   msaitoh 		cur->wb.upper.status_error = 0;
   1871   1.1   msaitoh 		rbuf = &rxr->rx_buffers[i];
   1872   1.1   msaitoh 		mp = rbuf->buf;
   1873   1.1   msaitoh 
   1874   1.1   msaitoh 		len = le16toh(cur->wb.upper.length);
   1875   1.1   msaitoh 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   1876   1.1   msaitoh 		    IXGBE_RXDADV_PKTTYPE_MASK;
   1877   1.1   msaitoh 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   1878   1.1   msaitoh 
   1879   1.1   msaitoh 		/* Make sure bad packets are discarded */
   1880   1.1   msaitoh 		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
   1881   1.3   msaitoh #if __FreeBSD_version >= 1100036
   1882  1.28   msaitoh 			if (adapter->feat_en & IXGBE_FEATURE_VF)
   1883   1.4   msaitoh 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
   1884   1.3   msaitoh #endif
   1885   1.1   msaitoh 			rxr->rx_discarded.ev_count++;
   1886   1.1   msaitoh 			ixgbe_rx_discard(rxr, i);
   1887  1.65   msaitoh 			discard_multidesc = false;
   1888   1.1   msaitoh 			goto next_desc;
   1889   1.1   msaitoh 		}
   1890   1.1   msaitoh 
   1891  1.64  knakahar 		/* pre-alloc new mbuf */
   1892  1.65   msaitoh 		if (!discard_multidesc)
   1893  1.65   msaitoh 			newmp = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT, MT_DATA,
   1894  1.65   msaitoh 			    M_PKTHDR, rxr->mbuf_sz);
   1895  1.65   msaitoh 		else
   1896  1.65   msaitoh 			newmp = NULL;
   1897  1.64  knakahar 		if (newmp == NULL) {
   1898  1.66   msaitoh 			rxr->no_jmbuf.ev_count++;
   1899  1.65   msaitoh 			/*
    1900  1.65   msaitoh 			 * So we can reuse the current rbuf->buf for
                         			 * the new packet.
    1901  1.65   msaitoh 			 *
    1902  1.65   msaitoh 			 * Rewrite the buffer addr; see the comment in
    1903  1.65   msaitoh 			 * ixgbe_rx_discard().
   1904  1.65   msaitoh 			 * Rewrite the buffer addr, see comment in
   1905  1.65   msaitoh 			 * ixgbe_rx_discard().
   1906  1.65   msaitoh 			 */
   1907  1.65   msaitoh 			cur->read.pkt_addr = rbuf->addr;
   1908  1.65   msaitoh 			m_freem(rbuf->fmp);
   1909  1.65   msaitoh 			rbuf->fmp = NULL;
   1910  1.65   msaitoh 			if (!eop) {
   1911  1.65   msaitoh 				/* Discard the entire packet. */
   1912  1.65   msaitoh 				discard_multidesc = true;
   1913  1.65   msaitoh 			} else
   1914  1.65   msaitoh 				discard_multidesc = false;
   1915  1.64  knakahar 			goto next_desc;
   1916  1.64  knakahar 		}
   1917  1.65   msaitoh 		discard_multidesc = false;
   1918  1.64  knakahar 
   1919  1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1920  1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1921  1.27   msaitoh 
   1922   1.1   msaitoh 		/*
    1923  1.28   msaitoh 		 * On 82599, which supports hardware
    1924  1.28   msaitoh 		 * LRO (called HW RSC), packets need
    1925  1.28   msaitoh 		 * not be fragmented across sequential
    1926  1.28   msaitoh 		 * descriptors; rather, the next descriptor
    1927  1.28   msaitoh 		 * is indicated in bits of the descriptor.
    1928  1.28   msaitoh 		 * This also means that we might process
    1929  1.28   msaitoh 		 * more than one packet at a time, something
    1930  1.28   msaitoh 		 * that had never been true before; it
    1931  1.28   msaitoh 		 * required eliminating the global chain
    1932  1.28   msaitoh 		 * pointers in favor of what we do here.  -jfv
   1933  1.28   msaitoh 		 */
   1934   1.1   msaitoh 		if (!eop) {
   1935   1.1   msaitoh 			/*
   1936  1.28   msaitoh 			 * Figure out the next descriptor
   1937  1.28   msaitoh 			 * of this frame.
   1938  1.28   msaitoh 			 */
   1939   1.1   msaitoh 			if (rxr->hw_rsc == TRUE) {
   1940   1.1   msaitoh 				rsc = ixgbe_rsc_count(cur);
   1941   1.1   msaitoh 				rxr->rsc_num += (rsc - 1);
   1942   1.1   msaitoh 			}
   1943   1.1   msaitoh 			if (rsc) { /* Get hardware index */
   1944  1.28   msaitoh 				nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
   1945   1.1   msaitoh 				    IXGBE_RXDADV_NEXTP_SHIFT);
   1946   1.1   msaitoh 			} else { /* Just sequential */
   1947   1.1   msaitoh 				nextp = i + 1;
   1948   1.1   msaitoh 				if (nextp == adapter->num_rx_desc)
   1949   1.1   msaitoh 					nextp = 0;
   1950   1.1   msaitoh 			}
   1951   1.1   msaitoh 			nbuf = &rxr->rx_buffers[nextp];
   1952   1.1   msaitoh 			prefetch(nbuf);
   1953   1.1   msaitoh 		}
   1954   1.1   msaitoh 		/*
   1955  1.28   msaitoh 		 * Rather than using the fmp/lmp global pointers
   1956  1.28   msaitoh 		 * we now keep the head of a packet chain in the
   1957  1.28   msaitoh 		 * buffer struct and pass this along from one
   1958  1.28   msaitoh 		 * descriptor to the next, until we get EOP.
   1959  1.28   msaitoh 		 */
   1960   1.1   msaitoh 		/*
    1961  1.28   msaitoh 		 * See if there is a stored head
    1962  1.28   msaitoh 		 * that tells us what we are dealing with.
   1963  1.28   msaitoh 		 */
   1964   1.1   msaitoh 		sendmp = rbuf->fmp;
   1965   1.1   msaitoh 		if (sendmp != NULL) {  /* secondary frag */
   1966  1.64  knakahar 			rbuf->buf = newmp;
   1967  1.64  knakahar 			rbuf->fmp = NULL;
   1968  1.74   msaitoh 			mp->m_len = len;
   1969   1.1   msaitoh 			mp->m_flags &= ~M_PKTHDR;
   1970   1.1   msaitoh 			sendmp->m_pkthdr.len += mp->m_len;
   1971   1.1   msaitoh 		} else {
   1972   1.1   msaitoh 			/*
   1973   1.1   msaitoh 			 * Optimize.  This might be a small packet,
   1974   1.1   msaitoh 			 * maybe just a TCP ACK.  Do a fast copy that
   1975   1.1   msaitoh 			 * is cache aligned into a new mbuf, and
   1976   1.1   msaitoh 			 * leave the old mbuf+cluster for re-use.
   1977   1.1   msaitoh 			 */
   1978   1.1   msaitoh 			if (eop && len <= IXGBE_RX_COPY_LEN) {
   1979   1.1   msaitoh 				sendmp = m_gethdr(M_NOWAIT, MT_DATA);
   1980   1.1   msaitoh 				if (sendmp != NULL) {
   1981  1.28   msaitoh 					sendmp->m_data += IXGBE_RX_COPY_ALIGN;
   1982  1.28   msaitoh 					ixgbe_bcopy(mp->m_data, sendmp->m_data,
   1983  1.28   msaitoh 					    len);
   1984   1.1   msaitoh 					sendmp->m_len = len;
   1985   1.1   msaitoh 					rxr->rx_copies.ev_count++;
   1986   1.1   msaitoh 					rbuf->flags |= IXGBE_RX_COPY;
   1987  1.64  knakahar 
   1988  1.64  knakahar 					m_freem(newmp);
   1989   1.1   msaitoh 				}
   1990   1.1   msaitoh 			}
   1991   1.1   msaitoh 			if (sendmp == NULL) {
   1992  1.64  knakahar 				rbuf->buf = newmp;
   1993  1.64  knakahar 				rbuf->fmp = NULL;
   1994  1.74   msaitoh 				mp->m_len = len;
   1995   1.1   msaitoh 				sendmp = mp;
   1996   1.1   msaitoh 			}
   1997   1.1   msaitoh 
   1998   1.1   msaitoh 			/* first desc of a non-ps chain */
   1999   1.1   msaitoh 			sendmp->m_flags |= M_PKTHDR;
   2000  1.74   msaitoh 			sendmp->m_pkthdr.len = len;
   2001   1.1   msaitoh 		}
   2002   1.1   msaitoh 		++processed;
   2003   1.1   msaitoh 
   2004   1.1   msaitoh 		/* Pass the head pointer on */
   2005   1.1   msaitoh 		if (eop == 0) {
   2006   1.1   msaitoh 			nbuf->fmp = sendmp;
   2007   1.1   msaitoh 			sendmp = NULL;
   2008   1.1   msaitoh 			mp->m_next = nbuf->buf;
   2009   1.1   msaitoh 		} else { /* Sending this frame */
   2010   1.1   msaitoh 			m_set_rcvif(sendmp, ifp);
   2011  1.31   msaitoh 			++rxr->packets;
   2012   1.1   msaitoh 			rxr->rx_packets.ev_count++;
   2013   1.1   msaitoh 			/* capture data for AIM */
   2014   1.1   msaitoh 			rxr->bytes += sendmp->m_pkthdr.len;
   2015   1.1   msaitoh 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   2016   1.1   msaitoh 			/* Process vlan info */
   2017  1.28   msaitoh 			if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
   2018   1.1   msaitoh 				vtag = le16toh(cur->wb.upper.vlan);
   2019   1.1   msaitoh 			if (vtag) {
   2020  1.29  knakahar 				vlan_set_tag(sendmp, vtag);
   2021   1.1   msaitoh 			}
   2022   1.1   msaitoh 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   2023   1.1   msaitoh 				ixgbe_rx_checksum(staterr, sendmp, ptype,
   2024   1.3   msaitoh 				   &adapter->stats.pf);
   2025   1.1   msaitoh 			}
   2026   1.8   msaitoh 
   2027   1.6   msaitoh #if 0 /* FreeBSD */
   2028  1.28   msaitoh 			/*
   2029  1.28   msaitoh 			 * In case of multiqueue, we have RXCSUM.PCSD bit set
   2030  1.28   msaitoh 			 * and never cleared. This means we have RSS hash
   2031  1.28   msaitoh 			 * available to be used.
   2032  1.28   msaitoh 			 */
   2033  1.28   msaitoh 			if (adapter->num_queues > 1) {
   2034  1.28   msaitoh 				sendmp->m_pkthdr.flowid =
   2035  1.28   msaitoh 				    le32toh(cur->wb.lower.hi_dword.rss);
   2036  1.44   msaitoh 				switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
   2037  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV4:
   2038  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2039  1.28   msaitoh 					    M_HASHTYPE_RSS_IPV4);
   2040  1.28   msaitoh 					break;
   2041  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
   2042  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2043  1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV4);
   2044  1.28   msaitoh 					break;
   2045  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6:
   2046  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2047  1.28   msaitoh 					    M_HASHTYPE_RSS_IPV6);
   2048  1.28   msaitoh 					break;
   2049  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
   2050  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2051  1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV6);
   2052  1.28   msaitoh 					break;
   2053  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
   2054  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2055  1.28   msaitoh 					    M_HASHTYPE_RSS_IPV6_EX);
   2056  1.28   msaitoh 					break;
   2057  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
   2058  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2059  1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV6_EX);
   2060  1.28   msaitoh 					break;
   2061   1.6   msaitoh #if __FreeBSD_version > 1100000
   2062  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
   2063  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2064  1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV4);
   2065  1.28   msaitoh 					break;
   2066  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
   2067  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2068  1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV6);
   2069  1.28   msaitoh 					break;
   2070  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
   2071  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2072  1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV6_EX);
   2073  1.28   msaitoh 					break;
   2074  1.28   msaitoh #endif
   2075  1.44   msaitoh 				default:
   2076  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2077  1.28   msaitoh 					    M_HASHTYPE_OPAQUE_HASH);
   2078  1.28   msaitoh 				}
   2079  1.28   msaitoh 			} else {
   2080  1.28   msaitoh 				sendmp->m_pkthdr.flowid = que->msix;
   2081   1.1   msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
   2082   1.1   msaitoh 			}
   2083   1.8   msaitoh #endif
   2084   1.1   msaitoh 		}
   2085   1.1   msaitoh next_desc:
   2086   1.1   msaitoh 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   2087   1.1   msaitoh 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2088   1.1   msaitoh 
   2089   1.1   msaitoh 		/* Advance our pointers to the next descriptor. */
   2090   1.1   msaitoh 		if (++i == rxr->num_desc)
   2091   1.1   msaitoh 			i = 0;
   2092   1.1   msaitoh 
   2093   1.1   msaitoh 		/* Now send to the stack or do LRO */
   2094   1.1   msaitoh 		if (sendmp != NULL) {
   2095   1.1   msaitoh 			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
   2096   1.1   msaitoh 		}
   2097   1.1   msaitoh 
    2098  1.28   msaitoh 		/* Every 8 descriptors we refresh the mbufs */
   2099   1.1   msaitoh 		if (processed == 8) {
   2100   1.1   msaitoh 			ixgbe_refresh_mbufs(rxr, i);
   2101   1.1   msaitoh 			processed = 0;
   2102   1.1   msaitoh 		}
   2103   1.1   msaitoh 	}

	/* Refresh any remaining buf structs */
	if (ixgbe_rx_unrefreshed(rxr))
		ixgbe_refresh_mbufs(rxr, i);

	rxr->next_to_check = i;

	IXGBE_RX_UNLOCK(rxr);

#ifdef LRO
	/*
	 * Flush any outstanding LRO work
	 */
	tcp_lro_flush_all(lro);
#endif /* LRO */

	/*
	 * Still have cleaning to do?
	 */
	if ((staterr & IXGBE_RXD_STAT_DD) != 0)
		return (TRUE);

	return (FALSE);
} /* ixgbe_rxeof */
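
/*
 * Illustrative only (not part of the driver): the TRUE/FALSE return
 * above tells the caller whether descriptors are still pending, so an
 * interrupt or softint handler might loop along these lines:
 *
 *	if (ixgbe_rxeof(que))
 *		... more work remains, reschedule the handler ...
 */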

/************************************************************************
 * ixgbe_rx_checksum
 *
 *   Verify that the hardware indicated that the checksum is valid.
 *   Inform the stack about the status of the checksum so that it
 *   does not spend time verifying it again.
 ************************************************************************/
static void
ixgbe_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype,
    struct ixgbe_hw_stats *stats)
{
	u16  status = (u16)staterr;
	u8   errors = (u8)(staterr >> 24);
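	/*
	 * Note: shifting staterr right by 24 moves the advanced-descriptor
	 * error bits (IXGBE_RXDADV_ERR_*) into the byte positions of the
	 * legacy IXGBE_RXD_ERR_* masks tested below.
	 */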
#if 0
	bool sctp = false;

	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = true;
#endif

	/* IPv4 checksum */
	if (status & IXGBE_RXD_STAT_IPCS) {
		stats->ipcs.ev_count++;
		if (!(errors & IXGBE_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
		} else {
			stats->ipcs_bad.ev_count++;
			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
		}
	}
	/* TCP/UDP/SCTP checksum */
	if (status & IXGBE_RXD_STAT_L4CS) {
		int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;

		stats->l4cs.ev_count++;
		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |= type;
		} else {
			stats->l4cs_bad.ev_count++;
			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
		}
	}
} /* ixgbe_rx_checksum */
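
/*
 * Illustrative only: a consumer of these flags on the stack side
 * typically checks the _BAD bits before trusting the hardware result,
 * e.g.:
 *
 *	if (m->m_pkthdr.csum_flags & (M_CSUM_IPv4_BAD | M_CSUM_TCP_UDP_BAD))
 *		... fall back to software verification or drop ...
 */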

/************************************************************************
 * ixgbe_dma_malloc
 ************************************************************************/
int
ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
		struct ixgbe_dma_alloc *dma, const int mapflags)
{
	device_t dev = adapter->dev;
	int      r, rsegs;

	r = ixgbe_dma_tag_create(
	     /*      parent */ adapter->osdep.dmat,
	     /*   alignment */ DBA_ALIGN,
	     /*      bounds */ 0,
	     /*     maxsize */ size,
	     /*   nsegments */ 1,
	     /*  maxsegsize */ size,
	     /*       flags */ BUS_DMA_ALLOCNOW,
			       &dma->dma_tag);
	if (r != 0) {
		aprint_error_dev(dev,
		    "%s: ixgbe_dma_tag_create failed; error %d\n", __func__,
		    r);
		goto fail_0;
	}

	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size,
	    dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary,
	    &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(dev,
		    "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
		goto fail_1;
	}

	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
		    __func__, r);
		goto fail_2;
	}

	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
	if (r != 0) {
		aprint_error_dev(dev,
		    "%s: ixgbe_dmamap_create failed; error %d\n",
		    __func__, r);
		goto fail_3;
	}

	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map,
	    dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
		    __func__, r);
		goto fail_4;
	}
	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
	dma->dma_size = size;
	return 0;
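	/*
	 * Error unwind: each label below undoes, in reverse order, the
	 * steps that succeeded before the failing one.
	 */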
fail_4:
	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
fail_3:
	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
fail_2:
	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
fail_1:
	ixgbe_dma_tag_destroy(dma->dma_tag);
fail_0:

	return (r);
} /* ixgbe_dma_malloc */

/************************************************************************
 * ixgbe_dma_free
 ************************************************************************/
void
ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
{
	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
	/* Mirror the full unwind in ixgbe_dma_malloc()'s failure path. */
	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, dma->dma_size);
	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
	ixgbe_dma_tag_destroy(dma->dma_tag);
} /* ixgbe_dma_free */
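
/*
 * Usage sketch (illustrative, not a verbatim caller): allocations made
 * with ixgbe_dma_malloc() are expected to be balanced by ixgbe_dma_free():
 *
 *	struct ixgbe_dma_alloc dma;
 *
 *	if (ixgbe_dma_malloc(adapter, size, &dma, BUS_DMA_NOWAIT) == 0) {
 *		... use dma.dma_vaddr / dma.dma_paddr ...
 *		ixgbe_dma_free(adapter, &dma);
 *	}
 */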

/************************************************************************
 * ixgbe_allocate_queues
 *
 *   Allocate memory for the transmit and receive rings, and then
 *   the descriptors associated with each; called only once at attach.
 ************************************************************************/
int
ixgbe_allocate_queues(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	int             rsize, tsize, error = IXGBE_SUCCESS;
	int             txconf = 0, rxconf = 0;

	/* First, allocate the top level queue structs */
	adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
	    adapter->num_queues, M_DEVBUF, M_WAITOK | M_ZERO);

	/* Second, allocate the TX ring struct memory */
	adapter->tx_rings = malloc(sizeof(struct tx_ring) *
	    adapter->num_queues, M_DEVBUF, M_WAITOK | M_ZERO);

	/* Third, allocate the RX ring struct memory */
	adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
	    adapter->num_queues, M_DEVBUF, M_WAITOK | M_ZERO);

	/* For the ring itself */
	tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
	    DBA_ALIGN);

	/*
	 * Now set up the TX queues. txconf counts how many rings have
	 * been fully set up, so that we can unwind them gracefully if
	 * anything fails midcourse.
	 */
	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
		/* Set up some basics */
		txr = &adapter->tx_rings[i];
		txr->adapter = adapter;
		txr->txr_interq = NULL;
		/* In case SR-IOV is enabled, align the index properly */
#ifdef PCI_IOV
		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
		    i);
#else
		txr->me = i;
#endif
		txr->num_desc = adapter->num_tx_desc;

		/* Initialize the TX side lock */
		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);

		if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
		    BUS_DMA_NOWAIT)) {
			aprint_error_dev(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);

		/* Now allocate transmit buffers for the ring */
		if (ixgbe_allocate_transmit_buffers(txr)) {
			aprint_error_dev(dev,
			    "Critical Failure setting up transmit buffers\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			/* Allocate a buf ring */
			txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP);
			if (txr->txr_interq == NULL) {
				aprint_error_dev(dev,
				    "Critical Failure setting up buf ring\n");
				error = ENOMEM;
				goto err_tx_desc;
			}
		}
	}

	/*
	 * Next the RX queues...
	 */
	rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);
	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
		rxr = &adapter->rx_rings[i];
		/* Set up some basics */
		rxr->adapter = adapter;
#ifdef PCI_IOV
		/* In case SR-IOV is enabled, align the index properly */
		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
		    i);
#else
		rxr->me = i;
#endif
		rxr->num_desc = adapter->num_rx_desc;

		/* Initialize the RX side lock */
		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);

		if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
		    BUS_DMA_NOWAIT)) {
			aprint_error_dev(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);

		/* Allocate receive buffers for the ring */
		if (ixgbe_allocate_receive_buffers(rxr)) {
			aprint_error_dev(dev,
			    "Critical Failure setting up receive buffers\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
	}

	/*
	 * Finally set up the queue holding structs
	 */
	for (int i = 0; i < adapter->num_queues; i++) {
		que = &adapter->queues[i];
		que->adapter = adapter;
		que->me = i;
		que->txr = &adapter->tx_rings[i];
		que->rxr = &adapter->rx_rings[i];

		mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET);
		que->disabled_count = 0;
	}

	return (0);

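	/*
	 * Unwind path: txconf/rxconf count the rings whose setup fully
	 * completed above, so only those rings' descriptor memory is
	 * freed here.
	 */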
err_rx_desc:
	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
		ixgbe_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
		ixgbe_dma_free(adapter, &txr->txdma);
	free(adapter->rx_rings, M_DEVBUF);
	free(adapter->tx_rings, M_DEVBUF);
	free(adapter->queues, M_DEVBUF);
	return (error);
} /* ixgbe_allocate_queues */
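
/*
 * Illustrative pairing (not a verbatim caller): the allocation above is
 * expected to be balanced by ixgbe_free_queues() at detach time:
 *
 *	if (ixgbe_allocate_queues(adapter) != 0)
 *		... fail the attach ...
 *	...
 *	ixgbe_free_queues(adapter);
 */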

/************************************************************************
 * ixgbe_free_queues
 *
 *   Free descriptors for the transmit and receive rings, and then
 *   the memory associated with each.
 ************************************************************************/
void
ixgbe_free_queues(struct adapter *adapter)
{
	struct ix_queue *que;
	int i;

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (i = 0; i < adapter->num_queues; i++) {
		que = &adapter->queues[i];
		mutex_destroy(&que->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);
} /* ixgbe_free_queues */