/* $NetBSD: ix_txrx.c,v 1.44 2018/05/16 08:08:24 msaitoh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include "ixgbe.h"

/*
 * HW RSC control:
 *  This feature only works with IPv4 and only on 82599 and newer
 *  adapters.  It also breaks IP forwarding, and unlike LRO that
 *  cannot be controlled by the stack.  For these reasons it is
 *  left off by default, with no tunable interface; enabling it
 *  requires recompiling with this set to TRUE.
 */
static bool ixgbe_rsc_enable = FALSE;

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool.  With the default of 20, every 20th packet
 * is probed.
 *
 * Setting this to 0 disables the feature.
 */
static int atr_sample_rate = 20;

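#if 0
/*
 * Illustrative sketch only (kept out of the build): how the sample
 * rate above gates Flow Director probing in ixgbe_xmit() below.
 * Every atr_sample_rate-th transmitted packet is handed to
 * ixgbe_atr(), which programs a hardware filter for that flow.
 */
	if (txr->atr_sample != 0 && ++txr->atr_count >= txr->atr_sample) {
		ixgbe_atr(txr, m_head);
		txr->atr_count = 0;
	}
#endif
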
/************************************************************************
 *  Local Function prototypes
 ************************************************************************/
static void          ixgbe_setup_transmit_ring(struct tx_ring *);
static void          ixgbe_free_transmit_buffers(struct tx_ring *);
static int           ixgbe_setup_receive_ring(struct rx_ring *);
static void          ixgbe_free_receive_buffers(struct rx_ring *);
static void          ixgbe_rx_checksum(u32, struct mbuf *, u32,
                                       struct ixgbe_hw_stats *);
static void          ixgbe_refresh_mbufs(struct rx_ring *, int);
static void          ixgbe_drain(struct ifnet *, struct tx_ring *);
static int           ixgbe_xmit(struct tx_ring *, struct mbuf *);
static int           ixgbe_tx_ctx_setup(struct tx_ring *,
                                        struct mbuf *, u32 *, u32 *);
static int           ixgbe_tso_setup(struct tx_ring *,
                                     struct mbuf *, u32 *, u32 *);
static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
                                    struct mbuf *, u32);
static int           ixgbe_dma_malloc(struct adapter *, bus_size_t,
                                      struct ixgbe_dma_alloc *, int);
static void          ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);

static void	ixgbe_setup_hw_rsc(struct rx_ring *);

/************************************************************************
 * ixgbe_legacy_start_locked - Transmit entry point
 *
 *   Called by the stack to initiate a transmit.
 *   The driver will remain in this routine as long as there are
 *   packets to transmit and transmit resources are available.
 *   In case resources are not available, the stack is notified
 *   and the packet is requeued.
 ************************************************************************/
int
ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;
	int enqueued = 0;
	int rc;

	IXGBE_TX_LOCK_ASSERT(txr);

	if (!adapter->link_active) {
		/*
		 * Discard all packets buffered in IFQ to avoid
		 * sending stale packets when the link comes back up.
		 */
		ixgbe_drain(ifp, txr);
		return (ENETDOWN);
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (ENETDOWN);

	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
			break;

		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
			break;
		}
		enqueued++;
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (rc != 0) {
			m_freem(m_head);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, m_head);
	}

	if (enqueued) {
		txr->lastsent = time_uptime;
		txr->sending = true;
	}

	return IXGBE_SUCCESS;
} /* ixgbe_legacy_start_locked */

/************************************************************************
 * ixgbe_legacy_start
 *
 *   Called by the stack, this always uses the first tx ring,
 *   and should not be used with multiqueue tx enabled.
 ************************************************************************/
void
ixgbe_legacy_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_flags & IFF_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}
} /* ixgbe_legacy_start */

/************************************************************************
 * ixgbe_mq_start - Multiqueue Transmit Entry Point
 *
 *   (if_transmit function)
 ************************************************************************/
int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr;
	int		i;
#ifdef RSS
	uint32_t bucket_id;
#endif

	/*
	 * When doing RSS, map it to the same outbound queue
	 * as the incoming flow would be mapped to.
	 *
	 * If everything is set up correctly, it should be the
	 * same bucket as the one for the CPU we're currently on.
	 */
#ifdef RSS
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
		    (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
		    &bucket_id) == 0)) {
			i = bucket_id % adapter->num_queues;
#ifdef IXGBE_DEBUG
			if (bucket_id > adapter->num_queues)
				if_printf(ifp,
				    "bucket_id (%d) > num_queues (%d)\n",
				    bucket_id, adapter->num_queues);
#endif
		} else
			i = m->m_pkthdr.flowid % adapter->num_queues;
	} else
#endif /* RSS */
		i = cpu_index(curcpu()) % adapter->num_queues;

	/* Check for a hung queue and pick alternative (ffs64 is 1-based) */
	if (((1ULL << i) & adapter->active_queues) == 0)
		i = ffs64(adapter->active_queues) - 1;

	txr = &adapter->tx_rings[i];

	if (pcq_put(txr->txr_interq, m) == false) {
		m_freem(m);
		txr->pcq_drops.ev_count++;
		return (ENOBUFS);
	}
	if (IXGBE_TX_TRYLOCK(txr)) {
		ixgbe_mq_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	} else {
		if (adapter->txrx_use_workqueue) {
			u_int *enqueued;

			/*
			 * This function itself is not called in interrupt
			 * context, but it can be called in fast softint
			 * context right after receiving forwarded packets.
			 * We therefore have to guard against enqueueing the
			 * work twice when the machine handles both locally
			 * generated and forwarded packets.
			 */
			enqueued = percpu_getref(adapter->txr_wq_enqueued);
			if (*enqueued == 0) {
				*enqueued = 1;
				percpu_putref(adapter->txr_wq_enqueued);
				workqueue_enqueue(adapter->txr_wq,
				    &txr->wq_cookie, curcpu());
			} else
				percpu_putref(adapter->txr_wq_enqueued);
		} else
			softint_schedule(txr->txr_si);
	}

	return (0);
} /* ixgbe_mq_start */
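
#if 0
/*
 * Illustrative sketch only (not part of this file): how the transmit
 * entry points above are typically installed on the ifnet at attach
 * time.  The real assignments live in ixgbe.c and may differ in
 * detail; this helper name is made up for illustration.
 */
static void
example_wire_tx_entry_points(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;

	if (adapter->num_queues > 1)
		ifp->if_transmit = ixgbe_mq_start;  /* multiqueue path */
	else
		ifp->if_start = ixgbe_legacy_start; /* legacy if_snd path */
}
#endif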

/************************************************************************
 * ixgbe_mq_start_locked
 ************************************************************************/
int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf    *next;
	int            enqueued = 0, err = 0;

	if (!txr->adapter->link_active) {
		/*
		 * Discard all packets buffered in txr_interq to avoid
		 * sending stale packets when the link comes back up.
		 */
		ixgbe_drain(ifp, txr);
		return (ENETDOWN);
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (ENETDOWN);

	/* Process the queue */
	while ((next = pcq_get(txr->txr_interq)) != NULL) {
		if ((err = ixgbe_xmit(txr, next)) != 0) {
			m_freem(next);
			/* All errors are counted in ixgbe_xmit() */
			break;
		}
		enqueued++;
#if __FreeBSD_version >= 1100036
		/*
		 * Since we're looking at the tx ring, we can check
		 * to see if we're a VF by examining our tail register
		 * address.
		 */
		if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
		    (next->m_flags & M_MCAST))
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
#endif
		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, next);
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	if (enqueued) {
		txr->lastsent = time_uptime;
		txr->sending = true;
	}

	if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
		ixgbe_txeof(txr);

	return (err);
} /* ixgbe_mq_start_locked */

/************************************************************************
 * ixgbe_deferred_mq_start
 *
 *   Called from a softint and workqueue (indirectly) to drain queued
 *   transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start(void *arg)
{
	struct tx_ring *txr = arg;
	struct adapter *adapter = txr->adapter;
	struct ifnet   *ifp = adapter->ifp;

	IXGBE_TX_LOCK(txr);
	if (pcq_peek(txr->txr_interq) != NULL)
		ixgbe_mq_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_deferred_mq_start */

/************************************************************************
 * ixgbe_deferred_mq_start_work
 *
 *   Called from a workqueue to drain queued transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start_work(struct work *wk, void *arg)
{
	struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);
	struct adapter *adapter = txr->adapter;
	u_int *enqueued = percpu_getref(adapter->txr_wq_enqueued);

	*enqueued = 0;
	percpu_putref(adapter->txr_wq_enqueued);

	ixgbe_deferred_mq_start(txr);
} /* ixgbe_deferred_mq_start_work */
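
/*
 * A note on the txr_wq_enqueued flag used above: ixgbe_mq_start() sets
 * the current CPU's flag to 1 just before calling workqueue_enqueue(),
 * and the work handler above clears it before draining.  workqueue(9)
 * does not allow the same struct work to be enqueued again before its
 * handler has run, so this flag is what prevents that double enqueue.
 */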

/************************************************************************
 * ixgbe_drain_all
 ************************************************************************/
void
ixgbe_drain_all(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring  *txr = que->txr;

		IXGBE_TX_LOCK(txr);
		ixgbe_drain(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}
} /* ixgbe_drain_all */

/************************************************************************
 * ixgbe_xmit
 *
 *   Maps the mbufs to tx descriptors, allowing the
 *   TX engine to transmit the packets.
 *
 *   Return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
{
	struct adapter          *adapter = txr->adapter;
	struct ixgbe_tx_buf     *txbuf;
	union ixgbe_adv_tx_desc *txd = NULL;
	struct ifnet            *ifp = adapter->ifp;
	int                     i, j, error;
	int                     first;
	u32                     olinfo_status = 0, cmd_type_len;
	bool                    remap = TRUE;
	bus_dmamap_t            map;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (vlan_has_tag(m_head))
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
retry:
	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head,
	    BUS_DMA_NOWAIT);

	if (__predict_false(error)) {
		struct mbuf *m;

		switch (error) {
		case EAGAIN:
			txr->q_eagain_tx_dma_setup++;
			return EAGAIN;
		case ENOMEM:
			txr->q_enomem_tx_dma_setup++;
			return EAGAIN;
		case EFBIG:
			/* Try it again? - one try */
			if (remap == TRUE) {
				remap = FALSE;
				/*
				 * XXX: m_defrag will choke on
				 * non-MCLBYTES-sized clusters
				 */
				txr->q_efbig_tx_dma_setup++;
				m = m_defrag(m_head, M_NOWAIT);
				if (m == NULL) {
					txr->q_mbuf_defrag_failed++;
					return ENOBUFS;
				}
				m_head = m;
				goto retry;
			} else {
				txr->q_efbig2_tx_dma_setup++;
				return error;
			}
		case EINVAL:
			txr->q_einval_tx_dma_setup++;
			return error;
		default:
			txr->q_other_tx_dma_setup++;
			return error;
		}
	}

	/*
	 * Make certain there are enough descriptors (+2 leaves room
	 * for the offload context descriptor plus one slot of slack).
	 */
	if (txr->tx_avail < (map->dm_nsegs + 2)) {
		txr->no_desc_avail.ev_count++;
		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
		return EAGAIN;
	}

	/*
	 * Set up the appropriate offload context;
	 * this will consume the first descriptor.
	 */
	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
	if (__predict_false(error)) {
		return (error);
	}

	/* Do the flow director magic */
	if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
	    (txr->atr_sample) && (!adapter->fdir_reinit)) {
		++txr->atr_count;
		if (txr->atr_count >= atr_sample_rate) {
			ixgbe_atr(txr, m_head);
			txr->atr_count = 0;
		}
	}

	olinfo_status |= IXGBE_ADVTXD_CC;
	i = txr->next_avail_desc;
	for (j = 0; j < map->dm_nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = map->dm_segs[j].ds_len;
		segaddr = htole64(map->dm_segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		if (++i == txr->num_desc)
			i = 0;
	}

	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= map->dm_nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	/*
	 * Here we swap the map so the last descriptor,
	 * which gets the completion interrupt, has the
	 * real map, and the first descriptor gets the
	 * unused map from this descriptor.
	 */
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);

	/* Set the EOP descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop = txd;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets.ev_count;
	IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);

	/*
	 * XXX NOMPSAFE: ifp->if_data should be percpu.
	 */
	ifp->if_obytes += m_head->m_pkthdr.len;
	if (m_head->m_flags & M_MCAST)
		ifp->if_omcasts++;

	return (0);
} /* ixgbe_xmit */
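
#if 0
/*
 * Illustrative sketch only: the circular descriptor-ring arithmetic
 * used by ixgbe_xmit() above (and by ixgbe_txeof() below).  Indices
 * advance modulo num_desc; this helper name is made up.
 */
static inline int
example_next_desc_index(int idx, int num_desc)
{
	return (++idx == num_desc) ? 0 : idx;
}
#endif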

/************************************************************************
 * ixgbe_drain
 ************************************************************************/
static void
ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf *m;

	IXGBE_TX_LOCK_ASSERT(txr);

	if (txr->me == 0) {
		while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			IF_DROP(&ifp->if_snd);
		}
	}

	while ((m = pcq_get(txr->txr_interq)) != NULL) {
		m_freem(m);
		txr->pcq_drops.ev_count++;
	}
} /* ixgbe_drain */

/************************************************************************
 * ixgbe_allocate_transmit_buffers
 *
 *   Allocate memory for tx_buffer structures. The tx_buffer stores all
 *   the information needed to transmit a packet on the wire. This is
 *   called only once at attach; setup is done on every reset.
 ************************************************************************/
static int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter      *adapter = txr->adapter;
	device_t            dev = adapter->dev;
	struct ixgbe_tx_buf *txbuf;
	int                 error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	error = ixgbe_dma_tag_create(
	         /*      parent */ adapter->osdep.dmat,
	         /*   alignment */ 1,
	         /*      bounds */ 0,
	         /*     maxsize */ IXGBE_TSO_SIZE,
	         /*   nsegments */ adapter->num_segs,
	         /*  maxsegsize */ PAGE_SIZE,
	         /*       flags */ 0,
	                           &txr->txtag);
	if (error != 0) {
		aprint_error_dev(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	txr->tx_buffers =
	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (txr->tx_buffers == NULL) {
		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			aprint_error_dev(dev,
			    "Unable to create TX DMA map (%d)\n", error);
			goto fail;
		}
	}

	return 0;
fail:
	/* We free all, it handles case where we are in the middle */
#if 0 /* XXX was FreeBSD */
	ixgbe_free_transmit_structures(adapter);
#else
	ixgbe_free_transmit_buffers(txr);
#endif
	return (error);
} /* ixgbe_allocate_transmit_buffers */

/************************************************************************
 * ixgbe_setup_transmit_ring - Initialize a transmit ring.
 ************************************************************************/
static void
ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter        *adapter = txr->adapter;
	struct ixgbe_tx_buf   *txbuf;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_slot    *slot;
#endif /* DEV_NETMAP */

	/* Clear the old ring contents */
	IXGBE_TX_LOCK(txr);

#ifdef DEV_NETMAP
	if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
		/*
		 * (under lock): if in netmap mode, do some consistency
		 * checks and set slot to entry 0 of the netmap ring.
		 */
		slot = netmap_reset(na, NR_TX, txr->me, 0);
	}
#endif /* DEV_NETMAP */

	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;
	txr->sending = false;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (int i = 0; i < txr->num_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
			    0, txbuf->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}

#ifdef DEV_NETMAP
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * Slots in the netmap ring (indexed by "si") are
		 * kring->nkr_hwofs positions "ahead" wrt the
		 * corresponding slot in the NIC ring. In some drivers
		 * (not here) nkr_hwofs can be negative. Function
		 * netmap_idx_n2k() handles wraparounds properly.
		 */
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
			int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
			netmap_load_map(na, txr->txtag,
			    txbuf->map, NMB(na, slot + si));
		}
#endif /* DEV_NETMAP */

		/* Clear the EOP descriptor pointer */
		txbuf->eop = NULL;
	}

	/* Set the rate at which we sample packets */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		txr->atr_sample = atr_sample_rate;

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_setup_transmit_ring */

/************************************************************************
 * ixgbe_setup_transmit_structures - Initialize all transmit rings.
 ************************************************************************/
int
ixgbe_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixgbe_setup_transmit_ring(txr);

	return (0);
} /* ixgbe_setup_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_structures - Free all transmit rings.
 ************************************************************************/
void
ixgbe_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		ixgbe_free_transmit_buffers(txr);
		ixgbe_dma_free(adapter, &txr->txdma);
		IXGBE_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
} /* ixgbe_free_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_buffers
 *
 *   Free transmit ring related data structures.
 ************************************************************************/
static void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter      *adapter = txr->adapter;
	struct ixgbe_tx_buf *tx_buffer;
	int                 i;

	INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
			    0, tx_buffer->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				ixgbe_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
	if (txr->txr_interq != NULL) {
		struct mbuf *m;

		while ((m = pcq_get(txr->txr_interq)) != NULL)
			m_freem(m);
		pcq_destroy(txr->txr_interq);
	}
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		ixgbe_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
} /* ixgbe_free_transmit_buffers */

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct adapter                   *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header         *eh;
#ifdef INET
	struct ip                        *ip;
#endif
#ifdef INET6
	struct ip6_hdr                   *ip6;
#endif
	int                              ehdrlen, ip_hlen = 0;
	int                              offload = TRUE;
	int                              ctxd = txr->next_avail_desc;
	u32                              vlan_macip_lens = 0;
	u32                              type_tucmd_mlhl = 0;
	u16                              vtag = 0;
	u16                              etype;
	u8                               ipproto = 0;
	char                             *l3d;

	/* First check if TSO is to be used */
	if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
		int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);

		if (rv != 0)
			++adapter->tso_err.ev_count;
		return rv;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
		offload = FALSE;

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor. Hence
	 * we need to make one even if not doing offloads.
	 */
	if (vlan_has_tag(mp)) {
		vtag = htole16(vlan_get_tag(mp));
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
	           (offload == FALSE))
		return (0);

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	if (offload == FALSE)
		goto no_offloads;

	/*
	 * If the first mbuf only includes the ethernet header,
	 * jump to the next one.
	 * XXX: This assumes the stack splits mbufs containing headers
	 *      on header boundaries
	 * XXX: And assumes the entire IP header is contained in one mbuf
	 */
	if (mp->m_len == ehdrlen && mp->m_next)
		l3d = mtod(mp->m_next, char *);
	else
		l3d = mtod(mp, char *) + ehdrlen;

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(l3d);
		ip_hlen = ip->ip_hl << 2;
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
		    ip->ip_sum == 0);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(l3d);
		ip_hlen = sizeof(struct ip6_hdr);
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
	default:
		offload = false;
		break;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;

	vlan_macip_lens |= ip_hlen;

	/* No support for offloads for non-L4 next headers */
	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags &
		    (M_CSUM_TCPv4 | M_CSUM_TCPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = false;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags &
		    (M_CSUM_UDPv4 | M_CSUM_UDPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = false;
		break;
	default:
		offload = false;
		break;
	}

	if (offload) /* Insert L4 checksum into data descriptors */
		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

no_offloads:
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (0);
} /* ixgbe_tx_ctx_setup */
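
#if 0
/*
 * Illustrative sketch only: the field packing built by
 * ixgbe_tx_ctx_setup() above.  vlan_macip_lens carries the VLAN tag in
 * the top 16 bits (IXGBE_ADVTXD_VLAN_SHIFT), the MAC header length
 * starting at bit 9 (IXGBE_ADVTXD_MACLEN_SHIFT) and the IP header
 * length in the low bits.  For an untagged IPv4 frame with no IP
 * options this works out to (helper name made up):
 */
static inline u32
example_vlan_macip_lens(void)
{
	u32 v = 0;

	v |= ETHER_HDR_LEN << IXGBE_ADVTXD_MACLEN_SHIFT; /* 14-byte MAC header */
	v |= sizeof(struct ip);                          /* 20-byte IP header */
	return v;
}
#endif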
    971   1.1   msaitoh 
    972  1.28   msaitoh /************************************************************************
    973  1.28   msaitoh  * ixgbe_tso_setup
    974   1.1   msaitoh  *
    975  1.28   msaitoh  *   Setup work for hardware segmentation offload (TSO) on
    976  1.28   msaitoh  *   adapters using advanced tx descriptors
    977  1.28   msaitoh  ************************************************************************/
    978   1.1   msaitoh static int
    979  1.28   msaitoh ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
    980  1.28   msaitoh     u32 *olinfo_status)
    981   1.1   msaitoh {
    982   1.1   msaitoh 	struct ixgbe_adv_tx_context_desc *TXD;
    983  1.28   msaitoh 	struct ether_vlan_header         *eh;
    984   1.1   msaitoh #ifdef INET6
    985  1.28   msaitoh 	struct ip6_hdr                   *ip6;
    986   1.1   msaitoh #endif
    987   1.1   msaitoh #ifdef INET
    988  1.28   msaitoh 	struct ip                        *ip;
    989   1.1   msaitoh #endif
    990  1.28   msaitoh 	struct tcphdr                    *th;
    991  1.28   msaitoh 	int                              ctxd, ehdrlen, ip_hlen, tcp_hlen;
    992  1.28   msaitoh 	u32                              vlan_macip_lens = 0;
    993  1.28   msaitoh 	u32                              type_tucmd_mlhl = 0;
    994  1.28   msaitoh 	u32                              mss_l4len_idx = 0, paylen;
    995  1.28   msaitoh 	u16                              vtag = 0, eh_type;
    996   1.1   msaitoh 
    997   1.1   msaitoh 	/*
    998   1.1   msaitoh 	 * Determine where frame payload starts.
    999   1.1   msaitoh 	 * Jump over vlan headers if already present
   1000   1.1   msaitoh 	 */
   1001   1.1   msaitoh 	eh = mtod(mp, struct ether_vlan_header *);
   1002   1.1   msaitoh 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   1003   1.1   msaitoh 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   1004   1.1   msaitoh 		eh_type = eh->evl_proto;
   1005   1.1   msaitoh 	} else {
   1006   1.1   msaitoh 		ehdrlen = ETHER_HDR_LEN;
   1007   1.1   msaitoh 		eh_type = eh->evl_encap_proto;
   1008   1.1   msaitoh 	}
   1009   1.1   msaitoh 
   1010   1.1   msaitoh 	switch (ntohs(eh_type)) {
   1011   1.1   msaitoh #ifdef INET
   1012   1.1   msaitoh 	case ETHERTYPE_IP:
   1013   1.1   msaitoh 		ip = (struct ip *)(mp->m_data + ehdrlen);
   1014   1.1   msaitoh 		if (ip->ip_p != IPPROTO_TCP)
   1015   1.1   msaitoh 			return (ENXIO);
   1016   1.1   msaitoh 		ip->ip_sum = 0;
   1017   1.1   msaitoh 		ip_hlen = ip->ip_hl << 2;
   1018   1.1   msaitoh 		th = (struct tcphdr *)((char *)ip + ip_hlen);
   1019   1.1   msaitoh 		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   1020   1.1   msaitoh 		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   1021   1.1   msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
   1022   1.1   msaitoh 		/* Tell transmit desc to also do IPv4 checksum. */
   1023   1.1   msaitoh 		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
   1024   1.1   msaitoh 		break;
   1025   1.1   msaitoh #endif
   1026  1.28   msaitoh #ifdef INET6
   1027  1.28   msaitoh 	case ETHERTYPE_IPV6:
   1028  1.28   msaitoh 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
   1029  1.28   msaitoh 		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
   1030  1.28   msaitoh 		if (ip6->ip6_nxt != IPPROTO_TCP)
   1031  1.28   msaitoh 			return (ENXIO);
   1032  1.28   msaitoh 		ip_hlen = sizeof(struct ip6_hdr);
   1033  1.28   msaitoh 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
   1034  1.28   msaitoh 		th = (struct tcphdr *)((char *)ip6 + ip_hlen);
   1035  1.28   msaitoh 		th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   1036  1.28   msaitoh 		    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   1037  1.28   msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
   1038  1.28   msaitoh 		break;
   1039  1.28   msaitoh #endif
   1040   1.1   msaitoh 	default:
   1041   1.1   msaitoh 		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
   1042   1.1   msaitoh 		    __func__, ntohs(eh_type));
   1043   1.1   msaitoh 		break;
   1044   1.1   msaitoh 	}
   1045   1.1   msaitoh 
   1046   1.1   msaitoh 	ctxd = txr->next_avail_desc;
   1047  1.28   msaitoh 	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
   1048   1.1   msaitoh 
   1049   1.1   msaitoh 	tcp_hlen = th->th_off << 2;
   1050   1.1   msaitoh 
   1051   1.1   msaitoh 	/* This is used in the transmit desc in encap */
   1052   1.1   msaitoh 	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
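                         	/*
                         	 * Note: paylen counts the TCP payload only; the hardware
                         	 * pairs it with the MSS programmed below to decide how
                         	 * many segments to generate.
                         	 */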
   1053   1.1   msaitoh 
   1054   1.1   msaitoh 	/* VLAN MACLEN IPLEN */
   1055  1.29  knakahar 	if (vlan_has_tag(mp)) {
   1056  1.29  knakahar 		vtag = htole16(vlan_get_tag(mp));
   1057  1.28   msaitoh 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
   1058   1.1   msaitoh 	}
   1059   1.1   msaitoh 
   1060   1.1   msaitoh 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
   1061   1.1   msaitoh 	vlan_macip_lens |= ip_hlen;
   1062   1.1   msaitoh 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
   1063   1.1   msaitoh 
   1064   1.1   msaitoh 	/* ADV DTYPE TUCMD */
   1065   1.1   msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
   1066   1.1   msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
   1067   1.1   msaitoh 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
   1068   1.1   msaitoh 
   1069   1.1   msaitoh 	/* MSS L4LEN IDX */
   1070   1.1   msaitoh 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   1071   1.1   msaitoh 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   1072   1.1   msaitoh 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   1073   1.1   msaitoh 
   1074   1.1   msaitoh 	TXD->seqnum_seed = htole32(0);
   1075   1.1   msaitoh 
   1076   1.1   msaitoh 	if (++ctxd == txr->num_desc)
   1077   1.1   msaitoh 		ctxd = 0;
   1078   1.1   msaitoh 
   1079   1.1   msaitoh 	txr->tx_avail--;
   1080   1.1   msaitoh 	txr->next_avail_desc = ctxd;
   1081   1.1   msaitoh 	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1082   1.1   msaitoh 	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1083   1.1   msaitoh 	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1084   1.1   msaitoh 	++txr->tso_tx.ev_count;
   1085  1.28   msaitoh 
   1086   1.1   msaitoh 	return (0);
   1087  1.28   msaitoh } /* ixgbe_tso_setup */
   1088   1.1   msaitoh 
   1089   1.3   msaitoh 
   1090  1.28   msaitoh /************************************************************************
   1091  1.28   msaitoh  * ixgbe_txeof
   1092   1.1   msaitoh  *
   1093  1.28   msaitoh  *   Examine each tx_buffer in the used queue. If the hardware is done
   1094  1.28   msaitoh  *   processing the packet then free associated resources. The
   1095  1.28   msaitoh  *   tx_buffer is put back on the free queue.
   1096  1.28   msaitoh  ************************************************************************/
   1097  1.32   msaitoh bool
   1098   1.1   msaitoh ixgbe_txeof(struct tx_ring *txr)
   1099   1.1   msaitoh {
   1100   1.1   msaitoh 	struct adapter		*adapter = txr->adapter;
   1101   1.1   msaitoh 	struct ifnet		*ifp = adapter->ifp;
   1102  1.28   msaitoh 	struct ixgbe_tx_buf	*buf;
   1103  1.28   msaitoh 	union ixgbe_adv_tx_desc *txd;
   1104   1.1   msaitoh 	u32			work, processed = 0;
   1105   1.7   msaitoh 	u32			limit = adapter->tx_process_limit;
   1106   1.1   msaitoh 
   1107   1.1   msaitoh 	KASSERT(mutex_owned(&txr->tx_mtx));
   1108   1.1   msaitoh 
   1109   1.1   msaitoh #ifdef DEV_NETMAP
   1110  1.28   msaitoh 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1111  1.28   msaitoh 	    (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
   1112  1.28   msaitoh 		struct netmap_adapter *na = NA(adapter->ifp);
   1113   1.1   msaitoh 		struct netmap_kring *kring = &na->tx_rings[txr->me];
   1114   1.1   msaitoh 		txd = txr->tx_base;
   1115   1.1   msaitoh 		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1116   1.1   msaitoh 		    BUS_DMASYNC_POSTREAD);
   1117   1.1   msaitoh 		/*
   1118   1.1   msaitoh 		 * In netmap mode, all the work is done in the context
   1119   1.1   msaitoh 		 * of the client thread. Interrupt handlers only wake up
   1120   1.1   msaitoh 		 * clients, which may be sleeping on individual rings
   1121   1.1   msaitoh 		 * or on a global resource for all rings.
   1122   1.1   msaitoh 		 * To implement tx interrupt mitigation, we wake up the client
   1123   1.1   msaitoh 		 * thread roughly every half ring, even if the NIC interrupts
   1124   1.1   msaitoh 		 * more frequently. This is implemented as follows:
   1125   1.1   msaitoh 		 * - ixgbe_txsync() sets kring->nr_kflags with the index of
   1126   1.1   msaitoh 		 *   the slot that should wake up the thread (nkr_num_slots
   1127   1.1   msaitoh 		 *   means the user thread should not be woken up);
   1128   1.1   msaitoh 		 * - the driver ignores tx interrupts unless netmap_mitigate=0
   1129   1.1   msaitoh 		 *   or the slot has the DD bit set.
   1130   1.1   msaitoh 		 */
   1131   1.1   msaitoh 		if (!netmap_mitigate ||
   1132   1.1   msaitoh 		    (kring->nr_kflags < kring->nkr_num_slots &&
   1133  1.28   msaitoh 		     txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
   1134   1.1   msaitoh 			netmap_tx_irq(ifp, txr->me);
   1135   1.1   msaitoh 		}
   1136  1.32   msaitoh 		return false;
   1137   1.1   msaitoh 	}
   1138   1.1   msaitoh #endif /* DEV_NETMAP */
   1139   1.1   msaitoh 
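                         	/* Quick exit when the ring is already completely clean */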
   1140   1.1   msaitoh 	if (txr->tx_avail == txr->num_desc) {
   1141  1.42   msaitoh 		txr->sending = false;
   1142  1.32   msaitoh 		return false;
   1143   1.1   msaitoh 	}
   1144   1.1   msaitoh 
   1145   1.1   msaitoh 	/* Get work starting point */
   1146   1.1   msaitoh 	work = txr->next_to_clean;
   1147   1.1   msaitoh 	buf = &txr->tx_buffers[work];
   1148   1.1   msaitoh 	txd = &txr->tx_base[work];
   1149   1.1   msaitoh 	work -= txr->num_desc; /* The distance to ring end */
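                         	/*
                         	 * 'work' is now biased by -num_desc: after the unsigned
                         	 * increments below it reaches zero exactly when the ring
                         	 * wraps, so the '!work' tests detect the wrap without a
                         	 * modulo on every step.
                         	 */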
   1150  1.28   msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1151   1.1   msaitoh 	    BUS_DMASYNC_POSTREAD);
   1152   1.8   msaitoh 
   1153   1.1   msaitoh 	do {
   1154   1.8   msaitoh 		union ixgbe_adv_tx_desc *eop = buf->eop;
   1155   1.1   msaitoh 		if (eop == NULL) /* No work */
   1156   1.1   msaitoh 			break;
   1157   1.1   msaitoh 
   1158   1.1   msaitoh 		if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
   1159   1.1   msaitoh 			break;	/* I/O not complete */
   1160   1.1   msaitoh 
   1161   1.1   msaitoh 		if (buf->m_head) {
   1162  1.28   msaitoh 			txr->bytes += buf->m_head->m_pkthdr.len;
   1163  1.28   msaitoh 			bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
   1164   1.1   msaitoh 			    0, buf->m_head->m_pkthdr.len,
   1165   1.1   msaitoh 			    BUS_DMASYNC_POSTWRITE);
   1166  1.28   msaitoh 			ixgbe_dmamap_unload(txr->txtag, buf->map);
   1167   1.1   msaitoh 			m_freem(buf->m_head);
   1168   1.1   msaitoh 			buf->m_head = NULL;
   1169   1.1   msaitoh 		}
   1170   1.1   msaitoh 		buf->eop = NULL;
   1171   1.1   msaitoh 		++txr->tx_avail;
   1172   1.1   msaitoh 
    1173   1.1   msaitoh 		/* For a multi-segment packet, clean the whole range up to the EOP */
   1174   1.1   msaitoh 		while (txd != eop) {
   1175   1.1   msaitoh 			++txd;
   1176   1.1   msaitoh 			++buf;
   1177   1.1   msaitoh 			++work;
   1178   1.1   msaitoh 			/* wrap the ring? */
   1179   1.1   msaitoh 			if (__predict_false(!work)) {
   1180   1.1   msaitoh 				work -= txr->num_desc;
   1181   1.1   msaitoh 				buf = txr->tx_buffers;
   1182   1.1   msaitoh 				txd = txr->tx_base;
   1183   1.1   msaitoh 			}
   1184   1.1   msaitoh 			if (buf->m_head) {
   1185   1.1   msaitoh 				txr->bytes +=
   1186   1.1   msaitoh 				    buf->m_head->m_pkthdr.len;
   1187   1.1   msaitoh 				bus_dmamap_sync(txr->txtag->dt_dmat,
   1188   1.1   msaitoh 				    buf->map,
   1189   1.1   msaitoh 				    0, buf->m_head->m_pkthdr.len,
   1190   1.1   msaitoh 				    BUS_DMASYNC_POSTWRITE);
   1191   1.1   msaitoh 				ixgbe_dmamap_unload(txr->txtag,
   1192   1.1   msaitoh 				    buf->map);
   1193   1.1   msaitoh 				m_freem(buf->m_head);
   1194   1.1   msaitoh 				buf->m_head = NULL;
   1195   1.1   msaitoh 			}
   1196   1.1   msaitoh 			++txr->tx_avail;
   1197   1.1   msaitoh 			buf->eop = NULL;
   1198   1.1   msaitoh 
   1199   1.1   msaitoh 		}
   1200   1.1   msaitoh 		++txr->packets;
   1201   1.1   msaitoh 		++processed;
   1202   1.1   msaitoh 		++ifp->if_opackets;
   1203   1.1   msaitoh 
   1204   1.1   msaitoh 		/* Try the next packet */
   1205   1.1   msaitoh 		++txd;
   1206   1.1   msaitoh 		++buf;
   1207   1.1   msaitoh 		++work;
   1208   1.1   msaitoh 		/* reset with a wrap */
   1209   1.1   msaitoh 		if (__predict_false(!work)) {
   1210   1.1   msaitoh 			work -= txr->num_desc;
   1211   1.1   msaitoh 			buf = txr->tx_buffers;
   1212   1.1   msaitoh 			txd = txr->tx_base;
   1213   1.1   msaitoh 		}
   1214   1.1   msaitoh 		prefetch(txd);
   1215   1.1   msaitoh 	} while (__predict_true(--limit));
   1216   1.1   msaitoh 
   1217   1.1   msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1218   1.1   msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1219   1.1   msaitoh 
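                         	/* Remove the bias applied above to recover the real index */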
   1220   1.1   msaitoh 	work += txr->num_desc;
   1221   1.1   msaitoh 	txr->next_to_clean = work;
   1222   1.1   msaitoh 
   1223  1.43   msaitoh 	if (txr->tx_avail == txr->num_desc)
   1224  1.43   msaitoh 		txr->sending = false;
   1225  1.43   msaitoh 
    1226  1.32   msaitoh 	return (limit == 0);
   1227  1.28   msaitoh } /* ixgbe_txeof */
   1228   1.1   msaitoh 
   1229  1.28   msaitoh /************************************************************************
   1230  1.28   msaitoh  * ixgbe_rsc_count
   1231  1.28   msaitoh  *
   1232  1.28   msaitoh  *   Used to detect a descriptor that has been merged by Hardware RSC.
   1233  1.28   msaitoh  ************************************************************************/
   1234   1.1   msaitoh static inline u32
   1235   1.1   msaitoh ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
   1236   1.1   msaitoh {
   1237   1.1   msaitoh 	return (le32toh(rx->wb.lower.lo_dword.data) &
   1238   1.1   msaitoh 	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
   1239  1.28   msaitoh } /* ixgbe_rsc_count */
   1240   1.1   msaitoh 
   1241  1.28   msaitoh /************************************************************************
   1242  1.28   msaitoh  * ixgbe_setup_hw_rsc
   1243   1.1   msaitoh  *
   1244  1.28   msaitoh  *   Initialize Hardware RSC (LRO) feature on 82599
    1245  1.28   msaitoh  *   for an RX ring; this is toggled by the LRO capability
   1246  1.28   msaitoh  *   even though it is transparent to the stack.
   1247  1.28   msaitoh  *
   1248  1.28   msaitoh  *   NOTE: Since this HW feature only works with IPv4 and
   1249  1.28   msaitoh  *         testing has shown soft LRO to be as effective,
   1250  1.28   msaitoh  *         this feature will be disabled by default.
   1251  1.28   msaitoh  ************************************************************************/
   1252   1.1   msaitoh static void
   1253   1.1   msaitoh ixgbe_setup_hw_rsc(struct rx_ring *rxr)
   1254   1.1   msaitoh {
   1255  1.28   msaitoh 	struct	adapter  *adapter = rxr->adapter;
   1256  1.28   msaitoh 	struct	ixgbe_hw *hw = &adapter->hw;
   1257  1.28   msaitoh 	u32              rscctrl, rdrxctl;
   1258   1.1   msaitoh 
   1259   1.1   msaitoh 	/* If turning LRO/RSC off we need to disable it */
   1260   1.1   msaitoh 	if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
   1261   1.1   msaitoh 		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
    1262   1.1   msaitoh 		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
                         		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
    1263   1.1   msaitoh 		return;
   1264   1.1   msaitoh 	}
   1265   1.1   msaitoh 
   1266   1.1   msaitoh 	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
   1267   1.1   msaitoh 	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
   1268  1.28   msaitoh #ifdef DEV_NETMAP
   1269  1.28   msaitoh 	/* Always strip CRC unless Netmap disabled it */
   1270  1.28   msaitoh 	if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
   1271  1.28   msaitoh 	    !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
   1272  1.28   msaitoh 	    ix_crcstrip)
   1273   1.1   msaitoh #endif /* DEV_NETMAP */
   1274  1.28   msaitoh 		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
   1275   1.1   msaitoh 	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
   1276   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
   1277   1.1   msaitoh 
   1278   1.1   msaitoh 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
   1279   1.1   msaitoh 	rscctrl |= IXGBE_RSCCTL_RSCEN;
   1280   1.1   msaitoh 	/*
   1281  1.28   msaitoh 	 * Limit the total number of descriptors that
   1282  1.28   msaitoh 	 * can be combined, so it does not exceed 64K
   1283  1.28   msaitoh 	 */
   1284   1.1   msaitoh 	if (rxr->mbuf_sz == MCLBYTES)
   1285   1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
   1286   1.1   msaitoh 	else if (rxr->mbuf_sz == MJUMPAGESIZE)
   1287   1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
   1288   1.1   msaitoh 	else if (rxr->mbuf_sz == MJUM9BYTES)
   1289   1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
   1290   1.1   msaitoh 	else  /* Using 16K cluster */
   1291   1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
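                         	/*
                         	 * A rough check of the arithmetic: 16 descriptors of 2K
                         	 * clusters is ~32KB and 4 descriptors of 9K clusters is
                         	 * ~36KB, so each choice above keeps a merged frame well
                         	 * under the 64K limit.
                         	 */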
   1292   1.1   msaitoh 
   1293   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
   1294   1.1   msaitoh 
   1295   1.1   msaitoh 	/* Enable TCP header recognition */
   1296   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
   1297  1.28   msaitoh 	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
   1298   1.1   msaitoh 
   1299   1.1   msaitoh 	/* Disable RSC for ACK packets */
   1300   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
   1301   1.1   msaitoh 	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
   1302   1.1   msaitoh 
   1303   1.1   msaitoh 	rxr->hw_rsc = TRUE;
   1304  1.28   msaitoh } /* ixgbe_setup_hw_rsc */
   1305   1.8   msaitoh 
   1306  1.28   msaitoh /************************************************************************
   1307  1.28   msaitoh  * ixgbe_refresh_mbufs
   1308   1.1   msaitoh  *
   1309  1.28   msaitoh  *   Refresh mbuf buffers for RX descriptor rings
   1310  1.28   msaitoh  *    - now keeps its own state so discards due to resource
   1311  1.28   msaitoh  *      exhaustion are unnecessary, if an mbuf cannot be obtained
    1312  1.28   msaitoh  *      exhaustion are unnecessary; if an mbuf cannot be obtained
    1313  1.28   msaitoh  *      it just returns, keeping its placeholder, so it can simply
   1314  1.28   msaitoh  ************************************************************************/
   1315   1.1   msaitoh static void
   1316   1.1   msaitoh ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
   1317   1.1   msaitoh {
   1318  1.28   msaitoh 	struct adapter      *adapter = rxr->adapter;
   1319  1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1320  1.28   msaitoh 	struct mbuf         *mp;
   1321  1.28   msaitoh 	int                 i, j, error;
   1322  1.28   msaitoh 	bool                refreshed = false;
   1323   1.1   msaitoh 
   1324   1.1   msaitoh 	i = j = rxr->next_to_refresh;
   1325   1.1   msaitoh 	/* Control the loop with one beyond */
   1326   1.1   msaitoh 	if (++j == rxr->num_desc)
   1327   1.1   msaitoh 		j = 0;
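                         	/*
                         	 * 'j' runs one slot ahead of 'i', so the refresh stops one
                         	 * descriptor short of 'limit'; the tail never catches up to
                         	 * the cleaner, keeping a full ring distinguishable from an
                         	 * empty one.
                         	 */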
   1328   1.1   msaitoh 
   1329   1.1   msaitoh 	while (j != limit) {
   1330   1.1   msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1331   1.1   msaitoh 		if (rxbuf->buf == NULL) {
   1332   1.1   msaitoh 			mp = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
   1333   1.1   msaitoh 			    MT_DATA, M_PKTHDR, rxr->mbuf_sz);
   1334   1.1   msaitoh 			if (mp == NULL) {
   1335   1.1   msaitoh 				rxr->no_jmbuf.ev_count++;
   1336   1.1   msaitoh 				goto update;
   1337   1.1   msaitoh 			}
   1338   1.1   msaitoh 			if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
   1339   1.1   msaitoh 				m_adj(mp, ETHER_ALIGN);
   1340   1.1   msaitoh 		} else
   1341   1.1   msaitoh 			mp = rxbuf->buf;
   1342   1.1   msaitoh 
   1343   1.1   msaitoh 		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1344   1.1   msaitoh 
    1345   1.1   msaitoh 		/*
                         		 * If we're dealing with an mbuf that was copied rather
    1346   1.1   msaitoh 		 * than replaced, there's no need to go through busdma.
    1347   1.1   msaitoh 		 */
   1348   1.1   msaitoh 		if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
   1349   1.1   msaitoh 			/* Get the memory mapping */
   1350   1.4   msaitoh 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   1351   1.1   msaitoh 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   1352   1.1   msaitoh 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   1353   1.1   msaitoh 			if (error != 0) {
    1354  1.28   msaitoh 				device_printf(adapter->dev, "Refresh mbufs: "
                         				    "payload dmamap load failure - %d\n", error);
   1355   1.1   msaitoh 				m_free(mp);
   1356   1.1   msaitoh 				rxbuf->buf = NULL;
   1357   1.1   msaitoh 				goto update;
   1358   1.1   msaitoh 			}
   1359   1.1   msaitoh 			rxbuf->buf = mp;
   1360   1.1   msaitoh 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1361   1.1   msaitoh 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   1362   1.1   msaitoh 			rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
   1363   1.1   msaitoh 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1364   1.1   msaitoh 		} else {
   1365   1.1   msaitoh 			rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
   1366   1.1   msaitoh 			rxbuf->flags &= ~IXGBE_RX_COPY;
   1367   1.1   msaitoh 		}
   1368   1.1   msaitoh 
   1369   1.1   msaitoh 		refreshed = true;
   1370   1.1   msaitoh 		/* Next is precalculated */
   1371   1.1   msaitoh 		i = j;
   1372   1.1   msaitoh 		rxr->next_to_refresh = i;
   1373   1.1   msaitoh 		if (++j == rxr->num_desc)
   1374   1.1   msaitoh 			j = 0;
   1375   1.1   msaitoh 	}
   1376  1.28   msaitoh 
   1377   1.1   msaitoh update:
   1378   1.1   msaitoh 	if (refreshed) /* Update hardware tail index */
   1379  1.28   msaitoh 		IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);
   1380  1.28   msaitoh 
   1381   1.1   msaitoh 	return;
   1382  1.28   msaitoh } /* ixgbe_refresh_mbufs */
   1383   1.1   msaitoh 
   1384  1.28   msaitoh /************************************************************************
   1385  1.28   msaitoh  * ixgbe_allocate_receive_buffers
   1386   1.1   msaitoh  *
   1387  1.28   msaitoh  *   Allocate memory for rx_buffer structures. Since we use one
    1388  1.28   msaitoh  *   rx_buffer per received packet, the maximum number of rx_buffers
   1389  1.28   msaitoh  *   that we'll need is equal to the number of receive descriptors
   1390  1.28   msaitoh  *   that we've allocated.
   1391  1.28   msaitoh  ************************************************************************/
   1392  1.28   msaitoh static int
   1393   1.1   msaitoh ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
   1394   1.1   msaitoh {
   1395  1.28   msaitoh 	struct	adapter     *adapter = rxr->adapter;
   1396  1.28   msaitoh 	device_t            dev = adapter->dev;
   1397  1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1398  1.28   msaitoh 	int                 bsize, error;
   1399   1.1   msaitoh 
   1400   1.1   msaitoh 	bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
   1401  1.28   msaitoh 	rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_DEVBUF,
   1402  1.28   msaitoh 	    M_NOWAIT | M_ZERO);
   1403  1.28   msaitoh 	if (rxr->rx_buffers == NULL) {
   1404   1.1   msaitoh 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   1405   1.1   msaitoh 		error = ENOMEM;
   1406   1.1   msaitoh 		goto fail;
   1407   1.1   msaitoh 	}
   1408   1.1   msaitoh 
   1409  1.28   msaitoh 	error = ixgbe_dma_tag_create(
   1410  1.28   msaitoh 	         /*      parent */ adapter->osdep.dmat,
   1411  1.28   msaitoh 	         /*   alignment */ 1,
   1412  1.28   msaitoh 	         /*      bounds */ 0,
   1413  1.28   msaitoh 	         /*     maxsize */ MJUM16BYTES,
   1414  1.28   msaitoh 	         /*   nsegments */ 1,
   1415  1.28   msaitoh 	         /*  maxsegsize */ MJUM16BYTES,
   1416  1.28   msaitoh 	         /*       flags */ 0,
   1417  1.28   msaitoh 	                           &rxr->ptag);
   1418  1.28   msaitoh 	if (error != 0) {
   1419   1.1   msaitoh 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   1420   1.1   msaitoh 		goto fail;
   1421   1.1   msaitoh 	}
   1422   1.1   msaitoh 
    1423   1.5   msaitoh 	for (int i = 0; i < rxr->num_desc; i++) {
   1424   1.1   msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1425   1.4   msaitoh 		error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
   1426   1.1   msaitoh 		if (error) {
   1427   1.1   msaitoh 			aprint_error_dev(dev, "Unable to create RX dma map\n");
   1428   1.1   msaitoh 			goto fail;
   1429   1.1   msaitoh 		}
   1430   1.1   msaitoh 	}
   1431   1.1   msaitoh 
   1432   1.1   msaitoh 	return (0);
   1433   1.1   msaitoh 
   1434   1.1   msaitoh fail:
   1435   1.1   msaitoh 	/* Frees all, but can handle partial completion */
   1436   1.1   msaitoh 	ixgbe_free_receive_structures(adapter);
   1437  1.28   msaitoh 
   1438   1.1   msaitoh 	return (error);
   1439  1.28   msaitoh } /* ixgbe_allocate_receive_buffers */
   1440   1.1   msaitoh 
   1441  1.28   msaitoh /************************************************************************
   1442  1.30   msaitoh  * ixgbe_free_receive_ring
   1443  1.28   msaitoh  ************************************************************************/
   1444  1.28   msaitoh static void
   1445   1.1   msaitoh ixgbe_free_receive_ring(struct rx_ring *rxr)
   1446  1.27   msaitoh {
   1447   1.5   msaitoh 	for (int i = 0; i < rxr->num_desc; i++) {
   1448  1.27   msaitoh 		ixgbe_rx_discard(rxr, i);
   1449   1.1   msaitoh 	}
   1450  1.28   msaitoh } /* ixgbe_free_receive_ring */
   1451   1.1   msaitoh 
   1452  1.28   msaitoh /************************************************************************
   1453  1.28   msaitoh  * ixgbe_setup_receive_ring
   1454   1.1   msaitoh  *
   1455  1.28   msaitoh  *   Initialize a receive ring and its buffers.
   1456  1.28   msaitoh  ************************************************************************/
   1457   1.1   msaitoh static int
   1458   1.1   msaitoh ixgbe_setup_receive_ring(struct rx_ring *rxr)
   1459   1.1   msaitoh {
   1460  1.28   msaitoh 	struct adapter        *adapter;
   1461  1.28   msaitoh 	struct ixgbe_rx_buf   *rxbuf;
   1462   1.1   msaitoh #ifdef LRO
   1463  1.28   msaitoh 	struct ifnet          *ifp;
   1464  1.28   msaitoh 	struct lro_ctrl       *lro = &rxr->lro;
   1465   1.1   msaitoh #endif /* LRO */
   1466   1.1   msaitoh #ifdef DEV_NETMAP
   1467   1.1   msaitoh 	struct netmap_adapter *na = NA(rxr->adapter->ifp);
   1468  1.28   msaitoh 	struct netmap_slot    *slot;
   1469   1.1   msaitoh #endif /* DEV_NETMAP */
   1470  1.28   msaitoh 	int                   rsize, error = 0;
   1471   1.1   msaitoh 
   1472   1.1   msaitoh 	adapter = rxr->adapter;
   1473   1.1   msaitoh #ifdef LRO
   1474   1.1   msaitoh 	ifp = adapter->ifp;
   1475   1.1   msaitoh #endif /* LRO */
   1476   1.1   msaitoh 
   1477   1.1   msaitoh 	/* Clear the ring contents */
   1478   1.1   msaitoh 	IXGBE_RX_LOCK(rxr);
   1479  1.28   msaitoh 
   1480   1.1   msaitoh #ifdef DEV_NETMAP
   1481  1.28   msaitoh 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1482  1.28   msaitoh 		slot = netmap_reset(na, NR_RX, rxr->me, 0);
   1483   1.1   msaitoh #endif /* DEV_NETMAP */
   1484  1.28   msaitoh 
   1485   1.1   msaitoh 	rsize = roundup2(adapter->num_rx_desc *
   1486   1.1   msaitoh 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
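                         	/*
                         	 * rsize matches the DBA_ALIGN roundup used when the ring
                         	 * was allocated, so the bzero below clears the entire
                         	 * descriptor area.
                         	 */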
   1487   1.1   msaitoh 	bzero((void *)rxr->rx_base, rsize);
   1488   1.1   msaitoh 	/* Cache the size */
   1489   1.1   msaitoh 	rxr->mbuf_sz = adapter->rx_mbuf_sz;
   1490   1.1   msaitoh 
   1491   1.1   msaitoh 	/* Free current RX buffer structs and their mbufs */
   1492   1.1   msaitoh 	ixgbe_free_receive_ring(rxr);
   1493   1.1   msaitoh 
   1494   1.1   msaitoh 	/* Now replenish the mbufs */
   1495   1.1   msaitoh 	for (int j = 0; j != rxr->num_desc; ++j) {
   1496  1.28   msaitoh 		struct mbuf *mp;
   1497   1.1   msaitoh 
   1498   1.1   msaitoh 		rxbuf = &rxr->rx_buffers[j];
   1499  1.28   msaitoh 
   1500   1.1   msaitoh #ifdef DEV_NETMAP
   1501   1.1   msaitoh 		/*
   1502   1.1   msaitoh 		 * In netmap mode, fill the map and set the buffer
   1503   1.1   msaitoh 		 * address in the NIC ring, considering the offset
   1504   1.1   msaitoh 		 * between the netmap and NIC rings (see comment in
   1505   1.1   msaitoh 		 * ixgbe_setup_transmit_ring() ). No need to allocate
   1506   1.1   msaitoh 		 * an mbuf, so end the block with a continue;
   1507   1.1   msaitoh 		 */
   1508  1.28   msaitoh 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
   1509   1.1   msaitoh 			int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
   1510   1.1   msaitoh 			uint64_t paddr;
   1511   1.1   msaitoh 			void *addr;
   1512   1.1   msaitoh 
   1513   1.1   msaitoh 			addr = PNMB(na, slot + sj, &paddr);
   1514   1.1   msaitoh 			netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
   1515   1.1   msaitoh 			/* Update descriptor and the cached value */
   1516   1.1   msaitoh 			rxr->rx_base[j].read.pkt_addr = htole64(paddr);
   1517   1.1   msaitoh 			rxbuf->addr = htole64(paddr);
   1518   1.1   msaitoh 			continue;
   1519   1.1   msaitoh 		}
   1520   1.1   msaitoh #endif /* DEV_NETMAP */
   1521  1.28   msaitoh 
   1522  1.28   msaitoh 		rxbuf->flags = 0;
   1523   1.1   msaitoh 		rxbuf->buf = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
   1524   1.1   msaitoh 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   1525   1.1   msaitoh 		if (rxbuf->buf == NULL) {
   1526   1.1   msaitoh 			error = ENOBUFS;
   1527  1.28   msaitoh 			goto fail;
   1528   1.1   msaitoh 		}
   1529   1.1   msaitoh 		mp = rxbuf->buf;
   1530   1.1   msaitoh 		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1531   1.1   msaitoh 		/* Get the memory mapping */
   1532  1.28   msaitoh 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap,
   1533  1.28   msaitoh 		    mp, BUS_DMA_NOWAIT);
   1534   1.1   msaitoh 		if (error != 0)
    1535   1.1   msaitoh 			goto fail;
   1536   1.1   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1537   1.1   msaitoh 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   1538   1.1   msaitoh 		/* Update the descriptor and the cached value */
   1539   1.1   msaitoh 		rxr->rx_base[j].read.pkt_addr =
   1540   1.1   msaitoh 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1541   1.1   msaitoh 		rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1542   1.1   msaitoh 	}
   1543   1.1   msaitoh 
   1545   1.1   msaitoh 	/* Setup our descriptor indices */
   1546   1.1   msaitoh 	rxr->next_to_check = 0;
   1547   1.1   msaitoh 	rxr->next_to_refresh = 0;
   1548   1.1   msaitoh 	rxr->lro_enabled = FALSE;
   1549   1.1   msaitoh 	rxr->rx_copies.ev_count = 0;
   1550  1.13   msaitoh #if 0 /* NetBSD */
   1551   1.1   msaitoh 	rxr->rx_bytes.ev_count = 0;
   1552  1.13   msaitoh #if 1	/* Fix inconsistency */
   1553  1.13   msaitoh 	rxr->rx_packets.ev_count = 0;
   1554  1.13   msaitoh #endif
   1555  1.13   msaitoh #endif
   1556   1.1   msaitoh 	rxr->vtag_strip = FALSE;
   1557   1.1   msaitoh 
   1558   1.1   msaitoh 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1559   1.1   msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1560   1.1   msaitoh 
   1561   1.1   msaitoh 	/*
   1562  1.28   msaitoh 	 * Now set up the LRO interface
   1563  1.28   msaitoh 	 */
   1564   1.1   msaitoh 	if (ixgbe_rsc_enable)
   1565   1.1   msaitoh 		ixgbe_setup_hw_rsc(rxr);
   1566   1.1   msaitoh #ifdef LRO
   1567   1.1   msaitoh 	else if (ifp->if_capenable & IFCAP_LRO) {
   1568   1.1   msaitoh 		device_t dev = adapter->dev;
   1569   1.1   msaitoh 		int err = tcp_lro_init(lro);
   1570   1.1   msaitoh 		if (err) {
   1571   1.1   msaitoh 			device_printf(dev, "LRO Initialization failed!\n");
   1572   1.1   msaitoh 			goto fail;
   1573   1.1   msaitoh 		}
   1574   1.1   msaitoh 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   1575   1.1   msaitoh 		rxr->lro_enabled = TRUE;
   1576   1.1   msaitoh 		lro->ifp = adapter->ifp;
   1577   1.1   msaitoh 	}
   1578   1.1   msaitoh #endif /* LRO */
   1579   1.1   msaitoh 
   1580   1.1   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1581  1.28   msaitoh 
   1582   1.1   msaitoh 	return (0);
   1583   1.1   msaitoh 
   1584   1.1   msaitoh fail:
   1585   1.1   msaitoh 	ixgbe_free_receive_ring(rxr);
   1586   1.1   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1587  1.28   msaitoh 
   1588   1.1   msaitoh 	return (error);
   1589  1.28   msaitoh } /* ixgbe_setup_receive_ring */
   1590   1.1   msaitoh 
   1591  1.28   msaitoh /************************************************************************
   1592  1.28   msaitoh  * ixgbe_setup_receive_structures - Initialize all receive rings.
   1593  1.28   msaitoh  ************************************************************************/
   1594   1.1   msaitoh int
   1595   1.1   msaitoh ixgbe_setup_receive_structures(struct adapter *adapter)
   1596   1.1   msaitoh {
   1597   1.1   msaitoh 	struct rx_ring *rxr = adapter->rx_rings;
   1598  1.28   msaitoh 	int            j;
   1599   1.1   msaitoh 
   1600  1.30   msaitoh 	/*
   1601  1.30   msaitoh 	 * Now reinitialize our supply of jumbo mbufs.  The number
   1602  1.30   msaitoh 	 * or size of jumbo mbufs may have changed.
   1603  1.30   msaitoh 	 * Assume all of rxr->ptag are the same.
   1604  1.30   msaitoh 	 */
   1605  1.41   msaitoh 	ixgbe_jcl_reinit(adapter, rxr->ptag->dt_dmat,
   1606  1.30   msaitoh 	    (2 * adapter->num_rx_desc) * adapter->num_queues,
   1607  1.30   msaitoh 	    adapter->rx_mbuf_sz);
   1608  1.30   msaitoh 
   1609   1.1   msaitoh 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   1610   1.1   msaitoh 		if (ixgbe_setup_receive_ring(rxr))
   1611   1.1   msaitoh 			goto fail;
   1612   1.1   msaitoh 
   1613   1.1   msaitoh 	return (0);
   1614   1.1   msaitoh fail:
   1615   1.1   msaitoh 	/*
    1616   1.1   msaitoh 	 * Free RX buffers allocated so far; we will only handle
    1617   1.1   msaitoh 	 * the rings that completed, since the failing case will have
    1618   1.1   msaitoh 	 * cleaned up for itself. 'j' failed, so it's the terminus.
   1619   1.1   msaitoh 	 */
   1620   1.1   msaitoh 	for (int i = 0; i < j; ++i) {
   1621   1.1   msaitoh 		rxr = &adapter->rx_rings[i];
   1622  1.27   msaitoh 		IXGBE_RX_LOCK(rxr);
   1623   1.1   msaitoh 		ixgbe_free_receive_ring(rxr);
   1624  1.27   msaitoh 		IXGBE_RX_UNLOCK(rxr);
   1625   1.1   msaitoh 	}
   1626   1.1   msaitoh 
   1627   1.1   msaitoh 	return (ENOBUFS);
   1628  1.28   msaitoh } /* ixgbe_setup_receive_structures */
   1629   1.1   msaitoh 
   1630   1.3   msaitoh 
   1631  1.28   msaitoh /************************************************************************
   1632  1.28   msaitoh  * ixgbe_free_receive_structures - Free all receive rings.
   1633  1.28   msaitoh  ************************************************************************/
   1634   1.1   msaitoh void
   1635   1.1   msaitoh ixgbe_free_receive_structures(struct adapter *adapter)
   1636   1.1   msaitoh {
   1637   1.1   msaitoh 	struct rx_ring *rxr = adapter->rx_rings;
   1638   1.1   msaitoh 
   1639   1.1   msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
   1640   1.1   msaitoh 
   1641   1.1   msaitoh 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1642   1.1   msaitoh 		ixgbe_free_receive_buffers(rxr);
   1643   1.1   msaitoh #ifdef LRO
   1644   1.1   msaitoh 		/* Free LRO memory */
   1645  1.28   msaitoh 		tcp_lro_free(&rxr->lro);
   1646   1.1   msaitoh #endif /* LRO */
   1647   1.1   msaitoh 		/* Free the ring memory as well */
   1648   1.1   msaitoh 		ixgbe_dma_free(adapter, &rxr->rxdma);
   1649   1.1   msaitoh 		IXGBE_RX_LOCK_DESTROY(rxr);
   1650   1.1   msaitoh 	}
   1651   1.1   msaitoh 
   1652   1.1   msaitoh 	free(adapter->rx_rings, M_DEVBUF);
   1653  1.28   msaitoh } /* ixgbe_free_receive_structures */
   1654   1.1   msaitoh 
   1655   1.1   msaitoh 
   1656  1.28   msaitoh /************************************************************************
   1657  1.28   msaitoh  * ixgbe_free_receive_buffers - Free receive ring data structures
   1658  1.28   msaitoh  ************************************************************************/
   1659   1.1   msaitoh static void
   1660   1.1   msaitoh ixgbe_free_receive_buffers(struct rx_ring *rxr)
   1661   1.1   msaitoh {
   1662  1.28   msaitoh 	struct adapter      *adapter = rxr->adapter;
   1663  1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1664   1.1   msaitoh 
   1665   1.1   msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
   1666   1.1   msaitoh 
   1667   1.1   msaitoh 	/* Cleanup any existing buffers */
   1668   1.1   msaitoh 	if (rxr->rx_buffers != NULL) {
   1669   1.1   msaitoh 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   1670   1.1   msaitoh 			rxbuf = &rxr->rx_buffers[i];
   1671  1.27   msaitoh 			ixgbe_rx_discard(rxr, i);
   1672   1.1   msaitoh 			if (rxbuf->pmap != NULL) {
   1673   1.1   msaitoh 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   1674   1.1   msaitoh 				rxbuf->pmap = NULL;
   1675   1.1   msaitoh 			}
   1676   1.1   msaitoh 		}
   1677   1.1   msaitoh 		if (rxr->rx_buffers != NULL) {
   1678   1.1   msaitoh 			free(rxr->rx_buffers, M_DEVBUF);
   1679   1.1   msaitoh 			rxr->rx_buffers = NULL;
   1680   1.1   msaitoh 		}
   1681   1.1   msaitoh 	}
   1682   1.1   msaitoh 
   1683   1.1   msaitoh 	if (rxr->ptag != NULL) {
   1684   1.1   msaitoh 		ixgbe_dma_tag_destroy(rxr->ptag);
   1685   1.1   msaitoh 		rxr->ptag = NULL;
   1686   1.1   msaitoh 	}
   1687   1.1   msaitoh 
   1688   1.1   msaitoh 	return;
   1689  1.28   msaitoh } /* ixgbe_free_receive_buffers */
   1690   1.1   msaitoh 
   1691  1.28   msaitoh /************************************************************************
   1692  1.28   msaitoh  * ixgbe_rx_input
   1693  1.28   msaitoh  ************************************************************************/
   1694   1.1   msaitoh static __inline void
   1695  1.28   msaitoh ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
   1696  1.28   msaitoh     u32 ptype)
   1697   1.1   msaitoh {
   1698  1.20   msaitoh 	struct adapter	*adapter = ifp->if_softc;
   1699   1.1   msaitoh 
   1700   1.1   msaitoh #ifdef LRO
   1701   1.1   msaitoh 	struct ethercom *ec = &adapter->osdep.ec;
   1702   1.1   msaitoh 
   1703  1.28   msaitoh 	/*
    1704  1.28   msaitoh 	 * At the moment LRO is only for IP/TCP packets, and the TCP checksum
    1705  1.28   msaitoh 	 * of the packet must have been computed by hardware. Also it must not
    1706  1.28   msaitoh 	 * have a VLAN tag in the ethernet header.  In the case of IPv6 we do
                         	 * not yet support ext. hdrs.
   1707  1.28   msaitoh 	 */
    1708   1.1   msaitoh 	if (rxr->lro_enabled &&
    1709   1.1   msaitoh 	    (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
    1710   1.1   msaitoh 	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
    1711   1.1   msaitoh 	    ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
    1712   1.1   msaitoh 	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
    1713   1.1   msaitoh 	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
    1714   1.1   msaitoh 	    (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
    1715   1.1   msaitoh 	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
    1716   1.1   msaitoh 	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
    1717   1.1   msaitoh 		/*
    1718   1.1   msaitoh 		 * Send to the stack if:
    1719   1.1   msaitoh 		 *  - LRO not enabled, or
    1720   1.1   msaitoh 		 *  - no LRO resources, or
    1721   1.1   msaitoh 		 *  - lro enqueue fails
    1722   1.1   msaitoh 		 */
    1723   1.1   msaitoh 		if (rxr->lro.lro_cnt != 0)
    1724   1.1   msaitoh 			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
    1725   1.1   msaitoh 				return;
    1726   1.1   msaitoh 	}
   1727   1.1   msaitoh #endif /* LRO */
   1728   1.1   msaitoh 
   1729  1.20   msaitoh 	if_percpuq_enqueue(adapter->ipq, m);
   1730  1.28   msaitoh } /* ixgbe_rx_input */
   1731   1.1   msaitoh 
   1732  1.28   msaitoh /************************************************************************
   1733  1.28   msaitoh  * ixgbe_rx_discard
   1734  1.28   msaitoh  ************************************************************************/
   1735   1.1   msaitoh static __inline void
   1736   1.1   msaitoh ixgbe_rx_discard(struct rx_ring *rxr, int i)
   1737   1.1   msaitoh {
   1738  1.28   msaitoh 	struct ixgbe_rx_buf *rbuf;
   1739   1.1   msaitoh 
   1740   1.1   msaitoh 	rbuf = &rxr->rx_buffers[i];
   1741   1.1   msaitoh 
   1742   1.1   msaitoh 	/*
   1743  1.28   msaitoh 	 * With advanced descriptors the writeback
    1744  1.28   msaitoh 	 * clobbers the buffer addrs, so it's easier
   1745  1.28   msaitoh 	 * to just free the existing mbufs and take
   1746  1.28   msaitoh 	 * the normal refresh path to get new buffers
   1747  1.28   msaitoh 	 * and mapping.
   1748  1.28   msaitoh 	 */
   1749   1.1   msaitoh 
   1750  1.26   msaitoh 	if (rbuf->fmp != NULL) {/* Partial chain ? */
   1751  1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1752  1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1753   1.1   msaitoh 		m_freem(rbuf->fmp);
   1754   1.1   msaitoh 		rbuf->fmp = NULL;
   1755   1.1   msaitoh 		rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
   1756   1.1   msaitoh 	} else if (rbuf->buf) {
   1757  1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1758  1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1759   1.1   msaitoh 		m_free(rbuf->buf);
   1760   1.1   msaitoh 		rbuf->buf = NULL;
   1761   1.1   msaitoh 	}
   1762   1.4   msaitoh 	ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
   1763   1.1   msaitoh 
   1764   1.1   msaitoh 	rbuf->flags = 0;
   1765   1.1   msaitoh 
   1766   1.1   msaitoh 	return;
   1767  1.28   msaitoh } /* ixgbe_rx_discard */
   1768   1.1   msaitoh 
   1769   1.1   msaitoh 
   1770  1.28   msaitoh /************************************************************************
   1771  1.28   msaitoh  * ixgbe_rxeof
   1772   1.1   msaitoh  *
   1773  1.28   msaitoh  *   Executes in interrupt context. It replenishes the
    1774  1.28   msaitoh  *   mbufs in the descriptor ring and sends data which has
    1775  1.28   msaitoh  *   been dma'ed into host memory to the upper layer.
   1776   1.1   msaitoh  *
   1777  1.28   msaitoh  *   Return TRUE for more work, FALSE for all clean.
   1778  1.28   msaitoh  ************************************************************************/
   1779   1.1   msaitoh bool
   1780   1.1   msaitoh ixgbe_rxeof(struct ix_queue *que)
   1781   1.1   msaitoh {
   1782   1.1   msaitoh 	struct adapter		*adapter = que->adapter;
   1783   1.1   msaitoh 	struct rx_ring		*rxr = que->rxr;
   1784   1.1   msaitoh 	struct ifnet		*ifp = adapter->ifp;
   1785   1.1   msaitoh #ifdef LRO
   1786   1.1   msaitoh 	struct lro_ctrl		*lro = &rxr->lro;
   1787   1.1   msaitoh #endif /* LRO */
   1788  1.28   msaitoh 	union ixgbe_adv_rx_desc	*cur;
   1789  1.28   msaitoh 	struct ixgbe_rx_buf	*rbuf, *nbuf;
   1790   1.1   msaitoh 	int			i, nextp, processed = 0;
   1791   1.1   msaitoh 	u32			staterr = 0;
   1792   1.7   msaitoh 	u32			count = adapter->rx_process_limit;
   1793   1.1   msaitoh #ifdef RSS
   1794   1.1   msaitoh 	u16			pkt_info;
   1795   1.1   msaitoh #endif
   1796   1.1   msaitoh 
   1797   1.1   msaitoh 	IXGBE_RX_LOCK(rxr);
   1798   1.1   msaitoh 
   1799   1.1   msaitoh #ifdef DEV_NETMAP
   1800  1.28   msaitoh 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
   1801  1.28   msaitoh 		/* Same as the txeof routine: wakeup clients on intr. */
   1802  1.28   msaitoh 		if (netmap_rx_irq(ifp, rxr->me, &processed)) {
   1803  1.28   msaitoh 			IXGBE_RX_UNLOCK(rxr);
   1804  1.28   msaitoh 			return (FALSE);
   1805  1.28   msaitoh 		}
   1806   1.1   msaitoh 	}
   1807   1.1   msaitoh #endif /* DEV_NETMAP */
   1808   1.1   msaitoh 
   1809   1.1   msaitoh 	for (i = rxr->next_to_check; count != 0;) {
   1810  1.28   msaitoh 		struct mbuf *sendmp, *mp;
   1811  1.28   msaitoh 		u32         rsc, ptype;
   1812  1.28   msaitoh 		u16         len;
   1813  1.28   msaitoh 		u16         vtag = 0;
   1814  1.28   msaitoh 		bool        eop;
   1815   1.1   msaitoh 
   1816   1.1   msaitoh 		/* Sync the ring. */
   1817   1.1   msaitoh 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1818   1.1   msaitoh 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1819   1.1   msaitoh 
   1820   1.1   msaitoh 		cur = &rxr->rx_base[i];
   1821   1.1   msaitoh 		staterr = le32toh(cur->wb.upper.status_error);
   1822   1.1   msaitoh #ifdef RSS
   1823   1.1   msaitoh 		pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
   1824   1.1   msaitoh #endif
   1825   1.1   msaitoh 
   1826   1.1   msaitoh 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   1827   1.1   msaitoh 			break;
   1828   1.1   msaitoh 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   1829   1.1   msaitoh 			break;
   1830   1.1   msaitoh 
   1831   1.1   msaitoh 		count--;
   1832   1.1   msaitoh 		sendmp = NULL;
   1833   1.1   msaitoh 		nbuf = NULL;
   1834   1.1   msaitoh 		rsc = 0;
   1835   1.1   msaitoh 		cur->wb.upper.status_error = 0;
   1836   1.1   msaitoh 		rbuf = &rxr->rx_buffers[i];
   1837   1.1   msaitoh 		mp = rbuf->buf;
   1838   1.1   msaitoh 
   1839   1.1   msaitoh 		len = le16toh(cur->wb.upper.length);
   1840   1.1   msaitoh 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   1841   1.1   msaitoh 		    IXGBE_RXDADV_PKTTYPE_MASK;
   1842   1.1   msaitoh 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   1843   1.1   msaitoh 
   1844   1.1   msaitoh 		/* Make sure bad packets are discarded */
   1845   1.1   msaitoh 		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
   1846   1.3   msaitoh #if __FreeBSD_version >= 1100036
   1847  1.28   msaitoh 			if (adapter->feat_en & IXGBE_FEATURE_VF)
   1848   1.4   msaitoh 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
   1849   1.3   msaitoh #endif
   1850   1.1   msaitoh 			rxr->rx_discarded.ev_count++;
   1851   1.1   msaitoh 			ixgbe_rx_discard(rxr, i);
   1852   1.1   msaitoh 			goto next_desc;
   1853   1.1   msaitoh 		}
   1854   1.1   msaitoh 
   1855  1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1856  1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1857  1.27   msaitoh 
   1858   1.1   msaitoh 		/*
    1859  1.28   msaitoh 		 * On the 82599, which supports hardware
    1860  1.28   msaitoh 		 * LRO (called HW RSC), packets need not
    1861  1.28   msaitoh 		 * be fragmented across sequential
    1862  1.28   msaitoh 		 * descriptors; rather, the next descriptor
    1863  1.28   msaitoh 		 * is indicated in bits of this descriptor.
    1864  1.28   msaitoh 		 * This also means that we might process
    1865  1.28   msaitoh 		 * more than one packet at a time, something
    1866  1.28   msaitoh 		 * that has never been true before; it
    1867  1.28   msaitoh 		 * required eliminating global chain pointers
    1868  1.28   msaitoh 		 * in favor of what we are doing here.  -jfv
   1869  1.28   msaitoh 		 */
   1870   1.1   msaitoh 		if (!eop) {
   1871   1.1   msaitoh 			/*
   1872  1.28   msaitoh 			 * Figure out the next descriptor
   1873  1.28   msaitoh 			 * of this frame.
   1874  1.28   msaitoh 			 */
   1875   1.1   msaitoh 			if (rxr->hw_rsc == TRUE) {
   1876   1.1   msaitoh 				rsc = ixgbe_rsc_count(cur);
   1877   1.1   msaitoh 				rxr->rsc_num += (rsc - 1);
   1878   1.1   msaitoh 			}
   1879   1.1   msaitoh 			if (rsc) { /* Get hardware index */
   1880  1.28   msaitoh 				nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
   1881   1.1   msaitoh 				    IXGBE_RXDADV_NEXTP_SHIFT);
   1882   1.1   msaitoh 			} else { /* Just sequential */
   1883   1.1   msaitoh 				nextp = i + 1;
   1884   1.1   msaitoh 				if (nextp == adapter->num_rx_desc)
   1885   1.1   msaitoh 					nextp = 0;
   1886   1.1   msaitoh 			}
   1887   1.1   msaitoh 			nbuf = &rxr->rx_buffers[nextp];
   1888   1.1   msaitoh 			prefetch(nbuf);
   1889   1.1   msaitoh 		}
   1890   1.1   msaitoh 		/*
   1891  1.28   msaitoh 		 * Rather than using the fmp/lmp global pointers
   1892  1.28   msaitoh 		 * we now keep the head of a packet chain in the
   1893  1.28   msaitoh 		 * buffer struct and pass this along from one
   1894  1.28   msaitoh 		 * descriptor to the next, until we get EOP.
   1895  1.28   msaitoh 		 */
   1896   1.1   msaitoh 		mp->m_len = len;
   1897   1.1   msaitoh 		/*
    1898  1.28   msaitoh 		 * See if there is a stored head from an earlier
    1899  1.28   msaitoh 		 * descriptor of this frame to chain onto.
   1900  1.28   msaitoh 		 */
   1901   1.1   msaitoh 		sendmp = rbuf->fmp;
   1902   1.1   msaitoh 		if (sendmp != NULL) {  /* secondary frag */
   1903   1.1   msaitoh 			rbuf->buf = rbuf->fmp = NULL;
   1904   1.1   msaitoh 			mp->m_flags &= ~M_PKTHDR;
   1905   1.1   msaitoh 			sendmp->m_pkthdr.len += mp->m_len;
   1906   1.1   msaitoh 		} else {
   1907   1.1   msaitoh 			/*
   1908   1.1   msaitoh 			 * Optimize.  This might be a small packet,
   1909   1.1   msaitoh 			 * maybe just a TCP ACK.  Do a fast copy that
   1910   1.1   msaitoh 			 * is cache aligned into a new mbuf, and
   1911   1.1   msaitoh 			 * leave the old mbuf+cluster for re-use.
   1912   1.1   msaitoh 			 */
   1913   1.1   msaitoh 			if (eop && len <= IXGBE_RX_COPY_LEN) {
   1914   1.1   msaitoh 				sendmp = m_gethdr(M_NOWAIT, MT_DATA);
   1915   1.1   msaitoh 				if (sendmp != NULL) {
   1916  1.28   msaitoh 					sendmp->m_data += IXGBE_RX_COPY_ALIGN;
   1917  1.28   msaitoh 					ixgbe_bcopy(mp->m_data, sendmp->m_data,
   1918  1.28   msaitoh 					    len);
   1919   1.1   msaitoh 					sendmp->m_len = len;
   1920   1.1   msaitoh 					rxr->rx_copies.ev_count++;
   1921   1.1   msaitoh 					rbuf->flags |= IXGBE_RX_COPY;
   1922   1.1   msaitoh 				}
   1923   1.1   msaitoh 			}
   1924   1.1   msaitoh 			if (sendmp == NULL) {
   1925   1.1   msaitoh 				rbuf->buf = rbuf->fmp = NULL;
   1926   1.1   msaitoh 				sendmp = mp;
   1927   1.1   msaitoh 			}
   1928   1.1   msaitoh 
   1929   1.1   msaitoh 			/* first desc of a non-ps chain */
   1930   1.1   msaitoh 			sendmp->m_flags |= M_PKTHDR;
   1931   1.1   msaitoh 			sendmp->m_pkthdr.len = mp->m_len;
   1932   1.1   msaitoh 		}
   1933   1.1   msaitoh 		++processed;
   1934   1.1   msaitoh 
   1935   1.1   msaitoh 		/* Pass the head pointer on */
   1936   1.1   msaitoh 		if (eop == 0) {
   1937   1.1   msaitoh 			nbuf->fmp = sendmp;
   1938   1.1   msaitoh 			sendmp = NULL;
   1939   1.1   msaitoh 			mp->m_next = nbuf->buf;
   1940   1.1   msaitoh 		} else { /* Sending this frame */
   1941   1.1   msaitoh 			m_set_rcvif(sendmp, ifp);
   1942  1.31   msaitoh 			++rxr->packets;
   1943   1.1   msaitoh 			rxr->rx_packets.ev_count++;
   1944   1.1   msaitoh 			/* capture data for AIM */
   1945   1.1   msaitoh 			rxr->bytes += sendmp->m_pkthdr.len;
   1946   1.1   msaitoh 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   1947   1.1   msaitoh 			/* Process vlan info */
   1948  1.28   msaitoh 			if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
   1949   1.1   msaitoh 				vtag = le16toh(cur->wb.upper.vlan);
   1950   1.1   msaitoh 			if (vtag) {
   1951  1.29  knakahar 				vlan_set_tag(sendmp, vtag);
   1952   1.1   msaitoh 			}
   1953   1.1   msaitoh 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   1954   1.1   msaitoh 				ixgbe_rx_checksum(staterr, sendmp, ptype,
   1955   1.3   msaitoh 				   &adapter->stats.pf);
   1956   1.1   msaitoh 			}
   1957   1.8   msaitoh 
   1958   1.6   msaitoh #if 0 /* FreeBSD */
   1959  1.28   msaitoh 			/*
   1960  1.28   msaitoh 			 * In case of multiqueue, we have RXCSUM.PCSD bit set
   1961  1.28   msaitoh 			 * and never cleared. This means we have RSS hash
   1962  1.28   msaitoh 			 * available to be used.
   1963  1.28   msaitoh 			 */
   1964  1.28   msaitoh 			if (adapter->num_queues > 1) {
   1965  1.28   msaitoh 				sendmp->m_pkthdr.flowid =
   1966  1.28   msaitoh 				    le32toh(cur->wb.lower.hi_dword.rss);
   1967  1.44   msaitoh 				switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
   1968  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV4:
   1969  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1970  1.28   msaitoh 					    M_HASHTYPE_RSS_IPV4);
   1971  1.28   msaitoh 					break;
   1972  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
   1973  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1974  1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV4);
   1975  1.28   msaitoh 					break;
   1976  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6:
   1977  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1978  1.28   msaitoh 					    M_HASHTYPE_RSS_IPV6);
   1979  1.28   msaitoh 					break;
   1980  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
   1981  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1982  1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV6);
   1983  1.28   msaitoh 					break;
   1984  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
   1985  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1986  1.28   msaitoh 					    M_HASHTYPE_RSS_IPV6_EX);
   1987  1.28   msaitoh 					break;
   1988  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
   1989  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1990  1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV6_EX);
   1991  1.28   msaitoh 					break;
   1992   1.6   msaitoh #if __FreeBSD_version > 1100000
   1993  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
   1994  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1995  1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV4);
   1996  1.28   msaitoh 					break;
   1997  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
   1998  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1999  1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV6);
   2000  1.28   msaitoh 					break;
   2001  1.44   msaitoh 				case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
   2002  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2003  1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV6_EX);
   2004  1.28   msaitoh 					break;
   2005  1.28   msaitoh #endif
   2006  1.44   msaitoh 				default:
   2007  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   2008  1.28   msaitoh 					    M_HASHTYPE_OPAQUE_HASH);
   2009  1.28   msaitoh 				}
   2010  1.28   msaitoh 			} else {
   2011  1.28   msaitoh 				sendmp->m_pkthdr.flowid = que->msix;
   2012   1.1   msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
   2013   1.1   msaitoh 			}
   2014   1.8   msaitoh #endif
   2015   1.1   msaitoh 		}
   2016   1.1   msaitoh next_desc:
   2017   1.1   msaitoh 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   2018   1.1   msaitoh 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2019   1.1   msaitoh 
   2020   1.1   msaitoh 		/* Advance our pointers to the next descriptor. */
   2021   1.1   msaitoh 		if (++i == rxr->num_desc)
   2022   1.1   msaitoh 			i = 0;
   2023   1.1   msaitoh 
   2024   1.1   msaitoh 		/* Now send to the stack or do LRO */
   2025   1.1   msaitoh 		if (sendmp != NULL) {
   2026   1.1   msaitoh 			rxr->next_to_check = i;
   2027  1.28   msaitoh 			IXGBE_RX_UNLOCK(rxr);
   2028   1.1   msaitoh 			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
   2029  1.28   msaitoh 			IXGBE_RX_LOCK(rxr);
   2030   1.1   msaitoh 			i = rxr->next_to_check;
   2031   1.1   msaitoh 		}
   2032   1.1   msaitoh 
    2033  1.28   msaitoh 		/* Refresh mbufs every 8 descriptors, batching the tail update */
   2034   1.1   msaitoh 		if (processed == 8) {
   2035   1.1   msaitoh 			ixgbe_refresh_mbufs(rxr, i);
   2036   1.1   msaitoh 			processed = 0;
   2037   1.1   msaitoh 		}
   2038   1.1   msaitoh 	}
   2039   1.1   msaitoh 
   2040   1.1   msaitoh 	/* Refresh any remaining buf structs */
   2041   1.1   msaitoh 	if (ixgbe_rx_unrefreshed(rxr))
   2042   1.1   msaitoh 		ixgbe_refresh_mbufs(rxr, i);
   2043   1.1   msaitoh 
   2044   1.1   msaitoh 	rxr->next_to_check = i;
   2045   1.1   msaitoh 
   2046  1.28   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   2047  1.28   msaitoh 
   2048   1.1   msaitoh #ifdef LRO
   2049   1.1   msaitoh 	/*
   2050   1.1   msaitoh 	 * Flush any outstanding LRO work
   2051   1.1   msaitoh 	 */
   2052  1.10   msaitoh 	tcp_lro_flush_all(lro);
   2053   1.1   msaitoh #endif /* LRO */
   2054   1.1   msaitoh 
   2055   1.1   msaitoh 	/*
   2056  1.28   msaitoh 	 * Still have cleaning to do?
   2057  1.28   msaitoh 	 */
   2058   1.1   msaitoh 	if ((staterr & IXGBE_RXD_STAT_DD) != 0)
   2059  1.28   msaitoh 		return (TRUE);
   2060  1.28   msaitoh 
   2061  1.28   msaitoh 	return (FALSE);
   2062  1.28   msaitoh } /* ixgbe_rxeof */
   2063   1.1   msaitoh 
   2064   1.1   msaitoh 
   2065  1.28   msaitoh /************************************************************************
   2066  1.28   msaitoh  * ixgbe_rx_checksum
   2067   1.1   msaitoh  *
   2068  1.28   msaitoh  *   Verify that the hardware indicated that the checksum is valid.
    2069  1.28   msaitoh  *   Inform the stack about the status of the checksum so that the
    2070  1.28   msaitoh  *   stack doesn't spend time verifying it again.
   2071  1.28   msaitoh  ************************************************************************/
   2072   1.1   msaitoh static void
   2073   1.1   msaitoh ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   2074   1.1   msaitoh     struct ixgbe_hw_stats *stats)
   2075   1.1   msaitoh {
   2076  1.28   msaitoh 	u16  status = (u16)staterr;
   2077  1.28   msaitoh 	u8   errors = (u8)(staterr >> 24);
   2078   1.1   msaitoh #if 0
   2079  1.28   msaitoh 	bool sctp = false;
   2080   1.1   msaitoh 
   2081   1.1   msaitoh 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   2082   1.1   msaitoh 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   2083   1.8   msaitoh 		sctp = true;
   2084   1.1   msaitoh #endif
   2085   1.1   msaitoh 
   2086   1.8   msaitoh 	/* IPv4 checksum */
   2087   1.1   msaitoh 	if (status & IXGBE_RXD_STAT_IPCS) {
   2088   1.1   msaitoh 		stats->ipcs.ev_count++;
   2089   1.1   msaitoh 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   2090   1.1   msaitoh 			/* IP Checksum Good */
   2091   1.1   msaitoh 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
   2092   1.1   msaitoh 		} else {
   2093   1.1   msaitoh 			stats->ipcs_bad.ev_count++;
   2094   1.1   msaitoh 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   2095   1.1   msaitoh 		}
   2096   1.1   msaitoh 	}
   2097   1.8   msaitoh 	/* TCP/UDP/SCTP checksum */
   2098   1.1   msaitoh 	if (status & IXGBE_RXD_STAT_L4CS) {
   2099   1.1   msaitoh 		stats->l4cs.ev_count++;
   2100   1.1   msaitoh 		int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
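                         		/*
                         		 * The driver doesn't decode ptype to pick TCP vs UDP
                         		 * here; it marks all four and the stack consults only
                         		 * the flag matching the packet's actual protocol.
                         		 */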
   2101   1.1   msaitoh 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   2102   1.1   msaitoh 			mp->m_pkthdr.csum_flags |= type;
   2103   1.1   msaitoh 		} else {
   2104   1.1   msaitoh 			stats->l4cs_bad.ev_count++;
   2105   1.1   msaitoh 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   2106   1.1   msaitoh 		}
   2107   1.1   msaitoh 	}
   2108  1.28   msaitoh } /* ixgbe_rx_checksum */
   2109   1.1   msaitoh 
   2110  1.28   msaitoh /************************************************************************
   2111  1.28   msaitoh  * ixgbe_dma_malloc
   2112  1.28   msaitoh  ************************************************************************/
   2113   1.1   msaitoh int
   2114   1.1   msaitoh ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
   2115   1.1   msaitoh 		struct ixgbe_dma_alloc *dma, const int mapflags)
   2116   1.1   msaitoh {
   2117   1.1   msaitoh 	device_t dev = adapter->dev;
   2118  1.28   msaitoh 	int      r, rsegs;
   2119   1.1   msaitoh 
	r = ixgbe_dma_tag_create(
	     /*      parent */ adapter->osdep.dmat,
	     /*   alignment */ DBA_ALIGN,
	     /*      bounds */ 0,
	     /*     maxsize */ size,
	     /*   nsegments */ 1,
	     /*  maxsegsize */ size,
	     /*       flags */ BUS_DMA_ALLOCNOW,
			       &dma->dma_tag);
	if (r != 0) {
		aprint_error_dev(dev,
		    "%s: ixgbe_dma_tag_create failed; error %d\n", __func__,
		    r);
		goto fail_0;
	}

	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size,
	    dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary,
	    &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(dev,
		    "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
		goto fail_1;
	}

	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
		    __func__, r);
		goto fail_2;
	}

	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
	if (r != 0) {
		aprint_error_dev(dev,
		    "%s: ixgbe_dmamap_create failed; error %d\n", __func__,
		    r);
		goto fail_3;
	}

	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map,
	    dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
		    __func__, r);
		goto fail_4;
	}
	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
	dma->dma_size = size;
	return 0;
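	/* Unwind the setup steps above in reverse order. */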
fail_4:
	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
fail_3:
	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
fail_2:
	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
fail_1:
	ixgbe_dma_tag_destroy(dma->dma_tag);
fail_0:
	return (r);
} /* ixgbe_dma_malloc */

/************************************************************************
 * ixgbe_dma_free
 ************************************************************************/
void
ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
{
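	/*
	 * Reverse of ixgbe_dma_malloc(): sync any outstanding DMA,
	 * unload the map, free the memory segment and destroy the tag.
	 */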
	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
	ixgbe_dma_tag_destroy(dma->dma_tag);
} /* ixgbe_dma_free */


/************************************************************************
 * ixgbe_allocate_queues
 *
 *   Allocate memory for the transmit and receive rings, and then
 *   for the descriptors associated with each; called only once
 *   at attach time.
 ************************************************************************/
int
ixgbe_allocate_queues(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	int		rsize, tsize, error = IXGBE_SUCCESS;
	int		txconf = 0, rxconf = 0;

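	/*
	 * The queue, TX ring and RX ring arrays are allocated in that
	 * order; the fail/tx_fail/rx_fail labels at the bottom release
	 * them in reverse if anything goes wrong.
	 */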
	/* First, allocate the top level queue structs */
	adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->queues == NULL) {
		aprint_error_dev(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Second, allocate the TX ring struct memory */
	adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->tx_rings == NULL) {
		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
		error = ENOMEM;
		goto tx_fail;
	}

	/* Third, allocate the RX ring struct memory */
	adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->rx_rings == NULL) {
		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto rx_fail;
	}

	/* Size of a TX descriptor ring, rounded to the DBA_ALIGN boundary */
	tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
	    DBA_ALIGN);

	/*
	 * Now set up the TX queues.  txconf counts how many have been
	 * configured, so that if anything fails midcourse the memory
	 * allocated so far can be released gracefully.
	 */
	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
		/* Set up some basics */
		txr = &adapter->tx_rings[i];
		txr->adapter = adapter;
		txr->txr_interq = NULL;
		/* In case SR-IOV is enabled, align the index properly */
#ifdef PCI_IOV
		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
		    i);
#else
		txr->me = i;
#endif
		txr->num_desc = adapter->num_tx_desc;

		/* Initialize the TX side lock */
		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);

		if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
		    BUS_DMA_NOWAIT)) {
			aprint_error_dev(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);

		/* Now allocate transmit buffers for the ring */
		if (ixgbe_allocate_transmit_buffers(txr)) {
			aprint_error_dev(dev,
			    "Critical Failure setting up transmit buffers\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			/* Allocate a buf ring */
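			/*
			 * txr_interq carries packets handed to if_transmit
			 * that could not be queued to the hardware ring
			 * immediately; the deferred transmit softint
			 * drains them later.
			 */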
			txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP);
			if (txr->txr_interq == NULL) {
				aprint_error_dev(dev,
				    "Critical Failure setting up buf ring\n");
				error = ENOMEM;
				goto err_tx_desc;
			}
		}
	}

	/*
	 * Next the RX queues...
	 */
	rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);
	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
		rxr = &adapter->rx_rings[i];
		/* Set up some basics */
		rxr->adapter = adapter;
#ifdef PCI_IOV
		/* In case SR-IOV is enabled, align the index properly */
		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
		    i);
#else
		rxr->me = i;
#endif
		rxr->num_desc = adapter->num_rx_desc;

		/* Initialize the RX side lock */
		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);

		if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
		    BUS_DMA_NOWAIT)) {
			aprint_error_dev(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);

		/* Allocate receive buffers for the ring */
		if (ixgbe_allocate_receive_buffers(rxr)) {
			aprint_error_dev(dev,
			    "Critical Failure setting up receive buffers\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
	}

	/*
	 * Finally set up the queue holding structs
	 */
	for (int i = 0; i < adapter->num_queues; i++) {
		que = &adapter->queues[i];
		que->adapter = adapter;
		que->me = i;
		que->txr = &adapter->tx_rings[i];
		que->rxr = &adapter->rx_rings[i];

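		/*
		 * dc_mtx protects disabled_count, which tracks nested
		 * disables of this queue's interrupt.
		 */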
		mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET);
		que->disabled_count = 0;
	}

	return (0);

err_rx_desc:
	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
		ixgbe_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
		ixgbe_dma_free(adapter, &txr->txdma);
	free(adapter->rx_rings, M_DEVBUF);
rx_fail:
	free(adapter->tx_rings, M_DEVBUF);
tx_fail:
	free(adapter->queues, M_DEVBUF);
fail:
	return (error);
} /* ixgbe_allocate_queues */