ix_txrx.c revision 1.29
      1  1.29  knakahar /* $NetBSD: ix_txrx.c,v 1.29 2017/09/26 07:42:06 knakahara Exp $ */
      2  1.28   msaitoh 
      3   1.1   msaitoh /******************************************************************************
      4   1.1   msaitoh 
      5  1.28   msaitoh   Copyright (c) 2001-2017, Intel Corporation
      6   1.1   msaitoh   All rights reserved.
      7  1.28   msaitoh 
      8  1.28   msaitoh   Redistribution and use in source and binary forms, with or without
      9   1.1   msaitoh   modification, are permitted provided that the following conditions are met:
     10  1.28   msaitoh 
     11  1.28   msaitoh    1. Redistributions of source code must retain the above copyright notice,
     12   1.1   msaitoh       this list of conditions and the following disclaimer.
     13  1.28   msaitoh 
     14  1.28   msaitoh    2. Redistributions in binary form must reproduce the above copyright
     15  1.28   msaitoh       notice, this list of conditions and the following disclaimer in the
     16   1.1   msaitoh       documentation and/or other materials provided with the distribution.
     17  1.28   msaitoh 
     18  1.28   msaitoh    3. Neither the name of the Intel Corporation nor the names of its
     19  1.28   msaitoh       contributors may be used to endorse or promote products derived from
     20   1.1   msaitoh       this software without specific prior written permission.
     21  1.28   msaitoh 
     22   1.1   msaitoh   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23  1.28   msaitoh   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24  1.28   msaitoh   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25  1.28   msaitoh   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26  1.28   msaitoh   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27  1.28   msaitoh   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28  1.28   msaitoh   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29  1.28   msaitoh   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30  1.28   msaitoh   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   1.1   msaitoh   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   1.1   msaitoh   POSSIBILITY OF SUCH DAMAGE.
     33   1.1   msaitoh 
     34   1.1   msaitoh ******************************************************************************/
     35  1.28   msaitoh /*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 321476 2017-07-25 14:38:30Z sbruno $*/
     36  1.28   msaitoh 
     37   1.1   msaitoh /*
     38   1.1   msaitoh  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39   1.1   msaitoh  * All rights reserved.
     40   1.1   msaitoh  *
     41   1.1   msaitoh  * This code is derived from software contributed to The NetBSD Foundation
     42   1.1   msaitoh  * by Coyote Point Systems, Inc.
     43   1.1   msaitoh  *
     44   1.1   msaitoh  * Redistribution and use in source and binary forms, with or without
     45   1.1   msaitoh  * modification, are permitted provided that the following conditions
     46   1.1   msaitoh  * are met:
     47   1.1   msaitoh  * 1. Redistributions of source code must retain the above copyright
     48   1.1   msaitoh  *    notice, this list of conditions and the following disclaimer.
     49   1.1   msaitoh  * 2. Redistributions in binary form must reproduce the above copyright
     50   1.1   msaitoh  *    notice, this list of conditions and the following disclaimer in the
     51   1.1   msaitoh  *    documentation and/or other materials provided with the distribution.
     52   1.1   msaitoh  *
     53   1.1   msaitoh  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54   1.1   msaitoh  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55   1.1   msaitoh  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56   1.1   msaitoh  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57   1.1   msaitoh  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58   1.1   msaitoh  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59   1.1   msaitoh  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60   1.1   msaitoh  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61   1.1   msaitoh  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62   1.1   msaitoh  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63   1.1   msaitoh  * POSSIBILITY OF SUCH DAMAGE.
     64   1.1   msaitoh  */
     65   1.1   msaitoh 
     66   1.8   msaitoh #include "opt_inet.h"
     67   1.8   msaitoh #include "opt_inet6.h"
     68   1.8   msaitoh 
     69   1.1   msaitoh #include "ixgbe.h"
     70   1.1   msaitoh 
     71   1.1   msaitoh /*
      72  1.28   msaitoh  * HW RSC control:
      73  1.28   msaitoh  *  This feature only works with
      74  1.28   msaitoh  *  IPv4, and only on 82599 and later.
      75  1.28   msaitoh  *  It also causes IP forwarding to
      76  1.28   msaitoh  *  fail and, unlike LRO, cannot be
      77  1.28   msaitoh  *  controlled by the stack. For all
      78  1.28   msaitoh  *  these reasons it is best left off;
      79  1.28   msaitoh  *  there is no tunable interface, and
      80  1.28   msaitoh  *  enabling it requires recompiling
      81  1.28   msaitoh  *  the driver.
     82  1.28   msaitoh  */
     83   1.1   msaitoh static bool ixgbe_rsc_enable = FALSE;
     84   1.1   msaitoh 
     85   1.3   msaitoh /*
     86  1.28   msaitoh  * For Flow Director: this is the
     87  1.28   msaitoh  * number of TX packets we sample
      88  1.28   msaitoh  * for the filter pool; this means
     89  1.28   msaitoh  * every 20th packet will be probed.
     90  1.28   msaitoh  *
     91  1.28   msaitoh  * This feature can be disabled by
     92  1.28   msaitoh  * setting this to 0.
     93  1.28   msaitoh  */
     94   1.3   msaitoh static int atr_sample_rate = 20;
     95   1.3   msaitoh 
     96  1.28   msaitoh /************************************************************************
     97   1.3   msaitoh  *  Local Function prototypes
     98  1.28   msaitoh  ************************************************************************/
     99  1.28   msaitoh static void          ixgbe_setup_transmit_ring(struct tx_ring *);
    100  1.28   msaitoh static void          ixgbe_free_transmit_buffers(struct tx_ring *);
    101  1.28   msaitoh static int           ixgbe_setup_receive_ring(struct rx_ring *);
    102  1.28   msaitoh static void          ixgbe_free_receive_buffers(struct rx_ring *);
    103  1.28   msaitoh static void          ixgbe_rx_checksum(u32, struct mbuf *, u32,
    104  1.28   msaitoh                                        struct ixgbe_hw_stats *);
    105  1.28   msaitoh static void          ixgbe_refresh_mbufs(struct rx_ring *, int);
    106  1.28   msaitoh static int           ixgbe_xmit(struct tx_ring *, struct mbuf *);
    107  1.28   msaitoh static int           ixgbe_tx_ctx_setup(struct tx_ring *,
    108  1.28   msaitoh                                         struct mbuf *, u32 *, u32 *);
    109  1.28   msaitoh static int           ixgbe_tso_setup(struct tx_ring *,
    110  1.28   msaitoh                                      struct mbuf *, u32 *, u32 *);
    111   1.1   msaitoh static __inline void ixgbe_rx_discard(struct rx_ring *, int);
    112   1.1   msaitoh static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
    113  1.28   msaitoh                                     struct mbuf *, u32);
    114  1.28   msaitoh static int           ixgbe_dma_malloc(struct adapter *, bus_size_t,
    115  1.28   msaitoh                                       struct ixgbe_dma_alloc *, int);
    116  1.28   msaitoh static void          ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
    117   1.1   msaitoh 
    118   1.1   msaitoh static void	ixgbe_setup_hw_rsc(struct rx_ring *);
    119   1.1   msaitoh 
    120  1.28   msaitoh /************************************************************************
    121  1.28   msaitoh  * ixgbe_legacy_start_locked - Transmit entry point
    122   1.1   msaitoh  *
    123  1.28   msaitoh  *   Called by the stack to initiate a transmit.
    124  1.28   msaitoh  *   The driver will remain in this routine as long as there are
    125  1.28   msaitoh  *   packets to transmit and transmit resources are available.
    126  1.28   msaitoh  *   In case resources are not available, the stack is notified
    127  1.28   msaitoh  *   and the packet is requeued.
    128  1.28   msaitoh  ************************************************************************/
    129  1.28   msaitoh int
    130  1.28   msaitoh ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
    131   1.1   msaitoh {
    132   1.1   msaitoh 	int rc;
    133   1.1   msaitoh 	struct mbuf    *m_head;
    134   1.1   msaitoh 	struct adapter *adapter = txr->adapter;
    135   1.1   msaitoh 
    136   1.1   msaitoh 	IXGBE_TX_LOCK_ASSERT(txr);
    137   1.1   msaitoh 
    138   1.1   msaitoh 	if ((ifp->if_flags & IFF_RUNNING) == 0)
    139  1.28   msaitoh 		return (ENETDOWN);
    140   1.1   msaitoh 	if (!adapter->link_active)
    141  1.28   msaitoh 		return (ENETDOWN);
    142   1.1   msaitoh 
    143   1.1   msaitoh 	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
    144   1.1   msaitoh 		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
    145   1.1   msaitoh 			break;
    146   1.1   msaitoh 
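                         		/*
                         		 * Poll first so the packet stays on the queue if
                         		 * ixgbe_xmit() returns EAGAIN; it is dequeued only
                         		 * once we know it has been consumed (sent or dropped).
                         		 */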
    147   1.1   msaitoh 		IFQ_POLL(&ifp->if_snd, m_head);
    148   1.1   msaitoh 		if (m_head == NULL)
    149   1.1   msaitoh 			break;
    150   1.1   msaitoh 
    151   1.1   msaitoh 		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
    152   1.1   msaitoh 			break;
    153   1.1   msaitoh 		}
    154   1.1   msaitoh 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
    155   1.1   msaitoh 		if (rc != 0) {
    156   1.1   msaitoh 			m_freem(m_head);
    157   1.1   msaitoh 			continue;
    158   1.1   msaitoh 		}
    159   1.1   msaitoh 
    160   1.1   msaitoh 		/* Send a copy of the frame to the BPF listener */
    161   1.1   msaitoh 		bpf_mtap(ifp, m_head);
    162   1.1   msaitoh 	}
    163   1.1   msaitoh 
    164  1.28   msaitoh 	return IXGBE_SUCCESS;
    165  1.28   msaitoh } /* ixgbe_legacy_start_locked */
    166  1.28   msaitoh 
    167  1.28   msaitoh /************************************************************************
    168  1.28   msaitoh  * ixgbe_legacy_start
    169  1.28   msaitoh  *
    170  1.28   msaitoh  *   Called by the stack, this always uses the first tx ring,
    171  1.28   msaitoh  *   and should not be used with multiqueue tx enabled.
    172  1.28   msaitoh  ************************************************************************/
    173   1.1   msaitoh void
    174  1.28   msaitoh ixgbe_legacy_start(struct ifnet *ifp)
    175   1.1   msaitoh {
    176   1.1   msaitoh 	struct adapter *adapter = ifp->if_softc;
    177  1.28   msaitoh 	struct tx_ring *txr = adapter->tx_rings;
    178   1.1   msaitoh 
    179   1.1   msaitoh 	if (ifp->if_flags & IFF_RUNNING) {
    180   1.1   msaitoh 		IXGBE_TX_LOCK(txr);
    181  1.28   msaitoh 		ixgbe_legacy_start_locked(ifp, txr);
    182   1.1   msaitoh 		IXGBE_TX_UNLOCK(txr);
    183   1.1   msaitoh 	}
    184  1.28   msaitoh } /* ixgbe_legacy_start */
    185   1.1   msaitoh 
    186  1.28   msaitoh /************************************************************************
    187  1.28   msaitoh  * ixgbe_mq_start - Multiqueue Transmit Entry Point
    188  1.28   msaitoh  *
    189  1.28   msaitoh  *   (if_transmit function)
    190  1.28   msaitoh  ************************************************************************/
    191   1.1   msaitoh int
    192   1.1   msaitoh ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
    193   1.1   msaitoh {
    194   1.1   msaitoh 	struct adapter	*adapter = ifp->if_softc;
    195   1.1   msaitoh 	struct tx_ring	*txr;
    196   1.1   msaitoh 	int 		i, err = 0;
    197  1.28   msaitoh #ifdef RSS
    198   1.1   msaitoh 	uint32_t bucket_id;
    199   1.1   msaitoh #endif
    200   1.1   msaitoh 
    201   1.1   msaitoh 	/*
    202   1.1   msaitoh 	 * When doing RSS, map it to the same outbound queue
    203   1.1   msaitoh 	 * as the incoming flow would be mapped to.
    204   1.1   msaitoh 	 *
     205   1.1   msaitoh 	 * If everything is set up correctly, it should be the
     206   1.1   msaitoh 	 * same bucket the current CPU is assigned to.
    207   1.1   msaitoh 	 */
    208  1.28   msaitoh #ifdef RSS
    209   1.1   msaitoh 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
    210  1.28   msaitoh 		if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
    211  1.28   msaitoh 		    (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
    212  1.28   msaitoh 		    &bucket_id) == 0)) {
    213   1.1   msaitoh 			i = bucket_id % adapter->num_queues;
    214   1.8   msaitoh #ifdef IXGBE_DEBUG
    215   1.8   msaitoh 			if (bucket_id > adapter->num_queues)
    216  1.28   msaitoh 				if_printf(ifp,
    217  1.28   msaitoh 				    "bucket_id (%d) > num_queues (%d)\n",
    218  1.28   msaitoh 				    bucket_id, adapter->num_queues);
    219   1.8   msaitoh #endif
    220   1.8   msaitoh 		} else
    221   1.1   msaitoh 			i = m->m_pkthdr.flowid % adapter->num_queues;
    222   1.3   msaitoh 	} else
     223  1.28   msaitoh #endif /* RSS */
    224  1.18   msaitoh 		i = cpu_index(curcpu()) % adapter->num_queues;
    225   1.3   msaitoh 
    226   1.3   msaitoh 	/* Check for a hung queue and pick alternative */
    227   1.3   msaitoh 	if (((1 << i) & adapter->active_queues) == 0)
    228  1.18   msaitoh 		i = ffs64(adapter->active_queues);
    229   1.1   msaitoh 
    230   1.1   msaitoh 	txr = &adapter->tx_rings[i];
    231   1.1   msaitoh 
    232  1.18   msaitoh 	err = pcq_put(txr->txr_interq, m);
    233  1.18   msaitoh 	if (err == false) {
    234  1.18   msaitoh 		m_freem(m);
    235  1.18   msaitoh 		txr->pcq_drops.ev_count++;
     236   1.1   msaitoh 		return (ENOBUFS);
    237  1.18   msaitoh 	}
    238   1.1   msaitoh 	if (IXGBE_TX_TRYLOCK(txr)) {
    239   1.1   msaitoh 		ixgbe_mq_start_locked(ifp, txr);
    240   1.1   msaitoh 		IXGBE_TX_UNLOCK(txr);
    241   1.1   msaitoh 	} else
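                         		/*
                         		 * Another context holds the TX lock; let the
                         		 * per-ring softint drain the queue instead.
                         		 */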
    242  1.18   msaitoh 		softint_schedule(txr->txr_si);
    243   1.1   msaitoh 
    244   1.1   msaitoh 	return (0);
    245  1.28   msaitoh } /* ixgbe_mq_start */
    246   1.1   msaitoh 
    247  1.28   msaitoh /************************************************************************
    248  1.28   msaitoh  * ixgbe_mq_start_locked
    249  1.28   msaitoh  ************************************************************************/
    250   1.1   msaitoh int
    251   1.1   msaitoh ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
    252   1.1   msaitoh {
    253  1.28   msaitoh 	struct mbuf    *next;
    254  1.28   msaitoh 	int            enqueued = 0, err = 0;
    255   1.1   msaitoh 
    256  1.28   msaitoh 	if ((ifp->if_flags & IFF_RUNNING) == 0)
    257  1.28   msaitoh 		return (ENETDOWN);
    258  1.28   msaitoh 	if (txr->adapter->link_active == 0)
    259   1.1   msaitoh 		return (ENETDOWN);
    260   1.1   msaitoh 
    261   1.1   msaitoh 	/* Process the queue */
    262  1.18   msaitoh 	while ((next = pcq_get(txr->txr_interq)) != NULL) {
    263  1.18   msaitoh 		if ((err = ixgbe_xmit(txr, next)) != 0) {
    264  1.18   msaitoh 			m_freem(next);
    265  1.18   msaitoh 			/* All errors are counted in ixgbe_xmit() */
    266   1.1   msaitoh 			break;
    267   1.1   msaitoh 		}
    268   1.1   msaitoh 		enqueued++;
    269   1.3   msaitoh #if __FreeBSD_version >= 1100036
    270   1.4   msaitoh 		/*
     271   1.4   msaitoh 		 * Since we're looking at the tx ring, we can check
     272   1.4   msaitoh 		 * whether we're a VF by examining the adapter's
     273   1.4   msaitoh 		 * VF feature flag.
    274   1.4   msaitoh 		 */
    275  1.28   msaitoh 		if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
    276  1.28   msaitoh 		    (next->m_flags & M_MCAST))
    277   1.3   msaitoh 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
    278   1.3   msaitoh #endif
    279   1.1   msaitoh 		/* Send a copy of the frame to the BPF listener */
    280   1.1   msaitoh 		bpf_mtap(ifp, next);
    281   1.1   msaitoh 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    282   1.1   msaitoh 			break;
    283   1.1   msaitoh 	}
    284   1.1   msaitoh 
    285  1.28   msaitoh 	if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
    286   1.1   msaitoh 		ixgbe_txeof(txr);
    287   1.1   msaitoh 
    288   1.1   msaitoh 	return (err);
    289  1.28   msaitoh } /* ixgbe_mq_start_locked */
    290   1.1   msaitoh 
    291  1.28   msaitoh /************************************************************************
    292  1.28   msaitoh  * ixgbe_deferred_mq_start
    293  1.28   msaitoh  *
    294  1.28   msaitoh  *   Called from a taskqueue to drain queued transmit packets.
    295  1.28   msaitoh  ************************************************************************/
    296   1.1   msaitoh void
    297  1.18   msaitoh ixgbe_deferred_mq_start(void *arg)
    298   1.1   msaitoh {
    299   1.1   msaitoh 	struct tx_ring *txr = arg;
    300   1.1   msaitoh 	struct adapter *adapter = txr->adapter;
    301  1.28   msaitoh 	struct ifnet   *ifp = adapter->ifp;
    302   1.1   msaitoh 
    303   1.1   msaitoh 	IXGBE_TX_LOCK(txr);
    304  1.18   msaitoh 	if (pcq_peek(txr->txr_interq) != NULL)
    305   1.1   msaitoh 		ixgbe_mq_start_locked(ifp, txr);
    306   1.1   msaitoh 	IXGBE_TX_UNLOCK(txr);
    307  1.28   msaitoh } /* ixgbe_deferred_mq_start */
    308   1.3   msaitoh 
    309  1.28   msaitoh /************************************************************************
    310  1.28   msaitoh  * ixgbe_xmit
    311   1.1   msaitoh  *
    312  1.28   msaitoh  *   Maps the mbufs to tx descriptors, allowing the
    313  1.28   msaitoh  *   TX engine to transmit the packets.
    314   1.1   msaitoh  *
    315  1.28   msaitoh  *   Return 0 on success, positive on failure
    316  1.28   msaitoh  ************************************************************************/
    317   1.1   msaitoh static int
    318   1.1   msaitoh ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
    319   1.1   msaitoh {
    320  1.28   msaitoh 	struct adapter          *adapter = txr->adapter;
    321  1.28   msaitoh 	struct ixgbe_tx_buf     *txbuf;
    322   1.1   msaitoh 	union ixgbe_adv_tx_desc *txd = NULL;
    323  1.28   msaitoh 	struct ifnet	        *ifp = adapter->ifp;
    324  1.28   msaitoh 	int                     i, j, error;
    325  1.28   msaitoh 	int                     first;
    326  1.28   msaitoh 	u32                     olinfo_status = 0, cmd_type_len;
    327  1.28   msaitoh 	bool                    remap = TRUE;
    328  1.28   msaitoh 	bus_dmamap_t            map;
    329   1.1   msaitoh 
    330   1.1   msaitoh 	/* Basic descriptor defines */
    331  1.28   msaitoh 	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
    332   1.1   msaitoh 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
    333   1.1   msaitoh 
    334  1.29  knakahar 	if (vlan_has_tag(m_head))
    335  1.28   msaitoh 		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
    336   1.1   msaitoh 
    337  1.28   msaitoh 	/*
    338  1.28   msaitoh 	 * Important to capture the first descriptor
    339  1.28   msaitoh 	 * used because it will contain the index of
    340  1.28   msaitoh 	 * the one we tell the hardware to report back
    341  1.28   msaitoh 	 */
    342  1.28   msaitoh 	first = txr->next_avail_desc;
    343   1.1   msaitoh 	txbuf = &txr->tx_buffers[first];
    344   1.1   msaitoh 	map = txbuf->map;
    345   1.1   msaitoh 
    346   1.1   msaitoh 	/*
    347   1.1   msaitoh 	 * Map the packet for DMA.
    348   1.1   msaitoh 	 */
    349  1.22   msaitoh retry:
    350  1.28   msaitoh 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head,
    351  1.28   msaitoh 	    BUS_DMA_NOWAIT);
    352   1.1   msaitoh 
    353   1.1   msaitoh 	if (__predict_false(error)) {
    354  1.22   msaitoh 		struct mbuf *m;
    355   1.1   msaitoh 
    356   1.1   msaitoh 		switch (error) {
    357   1.1   msaitoh 		case EAGAIN:
    358   1.1   msaitoh 			adapter->eagain_tx_dma_setup.ev_count++;
    359   1.1   msaitoh 			return EAGAIN;
    360   1.1   msaitoh 		case ENOMEM:
    361   1.1   msaitoh 			adapter->enomem_tx_dma_setup.ev_count++;
    362   1.1   msaitoh 			return EAGAIN;
    363   1.1   msaitoh 		case EFBIG:
    364  1.22   msaitoh 			/* Try it again? - one try */
    365  1.22   msaitoh 			if (remap == TRUE) {
    366  1.22   msaitoh 				remap = FALSE;
    367  1.22   msaitoh 				/*
    368  1.22   msaitoh 				 * XXX: m_defrag will choke on
    369  1.22   msaitoh 				 * non-MCLBYTES-sized clusters
    370  1.22   msaitoh 				 */
    371  1.22   msaitoh 				adapter->efbig_tx_dma_setup.ev_count++;
    372  1.22   msaitoh 				m = m_defrag(m_head, M_NOWAIT);
    373  1.22   msaitoh 				if (m == NULL) {
    374  1.22   msaitoh 					adapter->mbuf_defrag_failed.ev_count++;
    375  1.22   msaitoh 					return ENOBUFS;
    376  1.22   msaitoh 				}
    377  1.22   msaitoh 				m_head = m;
    378  1.22   msaitoh 				goto retry;
    379  1.22   msaitoh 			} else {
    380  1.22   msaitoh 				adapter->efbig2_tx_dma_setup.ev_count++;
    381  1.22   msaitoh 				return error;
    382  1.22   msaitoh 			}
    383   1.1   msaitoh 		case EINVAL:
    384   1.1   msaitoh 			adapter->einval_tx_dma_setup.ev_count++;
    385   1.1   msaitoh 			return error;
    386   1.1   msaitoh 		default:
    387   1.1   msaitoh 			adapter->other_tx_dma_setup.ev_count++;
    388   1.1   msaitoh 			return error;
    389   1.1   msaitoh 		}
    390   1.1   msaitoh 	}
    391   1.1   msaitoh 
    392   1.1   msaitoh 	/* Make certain there are enough descriptors */
    393  1.10   msaitoh 	if (txr->tx_avail < (map->dm_nsegs + 2)) {
    394   1.1   msaitoh 		txr->no_desc_avail.ev_count++;
    395   1.1   msaitoh 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
    396   1.1   msaitoh 		return EAGAIN;
    397   1.1   msaitoh 	}
    398   1.1   msaitoh 
    399   1.1   msaitoh 	/*
     400   1.4   msaitoh 	 * Set up the appropriate offload context;
     401   1.4   msaitoh 	 * this will consume the first descriptor.
    402   1.4   msaitoh 	 */
    403   1.1   msaitoh 	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
    404   1.1   msaitoh 	if (__predict_false(error)) {
    405   1.1   msaitoh 		return (error);
    406   1.1   msaitoh 	}
    407   1.1   msaitoh 
    408   1.1   msaitoh 	/* Do the flow director magic */
    409  1.28   msaitoh 	if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
    410  1.28   msaitoh 	    (txr->atr_sample) && (!adapter->fdir_reinit)) {
    411   1.1   msaitoh 		++txr->atr_count;
    412   1.1   msaitoh 		if (txr->atr_count >= atr_sample_rate) {
    413   1.1   msaitoh 			ixgbe_atr(txr, m_head);
    414   1.1   msaitoh 			txr->atr_count = 0;
    415   1.1   msaitoh 		}
    416   1.1   msaitoh 	}
    417   1.1   msaitoh 
    418   1.8   msaitoh 	olinfo_status |= IXGBE_ADVTXD_CC;
    419   1.1   msaitoh 	i = txr->next_avail_desc;
    420   1.1   msaitoh 	for (j = 0; j < map->dm_nsegs; j++) {
    421   1.1   msaitoh 		bus_size_t seglen;
    422   1.1   msaitoh 		bus_addr_t segaddr;
    423   1.1   msaitoh 
    424   1.1   msaitoh 		txbuf = &txr->tx_buffers[i];
    425   1.1   msaitoh 		txd = &txr->tx_base[i];
    426   1.1   msaitoh 		seglen = map->dm_segs[j].ds_len;
    427   1.1   msaitoh 		segaddr = htole64(map->dm_segs[j].ds_addr);
    428   1.1   msaitoh 
    429   1.1   msaitoh 		txd->read.buffer_addr = segaddr;
    430   1.1   msaitoh 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
    431  1.28   msaitoh 		    cmd_type_len | seglen);
    432   1.1   msaitoh 		txd->read.olinfo_status = htole32(olinfo_status);
    433   1.1   msaitoh 
    434   1.1   msaitoh 		if (++i == txr->num_desc)
    435   1.1   msaitoh 			i = 0;
    436   1.1   msaitoh 	}
    437   1.1   msaitoh 
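                         	/*
                         	 * Close out the packet: the last descriptor gets EOP
                         	 * (end of packet) and RS (report status) so the hardware
                         	 * writes back completion status for ixgbe_txeof() to find.
                         	 */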
    438  1.28   msaitoh 	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
    439   1.1   msaitoh 	txr->tx_avail -= map->dm_nsegs;
    440   1.1   msaitoh 	txr->next_avail_desc = i;
    441   1.1   msaitoh 
    442   1.1   msaitoh 	txbuf->m_head = m_head;
    443   1.1   msaitoh 	/*
     444   1.4   msaitoh 	 * Here we swap the map so the last descriptor,
     445   1.4   msaitoh 	 * which gets the completion interrupt, has the
     446   1.4   msaitoh 	 * real map, and the first descriptor gets the
     447   1.4   msaitoh 	 * unused map from the last descriptor.
    448   1.4   msaitoh 	 */
    449   1.1   msaitoh 	txr->tx_buffers[first].map = txbuf->map;
    450   1.1   msaitoh 	txbuf->map = map;
    451   1.1   msaitoh 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
    452   1.1   msaitoh 	    BUS_DMASYNC_PREWRITE);
    453   1.1   msaitoh 
    454  1.28   msaitoh 	/* Set the EOP descriptor that will be marked done */
    455  1.28   msaitoh 	txbuf = &txr->tx_buffers[first];
    456   1.1   msaitoh 	txbuf->eop = txd;
    457   1.1   msaitoh 
    458  1.28   msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
    459   1.1   msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    460   1.1   msaitoh 	/*
    461   1.1   msaitoh 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
    462   1.1   msaitoh 	 * hardware that this frame is available to transmit.
    463   1.1   msaitoh 	 */
    464   1.1   msaitoh 	++txr->total_packets.ev_count;
    465   1.3   msaitoh 	IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);
    466   1.3   msaitoh 
    467  1.23   msaitoh 	/*
    468  1.23   msaitoh 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
    469  1.23   msaitoh 	 */
    470  1.23   msaitoh 	ifp->if_obytes += m_head->m_pkthdr.len;
    471  1.23   msaitoh 	if (m_head->m_flags & M_MCAST)
    472  1.23   msaitoh 		ifp->if_omcasts++;
    473  1.23   msaitoh 
    474   1.3   msaitoh 	/* Mark queue as having work */
    475   1.3   msaitoh 	if (txr->busy == 0)
    476   1.3   msaitoh 		txr->busy = 1;
    477   1.1   msaitoh 
    478  1.28   msaitoh 	return (0);
    479  1.28   msaitoh } /* ixgbe_xmit */
    480   1.1   msaitoh 
    481  1.16   msaitoh 
    482  1.28   msaitoh /************************************************************************
    483  1.28   msaitoh  * ixgbe_allocate_transmit_buffers
    484   1.1   msaitoh  *
    485  1.28   msaitoh  *   Allocate memory for tx_buffer structures. The tx_buffer stores all
    486  1.28   msaitoh  *   the information needed to transmit a packet on the wire. This is
     487  1.28   msaitoh  *   called only once at attach; setup is done on every reset.
    488  1.28   msaitoh  ************************************************************************/
    489  1.28   msaitoh static int
    490   1.1   msaitoh ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
    491   1.1   msaitoh {
    492  1.28   msaitoh 	struct adapter      *adapter = txr->adapter;
    493  1.28   msaitoh 	device_t            dev = adapter->dev;
    494   1.1   msaitoh 	struct ixgbe_tx_buf *txbuf;
    495  1.28   msaitoh 	int                 error, i;
    496   1.1   msaitoh 
    497   1.1   msaitoh 	/*
    498   1.1   msaitoh 	 * Setup DMA descriptor areas.
    499   1.1   msaitoh 	 */
    500  1.28   msaitoh 	error = ixgbe_dma_tag_create(
    501  1.28   msaitoh 	         /*      parent */ adapter->osdep.dmat,
    502  1.28   msaitoh 	         /*   alignment */ 1,
    503  1.28   msaitoh 	         /*      bounds */ 0,
    504  1.28   msaitoh 	         /*     maxsize */ IXGBE_TSO_SIZE,
    505  1.28   msaitoh 	         /*   nsegments */ adapter->num_segs,
    506  1.28   msaitoh 	         /*  maxsegsize */ PAGE_SIZE,
    507  1.28   msaitoh 	         /*       flags */ 0,
    508  1.28   msaitoh 	                           &txr->txtag);
    509  1.28   msaitoh 	if (error != 0) {
    510   1.1   msaitoh 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
    511   1.1   msaitoh 		goto fail;
    512   1.1   msaitoh 	}
    513   1.1   msaitoh 
    514  1.28   msaitoh 	txr->tx_buffers =
    515   1.1   msaitoh 	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
    516  1.28   msaitoh 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
    517  1.28   msaitoh 	if (txr->tx_buffers == NULL) {
    518   1.1   msaitoh 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
    519   1.1   msaitoh 		error = ENOMEM;
    520   1.1   msaitoh 		goto fail;
    521   1.1   msaitoh 	}
    522   1.1   msaitoh 
    523  1.28   msaitoh 	/* Create the descriptor buffer dma maps */
    524   1.1   msaitoh 	txbuf = txr->tx_buffers;
    525   1.1   msaitoh 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
    526   1.1   msaitoh 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
    527   1.1   msaitoh 		if (error != 0) {
    528   1.1   msaitoh 			aprint_error_dev(dev,
    529   1.1   msaitoh 			    "Unable to create TX DMA map (%d)\n", error);
    530   1.1   msaitoh 			goto fail;
    531   1.1   msaitoh 		}
    532   1.1   msaitoh 	}
    533   1.1   msaitoh 
    534   1.1   msaitoh 	return 0;
    535   1.1   msaitoh fail:
     536   1.1   msaitoh 	/* We free all; this handles the case where we are in the middle */
    537  1.15   msaitoh #if 0 /* XXX was FreeBSD */
    538   1.1   msaitoh 	ixgbe_free_transmit_structures(adapter);
    539  1.15   msaitoh #else
    540  1.15   msaitoh 	ixgbe_free_transmit_buffers(txr);
    541  1.15   msaitoh #endif
    542   1.1   msaitoh 	return (error);
    543  1.28   msaitoh } /* ixgbe_allocate_transmit_buffers */
    544   1.1   msaitoh 
    545  1.28   msaitoh /************************************************************************
    546  1.28   msaitoh  * ixgbe_setup_transmit_ring - Initialize a transmit ring.
    547  1.28   msaitoh  ************************************************************************/
    548   1.1   msaitoh static void
    549   1.1   msaitoh ixgbe_setup_transmit_ring(struct tx_ring *txr)
    550   1.1   msaitoh {
    551  1.28   msaitoh 	struct adapter        *adapter = txr->adapter;
    552  1.28   msaitoh 	struct ixgbe_tx_buf   *txbuf;
    553   1.1   msaitoh #ifdef DEV_NETMAP
    554   1.1   msaitoh 	struct netmap_adapter *na = NA(adapter->ifp);
    555  1.28   msaitoh 	struct netmap_slot    *slot;
    556   1.1   msaitoh #endif /* DEV_NETMAP */
    557   1.1   msaitoh 
    558   1.1   msaitoh 	/* Clear the old ring contents */
    559   1.1   msaitoh 	IXGBE_TX_LOCK(txr);
    560  1.28   msaitoh 
    561   1.1   msaitoh #ifdef DEV_NETMAP
    562  1.28   msaitoh 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
    563  1.28   msaitoh 		/*
    564  1.28   msaitoh 		 * (under lock): if in netmap mode, do some consistency
    565  1.28   msaitoh 		 * checks and set slot to entry 0 of the netmap ring.
    566  1.28   msaitoh 		 */
    567  1.28   msaitoh 		slot = netmap_reset(na, NR_TX, txr->me, 0);
    568  1.28   msaitoh 	}
    569   1.1   msaitoh #endif /* DEV_NETMAP */
    570  1.28   msaitoh 
    571   1.1   msaitoh 	bzero((void *)txr->tx_base,
    572  1.28   msaitoh 	    (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
    573   1.1   msaitoh 	/* Reset indices */
    574   1.1   msaitoh 	txr->next_avail_desc = 0;
    575   1.1   msaitoh 	txr->next_to_clean = 0;
    576   1.1   msaitoh 
    577   1.1   msaitoh 	/* Free any existing tx buffers. */
    578  1.28   msaitoh 	txbuf = txr->tx_buffers;
    579   1.5   msaitoh 	for (int i = 0; i < txr->num_desc; i++, txbuf++) {
    580   1.1   msaitoh 		if (txbuf->m_head != NULL) {
    581   1.1   msaitoh 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
    582   1.1   msaitoh 			    0, txbuf->m_head->m_pkthdr.len,
    583   1.1   msaitoh 			    BUS_DMASYNC_POSTWRITE);
    584   1.1   msaitoh 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
    585   1.1   msaitoh 			m_freem(txbuf->m_head);
    586   1.1   msaitoh 			txbuf->m_head = NULL;
    587   1.1   msaitoh 		}
    588  1.28   msaitoh 
    589   1.1   msaitoh #ifdef DEV_NETMAP
    590   1.1   msaitoh 		/*
    591   1.1   msaitoh 		 * In netmap mode, set the map for the packet buffer.
    592   1.1   msaitoh 		 * NOTE: Some drivers (not this one) also need to set
    593   1.1   msaitoh 		 * the physical buffer address in the NIC ring.
    594   1.1   msaitoh 		 * Slots in the netmap ring (indexed by "si") are
    595   1.1   msaitoh 		 * kring->nkr_hwofs positions "ahead" wrt the
    596   1.1   msaitoh 		 * corresponding slot in the NIC ring. In some drivers
    597   1.1   msaitoh 		 * (not here) nkr_hwofs can be negative. Function
    598   1.1   msaitoh 		 * netmap_idx_n2k() handles wraparounds properly.
    599   1.1   msaitoh 		 */
    600  1.28   msaitoh 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
    601   1.1   msaitoh 			int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
    602   1.5   msaitoh 			netmap_load_map(na, txr->txtag,
    603   1.5   msaitoh 			    txbuf->map, NMB(na, slot + si));
    604   1.1   msaitoh 		}
    605   1.1   msaitoh #endif /* DEV_NETMAP */
    606  1.28   msaitoh 
    607   1.1   msaitoh 		/* Clear the EOP descriptor pointer */
    608   1.1   msaitoh 		txbuf->eop = NULL;
    609  1.28   msaitoh 	}
    610   1.1   msaitoh 
    611   1.1   msaitoh 	/* Set the rate at which we sample packets */
    612  1.28   msaitoh 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
    613   1.1   msaitoh 		txr->atr_sample = atr_sample_rate;
    614   1.1   msaitoh 
    615   1.1   msaitoh 	/* Set number of descriptors available */
    616   1.1   msaitoh 	txr->tx_avail = adapter->num_tx_desc;
    617   1.1   msaitoh 
    618   1.1   msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
    619   1.1   msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    620   1.1   msaitoh 	IXGBE_TX_UNLOCK(txr);
    621  1.28   msaitoh } /* ixgbe_setup_transmit_ring */
    622   1.1   msaitoh 
    623  1.28   msaitoh /************************************************************************
    624  1.28   msaitoh  * ixgbe_setup_transmit_structures - Initialize all transmit rings.
    625  1.28   msaitoh  ************************************************************************/
    626   1.1   msaitoh int
    627   1.1   msaitoh ixgbe_setup_transmit_structures(struct adapter *adapter)
    628   1.1   msaitoh {
    629   1.1   msaitoh 	struct tx_ring *txr = adapter->tx_rings;
    630   1.1   msaitoh 
    631   1.1   msaitoh 	for (int i = 0; i < adapter->num_queues; i++, txr++)
    632   1.1   msaitoh 		ixgbe_setup_transmit_ring(txr);
    633   1.1   msaitoh 
    634   1.1   msaitoh 	return (0);
    635  1.28   msaitoh } /* ixgbe_setup_transmit_structures */
    636   1.1   msaitoh 
    637  1.28   msaitoh /************************************************************************
    638  1.28   msaitoh  * ixgbe_free_transmit_structures - Free all transmit rings.
    639  1.28   msaitoh  ************************************************************************/
    640   1.1   msaitoh void
    641   1.1   msaitoh ixgbe_free_transmit_structures(struct adapter *adapter)
    642   1.1   msaitoh {
    643   1.1   msaitoh 	struct tx_ring *txr = adapter->tx_rings;
    644   1.1   msaitoh 
    645   1.1   msaitoh 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    646   1.1   msaitoh 		ixgbe_free_transmit_buffers(txr);
    647   1.1   msaitoh 		ixgbe_dma_free(adapter, &txr->txdma);
    648   1.1   msaitoh 		IXGBE_TX_LOCK_DESTROY(txr);
    649   1.1   msaitoh 	}
    650   1.1   msaitoh 	free(adapter->tx_rings, M_DEVBUF);
    651  1.28   msaitoh } /* ixgbe_free_transmit_structures */
    652   1.1   msaitoh 
    653  1.28   msaitoh /************************************************************************
    654  1.28   msaitoh  * ixgbe_free_transmit_buffers
    655   1.1   msaitoh  *
    656  1.28   msaitoh  *   Free transmit ring related data structures.
    657  1.28   msaitoh  ************************************************************************/
    658   1.1   msaitoh static void
    659   1.1   msaitoh ixgbe_free_transmit_buffers(struct tx_ring *txr)
    660   1.1   msaitoh {
    661  1.28   msaitoh 	struct adapter      *adapter = txr->adapter;
    662   1.1   msaitoh 	struct ixgbe_tx_buf *tx_buffer;
    663  1.28   msaitoh 	int                 i;
    664   1.1   msaitoh 
    665  1.14   msaitoh 	INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");
    666   1.1   msaitoh 
    667   1.1   msaitoh 	if (txr->tx_buffers == NULL)
    668   1.1   msaitoh 		return;
    669   1.1   msaitoh 
    670   1.1   msaitoh 	tx_buffer = txr->tx_buffers;
    671   1.1   msaitoh 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
    672   1.1   msaitoh 		if (tx_buffer->m_head != NULL) {
    673   1.1   msaitoh 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
    674   1.1   msaitoh 			    0, tx_buffer->m_head->m_pkthdr.len,
    675   1.1   msaitoh 			    BUS_DMASYNC_POSTWRITE);
    676   1.1   msaitoh 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
    677   1.1   msaitoh 			m_freem(tx_buffer->m_head);
    678   1.1   msaitoh 			tx_buffer->m_head = NULL;
    679   1.1   msaitoh 			if (tx_buffer->map != NULL) {
    680   1.1   msaitoh 				ixgbe_dmamap_destroy(txr->txtag,
    681   1.1   msaitoh 				    tx_buffer->map);
    682   1.1   msaitoh 				tx_buffer->map = NULL;
    683   1.1   msaitoh 			}
    684   1.1   msaitoh 		} else if (tx_buffer->map != NULL) {
    685   1.1   msaitoh 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
    686   1.1   msaitoh 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
    687   1.1   msaitoh 			tx_buffer->map = NULL;
    688   1.1   msaitoh 		}
    689   1.1   msaitoh 	}
    690  1.18   msaitoh 	if (txr->txr_interq != NULL) {
    691  1.18   msaitoh 		struct mbuf *m;
    692  1.18   msaitoh 
    693  1.18   msaitoh 		while ((m = pcq_get(txr->txr_interq)) != NULL)
    694  1.18   msaitoh 			m_freem(m);
    695  1.18   msaitoh 		pcq_destroy(txr->txr_interq);
    696  1.18   msaitoh 	}
    697   1.1   msaitoh 	if (txr->tx_buffers != NULL) {
    698   1.1   msaitoh 		free(txr->tx_buffers, M_DEVBUF);
    699   1.1   msaitoh 		txr->tx_buffers = NULL;
    700   1.1   msaitoh 	}
    701   1.1   msaitoh 	if (txr->txtag != NULL) {
    702   1.1   msaitoh 		ixgbe_dma_tag_destroy(txr->txtag);
    703   1.1   msaitoh 		txr->txtag = NULL;
    704   1.1   msaitoh 	}
    705  1.28   msaitoh } /* ixgbe_free_transmit_buffers */
    706   1.1   msaitoh 
    707  1.28   msaitoh /************************************************************************
    708  1.28   msaitoh  * ixgbe_tx_ctx_setup
    709   1.1   msaitoh  *
    710  1.28   msaitoh  *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
    711  1.28   msaitoh  ************************************************************************/
    712   1.1   msaitoh static int
    713   1.1   msaitoh ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    714   1.1   msaitoh     u32 *cmd_type_len, u32 *olinfo_status)
    715   1.1   msaitoh {
    716  1.28   msaitoh 	struct adapter                   *adapter = txr->adapter;
    717   1.1   msaitoh 	struct ixgbe_adv_tx_context_desc *TXD;
    718  1.28   msaitoh 	struct ether_vlan_header         *eh;
    719   1.8   msaitoh #ifdef INET
    720  1.28   msaitoh 	struct ip                        *ip;
    721   1.8   msaitoh #endif
    722   1.8   msaitoh #ifdef INET6
    723  1.28   msaitoh 	struct ip6_hdr                   *ip6;
    724   1.8   msaitoh #endif
    725  1.28   msaitoh 	int                              ehdrlen, ip_hlen = 0;
    726  1.28   msaitoh 	int                              offload = TRUE;
    727  1.28   msaitoh 	int                              ctxd = txr->next_avail_desc;
    728  1.28   msaitoh 	u32                              vlan_macip_lens = 0;
    729  1.28   msaitoh 	u32                              type_tucmd_mlhl = 0;
    730  1.28   msaitoh 	u16                              vtag = 0;
    731  1.28   msaitoh 	u16                              etype;
    732  1.28   msaitoh 	u8                               ipproto = 0;
    733  1.28   msaitoh 	char                             *l3d;
    734   1.8   msaitoh 
    735   1.1   msaitoh 
    736   1.1   msaitoh 	/* First check if TSO is to be used */
    737  1.28   msaitoh 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
    738  1.17   msaitoh 		int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);
    739  1.17   msaitoh 
    740  1.21   msaitoh 		if (rv != 0)
    741  1.17   msaitoh 			++adapter->tso_err.ev_count;
    742  1.21   msaitoh 		return rv;
    743  1.17   msaitoh 	}
    744   1.1   msaitoh 
    745   1.1   msaitoh 	if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
    746   1.1   msaitoh 		offload = FALSE;
    747   1.1   msaitoh 
    748   1.1   msaitoh 	/* Indicate the whole packet as payload when not doing TSO */
    749  1.28   msaitoh 	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
    750   1.1   msaitoh 
    751   1.1   msaitoh 	/* Now ready a context descriptor */
    752  1.28   msaitoh 	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
    753   1.1   msaitoh 
    754   1.1   msaitoh 	/*
    755  1.28   msaitoh 	 * In advanced descriptors the vlan tag must
    756  1.28   msaitoh 	 * be placed into the context descriptor. Hence
    757  1.28   msaitoh 	 * we need to make one even if not doing offloads.
    758  1.28   msaitoh 	 */
    759  1.29  knakahar 	if (vlan_has_tag(mp)) {
    760  1.29  knakahar 		vtag = htole16(vlan_get_tag(mp));
    761   1.1   msaitoh 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
    762  1.28   msaitoh 	} else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
    763  1.28   msaitoh 	           (offload == FALSE))
    764   1.4   msaitoh 		return (0);
    765   1.1   msaitoh 
    766   1.1   msaitoh 	/*
    767   1.1   msaitoh 	 * Determine where frame payload starts.
    768   1.1   msaitoh 	 * Jump over vlan headers if already present,
    769   1.1   msaitoh 	 * helpful for QinQ too.
    770   1.1   msaitoh 	 */
    771   1.1   msaitoh 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
    772   1.1   msaitoh 	eh = mtod(mp, struct ether_vlan_header *);
    773   1.1   msaitoh 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
    774   1.1   msaitoh 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
    775   1.1   msaitoh 		etype = ntohs(eh->evl_proto);
    776   1.1   msaitoh 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    777   1.1   msaitoh 	} else {
    778   1.1   msaitoh 		etype = ntohs(eh->evl_encap_proto);
    779   1.1   msaitoh 		ehdrlen = ETHER_HDR_LEN;
    780   1.1   msaitoh 	}
    781   1.1   msaitoh 
    782   1.1   msaitoh 	/* Set the ether header length */
    783   1.1   msaitoh 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
    784   1.1   msaitoh 
    785   1.3   msaitoh 	if (offload == FALSE)
    786   1.3   msaitoh 		goto no_offloads;
    787   1.3   msaitoh 
    788   1.8   msaitoh 	/*
    789  1.28   msaitoh 	 * If the first mbuf only includes the ethernet header,
    790  1.28   msaitoh 	 * jump to the next one
    791  1.28   msaitoh 	 * XXX: This assumes the stack splits mbufs containing headers
    792  1.28   msaitoh 	 *      on header boundaries
    793   1.8   msaitoh 	 * XXX: And assumes the entire IP header is contained in one mbuf
    794   1.8   msaitoh 	 */
    795   1.8   msaitoh 	if (mp->m_len == ehdrlen && mp->m_next)
    796   1.8   msaitoh 		l3d = mtod(mp->m_next, char *);
    797   1.8   msaitoh 	else
    798   1.8   msaitoh 		l3d = mtod(mp, char *) + ehdrlen;
    799   1.8   msaitoh 
    800   1.1   msaitoh 	switch (etype) {
    801   1.9   msaitoh #ifdef INET
    802   1.1   msaitoh 	case ETHERTYPE_IP:
    803   1.8   msaitoh 		ip = (struct ip *)(l3d);
    804   1.8   msaitoh 		ip_hlen = ip->ip_hl << 2;
    805   1.8   msaitoh 		ipproto = ip->ip_p;
    806   1.8   msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
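                         		/*
                         		 * With M_CSUM_IPv4 the stack leaves ip_sum zeroed
                         		 * for the hardware to fill in; assert that here.
                         		 */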
    807   1.1   msaitoh 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
    808   1.8   msaitoh 		    ip->ip_sum == 0);
    809   1.1   msaitoh 		break;
    810   1.9   msaitoh #endif
    811   1.9   msaitoh #ifdef INET6
    812   1.1   msaitoh 	case ETHERTYPE_IPV6:
    813   1.8   msaitoh 		ip6 = (struct ip6_hdr *)(l3d);
    814   1.8   msaitoh 		ip_hlen = sizeof(struct ip6_hdr);
    815   1.8   msaitoh 		ipproto = ip6->ip6_nxt;
    816   1.1   msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
    817   1.1   msaitoh 		break;
    818   1.9   msaitoh #endif
    819   1.1   msaitoh 	default:
    820  1.11   msaitoh 		offload = false;
    821   1.1   msaitoh 		break;
    822   1.1   msaitoh 	}
    823   1.1   msaitoh 
    824   1.1   msaitoh 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
    825   1.1   msaitoh 		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
    826   1.1   msaitoh 
    827   1.1   msaitoh 	vlan_macip_lens |= ip_hlen;
    828   1.1   msaitoh 
    829   1.8   msaitoh 	/* No support for offloads for non-L4 next headers */
     830   1.8   msaitoh 	switch (ipproto) {
     831   1.8   msaitoh 	case IPPROTO_TCP:
     832  1.28   msaitoh 		if (mp->m_pkthdr.csum_flags &
     833  1.28   msaitoh 		    (M_CSUM_TCPv4 | M_CSUM_TCPv6))
     834   1.8   msaitoh 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
     835   1.8   msaitoh 		else
     836   1.8   msaitoh 			offload = false;
     837   1.8   msaitoh 		break;
     838   1.8   msaitoh 	case IPPROTO_UDP:
     839  1.28   msaitoh 		if (mp->m_pkthdr.csum_flags &
     840  1.28   msaitoh 		    (M_CSUM_UDPv4 | M_CSUM_UDPv6))
     841   1.8   msaitoh 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
     842   1.8   msaitoh 		else
     843   1.8   msaitoh 			offload = false;
     844   1.8   msaitoh 		break;
     845  1.11   msaitoh 	default:
     846  1.11   msaitoh 		offload = false;
     847  1.11   msaitoh 		break;
    848   1.8   msaitoh 	}
    849   1.8   msaitoh 
    850   1.8   msaitoh 	if (offload) /* Insert L4 checksum into data descriptors */
    851   1.1   msaitoh 		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
    852   1.1   msaitoh 
    853   1.3   msaitoh no_offloads:
    854   1.3   msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
    855   1.3   msaitoh 
    856   1.1   msaitoh 	/* Now copy bits into descriptor */
    857   1.1   msaitoh 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
    858   1.1   msaitoh 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
    859   1.1   msaitoh 	TXD->seqnum_seed = htole32(0);
    860   1.1   msaitoh 	TXD->mss_l4len_idx = htole32(0);
    861   1.1   msaitoh 
    862   1.1   msaitoh 	/* We've consumed the first desc, adjust counters */
    863   1.1   msaitoh 	if (++ctxd == txr->num_desc)
    864   1.1   msaitoh 		ctxd = 0;
    865   1.1   msaitoh 	txr->next_avail_desc = ctxd;
    866   1.1   msaitoh 	--txr->tx_avail;
    867   1.1   msaitoh 
    868  1.28   msaitoh 	return (0);
    869  1.28   msaitoh } /* ixgbe_tx_ctx_setup */
    870   1.1   msaitoh 
    871  1.28   msaitoh /************************************************************************
    872  1.28   msaitoh  * ixgbe_tso_setup
    873   1.1   msaitoh  *
    874  1.28   msaitoh  *   Setup work for hardware segmentation offload (TSO) on
    875  1.28   msaitoh  *   adapters using advanced tx descriptors
    876  1.28   msaitoh  ************************************************************************/
    877   1.1   msaitoh static int
    878  1.28   msaitoh ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
    879  1.28   msaitoh     u32 *olinfo_status)
    880   1.1   msaitoh {
    881   1.1   msaitoh 	struct ixgbe_adv_tx_context_desc *TXD;
    882  1.28   msaitoh 	struct ether_vlan_header         *eh;
    883   1.1   msaitoh #ifdef INET6
    884  1.28   msaitoh 	struct ip6_hdr                   *ip6;
    885   1.1   msaitoh #endif
    886   1.1   msaitoh #ifdef INET
    887  1.28   msaitoh 	struct ip                        *ip;
    888   1.1   msaitoh #endif
    889  1.28   msaitoh 	struct tcphdr                    *th;
    890  1.28   msaitoh 	int                              ctxd, ehdrlen, ip_hlen, tcp_hlen;
    891  1.28   msaitoh 	u32                              vlan_macip_lens = 0;
    892  1.28   msaitoh 	u32                              type_tucmd_mlhl = 0;
    893  1.28   msaitoh 	u32                              mss_l4len_idx = 0, paylen;
    894  1.28   msaitoh 	u16                              vtag = 0, eh_type;
    895   1.1   msaitoh 
    896   1.1   msaitoh 	/*
    897   1.1   msaitoh 	 * Determine where frame payload starts.
    898   1.1   msaitoh 	 * Jump over vlan headers if already present
    899   1.1   msaitoh 	 */
    900   1.1   msaitoh 	eh = mtod(mp, struct ether_vlan_header *);
    901   1.1   msaitoh 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
    902   1.1   msaitoh 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    903   1.1   msaitoh 		eh_type = eh->evl_proto;
    904   1.1   msaitoh 	} else {
    905   1.1   msaitoh 		ehdrlen = ETHER_HDR_LEN;
    906   1.1   msaitoh 		eh_type = eh->evl_encap_proto;
    907   1.1   msaitoh 	}
    908   1.1   msaitoh 
    909   1.1   msaitoh 	switch (ntohs(eh_type)) {
    910   1.1   msaitoh #ifdef INET
    911   1.1   msaitoh 	case ETHERTYPE_IP:
    912   1.1   msaitoh 		ip = (struct ip *)(mp->m_data + ehdrlen);
    913   1.1   msaitoh 		if (ip->ip_p != IPPROTO_TCP)
    914   1.1   msaitoh 			return (ENXIO);
    915   1.1   msaitoh 		ip->ip_sum = 0;
    916   1.1   msaitoh 		ip_hlen = ip->ip_hl << 2;
    917   1.1   msaitoh 		th = (struct tcphdr *)((char *)ip + ip_hlen);
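                         		/*
                         		 * Seed th_sum with the pseudo-header checksum (without
                         		 * the length); the hardware completes the TCP checksum
                         		 * for each segment it generates.
                         		 */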
    918   1.1   msaitoh 		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
    919   1.1   msaitoh 		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
    920   1.1   msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
    921   1.1   msaitoh 		/* Tell transmit desc to also do IPv4 checksum. */
    922   1.1   msaitoh 		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
    923   1.1   msaitoh 		break;
    924   1.1   msaitoh #endif
    925  1.28   msaitoh #ifdef INET6
    926  1.28   msaitoh 	case ETHERTYPE_IPV6:
    927  1.28   msaitoh 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
    928  1.28   msaitoh 		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
    929  1.28   msaitoh 		if (ip6->ip6_nxt != IPPROTO_TCP)
    930  1.28   msaitoh 			return (ENXIO);
    931  1.28   msaitoh 		ip_hlen = sizeof(struct ip6_hdr);
    933  1.28   msaitoh 		th = (struct tcphdr *)((char *)ip6 + ip_hlen);
    934  1.28   msaitoh 		th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
    935  1.28   msaitoh 		    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
    936  1.28   msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
    937  1.28   msaitoh 		break;
    938  1.28   msaitoh #endif
    939   1.1   msaitoh 	default:
    940   1.1   msaitoh 		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
    941   1.1   msaitoh 		    __func__, ntohs(eh_type));
    942   1.1   msaitoh 		break;
    943   1.1   msaitoh 	}
    944   1.1   msaitoh 
    945   1.1   msaitoh 	ctxd = txr->next_avail_desc;
    946  1.28   msaitoh 	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
    947   1.1   msaitoh 
    948   1.1   msaitoh 	tcp_hlen = th->th_off << 2;
    949   1.1   msaitoh 
    950   1.1   msaitoh 	/* This is used in the transmit desc in encap */
    951   1.1   msaitoh 	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
    952   1.1   msaitoh 
    953   1.1   msaitoh 	/* VLAN MACLEN IPLEN */
    954  1.29  knakahar 	if (vlan_has_tag(mp)) {
    955  1.29  knakahar 		vtag = htole16(vlan_get_tag(mp));
    956  1.28   msaitoh 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
    957   1.1   msaitoh 	}
    958   1.1   msaitoh 
    959   1.1   msaitoh 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
    960   1.1   msaitoh 	vlan_macip_lens |= ip_hlen;
    961   1.1   msaitoh 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
    962   1.1   msaitoh 
    963   1.1   msaitoh 	/* ADV DTYPE TUCMD */
    964   1.1   msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
    965   1.1   msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
    966   1.1   msaitoh 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
    967   1.1   msaitoh 
    968   1.1   msaitoh 	/* MSS L4LEN IDX */
    969   1.1   msaitoh 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
    970   1.1   msaitoh 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
    971   1.1   msaitoh 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
    972   1.1   msaitoh 
    973   1.1   msaitoh 	TXD->seqnum_seed = htole32(0);
    974   1.1   msaitoh 
    975   1.1   msaitoh 	if (++ctxd == txr->num_desc)
    976   1.1   msaitoh 		ctxd = 0;
    977   1.1   msaitoh 
    978   1.1   msaitoh 	txr->tx_avail--;
    979   1.1   msaitoh 	txr->next_avail_desc = ctxd;
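                         	/*
                         	 * Enable segmentation (TSE), request L4 checksum insertion
                         	 * and report only the TCP payload length for this packet.
                         	 */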
    980   1.1   msaitoh 	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
    981   1.1   msaitoh 	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
    982   1.1   msaitoh 	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
    983   1.1   msaitoh 	++txr->tso_tx.ev_count;
    984  1.28   msaitoh 
    985   1.1   msaitoh 	return (0);
    986  1.28   msaitoh } /* ixgbe_tso_setup */
    987   1.1   msaitoh 
    988   1.3   msaitoh 
    989  1.28   msaitoh /************************************************************************
    990  1.28   msaitoh  * ixgbe_txeof
    991   1.1   msaitoh  *
    992  1.28   msaitoh  *   Examine each tx_buffer in the used queue. If the hardware is done
    993  1.28   msaitoh  *   processing the packet then free associated resources. The
     994  1.28   msaitoh  *   processing the packet, then free the associated resources. The
    995  1.28   msaitoh  ************************************************************************/
    996   1.1   msaitoh void
    997   1.1   msaitoh ixgbe_txeof(struct tx_ring *txr)
    998   1.1   msaitoh {
    999   1.1   msaitoh 	struct adapter		*adapter = txr->adapter;
   1000   1.1   msaitoh 	struct ifnet		*ifp = adapter->ifp;
   1001  1.28   msaitoh 	struct ixgbe_tx_buf	*buf;
   1002  1.28   msaitoh 	union ixgbe_adv_tx_desc *txd;
   1003   1.1   msaitoh 	u32			work, processed = 0;
   1004   1.7   msaitoh 	u32			limit = adapter->tx_process_limit;
   1005   1.1   msaitoh 
   1006   1.1   msaitoh 	KASSERT(mutex_owned(&txr->tx_mtx));
   1007   1.1   msaitoh 
   1008   1.1   msaitoh #ifdef DEV_NETMAP
   1009  1.28   msaitoh 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1010  1.28   msaitoh 	    (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
   1011  1.28   msaitoh 		struct netmap_adapter *na = NA(adapter->ifp);
   1012   1.1   msaitoh 		struct netmap_kring *kring = &na->tx_rings[txr->me];
   1013   1.1   msaitoh 		txd = txr->tx_base;
   1014   1.1   msaitoh 		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1015   1.1   msaitoh 		    BUS_DMASYNC_POSTREAD);
   1016   1.1   msaitoh 		/*
   1017   1.1   msaitoh 		 * In netmap mode, all the work is done in the context
   1018   1.1   msaitoh 		 * of the client thread. Interrupt handlers only wake up
   1019   1.1   msaitoh 		 * clients, which may be sleeping on individual rings
   1020   1.1   msaitoh 		 * or on a global resource for all rings.
   1021   1.1   msaitoh 		 * To implement tx interrupt mitigation, we wake up the client
   1022   1.1   msaitoh 		 * thread roughly every half ring, even if the NIC interrupts
   1023   1.1   msaitoh 		 * more frequently. This is implemented as follows:
   1024   1.1   msaitoh 		 * - ixgbe_txsync() sets kring->nr_kflags with the index of
   1025   1.1   msaitoh 		 *   the slot that should wake up the thread (nkr_num_slots
   1026   1.1   msaitoh 		 *   means the user thread should not be woken up);
   1027   1.1   msaitoh 		 * - the driver ignores tx interrupts unless netmap_mitigate=0
   1028   1.1   msaitoh 		 *   or the slot has the DD bit set.
   1029   1.1   msaitoh 		 */
   1030   1.1   msaitoh 		if (!netmap_mitigate ||
   1031   1.1   msaitoh 		    (kring->nr_kflags < kring->nkr_num_slots &&
   1032  1.28   msaitoh 		     txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
   1033   1.1   msaitoh 			netmap_tx_irq(ifp, txr->me);
   1034   1.1   msaitoh 		}
   1035   1.1   msaitoh 		return;
   1036   1.1   msaitoh 	}
   1037   1.1   msaitoh #endif /* DEV_NETMAP */
   1038   1.1   msaitoh 
   1039   1.1   msaitoh 	if (txr->tx_avail == txr->num_desc) {
   1040   1.3   msaitoh 		txr->busy = 0;
   1041   1.1   msaitoh 		return;
   1042   1.1   msaitoh 	}
   1043   1.1   msaitoh 
   1044   1.1   msaitoh 	/* Get work starting point */
   1045   1.1   msaitoh 	work = txr->next_to_clean;
   1046   1.1   msaitoh 	buf = &txr->tx_buffers[work];
   1047   1.1   msaitoh 	txd = &txr->tx_base[work];
   1048   1.1   msaitoh 	work -= txr->num_desc; /* The distance to ring end */
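                         	/*
                         	 * 'work' is now a negative offset from the end of the ring;
                         	 * it counts back up toward zero as descriptors are cleaned,
                         	 * so the (!work) tests below detect exactly when the index
                         	 * wraps around to the start of the ring.
                         	 */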
   1049  1.28   msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1050   1.1   msaitoh 	    BUS_DMASYNC_POSTREAD);
   1051   1.8   msaitoh 
   1052   1.1   msaitoh 	do {
   1053   1.8   msaitoh 		union ixgbe_adv_tx_desc *eop = buf->eop;
   1054   1.1   msaitoh 		if (eop == NULL) /* No work */
   1055   1.1   msaitoh 			break;
   1056   1.1   msaitoh 
   1057   1.1   msaitoh 		if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
   1058   1.1   msaitoh 			break;	/* I/O not complete */
   1059   1.1   msaitoh 
   1060   1.1   msaitoh 		if (buf->m_head) {
   1061  1.28   msaitoh 			txr->bytes += buf->m_head->m_pkthdr.len;
   1062  1.28   msaitoh 			bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
   1063   1.1   msaitoh 			    0, buf->m_head->m_pkthdr.len,
   1064   1.1   msaitoh 			    BUS_DMASYNC_POSTWRITE);
   1065  1.28   msaitoh 			ixgbe_dmamap_unload(txr->txtag, buf->map);
   1066   1.1   msaitoh 			m_freem(buf->m_head);
   1067   1.1   msaitoh 			buf->m_head = NULL;
   1068   1.1   msaitoh 		}
   1069   1.1   msaitoh 		buf->eop = NULL;
   1070   1.1   msaitoh 		++txr->tx_avail;
   1071   1.1   msaitoh 
   1072   1.1   msaitoh 		/* We clean the range if multi segment */
   1073   1.1   msaitoh 		while (txd != eop) {
   1074   1.1   msaitoh 			++txd;
   1075   1.1   msaitoh 			++buf;
   1076   1.1   msaitoh 			++work;
   1077   1.1   msaitoh 			/* wrap the ring? */
   1078   1.1   msaitoh 			if (__predict_false(!work)) {
   1079   1.1   msaitoh 				work -= txr->num_desc;
   1080   1.1   msaitoh 				buf = txr->tx_buffers;
   1081   1.1   msaitoh 				txd = txr->tx_base;
   1082   1.1   msaitoh 			}
   1083   1.1   msaitoh 			if (buf->m_head) {
   1084   1.1   msaitoh 				txr->bytes +=
   1085   1.1   msaitoh 				    buf->m_head->m_pkthdr.len;
   1086   1.1   msaitoh 				bus_dmamap_sync(txr->txtag->dt_dmat,
   1087   1.1   msaitoh 				    buf->map,
   1088   1.1   msaitoh 				    0, buf->m_head->m_pkthdr.len,
   1089   1.1   msaitoh 				    BUS_DMASYNC_POSTWRITE);
   1090   1.1   msaitoh 				ixgbe_dmamap_unload(txr->txtag,
   1091   1.1   msaitoh 				    buf->map);
   1092   1.1   msaitoh 				m_freem(buf->m_head);
   1093   1.1   msaitoh 				buf->m_head = NULL;
   1094   1.1   msaitoh 			}
   1095   1.1   msaitoh 			++txr->tx_avail;
   1096   1.1   msaitoh 			buf->eop = NULL;
   1097   1.1   msaitoh 
   1098   1.1   msaitoh 		}
   1099   1.1   msaitoh 		++txr->packets;
   1100   1.1   msaitoh 		++processed;
   1101   1.1   msaitoh 		++ifp->if_opackets;
   1102   1.1   msaitoh 
   1103   1.1   msaitoh 		/* Try the next packet */
   1104   1.1   msaitoh 		++txd;
   1105   1.1   msaitoh 		++buf;
   1106   1.1   msaitoh 		++work;
   1107   1.1   msaitoh 		/* reset with a wrap */
   1108   1.1   msaitoh 		if (__predict_false(!work)) {
   1109   1.1   msaitoh 			work -= txr->num_desc;
   1110   1.1   msaitoh 			buf = txr->tx_buffers;
   1111   1.1   msaitoh 			txd = txr->tx_base;
   1112   1.1   msaitoh 		}
   1113   1.1   msaitoh 		prefetch(txd);
   1114   1.1   msaitoh 	} while (__predict_true(--limit));
   1115   1.1   msaitoh 
   1116   1.1   msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1117   1.1   msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1118   1.1   msaitoh 
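                         	/* Convert the negative offset back into an absolute ring index. */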
   1119   1.1   msaitoh 	work += txr->num_desc;
   1120   1.1   msaitoh 	txr->next_to_clean = work;
   1121   1.1   msaitoh 
   1122   1.1   msaitoh 	/*
    1123  1.28   msaitoh 	 * Queue hang detection: we know there is work outstanding,
    1124  1.28   msaitoh 	 * or the early return above would have been taken, so if
    1125  1.28   msaitoh 	 * nothing managed to get cleaned on this pass, increment
    1126  1.28   msaitoh 	 * the busy counter.  The local timer then checks it and
    1127  1.28   msaitoh 	 * marks the queue as HUNG once it exceeds the maximum
    1128  1.28   msaitoh 	 * number of attempts.
   1129  1.28   msaitoh 	 */
   1130   1.3   msaitoh 	if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
   1131   1.3   msaitoh 		++txr->busy;
   1132   1.3   msaitoh 	/*
    1133  1.28   msaitoh 	 * If anything was cleaned, we reset the state to 1;
    1134  1.28   msaitoh 	 * note this will turn off HUNG if it is set.
   1135  1.28   msaitoh 	 */
   1136   1.3   msaitoh 	if (processed)
   1137   1.3   msaitoh 		txr->busy = 1;
   1138   1.1   msaitoh 
   1139   1.1   msaitoh 	if (txr->tx_avail == txr->num_desc)
   1140   1.3   msaitoh 		txr->busy = 0;
   1141   1.1   msaitoh 
   1142   1.1   msaitoh 	return;
   1143  1.28   msaitoh } /* ixgbe_txeof */
   1144   1.1   msaitoh 
   1145  1.28   msaitoh /************************************************************************
   1146  1.28   msaitoh  * ixgbe_rsc_count
   1147  1.28   msaitoh  *
   1148  1.28   msaitoh  *   Used to detect a descriptor that has been merged by Hardware RSC.
   1149  1.28   msaitoh  ************************************************************************/
   1150   1.1   msaitoh static inline u32
   1151   1.1   msaitoh ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
   1152   1.1   msaitoh {
   1153   1.1   msaitoh 	return (le32toh(rx->wb.lower.lo_dword.data) &
   1154   1.1   msaitoh 	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
   1155  1.28   msaitoh } /* ixgbe_rsc_count */
   1156   1.1   msaitoh 
   1157  1.28   msaitoh /************************************************************************
   1158  1.28   msaitoh  * ixgbe_setup_hw_rsc
   1159   1.1   msaitoh  *
    1160  1.28   msaitoh  *   Initialize the Hardware RSC (LRO) feature on the 82599
    1161  1.28   msaitoh  *   for an RX ring; it is toggled by the LRO capability
    1162  1.28   msaitoh  *   even though it is transparent to the stack.
   1163  1.28   msaitoh  *
   1164  1.28   msaitoh  *   NOTE: Since this HW feature only works with IPv4 and
   1165  1.28   msaitoh  *         testing has shown soft LRO to be as effective,
   1166  1.28   msaitoh  *         this feature will be disabled by default.
   1167  1.28   msaitoh  ************************************************************************/
   1168   1.1   msaitoh static void
   1169   1.1   msaitoh ixgbe_setup_hw_rsc(struct rx_ring *rxr)
   1170   1.1   msaitoh {
   1171  1.28   msaitoh 	struct	adapter  *adapter = rxr->adapter;
   1172  1.28   msaitoh 	struct	ixgbe_hw *hw = &adapter->hw;
   1173  1.28   msaitoh 	u32              rscctrl, rdrxctl;
   1174   1.1   msaitoh 
   1175   1.1   msaitoh 	/* If turning LRO/RSC off we need to disable it */
   1176   1.1   msaitoh 	if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
   1177   1.1   msaitoh 		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
    1178   1.1   msaitoh 		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
                         		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
   1179   1.1   msaitoh 		return;
   1180   1.1   msaitoh 	}
   1181   1.1   msaitoh 
   1182   1.1   msaitoh 	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
   1183   1.1   msaitoh 	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
   1184  1.28   msaitoh #ifdef DEV_NETMAP
   1185  1.28   msaitoh 	/* Always strip CRC unless Netmap disabled it */
   1186  1.28   msaitoh 	if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
   1187  1.28   msaitoh 	    !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
   1188  1.28   msaitoh 	    ix_crcstrip)
   1189   1.1   msaitoh #endif /* DEV_NETMAP */
   1190  1.28   msaitoh 		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
   1191   1.1   msaitoh 	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
   1192   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
   1193   1.1   msaitoh 
   1194   1.1   msaitoh 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
   1195   1.1   msaitoh 	rscctrl |= IXGBE_RSCCTL_RSCEN;
   1196   1.1   msaitoh 	/*
   1197  1.28   msaitoh 	 * Limit the total number of descriptors that
   1198  1.28   msaitoh 	 * can be combined, so it does not exceed 64K
   1199  1.28   msaitoh 	 */
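                         	/*
                         	 * The limits below keep the merged length well under that:
                         	 * roughly 16 * 2KB clusters, 8 * page-sized clusters,
                         	 * 4 * 9KB clusters, or a single 16KB cluster.
                         	 */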
   1200   1.1   msaitoh 	if (rxr->mbuf_sz == MCLBYTES)
   1201   1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
   1202   1.1   msaitoh 	else if (rxr->mbuf_sz == MJUMPAGESIZE)
   1203   1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
   1204   1.1   msaitoh 	else if (rxr->mbuf_sz == MJUM9BYTES)
   1205   1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
   1206   1.1   msaitoh 	else  /* Using 16K cluster */
   1207   1.1   msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
   1208   1.1   msaitoh 
   1209   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
   1210   1.1   msaitoh 
   1211   1.1   msaitoh 	/* Enable TCP header recognition */
   1212   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
   1213  1.28   msaitoh 	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
   1214   1.1   msaitoh 
   1215   1.1   msaitoh 	/* Disable RSC for ACK packets */
   1216   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
   1217   1.1   msaitoh 	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
   1218   1.1   msaitoh 
   1219   1.1   msaitoh 	rxr->hw_rsc = TRUE;
   1220  1.28   msaitoh } /* ixgbe_setup_hw_rsc */
   1221   1.8   msaitoh 
   1222  1.28   msaitoh /************************************************************************
   1223  1.28   msaitoh  * ixgbe_refresh_mbufs
   1224   1.1   msaitoh  *
    1225  1.28   msaitoh  *   Refresh mbuf buffers for RX descriptor rings
    1226  1.28   msaitoh  *    - now keeps its own state, so discards due to resource
    1227  1.28   msaitoh  *      exhaustion are unnecessary; if an mbuf cannot be obtained
    1228  1.28   msaitoh  *      it just returns, keeping its placeholder, and can simply
    1229  1.28   msaitoh  *      be called again later to try again.
   1230  1.28   msaitoh  ************************************************************************/
   1231   1.1   msaitoh static void
   1232   1.1   msaitoh ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
   1233   1.1   msaitoh {
   1234  1.28   msaitoh 	struct adapter      *adapter = rxr->adapter;
   1235  1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1236  1.28   msaitoh 	struct mbuf         *mp;
   1237  1.28   msaitoh 	int                 i, j, error;
   1238  1.28   msaitoh 	bool                refreshed = false;
   1239   1.1   msaitoh 
   1240   1.1   msaitoh 	i = j = rxr->next_to_refresh;
   1241   1.1   msaitoh 	/* Control the loop with one beyond */
   1242   1.1   msaitoh 	if (++j == rxr->num_desc)
   1243   1.1   msaitoh 		j = 0;
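                         	/*
                         	 * 'j' always runs one slot ahead of 'i', so when the loop
                         	 * below stops (at 'limit' or on an allocation failure),
                         	 * next_to_refresh still points at the last slot that was
                         	 * actually refreshed.
                         	 */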
   1244   1.1   msaitoh 
   1245   1.1   msaitoh 	while (j != limit) {
   1246   1.1   msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1247   1.1   msaitoh 		if (rxbuf->buf == NULL) {
   1248   1.1   msaitoh 			mp = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
   1249   1.1   msaitoh 			    MT_DATA, M_PKTHDR, rxr->mbuf_sz);
   1250   1.1   msaitoh 			if (mp == NULL) {
   1251   1.1   msaitoh 				rxr->no_jmbuf.ev_count++;
   1252   1.1   msaitoh 				goto update;
   1253   1.1   msaitoh 			}
   1254   1.1   msaitoh 			if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
   1255   1.1   msaitoh 				m_adj(mp, ETHER_ALIGN);
   1256   1.1   msaitoh 		} else
   1257   1.1   msaitoh 			mp = rxbuf->buf;
   1258   1.1   msaitoh 
   1259   1.1   msaitoh 		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1260   1.1   msaitoh 
   1261   1.1   msaitoh 		/* If we're dealing with an mbuf that was copied rather
   1262   1.1   msaitoh 		 * than replaced, there's no need to go through busdma.
   1263   1.1   msaitoh 		 */
   1264   1.1   msaitoh 		if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
   1265   1.1   msaitoh 			/* Get the memory mapping */
   1266   1.4   msaitoh 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   1267   1.1   msaitoh 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   1268   1.1   msaitoh 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   1269   1.1   msaitoh 			if (error != 0) {
   1270  1.28   msaitoh 				printf("Refresh mbufs: payload dmamap load failure - %d\n", error);
   1271   1.1   msaitoh 				m_free(mp);
   1272   1.1   msaitoh 				rxbuf->buf = NULL;
   1273   1.1   msaitoh 				goto update;
   1274   1.1   msaitoh 			}
   1275   1.1   msaitoh 			rxbuf->buf = mp;
   1276   1.1   msaitoh 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1277   1.1   msaitoh 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   1278   1.1   msaitoh 			rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
   1279   1.1   msaitoh 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1280   1.1   msaitoh 		} else {
   1281   1.1   msaitoh 			rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
   1282   1.1   msaitoh 			rxbuf->flags &= ~IXGBE_RX_COPY;
   1283   1.1   msaitoh 		}
   1284   1.1   msaitoh 
   1285   1.1   msaitoh 		refreshed = true;
   1286   1.1   msaitoh 		/* Next is precalculated */
   1287   1.1   msaitoh 		i = j;
   1288   1.1   msaitoh 		rxr->next_to_refresh = i;
   1289   1.1   msaitoh 		if (++j == rxr->num_desc)
   1290   1.1   msaitoh 			j = 0;
   1291   1.1   msaitoh 	}
   1292  1.28   msaitoh 
   1293   1.1   msaitoh update:
   1294   1.1   msaitoh 	if (refreshed) /* Update hardware tail index */
   1295  1.28   msaitoh 		IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);
   1296  1.28   msaitoh 
   1297   1.1   msaitoh 	return;
   1298  1.28   msaitoh } /* ixgbe_refresh_mbufs */
   1299   1.1   msaitoh 
   1300  1.28   msaitoh /************************************************************************
   1301  1.28   msaitoh  * ixgbe_allocate_receive_buffers
   1302   1.1   msaitoh  *
   1303  1.28   msaitoh  *   Allocate memory for rx_buffer structures. Since we use one
    1304  1.28   msaitoh  *   rx_buffer per received packet, the maximum number of rx_buffers
   1305  1.28   msaitoh  *   that we'll need is equal to the number of receive descriptors
   1306  1.28   msaitoh  *   that we've allocated.
   1307  1.28   msaitoh  ************************************************************************/
   1308  1.28   msaitoh static int
   1309   1.1   msaitoh ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
   1310   1.1   msaitoh {
   1311  1.28   msaitoh 	struct	adapter     *adapter = rxr->adapter;
   1312  1.28   msaitoh 	device_t            dev = adapter->dev;
   1313  1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1314  1.28   msaitoh 	int                 bsize, error;
   1315   1.1   msaitoh 
   1316   1.1   msaitoh 	bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
   1317  1.28   msaitoh 	rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_DEVBUF,
   1318  1.28   msaitoh 	    M_NOWAIT | M_ZERO);
   1319  1.28   msaitoh 	if (rxr->rx_buffers == NULL) {
   1320   1.1   msaitoh 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   1321   1.1   msaitoh 		error = ENOMEM;
   1322   1.1   msaitoh 		goto fail;
   1323   1.1   msaitoh 	}
   1324   1.1   msaitoh 
   1325  1.28   msaitoh 	error = ixgbe_dma_tag_create(
   1326  1.28   msaitoh 	         /*      parent */ adapter->osdep.dmat,
   1327  1.28   msaitoh 	         /*   alignment */ 1,
   1328  1.28   msaitoh 	         /*      bounds */ 0,
   1329  1.28   msaitoh 	         /*     maxsize */ MJUM16BYTES,
   1330  1.28   msaitoh 	         /*   nsegments */ 1,
   1331  1.28   msaitoh 	         /*  maxsegsize */ MJUM16BYTES,
   1332  1.28   msaitoh 	         /*       flags */ 0,
   1333  1.28   msaitoh 	                           &rxr->ptag);
   1334  1.28   msaitoh 	if (error != 0) {
   1335   1.1   msaitoh 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   1336   1.1   msaitoh 		goto fail;
   1337   1.1   msaitoh 	}
   1338   1.1   msaitoh 
   1339   1.5   msaitoh 	for (int i = 0; i < rxr->num_desc; i++, rxbuf++) {
   1340   1.1   msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1341   1.4   msaitoh 		error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
   1342   1.1   msaitoh 		if (error) {
   1343   1.1   msaitoh 			aprint_error_dev(dev, "Unable to create RX dma map\n");
   1344   1.1   msaitoh 			goto fail;
   1345   1.1   msaitoh 		}
   1346   1.1   msaitoh 	}
   1347   1.1   msaitoh 
   1348   1.1   msaitoh 	return (0);
   1349   1.1   msaitoh 
   1350   1.1   msaitoh fail:
   1351   1.1   msaitoh 	/* Frees all, but can handle partial completion */
   1352   1.1   msaitoh 	ixgbe_free_receive_structures(adapter);
   1353  1.28   msaitoh 
   1354   1.1   msaitoh 	return (error);
   1355  1.28   msaitoh } /* ixgbe_allocate_receive_buffers */
   1356   1.1   msaitoh 
    1357  1.28   msaitoh /************************************************************************
    1358  1.28   msaitoh  * ixgbe_free_receive_ring
    1359  1.28   msaitoh  *
    1360  1.28   msaitoh  *   Free the mbufs attached to a receive ring's buffers.
    1361  1.28   msaitoh  ************************************************************************/
   1362  1.28   msaitoh static void
   1363   1.1   msaitoh ixgbe_free_receive_ring(struct rx_ring *rxr)
   1364  1.27   msaitoh {
   1365   1.5   msaitoh 	for (int i = 0; i < rxr->num_desc; i++) {
   1366  1.27   msaitoh 		ixgbe_rx_discard(rxr, i);
   1367   1.1   msaitoh 	}
   1368  1.28   msaitoh } /* ixgbe_free_receive_ring */
   1369   1.1   msaitoh 
   1370  1.28   msaitoh /************************************************************************
   1371  1.28   msaitoh  * ixgbe_setup_receive_ring
   1372   1.1   msaitoh  *
   1373  1.28   msaitoh  *   Initialize a receive ring and its buffers.
   1374  1.28   msaitoh  ************************************************************************/
   1375   1.1   msaitoh static int
   1376   1.1   msaitoh ixgbe_setup_receive_ring(struct rx_ring *rxr)
   1377   1.1   msaitoh {
   1378  1.28   msaitoh 	struct adapter        *adapter;
   1379  1.28   msaitoh 	struct ixgbe_rx_buf   *rxbuf;
   1380   1.1   msaitoh #ifdef LRO
   1381  1.28   msaitoh 	struct ifnet          *ifp;
   1382  1.28   msaitoh 	struct lro_ctrl       *lro = &rxr->lro;
   1383   1.1   msaitoh #endif /* LRO */
   1384   1.1   msaitoh #ifdef DEV_NETMAP
   1385   1.1   msaitoh 	struct netmap_adapter *na = NA(rxr->adapter->ifp);
   1386  1.28   msaitoh 	struct netmap_slot    *slot;
   1387   1.1   msaitoh #endif /* DEV_NETMAP */
   1388  1.28   msaitoh 	int                   rsize, error = 0;
   1389   1.1   msaitoh 
   1390   1.1   msaitoh 	adapter = rxr->adapter;
   1391   1.1   msaitoh #ifdef LRO
   1392   1.1   msaitoh 	ifp = adapter->ifp;
   1393   1.1   msaitoh #endif /* LRO */
   1394   1.1   msaitoh 
   1395   1.1   msaitoh 	/* Clear the ring contents */
   1396   1.1   msaitoh 	IXGBE_RX_LOCK(rxr);
   1397  1.28   msaitoh 
   1398   1.1   msaitoh #ifdef DEV_NETMAP
   1399  1.28   msaitoh 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1400  1.28   msaitoh 		slot = netmap_reset(na, NR_RX, rxr->me, 0);
   1401   1.1   msaitoh #endif /* DEV_NETMAP */
   1402  1.28   msaitoh 
   1403   1.1   msaitoh 	rsize = roundup2(adapter->num_rx_desc *
   1404   1.1   msaitoh 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   1405   1.1   msaitoh 	bzero((void *)rxr->rx_base, rsize);
   1406   1.1   msaitoh 	/* Cache the size */
   1407   1.1   msaitoh 	rxr->mbuf_sz = adapter->rx_mbuf_sz;
   1408   1.1   msaitoh 
   1409   1.1   msaitoh 	/* Free current RX buffer structs and their mbufs */
   1410   1.1   msaitoh 	ixgbe_free_receive_ring(rxr);
   1411   1.1   msaitoh 
   1412   1.1   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1413   1.1   msaitoh 
   1414   1.1   msaitoh 	/* Now reinitialize our supply of jumbo mbufs.  The number
   1415   1.1   msaitoh 	 * or size of jumbo mbufs may have changed.
   1416   1.1   msaitoh 	 */
   1417   1.1   msaitoh 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
   1418  1.25   msaitoh 	    (2 * adapter->num_rx_desc) * adapter->num_queues,
   1419  1.25   msaitoh 	    adapter->rx_mbuf_sz);
   1420   1.1   msaitoh 
   1421   1.1   msaitoh 	IXGBE_RX_LOCK(rxr);
   1422   1.1   msaitoh 
   1423   1.1   msaitoh 	/* Now replenish the mbufs */
   1424   1.1   msaitoh 	for (int j = 0; j != rxr->num_desc; ++j) {
   1425  1.28   msaitoh 		struct mbuf *mp;
   1426   1.1   msaitoh 
   1427   1.1   msaitoh 		rxbuf = &rxr->rx_buffers[j];
   1428  1.28   msaitoh 
   1429   1.1   msaitoh #ifdef DEV_NETMAP
   1430   1.1   msaitoh 		/*
   1431   1.1   msaitoh 		 * In netmap mode, fill the map and set the buffer
   1432   1.1   msaitoh 		 * address in the NIC ring, considering the offset
   1433   1.1   msaitoh 		 * between the netmap and NIC rings (see comment in
   1434   1.1   msaitoh 		 * ixgbe_setup_transmit_ring() ). No need to allocate
   1435   1.1   msaitoh 		 * an mbuf, so end the block with a continue;
   1436   1.1   msaitoh 		 */
   1437  1.28   msaitoh 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
   1438   1.1   msaitoh 			int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
   1439   1.1   msaitoh 			uint64_t paddr;
   1440   1.1   msaitoh 			void *addr;
   1441   1.1   msaitoh 
   1442   1.1   msaitoh 			addr = PNMB(na, slot + sj, &paddr);
   1443   1.1   msaitoh 			netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
   1444   1.1   msaitoh 			/* Update descriptor and the cached value */
   1445   1.1   msaitoh 			rxr->rx_base[j].read.pkt_addr = htole64(paddr);
   1446   1.1   msaitoh 			rxbuf->addr = htole64(paddr);
   1447   1.1   msaitoh 			continue;
   1448   1.1   msaitoh 		}
   1449   1.1   msaitoh #endif /* DEV_NETMAP */
   1450  1.28   msaitoh 
   1451  1.28   msaitoh 		rxbuf->flags = 0;
   1452   1.1   msaitoh 		rxbuf->buf = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
   1453   1.1   msaitoh 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   1454   1.1   msaitoh 		if (rxbuf->buf == NULL) {
   1455   1.1   msaitoh 			error = ENOBUFS;
   1456  1.28   msaitoh 			goto fail;
   1457   1.1   msaitoh 		}
   1458   1.1   msaitoh 		mp = rxbuf->buf;
   1459   1.1   msaitoh 		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1460   1.1   msaitoh 		/* Get the memory mapping */
   1461  1.28   msaitoh 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap,
   1462  1.28   msaitoh 		    mp, BUS_DMA_NOWAIT);
   1463   1.1   msaitoh 		if (error != 0)
    1464   1.1   msaitoh 			goto fail;
   1465   1.1   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1466   1.1   msaitoh 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   1467   1.1   msaitoh 		/* Update the descriptor and the cached value */
   1468   1.1   msaitoh 		rxr->rx_base[j].read.pkt_addr =
   1469   1.1   msaitoh 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1470   1.1   msaitoh 		rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1471   1.1   msaitoh 	}
   1472   1.1   msaitoh 
   1473   1.1   msaitoh 
   1474   1.1   msaitoh 	/* Setup our descriptor indices */
   1475   1.1   msaitoh 	rxr->next_to_check = 0;
   1476   1.1   msaitoh 	rxr->next_to_refresh = 0;
   1477   1.1   msaitoh 	rxr->lro_enabled = FALSE;
   1478   1.1   msaitoh 	rxr->rx_copies.ev_count = 0;
   1479  1.13   msaitoh #if 0 /* NetBSD */
   1480   1.1   msaitoh 	rxr->rx_bytes.ev_count = 0;
   1481  1.13   msaitoh #if 1	/* Fix inconsistency */
   1482  1.13   msaitoh 	rxr->rx_packets.ev_count = 0;
   1483  1.13   msaitoh #endif
   1484  1.13   msaitoh #endif
   1485   1.1   msaitoh 	rxr->vtag_strip = FALSE;
   1486   1.1   msaitoh 
   1487   1.1   msaitoh 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1488   1.1   msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1489   1.1   msaitoh 
   1490   1.1   msaitoh 	/*
   1491  1.28   msaitoh 	 * Now set up the LRO interface
   1492  1.28   msaitoh 	 */
   1493   1.1   msaitoh 	if (ixgbe_rsc_enable)
   1494   1.1   msaitoh 		ixgbe_setup_hw_rsc(rxr);
   1495   1.1   msaitoh #ifdef LRO
   1496   1.1   msaitoh 	else if (ifp->if_capenable & IFCAP_LRO) {
   1497   1.1   msaitoh 		device_t dev = adapter->dev;
   1498   1.1   msaitoh 		int err = tcp_lro_init(lro);
   1499   1.1   msaitoh 		if (err) {
   1500   1.1   msaitoh 			device_printf(dev, "LRO Initialization failed!\n");
   1501   1.1   msaitoh 			goto fail;
   1502   1.1   msaitoh 		}
   1503   1.1   msaitoh 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   1504   1.1   msaitoh 		rxr->lro_enabled = TRUE;
   1505   1.1   msaitoh 		lro->ifp = adapter->ifp;
   1506   1.1   msaitoh 	}
   1507   1.1   msaitoh #endif /* LRO */
   1508   1.1   msaitoh 
   1509   1.1   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1510  1.28   msaitoh 
   1511   1.1   msaitoh 	return (0);
   1512   1.1   msaitoh 
   1513   1.1   msaitoh fail:
   1514   1.1   msaitoh 	ixgbe_free_receive_ring(rxr);
   1515   1.1   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1516  1.28   msaitoh 
   1517   1.1   msaitoh 	return (error);
   1518  1.28   msaitoh } /* ixgbe_setup_receive_ring */
   1519   1.1   msaitoh 
   1520  1.28   msaitoh /************************************************************************
   1521  1.28   msaitoh  * ixgbe_setup_receive_structures - Initialize all receive rings.
   1522  1.28   msaitoh  ************************************************************************/
   1523   1.1   msaitoh int
   1524   1.1   msaitoh ixgbe_setup_receive_structures(struct adapter *adapter)
   1525   1.1   msaitoh {
   1526   1.1   msaitoh 	struct rx_ring *rxr = adapter->rx_rings;
   1527  1.28   msaitoh 	int            j;
   1528   1.1   msaitoh 
   1529   1.1   msaitoh 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   1530   1.1   msaitoh 		if (ixgbe_setup_receive_ring(rxr))
   1531   1.1   msaitoh 			goto fail;
   1532   1.1   msaitoh 
   1533   1.1   msaitoh 	return (0);
   1534   1.1   msaitoh fail:
   1535   1.1   msaitoh 	/*
    1536   1.1   msaitoh 	 * Free RX buffers allocated so far; we will only handle
    1537   1.1   msaitoh 	 * the rings that completed, since the failing case will have
    1538   1.1   msaitoh 	 * cleaned up for itself. 'j' failed, so it is the terminus.
   1539   1.1   msaitoh 	 */
   1540   1.1   msaitoh 	for (int i = 0; i < j; ++i) {
   1541   1.1   msaitoh 		rxr = &adapter->rx_rings[i];
   1542  1.27   msaitoh 		IXGBE_RX_LOCK(rxr);
   1543   1.1   msaitoh 		ixgbe_free_receive_ring(rxr);
   1544  1.27   msaitoh 		IXGBE_RX_UNLOCK(rxr);
   1545   1.1   msaitoh 	}
   1546   1.1   msaitoh 
   1547   1.1   msaitoh 	return (ENOBUFS);
   1548  1.28   msaitoh } /* ixgbe_setup_receive_structures */
   1549   1.1   msaitoh 
   1550   1.3   msaitoh 
   1551  1.28   msaitoh /************************************************************************
   1552  1.28   msaitoh  * ixgbe_free_receive_structures - Free all receive rings.
   1553  1.28   msaitoh  ************************************************************************/
   1554   1.1   msaitoh void
   1555   1.1   msaitoh ixgbe_free_receive_structures(struct adapter *adapter)
   1556   1.1   msaitoh {
   1557   1.1   msaitoh 	struct rx_ring *rxr = adapter->rx_rings;
   1558   1.1   msaitoh 
   1559   1.1   msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
   1560   1.1   msaitoh 
   1561   1.1   msaitoh 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1562   1.1   msaitoh 		ixgbe_free_receive_buffers(rxr);
   1563   1.1   msaitoh #ifdef LRO
   1564   1.1   msaitoh 		/* Free LRO memory */
   1565  1.28   msaitoh 		tcp_lro_free(&rxr->lro);
   1566   1.1   msaitoh #endif /* LRO */
   1567   1.1   msaitoh 		/* Free the ring memory as well */
   1568   1.1   msaitoh 		ixgbe_dma_free(adapter, &rxr->rxdma);
   1569   1.1   msaitoh 		IXGBE_RX_LOCK_DESTROY(rxr);
   1570   1.1   msaitoh 	}
   1571   1.1   msaitoh 
   1572   1.1   msaitoh 	free(adapter->rx_rings, M_DEVBUF);
   1573  1.28   msaitoh } /* ixgbe_free_receive_structures */
   1574   1.1   msaitoh 
   1575   1.1   msaitoh 
   1576  1.28   msaitoh /************************************************************************
   1577  1.28   msaitoh  * ixgbe_free_receive_buffers - Free receive ring data structures
   1578  1.28   msaitoh  ************************************************************************/
   1579   1.1   msaitoh static void
   1580   1.1   msaitoh ixgbe_free_receive_buffers(struct rx_ring *rxr)
   1581   1.1   msaitoh {
   1582  1.28   msaitoh 	struct adapter      *adapter = rxr->adapter;
   1583  1.28   msaitoh 	struct ixgbe_rx_buf *rxbuf;
   1584   1.1   msaitoh 
   1585   1.1   msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
   1586   1.1   msaitoh 
   1587   1.1   msaitoh 	/* Cleanup any existing buffers */
   1588   1.1   msaitoh 	if (rxr->rx_buffers != NULL) {
   1589   1.1   msaitoh 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   1590   1.1   msaitoh 			rxbuf = &rxr->rx_buffers[i];
   1591  1.27   msaitoh 			ixgbe_rx_discard(rxr, i);
   1592   1.1   msaitoh 			if (rxbuf->pmap != NULL) {
   1593   1.1   msaitoh 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   1594   1.1   msaitoh 				rxbuf->pmap = NULL;
   1595   1.1   msaitoh 			}
   1596   1.1   msaitoh 		}
   1597   1.1   msaitoh 		if (rxr->rx_buffers != NULL) {
   1598   1.1   msaitoh 			free(rxr->rx_buffers, M_DEVBUF);
   1599   1.1   msaitoh 			rxr->rx_buffers = NULL;
   1600   1.1   msaitoh 		}
   1601   1.1   msaitoh 	}
   1602   1.1   msaitoh 
   1603   1.1   msaitoh 	if (rxr->ptag != NULL) {
   1604   1.1   msaitoh 		ixgbe_dma_tag_destroy(rxr->ptag);
   1605   1.1   msaitoh 		rxr->ptag = NULL;
   1606   1.1   msaitoh 	}
   1607   1.1   msaitoh 
   1608   1.1   msaitoh 	return;
   1609  1.28   msaitoh } /* ixgbe_free_receive_buffers */
   1610   1.1   msaitoh 
   1611  1.28   msaitoh /************************************************************************
   1612  1.28   msaitoh  * ixgbe_rx_input
   1613  1.28   msaitoh  ************************************************************************/
   1614   1.1   msaitoh static __inline void
   1615  1.28   msaitoh ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
   1616  1.28   msaitoh     u32 ptype)
   1617   1.1   msaitoh {
   1618  1.20   msaitoh 	struct adapter	*adapter = ifp->if_softc;
   1619   1.1   msaitoh 
   1620   1.1   msaitoh #ifdef LRO
   1621   1.1   msaitoh 	struct ethercom *ec = &adapter->osdep.ec;
   1622   1.1   msaitoh 
   1623  1.28   msaitoh 	/*
    1624  1.28   msaitoh 	 * At the moment, LRO is only for IP/TCP packets whose TCP checksum
    1625  1.28   msaitoh 	 * has been computed by hardware, with no VLAN tag in the Ethernet
    1626  1.28   msaitoh 	 * header.  For IPv6 we do not yet support extension headers.
   1627  1.28   msaitoh 	 */
   1628   1.1   msaitoh         if (rxr->lro_enabled &&
   1629   1.1   msaitoh             (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
   1630   1.1   msaitoh             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   1631   1.1   msaitoh             ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   1632   1.1   msaitoh             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
   1633   1.1   msaitoh             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   1634   1.1   msaitoh             (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
   1635   1.1   msaitoh             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
   1636   1.1   msaitoh             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
   1637   1.1   msaitoh                 /*
   1638   1.1   msaitoh                  * Send to the stack if:
   1639   1.1   msaitoh                  **  - LRO not enabled, or
   1640   1.1   msaitoh                  **  - no LRO resources, or
   1641   1.1   msaitoh                  **  - lro enqueue fails
   1642   1.1   msaitoh                  */
   1643   1.1   msaitoh                 if (rxr->lro.lro_cnt != 0)
   1644   1.1   msaitoh                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
   1645   1.1   msaitoh                                 return;
   1646   1.1   msaitoh         }
   1647   1.1   msaitoh #endif /* LRO */
   1648   1.1   msaitoh 
   1649  1.20   msaitoh 	if_percpuq_enqueue(adapter->ipq, m);
   1650  1.28   msaitoh } /* ixgbe_rx_input */
   1651   1.1   msaitoh 
   1652  1.28   msaitoh /************************************************************************
   1653  1.28   msaitoh  * ixgbe_rx_discard
   1654  1.28   msaitoh  ************************************************************************/
   1655   1.1   msaitoh static __inline void
   1656   1.1   msaitoh ixgbe_rx_discard(struct rx_ring *rxr, int i)
   1657   1.1   msaitoh {
   1658  1.28   msaitoh 	struct ixgbe_rx_buf *rbuf;
   1659   1.1   msaitoh 
   1660   1.1   msaitoh 	rbuf = &rxr->rx_buffers[i];
   1661   1.1   msaitoh 
   1662   1.1   msaitoh 	/*
   1663  1.28   msaitoh 	 * With advanced descriptors the writeback
    1664  1.28   msaitoh 	 * clobbers the buffer addrs, so it is easier
   1665  1.28   msaitoh 	 * to just free the existing mbufs and take
   1666  1.28   msaitoh 	 * the normal refresh path to get new buffers
   1667  1.28   msaitoh 	 * and mapping.
   1668  1.28   msaitoh 	 */
   1669   1.1   msaitoh 
   1670  1.26   msaitoh 	if (rbuf->fmp != NULL) {/* Partial chain ? */
   1671  1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1672  1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1673   1.1   msaitoh 		m_freem(rbuf->fmp);
   1674   1.1   msaitoh 		rbuf->fmp = NULL;
   1675   1.1   msaitoh 		rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
   1676   1.1   msaitoh 	} else if (rbuf->buf) {
   1677  1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1678  1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1679   1.1   msaitoh 		m_free(rbuf->buf);
   1680   1.1   msaitoh 		rbuf->buf = NULL;
   1681   1.1   msaitoh 	}
   1682   1.4   msaitoh 	ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
   1683   1.1   msaitoh 
   1684   1.1   msaitoh 	rbuf->flags = 0;
   1685   1.1   msaitoh 
   1686   1.1   msaitoh 	return;
   1687  1.28   msaitoh } /* ixgbe_rx_discard */
   1688   1.1   msaitoh 
   1689   1.1   msaitoh 
   1690  1.28   msaitoh /************************************************************************
   1691  1.28   msaitoh  * ixgbe_rxeof
   1692   1.1   msaitoh  *
    1693  1.28   msaitoh  *   Executes in interrupt context. It replenishes the
    1694  1.28   msaitoh  *   mbufs in the descriptor ring and passes data which has
    1695  1.28   msaitoh  *   been DMA'ed into host memory up to the upper layer.
   1696   1.1   msaitoh  *
   1697  1.28   msaitoh  *   Return TRUE for more work, FALSE for all clean.
   1698  1.28   msaitoh  ************************************************************************/
   1699   1.1   msaitoh bool
   1700   1.1   msaitoh ixgbe_rxeof(struct ix_queue *que)
   1701   1.1   msaitoh {
   1702   1.1   msaitoh 	struct adapter		*adapter = que->adapter;
   1703   1.1   msaitoh 	struct rx_ring		*rxr = que->rxr;
   1704   1.1   msaitoh 	struct ifnet		*ifp = adapter->ifp;
   1705   1.1   msaitoh #ifdef LRO
   1706   1.1   msaitoh 	struct lro_ctrl		*lro = &rxr->lro;
   1707   1.1   msaitoh #endif /* LRO */
   1708  1.28   msaitoh 	union ixgbe_adv_rx_desc	*cur;
   1709  1.28   msaitoh 	struct ixgbe_rx_buf	*rbuf, *nbuf;
   1710   1.1   msaitoh 	int			i, nextp, processed = 0;
   1711   1.1   msaitoh 	u32			staterr = 0;
   1712   1.7   msaitoh 	u32			count = adapter->rx_process_limit;
   1713   1.1   msaitoh #ifdef RSS
   1714   1.1   msaitoh 	u16			pkt_info;
   1715   1.1   msaitoh #endif
   1716   1.1   msaitoh 
   1717   1.1   msaitoh 	IXGBE_RX_LOCK(rxr);
   1718   1.1   msaitoh 
   1719   1.1   msaitoh #ifdef DEV_NETMAP
   1720  1.28   msaitoh 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
   1721  1.28   msaitoh 		/* Same as the txeof routine: wakeup clients on intr. */
   1722  1.28   msaitoh 		if (netmap_rx_irq(ifp, rxr->me, &processed)) {
   1723  1.28   msaitoh 			IXGBE_RX_UNLOCK(rxr);
   1724  1.28   msaitoh 			return (FALSE);
   1725  1.28   msaitoh 		}
   1726   1.1   msaitoh 	}
   1727   1.1   msaitoh #endif /* DEV_NETMAP */
   1728   1.1   msaitoh 
   1729   1.1   msaitoh 	for (i = rxr->next_to_check; count != 0;) {
   1730  1.28   msaitoh 		struct mbuf *sendmp, *mp;
   1731  1.28   msaitoh 		u32         rsc, ptype;
   1732  1.28   msaitoh 		u16         len;
   1733  1.28   msaitoh 		u16         vtag = 0;
   1734  1.28   msaitoh 		bool        eop;
   1735   1.1   msaitoh 
   1736   1.1   msaitoh 		/* Sync the ring. */
   1737   1.1   msaitoh 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1738   1.1   msaitoh 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1739   1.1   msaitoh 
   1740   1.1   msaitoh 		cur = &rxr->rx_base[i];
   1741   1.1   msaitoh 		staterr = le32toh(cur->wb.upper.status_error);
   1742   1.1   msaitoh #ifdef RSS
   1743   1.1   msaitoh 		pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
   1744   1.1   msaitoh #endif
   1745   1.1   msaitoh 
   1746   1.1   msaitoh 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   1747   1.1   msaitoh 			break;
   1748   1.1   msaitoh 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   1749   1.1   msaitoh 			break;
   1750   1.1   msaitoh 
   1751   1.1   msaitoh 		count--;
   1752   1.1   msaitoh 		sendmp = NULL;
   1753   1.1   msaitoh 		nbuf = NULL;
   1754   1.1   msaitoh 		rsc = 0;
   1755   1.1   msaitoh 		cur->wb.upper.status_error = 0;
   1756   1.1   msaitoh 		rbuf = &rxr->rx_buffers[i];
   1757   1.1   msaitoh 		mp = rbuf->buf;
   1758   1.1   msaitoh 
   1759   1.1   msaitoh 		len = le16toh(cur->wb.upper.length);
   1760   1.1   msaitoh 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   1761   1.1   msaitoh 		    IXGBE_RXDADV_PKTTYPE_MASK;
   1762   1.1   msaitoh 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   1763   1.1   msaitoh 
   1764   1.1   msaitoh 		/* Make sure bad packets are discarded */
   1765   1.1   msaitoh 		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
   1766   1.3   msaitoh #if __FreeBSD_version >= 1100036
   1767  1.28   msaitoh 			if (adapter->feat_en & IXGBE_FEATURE_VF)
   1768   1.4   msaitoh 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
   1769   1.3   msaitoh #endif
   1770   1.1   msaitoh 			rxr->rx_discarded.ev_count++;
   1771   1.1   msaitoh 			ixgbe_rx_discard(rxr, i);
   1772   1.1   msaitoh 			goto next_desc;
   1773   1.1   msaitoh 		}
   1774   1.1   msaitoh 
   1775  1.27   msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
   1776  1.27   msaitoh 		    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
   1777  1.27   msaitoh 
   1778   1.1   msaitoh 		/*
   1779  1.28   msaitoh 		 * On 82599 which supports a hardware
   1780  1.28   msaitoh 		 * LRO (called HW RSC), packets need
   1781  1.28   msaitoh 		 * not be fragmented across sequential
   1782  1.28   msaitoh 		 * descriptors, rather the next descriptor
   1783  1.28   msaitoh 		 * is indicated in bits of the descriptor.
    1784  1.28   msaitoh 		 * This also means that we might process
    1785  1.28   msaitoh 		 * more than one packet at a time, something
    1786  1.28   msaitoh 		 * that was never true before; it
    1787  1.28   msaitoh 		 * required eliminating global chain pointers
   1788  1.28   msaitoh 		 * in favor of what we are doing here.  -jfv
   1789  1.28   msaitoh 		 */
   1790   1.1   msaitoh 		if (!eop) {
   1791   1.1   msaitoh 			/*
   1792  1.28   msaitoh 			 * Figure out the next descriptor
   1793  1.28   msaitoh 			 * of this frame.
   1794  1.28   msaitoh 			 */
   1795   1.1   msaitoh 			if (rxr->hw_rsc == TRUE) {
   1796   1.1   msaitoh 				rsc = ixgbe_rsc_count(cur);
   1797   1.1   msaitoh 				rxr->rsc_num += (rsc - 1);
   1798   1.1   msaitoh 			}
   1799   1.1   msaitoh 			if (rsc) { /* Get hardware index */
   1800  1.28   msaitoh 				nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
   1801   1.1   msaitoh 				    IXGBE_RXDADV_NEXTP_SHIFT);
   1802   1.1   msaitoh 			} else { /* Just sequential */
   1803   1.1   msaitoh 				nextp = i + 1;
   1804   1.1   msaitoh 				if (nextp == adapter->num_rx_desc)
   1805   1.1   msaitoh 					nextp = 0;
   1806   1.1   msaitoh 			}
   1807   1.1   msaitoh 			nbuf = &rxr->rx_buffers[nextp];
   1808   1.1   msaitoh 			prefetch(nbuf);
   1809   1.1   msaitoh 		}
   1810   1.1   msaitoh 		/*
   1811  1.28   msaitoh 		 * Rather than using the fmp/lmp global pointers
   1812  1.28   msaitoh 		 * we now keep the head of a packet chain in the
   1813  1.28   msaitoh 		 * buffer struct and pass this along from one
   1814  1.28   msaitoh 		 * descriptor to the next, until we get EOP.
   1815  1.28   msaitoh 		 */
   1816   1.1   msaitoh 		mp->m_len = len;
   1817   1.1   msaitoh 		/*
    1818  1.28   msaitoh 		 * See if there is a stored chain head, which tells us
    1819  1.28   msaitoh 		 * whether this is the first or a secondary fragment.
   1820  1.28   msaitoh 		 */
   1821   1.1   msaitoh 		sendmp = rbuf->fmp;
   1822   1.1   msaitoh 		if (sendmp != NULL) {  /* secondary frag */
   1823   1.1   msaitoh 			rbuf->buf = rbuf->fmp = NULL;
   1824   1.1   msaitoh 			mp->m_flags &= ~M_PKTHDR;
   1825   1.1   msaitoh 			sendmp->m_pkthdr.len += mp->m_len;
   1826   1.1   msaitoh 		} else {
   1827   1.1   msaitoh 			/*
   1828   1.1   msaitoh 			 * Optimize.  This might be a small packet,
   1829   1.1   msaitoh 			 * maybe just a TCP ACK.  Do a fast copy that
   1830   1.1   msaitoh 			 * is cache aligned into a new mbuf, and
   1831   1.1   msaitoh 			 * leave the old mbuf+cluster for re-use.
   1832   1.1   msaitoh 			 */
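                         			/*
                         			 * The IXGBE_RX_COPY flag set below lets
                         			 * ixgbe_refresh_mbufs() keep the cluster and
                         			 * its cached mapping instead of reloading busdma.
                         			 */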
   1833   1.1   msaitoh 			if (eop && len <= IXGBE_RX_COPY_LEN) {
   1834   1.1   msaitoh 				sendmp = m_gethdr(M_NOWAIT, MT_DATA);
   1835   1.1   msaitoh 				if (sendmp != NULL) {
   1836  1.28   msaitoh 					sendmp->m_data += IXGBE_RX_COPY_ALIGN;
   1837  1.28   msaitoh 					ixgbe_bcopy(mp->m_data, sendmp->m_data,
   1838  1.28   msaitoh 					    len);
   1839   1.1   msaitoh 					sendmp->m_len = len;
   1840   1.1   msaitoh 					rxr->rx_copies.ev_count++;
   1841   1.1   msaitoh 					rbuf->flags |= IXGBE_RX_COPY;
   1842   1.1   msaitoh 				}
   1843   1.1   msaitoh 			}
   1844   1.1   msaitoh 			if (sendmp == NULL) {
   1845   1.1   msaitoh 				rbuf->buf = rbuf->fmp = NULL;
   1846   1.1   msaitoh 				sendmp = mp;
   1847   1.1   msaitoh 			}
   1848   1.1   msaitoh 
   1849   1.1   msaitoh 			/* first desc of a non-ps chain */
   1850   1.1   msaitoh 			sendmp->m_flags |= M_PKTHDR;
   1851   1.1   msaitoh 			sendmp->m_pkthdr.len = mp->m_len;
   1852   1.1   msaitoh 		}
   1853   1.1   msaitoh 		++processed;
   1854   1.1   msaitoh 
   1855   1.1   msaitoh 		/* Pass the head pointer on */
   1856   1.1   msaitoh 		if (eop == 0) {
   1857   1.1   msaitoh 			nbuf->fmp = sendmp;
   1858   1.1   msaitoh 			sendmp = NULL;
   1859   1.1   msaitoh 			mp->m_next = nbuf->buf;
   1860   1.1   msaitoh 		} else { /* Sending this frame */
   1861   1.1   msaitoh 			m_set_rcvif(sendmp, ifp);
   1862   1.1   msaitoh 			rxr->rx_packets.ev_count++;
   1863   1.1   msaitoh 			/* capture data for AIM */
   1864   1.1   msaitoh 			rxr->bytes += sendmp->m_pkthdr.len;
   1865   1.1   msaitoh 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   1866   1.1   msaitoh 			/* Process vlan info */
   1867  1.28   msaitoh 			if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
   1868   1.1   msaitoh 				vtag = le16toh(cur->wb.upper.vlan);
   1869   1.1   msaitoh 			if (vtag) {
   1870  1.29  knakahar 				vlan_set_tag(sendmp, vtag);
   1871   1.1   msaitoh 			}
   1872   1.1   msaitoh 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   1873   1.1   msaitoh 				ixgbe_rx_checksum(staterr, sendmp, ptype,
   1874   1.3   msaitoh 				   &adapter->stats.pf);
   1875   1.1   msaitoh 			}
   1876   1.8   msaitoh 
   1877   1.6   msaitoh #if 0 /* FreeBSD */
   1878  1.28   msaitoh 			/*
   1879  1.28   msaitoh 			 * In case of multiqueue, we have RXCSUM.PCSD bit set
   1880  1.28   msaitoh 			 * and never cleared. This means we have RSS hash
   1881  1.28   msaitoh 			 * available to be used.
   1882  1.28   msaitoh 			 */
   1883  1.28   msaitoh 			if (adapter->num_queues > 1) {
   1884  1.28   msaitoh 				sendmp->m_pkthdr.flowid =
   1885  1.28   msaitoh 				    le32toh(cur->wb.lower.hi_dword.rss);
   1886  1.28   msaitoh 				switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
   1887  1.28   msaitoh 				    case IXGBE_RXDADV_RSSTYPE_IPV4:
   1888  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1889  1.28   msaitoh 					    M_HASHTYPE_RSS_IPV4);
   1890  1.28   msaitoh 					break;
   1891  1.28   msaitoh 				    case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
   1892  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1893  1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV4);
   1894  1.28   msaitoh 					break;
   1895  1.28   msaitoh 				    case IXGBE_RXDADV_RSSTYPE_IPV6:
   1896  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1897  1.28   msaitoh 					    M_HASHTYPE_RSS_IPV6);
   1898  1.28   msaitoh 					break;
   1899  1.28   msaitoh 				    case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
   1900  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1901  1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV6);
   1902  1.28   msaitoh 					break;
   1903  1.28   msaitoh 				    case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
   1904  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1905  1.28   msaitoh 					    M_HASHTYPE_RSS_IPV6_EX);
   1906  1.28   msaitoh 					break;
   1907  1.28   msaitoh 				    case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
   1908  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1909  1.28   msaitoh 					    M_HASHTYPE_RSS_TCP_IPV6_EX);
   1910  1.28   msaitoh 					break;
   1911   1.6   msaitoh #if __FreeBSD_version > 1100000
   1912  1.28   msaitoh 				    case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
   1913  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1914  1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV4);
   1915  1.28   msaitoh 					break;
   1916  1.28   msaitoh 				    case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
   1917  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1918  1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV6);
   1919  1.28   msaitoh 					break;
   1920  1.28   msaitoh 				    case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
   1921  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1922  1.28   msaitoh 					    M_HASHTYPE_RSS_UDP_IPV6_EX);
   1923  1.28   msaitoh 					break;
   1924  1.28   msaitoh #endif
   1925  1.28   msaitoh 				    default:
   1926  1.28   msaitoh 					M_HASHTYPE_SET(sendmp,
   1927  1.28   msaitoh 					    M_HASHTYPE_OPAQUE_HASH);
   1928  1.28   msaitoh 				}
   1929  1.28   msaitoh 			} else {
   1930  1.28   msaitoh 				sendmp->m_pkthdr.flowid = que->msix;
   1931   1.1   msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
   1932   1.1   msaitoh 			}
   1933   1.8   msaitoh #endif
   1934   1.1   msaitoh 		}
   1935   1.1   msaitoh next_desc:
   1936   1.1   msaitoh 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1937   1.1   msaitoh 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1938   1.1   msaitoh 
   1939   1.1   msaitoh 		/* Advance our pointers to the next descriptor. */
   1940   1.1   msaitoh 		if (++i == rxr->num_desc)
   1941   1.1   msaitoh 			i = 0;
   1942   1.1   msaitoh 
   1943   1.1   msaitoh 		/* Now send to the stack or do LRO */
   1944   1.1   msaitoh 		if (sendmp != NULL) {
   1945   1.1   msaitoh 			rxr->next_to_check = i;
   1946  1.28   msaitoh 			IXGBE_RX_UNLOCK(rxr);
   1947   1.1   msaitoh 			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
   1948  1.28   msaitoh 			IXGBE_RX_LOCK(rxr);
   1949   1.1   msaitoh 			i = rxr->next_to_check;
   1950   1.1   msaitoh 		}
   1951   1.1   msaitoh 
   1952  1.28   msaitoh 		/* Every 8 descriptors we go to refresh mbufs */
   1953   1.1   msaitoh 		if (processed == 8) {
   1954   1.1   msaitoh 			ixgbe_refresh_mbufs(rxr, i);
   1955   1.1   msaitoh 			processed = 0;
   1956   1.1   msaitoh 		}
   1957   1.1   msaitoh 	}
   1958   1.1   msaitoh 
   1959   1.1   msaitoh 	/* Refresh any remaining buf structs */
   1960   1.1   msaitoh 	if (ixgbe_rx_unrefreshed(rxr))
   1961   1.1   msaitoh 		ixgbe_refresh_mbufs(rxr, i);
   1962   1.1   msaitoh 
   1963   1.1   msaitoh 	rxr->next_to_check = i;
   1964   1.1   msaitoh 
   1965  1.28   msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1966  1.28   msaitoh 
   1967   1.1   msaitoh #ifdef LRO
   1968   1.1   msaitoh 	/*
   1969   1.1   msaitoh 	 * Flush any outstanding LRO work
   1970   1.1   msaitoh 	 */
   1971  1.10   msaitoh 	tcp_lro_flush_all(lro);
   1972   1.1   msaitoh #endif /* LRO */
   1973   1.1   msaitoh 
   1974   1.1   msaitoh 	/*
   1975  1.28   msaitoh 	 * Still have cleaning to do?
   1976  1.28   msaitoh 	 */
   1977   1.1   msaitoh 	if ((staterr & IXGBE_RXD_STAT_DD) != 0)
   1978  1.28   msaitoh 		return (TRUE);
   1979  1.28   msaitoh 
   1980  1.28   msaitoh 	return (FALSE);
   1981  1.28   msaitoh } /* ixgbe_rxeof */
   1982   1.1   msaitoh 
   1983   1.1   msaitoh 
   1984  1.28   msaitoh /************************************************************************
   1985  1.28   msaitoh  * ixgbe_rx_checksum
   1986   1.1   msaitoh  *
   1987  1.28   msaitoh  *   Verify that the hardware indicated that the checksum is valid.
   1988  1.28   msaitoh  *   Inform the stack about the status of checksum so that stack
   1989  1.28   msaitoh  *   doesn't spend time verifying the checksum.
   1990  1.28   msaitoh  ************************************************************************/
   1991   1.1   msaitoh static void
   1992   1.1   msaitoh ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   1993   1.1   msaitoh     struct ixgbe_hw_stats *stats)
   1994   1.1   msaitoh {
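                         	/*
                         	 * The descriptor's status_error word carries the status flags
                         	 * (IXGBE_RXD_STAT_*) in its low bits and the error flags in its
                         	 * top byte; the casts above split it for the tests below.
                         	 */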
   1995  1.28   msaitoh 	u16  status = (u16)staterr;
   1996  1.28   msaitoh 	u8   errors = (u8)(staterr >> 24);
   1997   1.1   msaitoh #if 0
   1998  1.28   msaitoh 	bool sctp = false;
   1999   1.1   msaitoh 
   2000   1.1   msaitoh 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   2001   1.1   msaitoh 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   2002   1.8   msaitoh 		sctp = true;
   2003   1.1   msaitoh #endif
   2004   1.1   msaitoh 
   2005   1.8   msaitoh 	/* IPv4 checksum */
   2006   1.1   msaitoh 	if (status & IXGBE_RXD_STAT_IPCS) {
   2007   1.1   msaitoh 		stats->ipcs.ev_count++;
   2008   1.1   msaitoh 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   2009   1.1   msaitoh 			/* IP Checksum Good */
   2010   1.1   msaitoh 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
   2011   1.1   msaitoh 		} else {
   2012   1.1   msaitoh 			stats->ipcs_bad.ev_count++;
   2013   1.1   msaitoh 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   2014   1.1   msaitoh 		}
   2015   1.1   msaitoh 	}
   2016   1.8   msaitoh 	/* TCP/UDP/SCTP checksum */
   2017   1.1   msaitoh 	if (status & IXGBE_RXD_STAT_L4CS) {
   2018   1.1   msaitoh 		stats->l4cs.ev_count++;
   2019   1.1   msaitoh 		int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
   2020   1.1   msaitoh 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   2021   1.1   msaitoh 			mp->m_pkthdr.csum_flags |= type;
   2022   1.1   msaitoh 		} else {
   2023   1.1   msaitoh 			stats->l4cs_bad.ev_count++;
   2024   1.1   msaitoh 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   2025   1.1   msaitoh 		}
   2026   1.1   msaitoh 	}
   2027  1.28   msaitoh } /* ixgbe_rx_checksum */
   2028   1.1   msaitoh 
   2029  1.28   msaitoh /************************************************************************
   2030  1.28   msaitoh  * ixgbe_dma_malloc
   2031  1.28   msaitoh  ************************************************************************/
   2032   1.1   msaitoh int
   2033   1.1   msaitoh ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
   2034   1.1   msaitoh 		struct ixgbe_dma_alloc *dma, const int mapflags)
   2035   1.1   msaitoh {
   2036   1.1   msaitoh 	device_t dev = adapter->dev;
   2037  1.28   msaitoh 	int      r, rsegs;
   2038   1.1   msaitoh 
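                         	/*
                         	 * Allocation proceeds in five steps (tag, memory, mapping, map,
                         	 * load); on failure, the fail_* labels below unwind whichever
                         	 * steps had already completed.
                         	 */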
   2039  1.28   msaitoh 	r = ixgbe_dma_tag_create(
   2040  1.28   msaitoh 	     /*      parent */ adapter->osdep.dmat,
   2041  1.28   msaitoh 	     /*   alignment */ DBA_ALIGN,
   2042  1.28   msaitoh 	     /*      bounds */ 0,
   2043  1.28   msaitoh 	     /*     maxsize */ size,
   2044  1.28   msaitoh 	     /*   nsegments */ 1,
   2045  1.28   msaitoh 	     /*  maxsegsize */ size,
   2046  1.28   msaitoh 	     /*       flags */ BUS_DMA_ALLOCNOW,
   2047   1.1   msaitoh 			       &dma->dma_tag);
   2048   1.1   msaitoh 	if (r != 0) {
   2049   1.1   msaitoh 		aprint_error_dev(dev,
   2050   1.1   msaitoh 		    "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, r);
   2051   1.1   msaitoh 		goto fail_0;
   2052   1.1   msaitoh 	}
   2053   1.1   msaitoh 
   2054  1.28   msaitoh 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size,
   2055  1.28   msaitoh 	    dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary,
   2056  1.28   msaitoh 	    &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2057   1.1   msaitoh 	if (r != 0) {
   2058   1.1   msaitoh 		aprint_error_dev(dev,
   2059   1.1   msaitoh 		    "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
   2060   1.1   msaitoh 		goto fail_1;
   2061   1.1   msaitoh 	}
   2062   1.1   msaitoh 
   2063   1.1   msaitoh 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2064   1.1   msaitoh 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2065   1.1   msaitoh 	if (r != 0) {
   2066   1.1   msaitoh 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2067   1.1   msaitoh 		    __func__, r);
   2068   1.1   msaitoh 		goto fail_2;
   2069   1.1   msaitoh 	}
   2070   1.1   msaitoh 
   2071   1.1   msaitoh 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2072   1.1   msaitoh 	if (r != 0) {
   2073   1.1   msaitoh 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2074   1.1   msaitoh 		    __func__, r);
   2075   1.1   msaitoh 		goto fail_3;
   2076   1.1   msaitoh 	}
   2077   1.1   msaitoh 
   2078  1.28   msaitoh 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map,
   2079  1.28   msaitoh 	    dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT);
   2080   1.1   msaitoh 	if (r != 0) {
   2081   1.1   msaitoh 		aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
   2082   1.1   msaitoh 		    __func__, r);
   2083   1.1   msaitoh 		goto fail_4;
   2084   1.1   msaitoh 	}
   2085   1.1   msaitoh 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2086   1.1   msaitoh 	dma->dma_size = size;
   2087   1.1   msaitoh 	return 0;
   2088   1.1   msaitoh fail_4:
   2089   1.1   msaitoh 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2090   1.1   msaitoh fail_3:
   2091   1.1   msaitoh 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2092   1.1   msaitoh fail_2:
   2093   1.1   msaitoh 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2094   1.1   msaitoh fail_1:
   2095   1.1   msaitoh 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2096   1.1   msaitoh fail_0:
   2097   1.1   msaitoh 
   2098  1.28   msaitoh 	return (r);
   2099  1.28   msaitoh } /* ixgbe_dma_malloc */
   2100  1.28   msaitoh 
   2101  1.28   msaitoh /************************************************************************
   2102  1.28   msaitoh  * ixgbe_dma_free
   2103  1.28   msaitoh  ************************************************************************/
   2104   1.3   msaitoh void
   2105   1.1   msaitoh ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
   2106   1.1   msaitoh {
   2107   1.1   msaitoh 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2108   1.1   msaitoh 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2109   1.1   msaitoh 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2110   1.1   msaitoh 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2111   1.1   msaitoh 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2112  1.28   msaitoh } /* ixgbe_dma_free */
   2113   1.1   msaitoh 
   2114   1.1   msaitoh 
   2115  1.28   msaitoh /************************************************************************
   2116  1.28   msaitoh  * ixgbe_allocate_queues
   2117   1.1   msaitoh  *
   2118  1.28   msaitoh  *   Allocate memory for the transmit and receive rings, and then
   2119  1.28   msaitoh  *   the descriptors associated with each, called only once at attach.
   2120  1.28   msaitoh  ************************************************************************/
   2121   1.1   msaitoh int
   2122   1.1   msaitoh ixgbe_allocate_queues(struct adapter *adapter)
   2123   1.1   msaitoh {
   2124   1.1   msaitoh 	device_t	dev = adapter->dev;
   2125   1.1   msaitoh 	struct ix_queue	*que;
   2126   1.1   msaitoh 	struct tx_ring	*txr;
   2127   1.1   msaitoh 	struct rx_ring	*rxr;
   2128  1.28   msaitoh 	int             rsize, tsize, error = IXGBE_SUCCESS;
   2129  1.28   msaitoh 	int             txconf = 0, rxconf = 0;
   2130   1.1   msaitoh 
   2131  1.28   msaitoh 	/* First, allocate the top level queue structs */
   2132  1.28   msaitoh 	adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
    2133  1.28   msaitoh 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
    2134  1.28   msaitoh 	if (adapter->queues == NULL) {
    2135  1.28   msaitoh 		aprint_error_dev(dev, "Unable to allocate queue memory\n");
    2136   1.1   msaitoh 		error = ENOMEM;
    2137   1.1   msaitoh 		goto fail;
    2138   1.1   msaitoh 	}
   2139   1.1   msaitoh 
   2140  1.28   msaitoh 	/* Second, allocate the TX ring struct memory */
   2141  1.28   msaitoh 	adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
   2142  1.28   msaitoh 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
   2143  1.28   msaitoh 	if (adapter->tx_rings == NULL) {
   2144   1.1   msaitoh 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2145   1.1   msaitoh 		error = ENOMEM;
   2146   1.1   msaitoh 		goto tx_fail;
   2147   1.1   msaitoh 	}
   2148   1.1   msaitoh 
   2149  1.28   msaitoh 	/* Third, allocate the RX ring */
   2150  1.28   msaitoh 	adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
   2151  1.28   msaitoh 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
   2152  1.28   msaitoh 	if (adapter->rx_rings == NULL) {
   2153   1.1   msaitoh 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2154   1.1   msaitoh 		error = ENOMEM;
   2155   1.1   msaitoh 		goto rx_fail;
   2156   1.1   msaitoh 	}
   2157   1.1   msaitoh 
    2158   1.1   msaitoh 	/* Allocation size for the descriptor ring itself */
   2159  1.28   msaitoh 	tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
   2160  1.28   msaitoh 	    DBA_ALIGN);
   2161   1.1   msaitoh 
   2162   1.1   msaitoh 	/*
    2163   1.1   msaitoh 	 * Now set up the TX queues.  txconf counts how many rings have
    2164   1.1   msaitoh 	 * been set up, so that if anything fails midcourse we can undo
    2165   1.1   msaitoh 	 * the memory allocated so far gracefully.
   2166  1.28   msaitoh 	 */
   2167   1.1   msaitoh 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2168   1.1   msaitoh 		/* Set up some basics */
   2169   1.1   msaitoh 		txr = &adapter->tx_rings[i];
   2170   1.1   msaitoh 		txr->adapter = adapter;
   2171  1.28   msaitoh 		txr->txr_interq = NULL;
   2172  1.28   msaitoh 		/* In case SR-IOV is enabled, align the index properly */
   2173   1.5   msaitoh #ifdef PCI_IOV
   2174  1.28   msaitoh 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
   2175  1.28   msaitoh 		    i);
   2176   1.5   msaitoh #else
   2177   1.1   msaitoh 		txr->me = i;
   2178   1.5   msaitoh #endif
   2179   1.1   msaitoh 		txr->num_desc = adapter->num_tx_desc;
   2180   1.1   msaitoh 
   2181   1.1   msaitoh 		/* Initialize the TX side lock */
   2182   1.1   msaitoh 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2183   1.1   msaitoh 		    device_xname(dev), txr->me);
   2184   1.1   msaitoh 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2185   1.1   msaitoh 
   2186  1.28   msaitoh 		if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
   2187  1.28   msaitoh 		    BUS_DMA_NOWAIT)) {
   2188   1.1   msaitoh 			aprint_error_dev(dev,
   2189   1.1   msaitoh 			    "Unable to allocate TX Descriptor memory\n");
   2190   1.1   msaitoh 			error = ENOMEM;
   2191   1.1   msaitoh 			goto err_tx_desc;
   2192   1.1   msaitoh 		}
   2193   1.1   msaitoh 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2194   1.1   msaitoh 		bzero((void *)txr->tx_base, tsize);
   2195   1.1   msaitoh 
   2196  1.28   msaitoh 		/* Now allocate transmit buffers for the ring */
   2197  1.28   msaitoh 		if (ixgbe_allocate_transmit_buffers(txr)) {
   2198   1.1   msaitoh 			aprint_error_dev(dev,
   2199   1.1   msaitoh 			    "Critical Failure setting up transmit buffers\n");
   2200   1.1   msaitoh 			error = ENOMEM;
   2201   1.1   msaitoh 			goto err_tx_desc;
    2202   1.1   msaitoh 		}
   2203  1.28   msaitoh 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   2204  1.28   msaitoh 			/* Allocate a buf ring */
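                         			/*
                         			 * On NetBSD this is a pcq(9) producer/consumer
                         			 * queue rather than a FreeBSD buf_ring; it holds
                         			 * mbufs handed to the multiqueue (non-LEGACY_TX)
                         			 * transmit path for this ring.
                         			 */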
   2205  1.28   msaitoh 			txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP);
   2206  1.28   msaitoh 			if (txr->txr_interq == NULL) {
   2207  1.28   msaitoh 				aprint_error_dev(dev,
   2208  1.28   msaitoh 				    "Critical Failure setting up buf ring\n");
   2209  1.28   msaitoh 				error = ENOMEM;
   2210  1.28   msaitoh 				goto err_tx_desc;
   2211  1.28   msaitoh 			}
   2212  1.28   msaitoh 		}
   2213   1.1   msaitoh 	}
   2214   1.1   msaitoh 
   2215   1.1   msaitoh 	/*
   2216   1.1   msaitoh 	 * Next the RX queues...
   2217   1.1   msaitoh 	 */
   2218  1.28   msaitoh 	rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
   2219  1.28   msaitoh 	    DBA_ALIGN);
   2220   1.1   msaitoh 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2221   1.1   msaitoh 		rxr = &adapter->rx_rings[i];
   2222   1.1   msaitoh 		/* Set up some basics */
   2223   1.1   msaitoh 		rxr->adapter = adapter;
   2224   1.5   msaitoh #ifdef PCI_IOV
   2225  1.28   msaitoh 		/* In case SR-IOV is enabled, align the index properly */
   2226  1.28   msaitoh 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
   2227  1.28   msaitoh 		    i);
   2228   1.5   msaitoh #else
   2229   1.1   msaitoh 		rxr->me = i;
   2230   1.5   msaitoh #endif
   2231   1.1   msaitoh 		rxr->num_desc = adapter->num_rx_desc;
   2232   1.1   msaitoh 
   2233   1.1   msaitoh 		/* Initialize the RX side lock */
   2234   1.1   msaitoh 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2235   1.1   msaitoh 		    device_xname(dev), rxr->me);
   2236   1.1   msaitoh 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2237   1.1   msaitoh 
   2238  1.28   msaitoh 		if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
   2239  1.28   msaitoh 		    BUS_DMA_NOWAIT)) {
   2240   1.1   msaitoh 			aprint_error_dev(dev,
    2241   1.1   msaitoh 			    "Unable to allocate RX Descriptor memory\n");
   2242   1.1   msaitoh 			error = ENOMEM;
   2243   1.1   msaitoh 			goto err_rx_desc;
   2244   1.1   msaitoh 		}
   2245   1.1   msaitoh 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2246   1.1   msaitoh 		bzero((void *)rxr->rx_base, rsize);
   2247   1.1   msaitoh 
   2248  1.28   msaitoh 		/* Allocate receive buffers for the ring */
   2249   1.1   msaitoh 		if (ixgbe_allocate_receive_buffers(rxr)) {
   2250   1.1   msaitoh 			aprint_error_dev(dev,
   2251   1.1   msaitoh 			    "Critical Failure setting up receive buffers\n");
   2252   1.1   msaitoh 			error = ENOMEM;
   2253   1.1   msaitoh 			goto err_rx_desc;
   2254   1.1   msaitoh 		}
   2255   1.1   msaitoh 	}
   2256   1.1   msaitoh 
   2257   1.1   msaitoh 	/*
    2258  1.28   msaitoh 	 * Finally, set up the queue holding structs linking each TX/RX ring pair
   2259  1.28   msaitoh 	 */
   2260   1.1   msaitoh 	for (int i = 0; i < adapter->num_queues; i++) {
   2261   1.1   msaitoh 		que = &adapter->queues[i];
   2262   1.1   msaitoh 		que->adapter = adapter;
   2263   1.3   msaitoh 		que->me = i;
   2264   1.1   msaitoh 		que->txr = &adapter->tx_rings[i];
   2265   1.1   msaitoh 		que->rxr = &adapter->rx_rings[i];
   2266   1.1   msaitoh 	}
   2267   1.1   msaitoh 
   2268   1.1   msaitoh 	return (0);
   2269   1.1   msaitoh 
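                         	/*
                         	 * Error unwind: rxconf and txconf count how many rings had
                         	 * their descriptor DMA areas allocated above, so only those
                         	 * are freed here before the ring and queue arrays themselves
                         	 * are released.
                         	 */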
   2270   1.1   msaitoh err_rx_desc:
   2271   1.1   msaitoh 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2272   1.1   msaitoh 		ixgbe_dma_free(adapter, &rxr->rxdma);
   2273   1.1   msaitoh err_tx_desc:
   2274   1.1   msaitoh 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2275   1.1   msaitoh 		ixgbe_dma_free(adapter, &txr->txdma);
   2276   1.1   msaitoh 	free(adapter->rx_rings, M_DEVBUF);
   2277   1.1   msaitoh rx_fail:
   2278   1.1   msaitoh 	free(adapter->tx_rings, M_DEVBUF);
   2279   1.1   msaitoh tx_fail:
   2280   1.1   msaitoh 	free(adapter->queues, M_DEVBUF);
   2281   1.1   msaitoh fail:
   2282   1.1   msaitoh 	return (error);
   2283  1.28   msaitoh } /* ixgbe_allocate_queues */
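                         /*
                          * A sketch of the expected attach-time call (the actual caller
                          * lives in the driver's attach path, outside this file):
                          *
                          *	if (ixgbe_allocate_queues(adapter)) {
                          *		... fail the attach; the function has already
                          *		    unwound its own allocations ...
                          *	}
                          */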
   2284