      1  1.1  msaitoh /******************************************************************************
      2  1.1  msaitoh 
      3  1.4  msaitoh   Copyright (c) 2001-2015, Intel Corporation
      4  1.1  msaitoh   All rights reserved.
      5  1.1  msaitoh 
      6  1.1  msaitoh   Redistribution and use in source and binary forms, with or without
      7  1.1  msaitoh   modification, are permitted provided that the following conditions are met:
      8  1.1  msaitoh 
      9  1.1  msaitoh    1. Redistributions of source code must retain the above copyright notice,
     10  1.1  msaitoh       this list of conditions and the following disclaimer.
     11  1.1  msaitoh 
     12  1.1  msaitoh    2. Redistributions in binary form must reproduce the above copyright
     13  1.1  msaitoh       notice, this list of conditions and the following disclaimer in the
     14  1.1  msaitoh       documentation and/or other materials provided with the distribution.
     15  1.1  msaitoh 
     16  1.1  msaitoh    3. Neither the name of the Intel Corporation nor the names of its
     17  1.1  msaitoh       contributors may be used to endorse or promote products derived from
     18  1.1  msaitoh       this software without specific prior written permission.
     19  1.1  msaitoh 
     20  1.1  msaitoh   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21  1.1  msaitoh   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22  1.1  msaitoh   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23  1.1  msaitoh   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24  1.1  msaitoh   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  1.1  msaitoh   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  1.1  msaitoh   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  1.1  msaitoh   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  1.1  msaitoh   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  1.1  msaitoh   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  1.1  msaitoh   POSSIBILITY OF SUCH DAMAGE.
     31  1.1  msaitoh 
     32  1.1  msaitoh ******************************************************************************/
     33  1.1  msaitoh /*
     34  1.1  msaitoh  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     35  1.1  msaitoh  * All rights reserved.
     36  1.1  msaitoh  *
     37  1.1  msaitoh  * This code is derived from software contributed to The NetBSD Foundation
     38  1.1  msaitoh  * by Coyote Point Systems, Inc.
     39  1.1  msaitoh  *
     40  1.1  msaitoh  * Redistribution and use in source and binary forms, with or without
     41  1.1  msaitoh  * modification, are permitted provided that the following conditions
     42  1.1  msaitoh  * are met:
     43  1.1  msaitoh  * 1. Redistributions of source code must retain the above copyright
     44  1.1  msaitoh  *    notice, this list of conditions and the following disclaimer.
     45  1.1  msaitoh  * 2. Redistributions in binary form must reproduce the above copyright
     46  1.1  msaitoh  *    notice, this list of conditions and the following disclaimer in the
     47  1.1  msaitoh  *    documentation and/or other materials provided with the distribution.
     48  1.1  msaitoh  *
     49  1.1  msaitoh  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     50  1.1  msaitoh  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     51  1.1  msaitoh  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     52  1.1  msaitoh  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     53  1.1  msaitoh  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     54  1.1  msaitoh  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     55  1.1  msaitoh  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     56  1.1  msaitoh  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     57  1.1  msaitoh  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     58  1.1  msaitoh  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     59  1.1  msaitoh  * POSSIBILITY OF SUCH DAMAGE.
     60  1.1  msaitoh  */
     61  1.6  msaitoh /*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 285528 2015-07-14 09:13:18Z hiren $*/
     62  1.5  msaitoh /*$NetBSD: ix_txrx.c,v 1.6 2016/12/02 10:24:31 msaitoh Exp $*/
     63  1.1  msaitoh 
     64  1.1  msaitoh #include "ixgbe.h"
     65  1.1  msaitoh 
     66  1.4  msaitoh #ifdef DEV_NETMAP
     67  1.4  msaitoh #include <net/netmap.h>
     68  1.4  msaitoh #include <sys/selinfo.h>
     69  1.4  msaitoh #include <dev/netmap/netmap_kern.h>
     70  1.4  msaitoh 
     71  1.4  msaitoh extern int ix_crcstrip;
     72  1.4  msaitoh #endif
     73  1.4  msaitoh 
      74  1.1  msaitoh /*
      75  1.3  msaitoh ** HW RSC control:
      76  1.1  msaitoh **  This feature only works with
      77  1.1  msaitoh **  IPv4, and only on 82599 and later.
      78  1.1  msaitoh **  It also causes IP forwarding to
      79  1.1  msaitoh **  fail, and unlike LRO that cannot
      80  1.1  msaitoh **  be controlled by the stack. For
      81  1.1  msaitoh **  these reasons it is left off by
      82  1.1  msaitoh **  default with no tunable interface;
      83  1.1  msaitoh **  enabling it requires changing this
      84  1.1  msaitoh **  setting and recompiling.
      85  1.1  msaitoh */
     86  1.1  msaitoh static bool ixgbe_rsc_enable = FALSE;
     87  1.1  msaitoh 
     88  1.3  msaitoh #ifdef IXGBE_FDIR
      89  1.3  msaitoh /*
      90  1.3  msaitoh ** For Flow Director: this is the number
      91  1.3  msaitoh ** of TX packets between samples taken
      92  1.3  msaitoh ** for the filter pool; at the default
      93  1.3  msaitoh ** of 20, every 20th packet is probed.
      94  1.3  msaitoh **
      95  1.3  msaitoh ** This feature can be disabled by
      96  1.3  msaitoh ** setting this to 0.
      97  1.3  msaitoh */
     98  1.3  msaitoh static int atr_sample_rate = 20;
     99  1.3  msaitoh #endif
    100  1.3  msaitoh 
    101  1.3  msaitoh /* Shared PCI config read/write */
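                       /*
                        * NetBSD's pci_conf_read()/pci_conf_write() operate on whole 32-bit
                        * dwords, so these 16-bit accessors read (or read-modify-write) the
                        * containing dword and extract or merge the correct half-word.  For
                        * example, a 16-bit read at a dword offset of 2 is roughly:
                        *
                        *	__SHIFTOUT(pci_conf_read(pc, tag, reg - 2), __BITS(31, 16));
                        *
                        * Only naturally aligned 16-bit offsets (reg % 4 == 0 or 2) are
                        * handled; anything else panics below as a driver bug.
                        */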
    102  1.3  msaitoh u16
    103  1.3  msaitoh ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
    104  1.3  msaitoh {
    105  1.3  msaitoh 	switch (reg % 4) {
    106  1.3  msaitoh 	case 0:
    107  1.3  msaitoh 		return pci_conf_read(hw->back->pc, hw->back->tag, reg) &
    108  1.3  msaitoh 		    __BITS(15, 0);
    109  1.3  msaitoh 	case 2:
    110  1.3  msaitoh 		return __SHIFTOUT(pci_conf_read(hw->back->pc, hw->back->tag,
    111  1.3  msaitoh 		    reg - 2), __BITS(31, 16));
    112  1.3  msaitoh 	default:
     113  1.3  msaitoh 		panic("%s: invalid register (%" PRIx32 ")", __func__, reg);
    114  1.3  msaitoh 		break;
    115  1.3  msaitoh 	}
    116  1.3  msaitoh }
    117  1.3  msaitoh 
    118  1.3  msaitoh void
    119  1.3  msaitoh ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
    120  1.3  msaitoh {
    121  1.3  msaitoh 	pcireg_t old;
    122  1.3  msaitoh 
    123  1.3  msaitoh 	switch (reg % 4) {
    124  1.3  msaitoh 	case 0:
    125  1.3  msaitoh 		old = pci_conf_read(hw->back->pc, hw->back->tag, reg) &
    126  1.3  msaitoh 		    __BITS(31, 16);
    127  1.3  msaitoh 		pci_conf_write(hw->back->pc, hw->back->tag, reg, value | old);
    128  1.3  msaitoh 		break;
    129  1.3  msaitoh 	case 2:
    130  1.3  msaitoh 		old = pci_conf_read(hw->back->pc, hw->back->tag, reg - 2) &
    131  1.3  msaitoh 		    __BITS(15, 0);
    132  1.3  msaitoh 		pci_conf_write(hw->back->pc, hw->back->tag, reg - 2,
    133  1.3  msaitoh 		    __SHIFTIN(value, __BITS(31, 16)) | old);
    134  1.3  msaitoh 		break;
    135  1.3  msaitoh 	default:
     136  1.3  msaitoh 		panic("%s: invalid register (%" PRIx32 ")", __func__, reg);
    137  1.3  msaitoh 		break;
    138  1.3  msaitoh 	}
    139  1.3  msaitoh 
    140  1.3  msaitoh 	return;
    141  1.3  msaitoh }
    142  1.3  msaitoh 
    143  1.3  msaitoh /*********************************************************************
    144  1.3  msaitoh  *  Local Function prototypes
    145  1.3  msaitoh  *********************************************************************/
    146  1.1  msaitoh static void	ixgbe_setup_transmit_ring(struct tx_ring *);
    147  1.1  msaitoh static void     ixgbe_free_transmit_buffers(struct tx_ring *);
    148  1.1  msaitoh static int	ixgbe_setup_receive_ring(struct rx_ring *);
    149  1.1  msaitoh static void     ixgbe_free_receive_buffers(struct rx_ring *);
    150  1.1  msaitoh 
    151  1.1  msaitoh static void	ixgbe_rx_checksum(u32, struct mbuf *, u32,
    152  1.1  msaitoh 		    struct ixgbe_hw_stats *);
    153  1.1  msaitoh static void	ixgbe_refresh_mbufs(struct rx_ring *, int);
    154  1.1  msaitoh static int      ixgbe_xmit(struct tx_ring *, struct mbuf *);
    155  1.1  msaitoh static int	ixgbe_tx_ctx_setup(struct tx_ring *,
    156  1.1  msaitoh 		    struct mbuf *, u32 *, u32 *);
    157  1.1  msaitoh static int	ixgbe_tso_setup(struct tx_ring *,
    158  1.1  msaitoh 		    struct mbuf *, u32 *, u32 *);
    159  1.1  msaitoh #ifdef IXGBE_FDIR
    160  1.1  msaitoh static void	ixgbe_atr(struct tx_ring *, struct mbuf *);
    161  1.1  msaitoh #endif
    162  1.1  msaitoh static __inline void ixgbe_rx_discard(struct rx_ring *, int);
    163  1.1  msaitoh static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
    164  1.1  msaitoh 		    struct mbuf *, u32);
    165  1.1  msaitoh 
    166  1.1  msaitoh static void	ixgbe_setup_hw_rsc(struct rx_ring *);
    167  1.1  msaitoh 
    168  1.1  msaitoh #ifdef IXGBE_LEGACY_TX
    169  1.1  msaitoh /*********************************************************************
    170  1.1  msaitoh  *  Transmit entry point
    171  1.1  msaitoh  *
    172  1.1  msaitoh  *  ixgbe_start is called by the stack to initiate a transmit.
    173  1.1  msaitoh  *  The driver will remain in this routine as long as there are
    174  1.1  msaitoh  *  packets to transmit and transmit resources are available.
     175  1.1  msaitoh  *  In case resources are not available, the stack is notified
     176  1.1  msaitoh  *  and the packet is requeued.
    177  1.1  msaitoh  **********************************************************************/
    178  1.1  msaitoh 
    179  1.1  msaitoh void
    180  1.1  msaitoh ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
    181  1.1  msaitoh {
    182  1.1  msaitoh 	int rc;
    183  1.1  msaitoh 	struct mbuf    *m_head;
    184  1.1  msaitoh 	struct adapter *adapter = txr->adapter;
    185  1.1  msaitoh 
    186  1.1  msaitoh 	IXGBE_TX_LOCK_ASSERT(txr);
    187  1.1  msaitoh 
    188  1.1  msaitoh 	if ((ifp->if_flags & IFF_RUNNING) == 0)
    189  1.1  msaitoh 		return;
    190  1.1  msaitoh 	if (!adapter->link_active)
    191  1.1  msaitoh 		return;
    192  1.1  msaitoh 
    193  1.1  msaitoh 	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
    194  1.1  msaitoh 		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
    195  1.1  msaitoh 			break;
    196  1.1  msaitoh 
    197  1.1  msaitoh 		IFQ_POLL(&ifp->if_snd, m_head);
    198  1.1  msaitoh 		if (m_head == NULL)
    199  1.1  msaitoh 			break;
    200  1.1  msaitoh 
    201  1.1  msaitoh 		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
    202  1.1  msaitoh 			break;
    203  1.1  msaitoh 		}
    204  1.1  msaitoh 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
    205  1.1  msaitoh 		if (rc == EFBIG) {
    206  1.1  msaitoh 			struct mbuf *mtmp;
    207  1.1  msaitoh 
    208  1.1  msaitoh 			if ((mtmp = m_defrag(m_head, M_NOWAIT)) != NULL) {
    209  1.1  msaitoh 				m_head = mtmp;
    210  1.1  msaitoh 				rc = ixgbe_xmit(txr, m_head);
    211  1.1  msaitoh 				if (rc != 0)
    212  1.1  msaitoh 					adapter->efbig2_tx_dma_setup.ev_count++;
    213  1.1  msaitoh 			} else
    214  1.1  msaitoh 				adapter->m_defrag_failed.ev_count++;
    215  1.1  msaitoh 		}
    216  1.1  msaitoh 		if (rc != 0) {
    217  1.1  msaitoh 			m_freem(m_head);
    218  1.1  msaitoh 			continue;
    219  1.1  msaitoh 		}
    220  1.1  msaitoh 
    221  1.1  msaitoh 		/* Send a copy of the frame to the BPF listener */
    222  1.1  msaitoh 		bpf_mtap(ifp, m_head);
    223  1.1  msaitoh 	}
    224  1.1  msaitoh 	return;
    225  1.1  msaitoh }
    226  1.1  msaitoh 
    227  1.1  msaitoh /*
     228  1.1  msaitoh  * Legacy TX start - called by the stack; this
     229  1.1  msaitoh  * always uses the first tx ring and should
     230  1.1  msaitoh  * not be used with multiqueue tx enabled.
    231  1.1  msaitoh  */
    232  1.1  msaitoh void
    233  1.1  msaitoh ixgbe_start(struct ifnet *ifp)
    234  1.1  msaitoh {
    235  1.1  msaitoh 	struct adapter *adapter = ifp->if_softc;
    236  1.1  msaitoh 	struct tx_ring	*txr = adapter->tx_rings;
    237  1.1  msaitoh 
    238  1.1  msaitoh 	if (ifp->if_flags & IFF_RUNNING) {
    239  1.1  msaitoh 		IXGBE_TX_LOCK(txr);
    240  1.1  msaitoh 		ixgbe_start_locked(txr, ifp);
    241  1.1  msaitoh 		IXGBE_TX_UNLOCK(txr);
    242  1.1  msaitoh 	}
    243  1.1  msaitoh 	return;
    244  1.1  msaitoh }
    245  1.1  msaitoh 
    246  1.1  msaitoh #else /* ! IXGBE_LEGACY_TX */
    247  1.1  msaitoh 
    248  1.1  msaitoh /*
    249  1.1  msaitoh ** Multiqueue Transmit driver
    250  1.1  msaitoh **
    251  1.1  msaitoh */
    252  1.1  msaitoh int
    253  1.1  msaitoh ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
    254  1.1  msaitoh {
    255  1.1  msaitoh 	struct adapter	*adapter = ifp->if_softc;
    256  1.1  msaitoh 	struct ix_queue	*que;
    257  1.1  msaitoh 	struct tx_ring	*txr;
    258  1.1  msaitoh 	int 		i, err = 0;
    259  1.1  msaitoh #ifdef	RSS
    260  1.1  msaitoh 	uint32_t bucket_id;
    261  1.1  msaitoh #endif
    262  1.1  msaitoh 
    263  1.1  msaitoh 	/*
    264  1.1  msaitoh 	 * When doing RSS, map it to the same outbound queue
    265  1.1  msaitoh 	 * as the incoming flow would be mapped to.
    266  1.1  msaitoh 	 *
     267  1.1  msaitoh 	 * If everything is set up correctly, it should be the
     268  1.1  msaitoh 	 * same bucket as the one the current CPU is in.
    269  1.1  msaitoh 	 */
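                       	/*
                       	 * For example, with eight queues a packet carrying flowid 43
                       	 * is enqueued on TX ring 43 % 8 = 3, which (assuming the
                       	 * identity mapping used here) is the same queue its inbound
                       	 * flow is hashed to.
                       	 */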
    270  1.4  msaitoh #if __FreeBSD_version < 1100054
    271  1.4  msaitoh 	if (m->m_flags & M_FLOWID) {
    272  1.4  msaitoh #else
    273  1.1  msaitoh 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
    274  1.4  msaitoh #endif
    275  1.1  msaitoh #ifdef	RSS
    276  1.1  msaitoh 		if (rss_hash2bucket(m->m_pkthdr.flowid,
    277  1.3  msaitoh 		    M_HASHTYPE_GET(m), &bucket_id) == 0)
    278  1.3  msaitoh 			/* TODO: spit out something if bucket_id > num_queues? */
    279  1.1  msaitoh 			i = bucket_id % adapter->num_queues;
    280  1.3  msaitoh 		else
    281  1.1  msaitoh #endif
    282  1.1  msaitoh 			i = m->m_pkthdr.flowid % adapter->num_queues;
    283  1.3  msaitoh 	} else
    284  1.1  msaitoh 		i = curcpu % adapter->num_queues;
    285  1.3  msaitoh 
    286  1.3  msaitoh 	/* Check for a hung queue and pick alternative */
    287  1.3  msaitoh 	if (((1 << i) & adapter->active_queues) == 0)
    288  1.3  msaitoh 		i = ffsl(adapter->active_queues);
    289  1.1  msaitoh 
    290  1.1  msaitoh 	txr = &adapter->tx_rings[i];
    291  1.1  msaitoh 	que = &adapter->queues[i];
    292  1.1  msaitoh 
    293  1.1  msaitoh 	err = drbr_enqueue(ifp, txr->br, m);
    294  1.1  msaitoh 	if (err)
    295  1.1  msaitoh 		return (err);
    296  1.1  msaitoh 	if (IXGBE_TX_TRYLOCK(txr)) {
    297  1.1  msaitoh 		ixgbe_mq_start_locked(ifp, txr);
    298  1.1  msaitoh 		IXGBE_TX_UNLOCK(txr);
    299  1.1  msaitoh 	} else
    300  1.1  msaitoh 		softint_schedule(txr->txq_si);
    301  1.1  msaitoh 
    302  1.1  msaitoh 	return (0);
    303  1.1  msaitoh }
    304  1.1  msaitoh 
    305  1.1  msaitoh int
    306  1.1  msaitoh ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
    307  1.1  msaitoh {
    308  1.1  msaitoh 	struct adapter  *adapter = txr->adapter;
    309  1.1  msaitoh 	struct mbuf     *next;
    310  1.1  msaitoh 	int             enqueued = 0, err = 0;
    311  1.1  msaitoh 
    312  1.1  msaitoh 	if (((ifp->if_flags & IFF_RUNNING) == 0) ||
    313  1.1  msaitoh 	    adapter->link_active == 0)
    314  1.1  msaitoh 		return (ENETDOWN);
    315  1.1  msaitoh 
    316  1.1  msaitoh 	/* Process the queue */
    317  1.1  msaitoh #if __FreeBSD_version < 901504
    318  1.1  msaitoh 	next = drbr_dequeue(ifp, txr->br);
    319  1.1  msaitoh 	while (next != NULL) {
    320  1.1  msaitoh 		if ((err = ixgbe_xmit(txr, &next)) != 0) {
    321  1.1  msaitoh 			if (next != NULL)
    322  1.1  msaitoh 				err = drbr_enqueue(ifp, txr->br, next);
    323  1.1  msaitoh #else
    324  1.1  msaitoh 	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
    325  1.1  msaitoh 		if ((err = ixgbe_xmit(txr, &next)) != 0) {
    326  1.1  msaitoh 			if (next == NULL) {
    327  1.1  msaitoh 				drbr_advance(ifp, txr->br);
    328  1.1  msaitoh 			} else {
    329  1.1  msaitoh 				drbr_putback(ifp, txr->br, next);
    330  1.1  msaitoh 			}
    331  1.1  msaitoh #endif
    332  1.1  msaitoh 			break;
    333  1.1  msaitoh 		}
    334  1.1  msaitoh #if __FreeBSD_version >= 901504
    335  1.1  msaitoh 		drbr_advance(ifp, txr->br);
    336  1.1  msaitoh #endif
    337  1.1  msaitoh 		enqueued++;
    338  1.3  msaitoh #if 0 // this is VF-only
    339  1.3  msaitoh #if __FreeBSD_version >= 1100036
    340  1.4  msaitoh 		/*
    341  1.4  msaitoh 		 * Since we're looking at the tx ring, we can check
     342  1.4  msaitoh 		 * to see if we're a VF by examining our tail register
    343  1.4  msaitoh 		 * address.
    344  1.4  msaitoh 		 */
    345  1.4  msaitoh 		if (txr->tail < IXGBE_TDT(0) && next->m_flags & M_MCAST)
    346  1.3  msaitoh 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
    347  1.3  msaitoh #endif
    348  1.3  msaitoh #endif
    349  1.1  msaitoh 		/* Send a copy of the frame to the BPF listener */
    350  1.1  msaitoh 		bpf_mtap(ifp, next);
    351  1.1  msaitoh 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    352  1.1  msaitoh 			break;
    353  1.1  msaitoh #if __FreeBSD_version < 901504
    354  1.1  msaitoh 		next = drbr_dequeue(ifp, txr->br);
    355  1.1  msaitoh #endif
    356  1.1  msaitoh 	}
    357  1.1  msaitoh 
    358  1.1  msaitoh 	if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD)
    359  1.1  msaitoh 		ixgbe_txeof(txr);
    360  1.1  msaitoh 
    361  1.1  msaitoh 	return (err);
    362  1.1  msaitoh }
    363  1.1  msaitoh 
    364  1.1  msaitoh /*
    365  1.1  msaitoh  * Called from a taskqueue to drain queued transmit packets.
    366  1.1  msaitoh  */
    367  1.1  msaitoh void
    368  1.1  msaitoh ixgbe_deferred_mq_start(void *arg, int pending)
    369  1.1  msaitoh {
    370  1.1  msaitoh 	struct tx_ring *txr = arg;
    371  1.1  msaitoh 	struct adapter *adapter = txr->adapter;
    372  1.1  msaitoh 	struct ifnet *ifp = adapter->ifp;
    373  1.1  msaitoh 
    374  1.1  msaitoh 	IXGBE_TX_LOCK(txr);
    375  1.1  msaitoh 	if (!drbr_empty(ifp, txr->br))
    376  1.1  msaitoh 		ixgbe_mq_start_locked(ifp, txr);
    377  1.1  msaitoh 	IXGBE_TX_UNLOCK(txr);
    378  1.1  msaitoh }
    379  1.1  msaitoh 
    380  1.1  msaitoh /*
    381  1.4  msaitoh  * Flush all ring buffers
    382  1.4  msaitoh  */
    383  1.1  msaitoh void
    384  1.1  msaitoh ixgbe_qflush(struct ifnet *ifp)
    385  1.1  msaitoh {
    386  1.1  msaitoh 	struct adapter	*adapter = ifp->if_softc;
    387  1.1  msaitoh 	struct tx_ring	*txr = adapter->tx_rings;
    388  1.1  msaitoh 	struct mbuf	*m;
    389  1.1  msaitoh 
    390  1.1  msaitoh 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    391  1.1  msaitoh 		IXGBE_TX_LOCK(txr);
    392  1.1  msaitoh 		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
    393  1.1  msaitoh 			m_freem(m);
    394  1.1  msaitoh 		IXGBE_TX_UNLOCK(txr);
    395  1.1  msaitoh 	}
    396  1.1  msaitoh 	if_qflush(ifp);
    397  1.1  msaitoh }
    398  1.1  msaitoh #endif /* IXGBE_LEGACY_TX */
    399  1.1  msaitoh 
    400  1.3  msaitoh 
    401  1.1  msaitoh /*********************************************************************
    402  1.1  msaitoh  *
    403  1.1  msaitoh  *  This routine maps the mbufs to tx descriptors, allowing the
    404  1.1  msaitoh  *  TX engine to transmit the packets.
    405  1.1  msaitoh  *  	- return 0 on success, positive on failure
    406  1.1  msaitoh  *
    407  1.1  msaitoh  **********************************************************************/
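                       /*
                        * Descriptor usage in this routine: ixgbe_tx_ctx_setup() may consume
                        * one context descriptor for VLAN/checksum/TSO state, then one
                        * advanced data descriptor is written per DMA segment of the mbuf;
                        * the last data descriptor gets EOP|RS set so its DD write-back lets
                        * ixgbe_txeof() recognize the completed packet.
                        */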
    408  1.1  msaitoh 
    409  1.1  msaitoh static int
    410  1.1  msaitoh ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
    411  1.1  msaitoh {
    412  1.1  msaitoh 	struct m_tag *mtag;
    413  1.1  msaitoh 	struct adapter  *adapter = txr->adapter;
    414  1.1  msaitoh 	struct ethercom *ec = &adapter->osdep.ec;
    415  1.1  msaitoh 	u32		olinfo_status = 0, cmd_type_len;
    416  1.1  msaitoh 	int             i, j, error;
    417  1.1  msaitoh 	int		first;
    418  1.1  msaitoh 	bus_dmamap_t	map;
    419  1.1  msaitoh 	struct ixgbe_tx_buf *txbuf;
    420  1.1  msaitoh 	union ixgbe_adv_tx_desc *txd = NULL;
    421  1.1  msaitoh 
    422  1.1  msaitoh 	/* Basic descriptor defines */
    423  1.1  msaitoh         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
    424  1.1  msaitoh 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
    425  1.1  msaitoh 
    426  1.1  msaitoh 	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
    427  1.1  msaitoh         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
    428  1.1  msaitoh 
    429  1.1  msaitoh         /*
    430  1.1  msaitoh          * Important to capture the first descriptor
    431  1.1  msaitoh          * used because it will contain the index of
    432  1.1  msaitoh          * the one we tell the hardware to report back
    433  1.1  msaitoh          */
    434  1.1  msaitoh         first = txr->next_avail_desc;
    435  1.1  msaitoh 	txbuf = &txr->tx_buffers[first];
    436  1.1  msaitoh 	map = txbuf->map;
    437  1.1  msaitoh 
    438  1.1  msaitoh 	/*
    439  1.1  msaitoh 	 * Map the packet for DMA.
    440  1.1  msaitoh 	 */
    441  1.1  msaitoh 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
    442  1.1  msaitoh 	    m_head, BUS_DMA_NOWAIT);
    443  1.1  msaitoh 
    444  1.1  msaitoh 	if (__predict_false(error)) {
    445  1.1  msaitoh 
    446  1.1  msaitoh 		switch (error) {
    447  1.1  msaitoh 		case EAGAIN:
    448  1.1  msaitoh 			adapter->eagain_tx_dma_setup.ev_count++;
    449  1.1  msaitoh 			return EAGAIN;
    450  1.1  msaitoh 		case ENOMEM:
    451  1.1  msaitoh 			adapter->enomem_tx_dma_setup.ev_count++;
    452  1.1  msaitoh 			return EAGAIN;
    453  1.1  msaitoh 		case EFBIG:
    454  1.1  msaitoh 			/*
    455  1.1  msaitoh 			 * XXX Try it again?
    456  1.1  msaitoh 			 * do m_defrag() and retry bus_dmamap_load_mbuf().
    457  1.1  msaitoh 			 */
    458  1.1  msaitoh 			adapter->efbig_tx_dma_setup.ev_count++;
    459  1.1  msaitoh 			return error;
    460  1.1  msaitoh 		case EINVAL:
    461  1.1  msaitoh 			adapter->einval_tx_dma_setup.ev_count++;
    462  1.1  msaitoh 			return error;
    463  1.1  msaitoh 		default:
    464  1.1  msaitoh 			adapter->other_tx_dma_setup.ev_count++;
    465  1.1  msaitoh 			return error;
    466  1.1  msaitoh 		}
    467  1.1  msaitoh 	}
    468  1.1  msaitoh 
    469  1.1  msaitoh 	/* Make certain there are enough descriptors */
    470  1.1  msaitoh 	if (map->dm_nsegs > txr->tx_avail - 2) {
    471  1.1  msaitoh 		txr->no_desc_avail.ev_count++;
    472  1.1  msaitoh 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
    473  1.1  msaitoh 		return EAGAIN;
    474  1.1  msaitoh 	}
    475  1.1  msaitoh 
    476  1.1  msaitoh 	/*
     477  1.4  msaitoh 	 * Set up the appropriate offload context;
     478  1.4  msaitoh 	 * this will consume the first descriptor.
    479  1.4  msaitoh 	 */
    480  1.1  msaitoh 	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
    481  1.1  msaitoh 	if (__predict_false(error)) {
    482  1.1  msaitoh 		return (error);
    483  1.1  msaitoh 	}
    484  1.1  msaitoh 
    485  1.1  msaitoh #ifdef IXGBE_FDIR
    486  1.1  msaitoh 	/* Do the flow director magic */
    487  1.1  msaitoh 	if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
    488  1.1  msaitoh 		++txr->atr_count;
    489  1.1  msaitoh 		if (txr->atr_count >= atr_sample_rate) {
    490  1.1  msaitoh 			ixgbe_atr(txr, m_head);
    491  1.1  msaitoh 			txr->atr_count = 0;
    492  1.1  msaitoh 		}
    493  1.1  msaitoh 	}
    494  1.1  msaitoh #endif
    495  1.1  msaitoh 
    496  1.1  msaitoh 	i = txr->next_avail_desc;
    497  1.1  msaitoh 	for (j = 0; j < map->dm_nsegs; j++) {
    498  1.1  msaitoh 		bus_size_t seglen;
    499  1.1  msaitoh 		bus_addr_t segaddr;
    500  1.1  msaitoh 
    501  1.1  msaitoh 		txbuf = &txr->tx_buffers[i];
    502  1.1  msaitoh 		txd = &txr->tx_base[i];
    503  1.1  msaitoh 		seglen = map->dm_segs[j].ds_len;
    504  1.1  msaitoh 		segaddr = htole64(map->dm_segs[j].ds_addr);
    505  1.1  msaitoh 
    506  1.1  msaitoh 		txd->read.buffer_addr = segaddr;
    507  1.1  msaitoh 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
    508  1.1  msaitoh 		    cmd_type_len |seglen);
    509  1.1  msaitoh 		txd->read.olinfo_status = htole32(olinfo_status);
    510  1.1  msaitoh 
    511  1.1  msaitoh 		if (++i == txr->num_desc)
    512  1.1  msaitoh 			i = 0;
    513  1.1  msaitoh 	}
    514  1.1  msaitoh 
    515  1.1  msaitoh 	txd->read.cmd_type_len |=
    516  1.1  msaitoh 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
    517  1.1  msaitoh 	txr->tx_avail -= map->dm_nsegs;
    518  1.1  msaitoh 	txr->next_avail_desc = i;
    519  1.1  msaitoh 
    520  1.1  msaitoh 	txbuf->m_head = m_head;
    521  1.1  msaitoh 	/*
    522  1.4  msaitoh 	 * Here we swap the map so the last descriptor,
     523  1.4  msaitoh 	 * which gets the completion interrupt, has the
    524  1.4  msaitoh 	 * real map, and the first descriptor gets the
    525  1.4  msaitoh 	 * unused map from this descriptor.
    526  1.4  msaitoh 	 */
    527  1.1  msaitoh 	txr->tx_buffers[first].map = txbuf->map;
    528  1.1  msaitoh 	txbuf->map = map;
    529  1.1  msaitoh 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
    530  1.1  msaitoh 	    BUS_DMASYNC_PREWRITE);
    531  1.1  msaitoh 
    532  1.1  msaitoh         /* Set the EOP descriptor that will be marked done */
    533  1.1  msaitoh         txbuf = &txr->tx_buffers[first];
    534  1.1  msaitoh 	txbuf->eop = txd;
    535  1.1  msaitoh 
    536  1.1  msaitoh         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
    537  1.1  msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    538  1.1  msaitoh 	/*
    539  1.1  msaitoh 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
    540  1.1  msaitoh 	 * hardware that this frame is available to transmit.
    541  1.1  msaitoh 	 */
    542  1.1  msaitoh 	++txr->total_packets.ev_count;
    543  1.3  msaitoh 	IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);
    544  1.3  msaitoh 
    545  1.3  msaitoh 	/* Mark queue as having work */
    546  1.3  msaitoh 	if (txr->busy == 0)
    547  1.3  msaitoh 		txr->busy = 1;
    548  1.1  msaitoh 
    549  1.1  msaitoh 	return 0;
    550  1.1  msaitoh }
    551  1.1  msaitoh 
    552  1.1  msaitoh /*********************************************************************
    553  1.1  msaitoh  *
    554  1.1  msaitoh  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
    555  1.1  msaitoh  *  the information needed to transmit a packet on the wire. This is
    556  1.1  msaitoh  *  called only once at attach, setup is done every reset.
    557  1.1  msaitoh  *
    558  1.1  msaitoh  **********************************************************************/
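                       /*
                        * Each tx_buffer gets its own DMA map, created from a tag that allows
                        * up to adapter->num_segs segments and IXGBE_TSO_SIZE bytes in total,
                        * so one map can describe a full TSO-sized mbuf chain.
                        */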
    559  1.1  msaitoh int
    560  1.1  msaitoh ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
    561  1.1  msaitoh {
    562  1.1  msaitoh 	struct adapter *adapter = txr->adapter;
    563  1.1  msaitoh 	device_t dev = adapter->dev;
    564  1.1  msaitoh 	struct ixgbe_tx_buf *txbuf;
    565  1.1  msaitoh 	int error, i;
    566  1.1  msaitoh 
    567  1.1  msaitoh 	/*
    568  1.1  msaitoh 	 * Setup DMA descriptor areas.
    569  1.1  msaitoh 	 */
    570  1.1  msaitoh 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
    571  1.1  msaitoh 			       1, 0,		/* alignment, bounds */
    572  1.1  msaitoh 			       IXGBE_TSO_SIZE,		/* maxsize */
    573  1.1  msaitoh 			       adapter->num_segs,	/* nsegments */
    574  1.1  msaitoh 			       PAGE_SIZE,		/* maxsegsize */
    575  1.1  msaitoh 			       0,			/* flags */
    576  1.1  msaitoh 			       &txr->txtag))) {
    577  1.1  msaitoh 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
    578  1.1  msaitoh 		goto fail;
    579  1.1  msaitoh 	}
    580  1.1  msaitoh 
    581  1.1  msaitoh 	if (!(txr->tx_buffers =
    582  1.1  msaitoh 	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
    583  1.1  msaitoh 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
    584  1.1  msaitoh 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
    585  1.1  msaitoh 		error = ENOMEM;
    586  1.1  msaitoh 		goto fail;
    587  1.1  msaitoh 	}
    588  1.1  msaitoh 
    589  1.1  msaitoh         /* Create the descriptor buffer dma maps */
    590  1.1  msaitoh 	txbuf = txr->tx_buffers;
    591  1.1  msaitoh 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
    592  1.1  msaitoh 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
    593  1.1  msaitoh 		if (error != 0) {
    594  1.1  msaitoh 			aprint_error_dev(dev,
    595  1.1  msaitoh 			    "Unable to create TX DMA map (%d)\n", error);
    596  1.1  msaitoh 			goto fail;
    597  1.1  msaitoh 		}
    598  1.1  msaitoh 	}
    599  1.1  msaitoh 
    600  1.1  msaitoh 	return 0;
    601  1.1  msaitoh fail:
     602  1.1  msaitoh 	/* We free them all; this handles the case where we failed partway through */
    603  1.1  msaitoh 	ixgbe_free_transmit_structures(adapter);
    604  1.1  msaitoh 	return (error);
    605  1.1  msaitoh }
    606  1.1  msaitoh 
    607  1.1  msaitoh /*********************************************************************
    608  1.1  msaitoh  *
    609  1.1  msaitoh  *  Initialize a transmit ring.
    610  1.1  msaitoh  *
    611  1.1  msaitoh  **********************************************************************/
    612  1.1  msaitoh static void
    613  1.1  msaitoh ixgbe_setup_transmit_ring(struct tx_ring *txr)
    614  1.1  msaitoh {
    615  1.1  msaitoh 	struct adapter *adapter = txr->adapter;
    616  1.1  msaitoh 	struct ixgbe_tx_buf *txbuf;
    617  1.1  msaitoh #ifdef DEV_NETMAP
    618  1.1  msaitoh 	struct netmap_adapter *na = NA(adapter->ifp);
    619  1.1  msaitoh 	struct netmap_slot *slot;
    620  1.1  msaitoh #endif /* DEV_NETMAP */
    621  1.1  msaitoh 
    622  1.1  msaitoh 	/* Clear the old ring contents */
    623  1.1  msaitoh 	IXGBE_TX_LOCK(txr);
    624  1.1  msaitoh #ifdef DEV_NETMAP
    625  1.1  msaitoh 	/*
    626  1.1  msaitoh 	 * (under lock): if in netmap mode, do some consistency
    627  1.1  msaitoh 	 * checks and set slot to entry 0 of the netmap ring.
    628  1.1  msaitoh 	 */
    629  1.1  msaitoh 	slot = netmap_reset(na, NR_TX, txr->me, 0);
    630  1.1  msaitoh #endif /* DEV_NETMAP */
    631  1.1  msaitoh 	bzero((void *)txr->tx_base,
    632  1.1  msaitoh 	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
    633  1.1  msaitoh 	/* Reset indices */
    634  1.1  msaitoh 	txr->next_avail_desc = 0;
    635  1.1  msaitoh 	txr->next_to_clean = 0;
    636  1.1  msaitoh 
    637  1.1  msaitoh 	/* Free any existing tx buffers. */
    638  1.1  msaitoh         txbuf = txr->tx_buffers;
    639  1.5  msaitoh 	for (int i = 0; i < txr->num_desc; i++, txbuf++) {
    640  1.1  msaitoh 		if (txbuf->m_head != NULL) {
    641  1.1  msaitoh 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
    642  1.1  msaitoh 			    0, txbuf->m_head->m_pkthdr.len,
    643  1.1  msaitoh 			    BUS_DMASYNC_POSTWRITE);
    644  1.1  msaitoh 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
    645  1.1  msaitoh 			m_freem(txbuf->m_head);
    646  1.1  msaitoh 			txbuf->m_head = NULL;
    647  1.1  msaitoh 		}
    648  1.1  msaitoh #ifdef DEV_NETMAP
    649  1.1  msaitoh 		/*
    650  1.1  msaitoh 		 * In netmap mode, set the map for the packet buffer.
    651  1.1  msaitoh 		 * NOTE: Some drivers (not this one) also need to set
    652  1.1  msaitoh 		 * the physical buffer address in the NIC ring.
    653  1.1  msaitoh 		 * Slots in the netmap ring (indexed by "si") are
    654  1.1  msaitoh 		 * kring->nkr_hwofs positions "ahead" wrt the
    655  1.1  msaitoh 		 * corresponding slot in the NIC ring. In some drivers
    656  1.1  msaitoh 		 * (not here) nkr_hwofs can be negative. Function
    657  1.1  msaitoh 		 * netmap_idx_n2k() handles wraparounds properly.
    658  1.1  msaitoh 		 */
    659  1.1  msaitoh 		if (slot) {
    660  1.1  msaitoh 			int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
    661  1.5  msaitoh 			netmap_load_map(na, txr->txtag,
    662  1.5  msaitoh 			    txbuf->map, NMB(na, slot + si));
    663  1.1  msaitoh 		}
    664  1.1  msaitoh #endif /* DEV_NETMAP */
    665  1.1  msaitoh 		/* Clear the EOP descriptor pointer */
    666  1.1  msaitoh 		txbuf->eop = NULL;
    667  1.1  msaitoh         }
    668  1.1  msaitoh 
    669  1.1  msaitoh #ifdef IXGBE_FDIR
    670  1.1  msaitoh 	/* Set the rate at which we sample packets */
    671  1.1  msaitoh 	if (adapter->hw.mac.type != ixgbe_mac_82598EB)
    672  1.1  msaitoh 		txr->atr_sample = atr_sample_rate;
    673  1.1  msaitoh #endif
    674  1.1  msaitoh 
    675  1.1  msaitoh 	/* Set number of descriptors available */
    676  1.1  msaitoh 	txr->tx_avail = adapter->num_tx_desc;
    677  1.1  msaitoh 
    678  1.1  msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
    679  1.1  msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    680  1.1  msaitoh 	IXGBE_TX_UNLOCK(txr);
    681  1.1  msaitoh }
    682  1.1  msaitoh 
    683  1.1  msaitoh /*********************************************************************
    684  1.1  msaitoh  *
    685  1.1  msaitoh  *  Initialize all transmit rings.
    686  1.1  msaitoh  *
    687  1.1  msaitoh  **********************************************************************/
    688  1.1  msaitoh int
    689  1.1  msaitoh ixgbe_setup_transmit_structures(struct adapter *adapter)
    690  1.1  msaitoh {
    691  1.1  msaitoh 	struct tx_ring *txr = adapter->tx_rings;
    692  1.1  msaitoh 
    693  1.1  msaitoh 	for (int i = 0; i < adapter->num_queues; i++, txr++)
    694  1.1  msaitoh 		ixgbe_setup_transmit_ring(txr);
    695  1.1  msaitoh 
    696  1.1  msaitoh 	return (0);
    697  1.1  msaitoh }
    698  1.1  msaitoh 
    699  1.1  msaitoh /*********************************************************************
    700  1.1  msaitoh  *
    701  1.1  msaitoh  *  Free all transmit rings.
    702  1.1  msaitoh  *
    703  1.1  msaitoh  **********************************************************************/
    704  1.1  msaitoh void
    705  1.1  msaitoh ixgbe_free_transmit_structures(struct adapter *adapter)
    706  1.1  msaitoh {
    707  1.1  msaitoh 	struct tx_ring *txr = adapter->tx_rings;
    708  1.1  msaitoh 
    709  1.1  msaitoh 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    710  1.1  msaitoh 		ixgbe_free_transmit_buffers(txr);
    711  1.1  msaitoh 		ixgbe_dma_free(adapter, &txr->txdma);
    712  1.1  msaitoh 		IXGBE_TX_LOCK_DESTROY(txr);
    713  1.1  msaitoh 	}
    714  1.1  msaitoh 	free(adapter->tx_rings, M_DEVBUF);
    715  1.1  msaitoh }
    716  1.1  msaitoh 
    717  1.1  msaitoh /*********************************************************************
    718  1.1  msaitoh  *
    719  1.1  msaitoh  *  Free transmit ring related data structures.
    720  1.1  msaitoh  *
    721  1.1  msaitoh  **********************************************************************/
    722  1.1  msaitoh static void
    723  1.1  msaitoh ixgbe_free_transmit_buffers(struct tx_ring *txr)
    724  1.1  msaitoh {
    725  1.1  msaitoh 	struct adapter *adapter = txr->adapter;
    726  1.1  msaitoh 	struct ixgbe_tx_buf *tx_buffer;
    727  1.1  msaitoh 	int             i;
    728  1.1  msaitoh 
    729  1.1  msaitoh 	INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin");
    730  1.1  msaitoh 
    731  1.1  msaitoh 	if (txr->tx_buffers == NULL)
    732  1.1  msaitoh 		return;
    733  1.1  msaitoh 
    734  1.1  msaitoh 	tx_buffer = txr->tx_buffers;
    735  1.1  msaitoh 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
    736  1.1  msaitoh 		if (tx_buffer->m_head != NULL) {
    737  1.1  msaitoh 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
    738  1.1  msaitoh 			    0, tx_buffer->m_head->m_pkthdr.len,
    739  1.1  msaitoh 			    BUS_DMASYNC_POSTWRITE);
    740  1.1  msaitoh 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
    741  1.1  msaitoh 			m_freem(tx_buffer->m_head);
    742  1.1  msaitoh 			tx_buffer->m_head = NULL;
    743  1.1  msaitoh 			if (tx_buffer->map != NULL) {
    744  1.1  msaitoh 				ixgbe_dmamap_destroy(txr->txtag,
    745  1.1  msaitoh 				    tx_buffer->map);
    746  1.1  msaitoh 				tx_buffer->map = NULL;
    747  1.1  msaitoh 			}
    748  1.1  msaitoh 		} else if (tx_buffer->map != NULL) {
    749  1.1  msaitoh 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
    750  1.1  msaitoh 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
    751  1.1  msaitoh 			tx_buffer->map = NULL;
    752  1.1  msaitoh 		}
    753  1.1  msaitoh 	}
    754  1.1  msaitoh #ifndef IXGBE_LEGACY_TX
    755  1.1  msaitoh 	if (txr->br != NULL)
    756  1.1  msaitoh 		buf_ring_free(txr->br, M_DEVBUF);
    757  1.1  msaitoh #endif
    758  1.1  msaitoh 	if (txr->tx_buffers != NULL) {
    759  1.1  msaitoh 		free(txr->tx_buffers, M_DEVBUF);
    760  1.1  msaitoh 		txr->tx_buffers = NULL;
    761  1.1  msaitoh 	}
    762  1.1  msaitoh 	if (txr->txtag != NULL) {
    763  1.1  msaitoh 		ixgbe_dma_tag_destroy(txr->txtag);
    764  1.1  msaitoh 		txr->txtag = NULL;
    765  1.1  msaitoh 	}
    766  1.1  msaitoh 	return;
    767  1.1  msaitoh }
    768  1.1  msaitoh 
    769  1.1  msaitoh /*********************************************************************
    770  1.1  msaitoh  *
    771  1.1  msaitoh  *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
    772  1.1  msaitoh  *
    773  1.1  msaitoh  **********************************************************************/
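                       /*
                        * A context descriptor carries no packet data; it only loads offload
                        * state into the queue.  vlan_macip_lens packs the VLAN tag, MAC
                        * header length and IP header length, while type_tucmd_mlhl selects
                        * IPv4 vs. IPv6 and the L4 type used for checksum insertion, roughly:
                        *
                        *	vlan_macip_lens = (vtag << IXGBE_ADVTXD_VLAN_SHIFT)
                        *	    | (ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT) | ip_hlen;
                        *	type_tucmd_mlhl = IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT
                        *	    | IXGBE_ADVTXD_TUCMD_IPV4/IPV6 | IXGBE_ADVTXD_TUCMD_L4T_TCP/UDP;
                        */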
    774  1.1  msaitoh 
    775  1.1  msaitoh static int
    776  1.1  msaitoh ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    777  1.1  msaitoh     u32 *cmd_type_len, u32 *olinfo_status)
    778  1.1  msaitoh {
    779  1.1  msaitoh 	struct adapter *adapter = txr->adapter;
    780  1.1  msaitoh 	struct ethercom *ec = &adapter->osdep.ec;
    781  1.4  msaitoh 	struct m_tag *mtag;
    782  1.1  msaitoh 	struct ixgbe_adv_tx_context_desc *TXD;
    783  1.1  msaitoh 	struct ether_vlan_header *eh;
    784  1.1  msaitoh 	struct ip ip;
    785  1.1  msaitoh 	struct ip6_hdr ip6;
    786  1.1  msaitoh 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
    787  1.1  msaitoh 	int	ehdrlen, ip_hlen = 0;
    788  1.1  msaitoh 	u16	etype;
    789  1.1  msaitoh 	u8	ipproto __diagused = 0;
    790  1.1  msaitoh 	int	offload = TRUE;
    791  1.1  msaitoh 	int	ctxd = txr->next_avail_desc;
    792  1.1  msaitoh 	u16	vtag = 0;
    793  1.1  msaitoh 
    794  1.1  msaitoh 	/* First check if TSO is to be used */
    795  1.1  msaitoh 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6))
    796  1.1  msaitoh 		return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
    797  1.1  msaitoh 
    798  1.1  msaitoh 	if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
    799  1.1  msaitoh 		offload = FALSE;
    800  1.1  msaitoh 
    801  1.1  msaitoh 	/* Indicate the whole packet as payload when not doing TSO */
    802  1.1  msaitoh        	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
    803  1.1  msaitoh 
    804  1.1  msaitoh 	/* Now ready a context descriptor */
    805  1.1  msaitoh 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
    806  1.1  msaitoh 
    807  1.1  msaitoh 	/*
    808  1.1  msaitoh 	** In advanced descriptors the vlan tag must
    809  1.1  msaitoh 	** be placed into the context descriptor. Hence
    810  1.1  msaitoh 	** we need to make one even if not doing offloads.
    811  1.1  msaitoh 	*/
    812  1.1  msaitoh 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
    813  1.1  msaitoh 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
    814  1.1  msaitoh 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
    815  1.5  msaitoh 	} else if (!IXGBE_IS_X550VF(adapter) && (offload == FALSE))
    816  1.4  msaitoh 		return (0);
    817  1.1  msaitoh 
    818  1.1  msaitoh 	/*
    819  1.1  msaitoh 	 * Determine where frame payload starts.
    820  1.1  msaitoh 	 * Jump over vlan headers if already present,
    821  1.1  msaitoh 	 * helpful for QinQ too.
    822  1.1  msaitoh 	 */
    823  1.1  msaitoh 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
    824  1.1  msaitoh 	eh = mtod(mp, struct ether_vlan_header *);
    825  1.1  msaitoh 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
    826  1.1  msaitoh 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
    827  1.1  msaitoh 		etype = ntohs(eh->evl_proto);
    828  1.1  msaitoh 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    829  1.1  msaitoh 	} else {
    830  1.1  msaitoh 		etype = ntohs(eh->evl_encap_proto);
    831  1.1  msaitoh 		ehdrlen = ETHER_HDR_LEN;
    832  1.1  msaitoh 	}
    833  1.1  msaitoh 
    834  1.1  msaitoh 	/* Set the ether header length */
    835  1.1  msaitoh 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
    836  1.1  msaitoh 
    837  1.3  msaitoh 	if (offload == FALSE)
    838  1.3  msaitoh 		goto no_offloads;
    839  1.3  msaitoh 
    840  1.1  msaitoh 	switch (etype) {
    841  1.1  msaitoh 	case ETHERTYPE_IP:
    842  1.1  msaitoh 		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
    843  1.1  msaitoh 		ip_hlen = ip.ip_hl << 2;
    844  1.1  msaitoh 		ipproto = ip.ip_p;
    845  1.1  msaitoh #if 0
    846  1.1  msaitoh 		ip.ip_sum = 0;
    847  1.1  msaitoh 		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
    848  1.1  msaitoh #else
    849  1.1  msaitoh 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
    850  1.1  msaitoh 		    ip.ip_sum == 0);
    851  1.1  msaitoh #endif
    852  1.1  msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
    853  1.1  msaitoh 		break;
    854  1.1  msaitoh 	case ETHERTYPE_IPV6:
    855  1.1  msaitoh 		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
    856  1.1  msaitoh 		ip_hlen = sizeof(ip6);
    857  1.1  msaitoh 		/* XXX-BZ this will go badly in case of ext hdrs. */
    858  1.1  msaitoh 		ipproto = ip6.ip6_nxt;
    859  1.1  msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
    860  1.1  msaitoh 		break;
    861  1.1  msaitoh 	default:
    862  1.1  msaitoh 		break;
    863  1.1  msaitoh 	}
    864  1.1  msaitoh 
    865  1.1  msaitoh 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
    866  1.1  msaitoh 		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
    867  1.1  msaitoh 
    868  1.1  msaitoh 	vlan_macip_lens |= ip_hlen;
    869  1.1  msaitoh 
    870  1.1  msaitoh 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
    871  1.1  msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
    872  1.1  msaitoh 		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
    873  1.1  msaitoh 		KASSERT(ipproto == IPPROTO_TCP);
    874  1.1  msaitoh 	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
    875  1.1  msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
    876  1.1  msaitoh 		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
    877  1.1  msaitoh 		KASSERT(ipproto == IPPROTO_UDP);
    878  1.1  msaitoh 	}
    879  1.1  msaitoh 
    880  1.3  msaitoh no_offloads:
    881  1.3  msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
    882  1.3  msaitoh 
    883  1.1  msaitoh 	/* Now copy bits into descriptor */
    884  1.1  msaitoh 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
    885  1.1  msaitoh 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
    886  1.1  msaitoh 	TXD->seqnum_seed = htole32(0);
    887  1.1  msaitoh 	TXD->mss_l4len_idx = htole32(0);
    888  1.1  msaitoh 
    889  1.1  msaitoh 	/* We've consumed the first desc, adjust counters */
    890  1.1  msaitoh 	if (++ctxd == txr->num_desc)
    891  1.1  msaitoh 		ctxd = 0;
    892  1.1  msaitoh 	txr->next_avail_desc = ctxd;
    893  1.1  msaitoh 	--txr->tx_avail;
    894  1.1  msaitoh 
    895  1.1  msaitoh         return 0;
    896  1.1  msaitoh }
    897  1.1  msaitoh 
    898  1.1  msaitoh /**********************************************************************
    899  1.1  msaitoh  *
    900  1.1  msaitoh  *  Setup work for hardware segmentation offload (TSO) on
    901  1.1  msaitoh  *  adapters using advanced tx descriptors
    902  1.1  msaitoh  *
    903  1.1  msaitoh  **********************************************************************/
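                       /*
                        * For TSO the hardware replicates and updates the IP and TCP headers
                        * for every segment, so this routine zeroes the IP checksum, seeds the
                        * TCP checksum with the pseudo-header sum, and loads the MSS (from
                        * m_pkthdr.segsz) and TCP header length into mss_l4len_idx.  The
                        * payload length reported in olinfo_status excludes all headers.
                        */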
    904  1.1  msaitoh static int
    905  1.1  msaitoh ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp,
    906  1.1  msaitoh     u32 *cmd_type_len, u32 *olinfo_status)
    907  1.1  msaitoh {
    908  1.1  msaitoh 	struct m_tag *mtag;
    909  1.1  msaitoh 	struct adapter *adapter = txr->adapter;
    910  1.1  msaitoh 	struct ethercom *ec = &adapter->osdep.ec;
    911  1.1  msaitoh 	struct ixgbe_adv_tx_context_desc *TXD;
    912  1.1  msaitoh 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
    913  1.1  msaitoh 	u32 mss_l4len_idx = 0, paylen;
    914  1.1  msaitoh 	u16 vtag = 0, eh_type;
    915  1.1  msaitoh 	int ctxd, ehdrlen, ip_hlen, tcp_hlen;
    916  1.1  msaitoh 	struct ether_vlan_header *eh;
    917  1.1  msaitoh #ifdef INET6
    918  1.1  msaitoh 	struct ip6_hdr *ip6;
    919  1.1  msaitoh #endif
    920  1.1  msaitoh #ifdef INET
    921  1.1  msaitoh 	struct ip *ip;
    922  1.1  msaitoh #endif
    923  1.1  msaitoh 	struct tcphdr *th;
    924  1.1  msaitoh 
    925  1.1  msaitoh 
    926  1.1  msaitoh 	/*
    927  1.1  msaitoh 	 * Determine where frame payload starts.
    928  1.1  msaitoh 	 * Jump over vlan headers if already present
    929  1.1  msaitoh 	 */
    930  1.1  msaitoh 	eh = mtod(mp, struct ether_vlan_header *);
    931  1.1  msaitoh 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
    932  1.1  msaitoh 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    933  1.1  msaitoh 		eh_type = eh->evl_proto;
    934  1.1  msaitoh 	} else {
    935  1.1  msaitoh 		ehdrlen = ETHER_HDR_LEN;
    936  1.1  msaitoh 		eh_type = eh->evl_encap_proto;
    937  1.1  msaitoh 	}
    938  1.1  msaitoh 
    939  1.1  msaitoh 	switch (ntohs(eh_type)) {
    940  1.1  msaitoh #ifdef INET6
    941  1.1  msaitoh 	case ETHERTYPE_IPV6:
    942  1.1  msaitoh 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
    943  1.1  msaitoh 		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
    944  1.1  msaitoh 		if (ip6->ip6_nxt != IPPROTO_TCP)
    945  1.1  msaitoh 			return (ENXIO);
    946  1.1  msaitoh 		ip_hlen = sizeof(struct ip6_hdr);
    947  1.1  msaitoh 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
    948  1.1  msaitoh 		th = (struct tcphdr *)((char *)ip6 + ip_hlen);
    949  1.1  msaitoh 		th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
    950  1.1  msaitoh 		    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
    951  1.1  msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
    952  1.1  msaitoh 		break;
    953  1.1  msaitoh #endif
    954  1.1  msaitoh #ifdef INET
    955  1.1  msaitoh 	case ETHERTYPE_IP:
    956  1.1  msaitoh 		ip = (struct ip *)(mp->m_data + ehdrlen);
    957  1.1  msaitoh 		if (ip->ip_p != IPPROTO_TCP)
    958  1.1  msaitoh 			return (ENXIO);
    959  1.1  msaitoh 		ip->ip_sum = 0;
    960  1.1  msaitoh 		ip_hlen = ip->ip_hl << 2;
    961  1.1  msaitoh 		th = (struct tcphdr *)((char *)ip + ip_hlen);
    962  1.1  msaitoh 		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
    963  1.1  msaitoh 		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
    964  1.1  msaitoh 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
    965  1.1  msaitoh 		/* Tell transmit desc to also do IPv4 checksum. */
    966  1.1  msaitoh 		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
    967  1.1  msaitoh 		break;
    968  1.1  msaitoh #endif
    969  1.1  msaitoh 	default:
    970  1.1  msaitoh 		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
    971  1.1  msaitoh 		    __func__, ntohs(eh_type));
    972  1.1  msaitoh 		break;
    973  1.1  msaitoh 	}
    974  1.1  msaitoh 
    975  1.1  msaitoh 	ctxd = txr->next_avail_desc;
    976  1.1  msaitoh 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
    977  1.1  msaitoh 
    978  1.1  msaitoh 	tcp_hlen = th->th_off << 2;
    979  1.1  msaitoh 
    980  1.1  msaitoh 	/* This is used in the transmit desc in encap */
    981  1.1  msaitoh 	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
    982  1.1  msaitoh 
    983  1.1  msaitoh 	/* VLAN MACLEN IPLEN */
    984  1.1  msaitoh 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
    985  1.1  msaitoh 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
    986  1.1  msaitoh                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
    987  1.1  msaitoh 	}
    988  1.1  msaitoh 
    989  1.1  msaitoh 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
    990  1.1  msaitoh 	vlan_macip_lens |= ip_hlen;
    991  1.1  msaitoh 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
    992  1.1  msaitoh 
    993  1.1  msaitoh 	/* ADV DTYPE TUCMD */
    994  1.1  msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
    995  1.1  msaitoh 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
    996  1.1  msaitoh 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
    997  1.1  msaitoh 
    998  1.1  msaitoh 	/* MSS L4LEN IDX */
    999  1.1  msaitoh 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
   1000  1.1  msaitoh 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
   1001  1.1  msaitoh 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
   1002  1.1  msaitoh 
   1003  1.1  msaitoh 	TXD->seqnum_seed = htole32(0);
   1004  1.1  msaitoh 
   1005  1.1  msaitoh 	if (++ctxd == txr->num_desc)
   1006  1.1  msaitoh 		ctxd = 0;
   1007  1.1  msaitoh 
   1008  1.1  msaitoh 	txr->tx_avail--;
   1009  1.1  msaitoh 	txr->next_avail_desc = ctxd;
   1010  1.1  msaitoh 	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
   1011  1.1  msaitoh 	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
   1012  1.1  msaitoh 	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
   1013  1.1  msaitoh 	++txr->tso_tx.ev_count;
   1014  1.1  msaitoh 	return (0);
   1015  1.1  msaitoh }
   1016  1.1  msaitoh 
   1017  1.3  msaitoh 
   1018  1.1  msaitoh /**********************************************************************
   1019  1.1  msaitoh  *
   1020  1.1  msaitoh  *  Examine each tx_buffer in the used queue. If the hardware is done
   1021  1.1  msaitoh  *  processing the packet then free associated resources. The
   1022  1.1  msaitoh  *  tx_buffer is put back on the free queue.
   1023  1.1  msaitoh  *
   1024  1.1  msaitoh  **********************************************************************/
   1025  1.1  msaitoh void
   1026  1.1  msaitoh ixgbe_txeof(struct tx_ring *txr)
   1027  1.1  msaitoh {
   1028  1.1  msaitoh 	struct adapter		*adapter = txr->adapter;
   1029  1.1  msaitoh 	struct ifnet		*ifp = adapter->ifp;
   1030  1.1  msaitoh 	u32			work, processed = 0;
   1031  1.1  msaitoh 	u16			limit = txr->process_limit;
   1032  1.1  msaitoh 	struct ixgbe_tx_buf	*buf;
   1033  1.1  msaitoh 	union ixgbe_adv_tx_desc *txd;
   1034  1.1  msaitoh 
   1035  1.1  msaitoh 	KASSERT(mutex_owned(&txr->tx_mtx));
   1036  1.1  msaitoh 
   1037  1.1  msaitoh #ifdef DEV_NETMAP
   1038  1.1  msaitoh 	if (ifp->if_capenable & IFCAP_NETMAP) {
   1039  1.1  msaitoh 		struct netmap_adapter *na = NA(ifp);
   1040  1.1  msaitoh 		struct netmap_kring *kring = &na->tx_rings[txr->me];
   1041  1.1  msaitoh 		txd = txr->tx_base;
   1042  1.1  msaitoh 		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1043  1.1  msaitoh 		    BUS_DMASYNC_POSTREAD);
   1044  1.1  msaitoh 		/*
   1045  1.1  msaitoh 		 * In netmap mode, all the work is done in the context
   1046  1.1  msaitoh 		 * of the client thread. Interrupt handlers only wake up
   1047  1.1  msaitoh 		 * clients, which may be sleeping on individual rings
   1048  1.1  msaitoh 		 * or on a global resource for all rings.
   1049  1.1  msaitoh 		 * To implement tx interrupt mitigation, we wake up the client
   1050  1.1  msaitoh 		 * thread roughly every half ring, even if the NIC interrupts
   1051  1.1  msaitoh 		 * more frequently. This is implemented as follows:
   1052  1.1  msaitoh 		 * - ixgbe_txsync() sets kring->nr_kflags with the index of
   1053  1.1  msaitoh 		 *   the slot that should wake up the thread (nkr_num_slots
   1054  1.1  msaitoh 		 *   means the user thread should not be woken up);
   1055  1.1  msaitoh 		 * - the driver ignores tx interrupts unless netmap_mitigate=0
   1056  1.1  msaitoh 		 *   or the slot has the DD bit set.
   1057  1.1  msaitoh 		 */
   1058  1.1  msaitoh 		if (!netmap_mitigate ||
   1059  1.1  msaitoh 		    (kring->nr_kflags < kring->nkr_num_slots &&
   1060  1.1  msaitoh 		    txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
   1061  1.1  msaitoh 			netmap_tx_irq(ifp, txr->me);
   1062  1.1  msaitoh 		}
   1063  1.1  msaitoh 		return;
   1064  1.1  msaitoh 	}
   1065  1.1  msaitoh #endif /* DEV_NETMAP */
   1066  1.1  msaitoh 
   1067  1.1  msaitoh 	if (txr->tx_avail == txr->num_desc) {
   1068  1.3  msaitoh 		txr->busy = 0;
   1069  1.1  msaitoh 		return;
   1070  1.1  msaitoh 	}
   1071  1.1  msaitoh 
   1072  1.1  msaitoh 	/* Get work starting point */
   1073  1.1  msaitoh 	work = txr->next_to_clean;
   1074  1.1  msaitoh 	buf = &txr->tx_buffers[work];
   1075  1.1  msaitoh 	txd = &txr->tx_base[work];
   1076  1.1  msaitoh 	work -= txr->num_desc; /* The distance to ring end */
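                       	/*
                       	 * 'work' now holds the (unsigned-wrapped) negative distance to
                       	 * the end of the ring; it is incremented as descriptors are
                       	 * cleaned and wraps back to the ring start when it reaches zero,
                       	 * avoiding a modulo on every iteration.
                       	 */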
   1077  1.1  msaitoh         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1078  1.1  msaitoh 	    BUS_DMASYNC_POSTREAD);
   1079  1.1  msaitoh 	do {
   1080  1.1  msaitoh 		union ixgbe_adv_tx_desc *eop= buf->eop;
   1081  1.1  msaitoh 		if (eop == NULL) /* No work */
   1082  1.1  msaitoh 			break;
   1083  1.1  msaitoh 
   1084  1.1  msaitoh 		if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
   1085  1.1  msaitoh 			break;	/* I/O not complete */
   1086  1.1  msaitoh 
   1087  1.1  msaitoh 		if (buf->m_head) {
   1088  1.1  msaitoh 			txr->bytes +=
   1089  1.1  msaitoh 			    buf->m_head->m_pkthdr.len;
   1090  1.1  msaitoh 			bus_dmamap_sync(txr->txtag->dt_dmat,
   1091  1.1  msaitoh 			    buf->map,
   1092  1.1  msaitoh 			    0, buf->m_head->m_pkthdr.len,
   1093  1.1  msaitoh 			    BUS_DMASYNC_POSTWRITE);
   1094  1.1  msaitoh 			ixgbe_dmamap_unload(txr->txtag,
   1095  1.1  msaitoh 			    buf->map);
   1096  1.1  msaitoh 			m_freem(buf->m_head);
   1097  1.1  msaitoh 			buf->m_head = NULL;
   1098  1.1  msaitoh 		}
   1099  1.1  msaitoh 		buf->eop = NULL;
   1100  1.1  msaitoh 		++txr->tx_avail;
   1101  1.1  msaitoh 
   1102  1.1  msaitoh 		/* We clean the range if multi segment */
   1103  1.1  msaitoh 		while (txd != eop) {
   1104  1.1  msaitoh 			++txd;
   1105  1.1  msaitoh 			++buf;
   1106  1.1  msaitoh 			++work;
   1107  1.1  msaitoh 			/* wrap the ring? */
   1108  1.1  msaitoh 			if (__predict_false(!work)) {
   1109  1.1  msaitoh 				work -= txr->num_desc;
   1110  1.1  msaitoh 				buf = txr->tx_buffers;
   1111  1.1  msaitoh 				txd = txr->tx_base;
   1112  1.1  msaitoh 			}
   1113  1.1  msaitoh 			if (buf->m_head) {
   1114  1.1  msaitoh 				txr->bytes +=
   1115  1.1  msaitoh 				    buf->m_head->m_pkthdr.len;
   1116  1.1  msaitoh 				bus_dmamap_sync(txr->txtag->dt_dmat,
   1117  1.1  msaitoh 				    buf->map,
   1118  1.1  msaitoh 				    0, buf->m_head->m_pkthdr.len,
   1119  1.1  msaitoh 				    BUS_DMASYNC_POSTWRITE);
   1120  1.1  msaitoh 				ixgbe_dmamap_unload(txr->txtag,
   1121  1.1  msaitoh 				    buf->map);
   1122  1.1  msaitoh 				m_freem(buf->m_head);
   1123  1.1  msaitoh 				buf->m_head = NULL;
   1124  1.1  msaitoh 			}
   1125  1.1  msaitoh 			++txr->tx_avail;
   1126  1.1  msaitoh 			buf->eop = NULL;
   1127  1.1  msaitoh 
   1128  1.1  msaitoh 		}
   1129  1.1  msaitoh 		++txr->packets;
   1130  1.1  msaitoh 		++processed;
   1131  1.1  msaitoh 		++ifp->if_opackets;
   1132  1.1  msaitoh 
   1133  1.1  msaitoh 		/* Try the next packet */
   1134  1.1  msaitoh 		++txd;
   1135  1.1  msaitoh 		++buf;
   1136  1.1  msaitoh 		++work;
   1137  1.1  msaitoh 		/* reset with a wrap */
   1138  1.1  msaitoh 		if (__predict_false(!work)) {
   1139  1.1  msaitoh 			work -= txr->num_desc;
   1140  1.1  msaitoh 			buf = txr->tx_buffers;
   1141  1.1  msaitoh 			txd = txr->tx_base;
   1142  1.1  msaitoh 		}
   1143  1.1  msaitoh 		prefetch(txd);
   1144  1.1  msaitoh 	} while (__predict_true(--limit));
   1145  1.1  msaitoh 
   1146  1.1  msaitoh 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   1147  1.1  msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1148  1.1  msaitoh 
   1149  1.1  msaitoh 	work += txr->num_desc;
   1150  1.1  msaitoh 	txr->next_to_clean = work;
   1151  1.1  msaitoh 
   1152  1.1  msaitoh 	/*
   1153  1.3  msaitoh 	** Queue hang detection: we know there is work
   1154  1.1  msaitoh 	** outstanding, or the early return above would
   1155  1.3  msaitoh 	** have been taken, so increment busy whenever
   1156  1.3  msaitoh 	** nothing was cleaned.  The local timer then
   1157  1.3  msaitoh 	** checks this counter and marks the queue HUNG
   1158  1.3  msaitoh 	** once it exceeds the maximum number of attempts.
   1159  1.1  msaitoh 	*/
   1160  1.3  msaitoh 	if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
   1161  1.3  msaitoh 		++txr->busy;
   1162  1.3  msaitoh 	/*
   1163  1.3  msaitoh 	** If anything was cleaned, reset the state to 1;
   1164  1.3  msaitoh 	** note this also turns off HUNG if it's set.
   1165  1.3  msaitoh 	*/
   1166  1.3  msaitoh 	if (processed)
   1167  1.3  msaitoh 		txr->busy = 1;
   1168  1.1  msaitoh 
   1169  1.1  msaitoh 	if (txr->tx_avail == txr->num_desc)
   1170  1.3  msaitoh 		txr->busy = 0;
   1171  1.1  msaitoh 
   1172  1.1  msaitoh 	return;
   1173  1.1  msaitoh }
   1174  1.1  msaitoh 
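                      /*
                       * Illustrative sketch (an assumption about code that lives outside this
                       * file, not a copy of it): the txr->busy counter maintained above is
                       * meant to be consumed by the driver's periodic timer, roughly along
                       * these lines.  IXGBE_MAX_TX_BUSY and the exact reinit action are
                       * assumptions used only to show the idea.
                       *
                       *	struct tx_ring *txr = adapter->tx_rings;
                       *	int hung = 0;
                       *
                       *	for (int i = 0; i < adapter->num_queues; i++, txr++) {
                       *		if (txr->busy == IXGBE_QUEUE_HUNG) {
                       *			++hung;		// already declared hung
                       *			continue;
                       *		}
                       *		if (txr->busy >= IXGBE_MAX_TX_BUSY) {
                       *			device_printf(dev,
                       *			    "Warning queue %d appears to be hung!\n", i);
                       *			txr->busy = IXGBE_QUEUE_HUNG;
                       *			++hung;
                       *		}
                       *	}
                       *	if (hung == adapter->num_queues)
                       *		// schedule an interface reinit
                       */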
   1175  1.3  msaitoh 
   1176  1.1  msaitoh #ifdef IXGBE_FDIR
   1177  1.1  msaitoh /*
   1178  1.1  msaitoh ** This routine parses packet headers so that Flow
   1179  1.1  msaitoh ** Director can make a hashed filter table entry,
   1180  1.1  msaitoh ** allowing traffic flows to be identified and kept
   1181  1.1  msaitoh ** on the same CPU.  Doing this for every packet
   1182  1.1  msaitoh ** would be a performance hit, so it is only done
   1183  1.1  msaitoh ** for one packet in every IXGBE_FDIR_RATE.
   1184  1.1  msaitoh */
   1185  1.1  msaitoh static void
   1186  1.1  msaitoh ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
   1187  1.1  msaitoh {
   1188  1.1  msaitoh 	struct adapter			*adapter = txr->adapter;
   1189  1.1  msaitoh 	struct ix_queue			*que;
   1190  1.1  msaitoh 	struct ip			*ip;
   1191  1.1  msaitoh 	struct tcphdr			*th;
   1192  1.1  msaitoh 	struct udphdr			*uh;
   1193  1.1  msaitoh 	struct ether_vlan_header	*eh;
   1194  1.1  msaitoh 	union ixgbe_atr_hash_dword	input = {.dword = 0};
   1195  1.1  msaitoh 	union ixgbe_atr_hash_dword	common = {.dword = 0};
   1196  1.1  msaitoh 	int  				ehdrlen, ip_hlen;
   1197  1.1  msaitoh 	u16				etype;
   1198  1.1  msaitoh 
   1199  1.1  msaitoh 	eh = mtod(mp, struct ether_vlan_header *);
   1200  1.1  msaitoh 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   1201  1.1  msaitoh 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   1202  1.1  msaitoh 		etype = eh->evl_proto;
   1203  1.1  msaitoh 	} else {
   1204  1.1  msaitoh 		ehdrlen = ETHER_HDR_LEN;
   1205  1.1  msaitoh 		etype = eh->evl_encap_proto;
   1206  1.1  msaitoh 	}
   1207  1.1  msaitoh 
   1208  1.1  msaitoh 	/* Only handling IPv4 */
   1209  1.1  msaitoh 	if (etype != htons(ETHERTYPE_IP))
   1210  1.1  msaitoh 		return;
   1211  1.1  msaitoh 
   1212  1.1  msaitoh 	ip = (struct ip *)(mp->m_data + ehdrlen);
   1213  1.1  msaitoh 	ip_hlen = ip->ip_hl << 2;
   1214  1.1  msaitoh 
   1215  1.1  msaitoh 	/* check if we're UDP or TCP */
   1216  1.1  msaitoh 	switch (ip->ip_p) {
   1217  1.1  msaitoh 	case IPPROTO_TCP:
   1218  1.1  msaitoh 		th = (struct tcphdr *)((char *)ip + ip_hlen);
   1219  1.1  msaitoh 		/* src and dst are inverted */
   1220  1.1  msaitoh 		common.port.dst ^= th->th_sport;
   1221  1.1  msaitoh 		common.port.src ^= th->th_dport;
   1222  1.1  msaitoh 		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
   1223  1.1  msaitoh 		break;
   1224  1.1  msaitoh 	case IPPROTO_UDP:
   1225  1.1  msaitoh 		uh = (struct udphdr *)((char *)ip + ip_hlen);
   1226  1.1  msaitoh 		/* src and dst are inverted */
   1227  1.1  msaitoh 		common.port.dst ^= uh->uh_sport;
   1228  1.1  msaitoh 		common.port.src ^= uh->uh_dport;
   1229  1.1  msaitoh 		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
   1230  1.1  msaitoh 		break;
   1231  1.1  msaitoh 	default:
   1232  1.1  msaitoh 		return;
   1233  1.1  msaitoh 	}
   1234  1.1  msaitoh 
   1235  1.1  msaitoh 	input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
   1236  1.1  msaitoh 	if (mp->m_pkthdr.ether_vtag)
   1237  1.1  msaitoh 		common.flex_bytes ^= htons(ETHERTYPE_VLAN);
   1238  1.1  msaitoh 	else
   1239  1.1  msaitoh 		common.flex_bytes ^= etype;
   1240  1.1  msaitoh 	common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
   1241  1.1  msaitoh 
   1242  1.1  msaitoh 	que = &adapter->queues[txr->me];
   1243  1.1  msaitoh 	/*
   1244  1.1  msaitoh 	** This assumes the Rx queue and Tx
   1245  1.1  msaitoh 	** queue are bound to the same CPU
   1246  1.1  msaitoh 	*/
   1247  1.1  msaitoh 	ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
   1248  1.1  msaitoh 	    input, common, que->msix);
   1249  1.1  msaitoh }
   1250  1.1  msaitoh #endif /* IXGBE_FDIR */
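                      /*
                       * Usage sketch (hedged; the transmit path is not part of this section):
                       * ixgbe_atr() is intended to be rate limited by its caller, sampling
                       * roughly one packet in every IXGBE_FDIR_RATE.  The names atr_sample,
                       * atr_count and atr_sample_rate are assumptions about that caller,
                       * shown only to illustrate the sampling idea.
                       *
                       *	if (txr->atr_sample) {
                       *		++txr->atr_count;
                       *		if (txr->atr_count >= atr_sample_rate) {
                       *			ixgbe_atr(txr, m_head);
                       *			txr->atr_count = 0;
                       *		}
                       *	}
                       */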
   1251  1.1  msaitoh 
   1252  1.1  msaitoh /*
   1253  1.1  msaitoh ** Used to detect a descriptor that has
   1254  1.1  msaitoh ** been merged by Hardware RSC.
   1255  1.1  msaitoh */
   1256  1.1  msaitoh static inline u32
   1257  1.1  msaitoh ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
   1258  1.1  msaitoh {
   1259  1.1  msaitoh 	return (le32toh(rx->wb.lower.lo_dword.data) &
   1260  1.1  msaitoh 	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
   1261  1.1  msaitoh }
   1262  1.1  msaitoh 
   1263  1.1  msaitoh /*********************************************************************
   1264  1.1  msaitoh  *
   1265  1.1  msaitoh  *  Initialize the Hardware RSC (LRO) feature on the 82599 for
   1266  1.1  msaitoh  *  an RX ring.  It is toggled by the LRO capability even though
   1267  1.1  msaitoh  *  it is transparent to the stack.
   1268  1.1  msaitoh  *
   1269  1.1  msaitoh  *  NOTE: since this HW feature only works with IPv4, and our
   1270  1.1  msaitoh  *        testing has shown soft LRO to be just as effective,
   1271  1.1  msaitoh  *        it is disabled by default.
   1272  1.1  msaitoh  *
   1273  1.1  msaitoh  **********************************************************************/
   1274  1.1  msaitoh static void
   1275  1.1  msaitoh ixgbe_setup_hw_rsc(struct rx_ring *rxr)
   1276  1.1  msaitoh {
   1277  1.1  msaitoh 	struct	adapter 	*adapter = rxr->adapter;
   1278  1.1  msaitoh 	struct	ixgbe_hw	*hw = &adapter->hw;
   1279  1.1  msaitoh 	u32			rscctrl, rdrxctl;
   1280  1.1  msaitoh 
   1281  1.1  msaitoh 	/* If turning LRO/RSC off we need to disable it */
   1282  1.1  msaitoh 	if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
   1283  1.1  msaitoh 		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
   1284  1.1  msaitoh 		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
                      		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl); /* write back the cleared bit */
   1285  1.1  msaitoh 		return;
   1286  1.1  msaitoh 	}
   1287  1.1  msaitoh 
   1288  1.1  msaitoh 	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
   1289  1.1  msaitoh 	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
   1290  1.1  msaitoh #ifdef DEV_NETMAP /* crcstrip is optional in netmap */
   1291  1.1  msaitoh 	if (adapter->ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
   1292  1.1  msaitoh #endif /* DEV_NETMAP */
   1293  1.1  msaitoh 	rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
   1294  1.1  msaitoh 	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
   1295  1.1  msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
   1296  1.1  msaitoh 
   1297  1.1  msaitoh 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
   1298  1.1  msaitoh 	rscctrl |= IXGBE_RSCCTL_RSCEN;
   1299  1.1  msaitoh 	/*
   1300  1.1  msaitoh 	** Limit the total number of descriptors that can be
   1301  1.1  msaitoh 	** combined, so a coalesced receive does not exceed 64 KB
   1302  1.1  msaitoh 	*/
   1303  1.1  msaitoh 	if (rxr->mbuf_sz == MCLBYTES)
   1304  1.1  msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
   1305  1.1  msaitoh 	else if (rxr->mbuf_sz == MJUMPAGESIZE)
   1306  1.1  msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
   1307  1.1  msaitoh 	else if (rxr->mbuf_sz == MJUM9BYTES)
   1308  1.1  msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
   1309  1.1  msaitoh 	else  /* Using 16K cluster */
   1310  1.1  msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
   1311  1.1  msaitoh 
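                      	/*
                      	** Rough arithmetic behind these choices, assuming the usual
                      	** cluster sizes (MCLBYTES = 2 KB, MJUMPAGESIZE = one 4 KB page,
                      	** MJUM9BYTES = 9 KB): 16 x 2 KB = 32 KB, 8 x 4 KB = 32 KB,
                      	** 4 x 9 KB = 36 KB and 1 x 16 KB = 16 KB, so every combination
                      	** stays below the 64 KB ceiling for a coalesced receive.
                      	*/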
   1312  1.1  msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
   1313  1.1  msaitoh 
   1314  1.1  msaitoh 	/* Enable TCP header recognition */
   1315  1.1  msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
   1316  1.1  msaitoh 	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
   1317  1.1  msaitoh 	    IXGBE_PSRTYPE_TCPHDR));
   1318  1.1  msaitoh 
   1319  1.1  msaitoh 	/* Disable RSC for ACK packets */
   1320  1.1  msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
   1321  1.1  msaitoh 	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
   1322  1.1  msaitoh 
   1323  1.1  msaitoh 	rxr->hw_rsc = TRUE;
   1324  1.1  msaitoh }
   1325  1.1  msaitoh /*********************************************************************
   1326  1.1  msaitoh  *
   1327  1.1  msaitoh  *  Refresh mbuf buffers for RX descriptor rings
   1328  1.1  msaitoh  *   - now keeps its own state, so discards due to resource
   1329  1.1  msaitoh  *     exhaustion are unnecessary; if an mbuf cannot be obtained
   1330  1.1  msaitoh  *     the routine just returns, keeping its placeholder, and can
   1331  1.1  msaitoh  *     simply be called again later to retry.
   1332  1.1  msaitoh  *
   1333  1.1  msaitoh  **********************************************************************/
   1334  1.1  msaitoh static void
   1335  1.1  msaitoh ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
   1336  1.1  msaitoh {
   1337  1.1  msaitoh 	struct adapter		*adapter = rxr->adapter;
   1338  1.1  msaitoh 	struct ixgbe_rx_buf	*rxbuf;
   1339  1.1  msaitoh 	struct mbuf		*mp;
   1340  1.1  msaitoh 	int			i, j, error;
   1341  1.1  msaitoh 	bool			refreshed = false;
   1342  1.1  msaitoh 
   1343  1.1  msaitoh 	i = j = rxr->next_to_refresh;
   1344  1.1  msaitoh 	/* Control the loop with 'j' running one slot ahead of 'i' */
   1345  1.1  msaitoh 	if (++j == rxr->num_desc)
   1346  1.1  msaitoh 		j = 0;
   1347  1.1  msaitoh 
   1348  1.1  msaitoh 	while (j != limit) {
   1349  1.1  msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1350  1.1  msaitoh 		if (rxbuf->buf == NULL) {
   1351  1.1  msaitoh 			mp = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
   1352  1.1  msaitoh 			    MT_DATA, M_PKTHDR, rxr->mbuf_sz);
   1353  1.1  msaitoh 			if (mp == NULL) {
   1354  1.1  msaitoh 				rxr->no_jmbuf.ev_count++;
   1355  1.1  msaitoh 				goto update;
   1356  1.1  msaitoh 			}
   1357  1.1  msaitoh 			if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
   1358  1.1  msaitoh 				m_adj(mp, ETHER_ALIGN);
   1359  1.1  msaitoh 		} else
   1360  1.1  msaitoh 			mp = rxbuf->buf;
   1361  1.1  msaitoh 
   1362  1.1  msaitoh 		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1363  1.1  msaitoh 
   1364  1.1  msaitoh 		/* If we're dealing with an mbuf that was copied rather
   1365  1.1  msaitoh 		 * than replaced, there's no need to go through busdma.
   1366  1.1  msaitoh 		 */
   1367  1.1  msaitoh 		if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
   1368  1.1  msaitoh 			/* Get the memory mapping */
   1369  1.4  msaitoh 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   1370  1.1  msaitoh 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   1371  1.1  msaitoh 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   1372  1.1  msaitoh 			if (error != 0) {
   1373  1.1  msaitoh 				printf("Refresh mbufs: payload dmamap load"
   1374  1.1  msaitoh 				    " failure - %d\n", error);
   1375  1.1  msaitoh 				m_free(mp);
   1376  1.1  msaitoh 				rxbuf->buf = NULL;
   1377  1.1  msaitoh 				goto update;
   1378  1.1  msaitoh 			}
   1379  1.1  msaitoh 			rxbuf->buf = mp;
   1380  1.1  msaitoh 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1381  1.1  msaitoh 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   1382  1.1  msaitoh 			rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
   1383  1.1  msaitoh 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1384  1.1  msaitoh 		} else {
   1385  1.1  msaitoh 			rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
   1386  1.1  msaitoh 			rxbuf->flags &= ~IXGBE_RX_COPY;
   1387  1.1  msaitoh 		}
   1388  1.1  msaitoh 
   1389  1.1  msaitoh 		refreshed = true;
   1390  1.1  msaitoh 		/* Next is precalculated */
   1391  1.1  msaitoh 		i = j;
   1392  1.1  msaitoh 		rxr->next_to_refresh = i;
   1393  1.1  msaitoh 		if (++j == rxr->num_desc)
   1394  1.1  msaitoh 			j = 0;
   1395  1.1  msaitoh 	}
   1396  1.1  msaitoh update:
   1397  1.1  msaitoh 	if (refreshed) /* Update hardware tail index */
   1398  1.1  msaitoh 		IXGBE_WRITE_REG(&adapter->hw,
   1399  1.3  msaitoh 		    rxr->tail, rxr->next_to_refresh);
   1400  1.1  msaitoh 	return;
   1401  1.1  msaitoh }
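                      /*
                       * Calling convention, in brief (a sketch drawn from the receive loop
                       * later in this file, not new behaviour): 'limit' is the ring index the
                       * cleaner has advanced to, and since next_to_refresh is kept here, a
                       * call that runs out of mbufs can simply be repeated later, e.g.
                       *
                       *	if (processed == 8) {
                       *		ixgbe_refresh_mbufs(rxr, i);
                       *		processed = 0;
                       *	}
                       */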
   1402  1.1  msaitoh 
   1403  1.1  msaitoh /*********************************************************************
   1404  1.1  msaitoh  *
   1405  1.1  msaitoh  *  Allocate memory for rx_buffer structures. Since we use one
   1406  1.1  msaitoh  *  rx_buffer per received packet, the maximum number of rx_buffers
   1407  1.1  msaitoh  *  that we'll need is equal to the number of receive descriptors
   1408  1.1  msaitoh  *  that we've allocated.
   1409  1.1  msaitoh  *
   1410  1.1  msaitoh  **********************************************************************/
   1411  1.1  msaitoh int
   1412  1.1  msaitoh ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
   1413  1.1  msaitoh {
   1414  1.1  msaitoh 	struct	adapter 	*adapter = rxr->adapter;
   1415  1.1  msaitoh 	device_t 		dev = adapter->dev;
   1416  1.1  msaitoh 	struct ixgbe_rx_buf 	*rxbuf;
   1417  1.5  msaitoh 	int             	bsize, error;
   1418  1.1  msaitoh 
   1419  1.1  msaitoh 	bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
   1420  1.1  msaitoh 	if (!(rxr->rx_buffers =
   1421  1.1  msaitoh 	    (struct ixgbe_rx_buf *) malloc(bsize,
   1422  1.1  msaitoh 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
   1423  1.1  msaitoh 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   1424  1.1  msaitoh 		error = ENOMEM;
   1425  1.1  msaitoh 		goto fail;
   1426  1.1  msaitoh 	}
   1427  1.1  msaitoh 
   1428  1.1  msaitoh 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   1429  1.1  msaitoh 				   1, 0,	/* alignment, bounds */
   1430  1.1  msaitoh 				   MJUM16BYTES,		/* maxsize */
   1431  1.1  msaitoh 				   1,			/* nsegments */
   1432  1.1  msaitoh 				   MJUM16BYTES,		/* maxsegsize */
   1433  1.1  msaitoh 				   0,			/* flags */
   1434  1.1  msaitoh 				   &rxr->ptag))) {
   1435  1.1  msaitoh 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   1436  1.1  msaitoh 		goto fail;
   1437  1.1  msaitoh 	}
   1438  1.1  msaitoh 
   1439  1.5  msaitoh 	for (int i = 0; i < rxr->num_desc; i++, rxbuf++) {
   1440  1.1  msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1441  1.4  msaitoh 		error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
   1442  1.1  msaitoh 		if (error) {
   1443  1.1  msaitoh 			aprint_error_dev(dev, "Unable to create RX dma map\n");
   1444  1.1  msaitoh 			goto fail;
   1445  1.1  msaitoh 		}
   1446  1.1  msaitoh 	}
   1447  1.1  msaitoh 
   1448  1.1  msaitoh 	return (0);
   1449  1.1  msaitoh 
   1450  1.1  msaitoh fail:
   1451  1.1  msaitoh 	/* Frees all, but can handle partial completion */
   1452  1.1  msaitoh 	ixgbe_free_receive_structures(adapter);
   1453  1.1  msaitoh 	return (error);
   1454  1.1  msaitoh }
   1455  1.1  msaitoh 
   1456  1.3  msaitoh 
   1457  1.1  msaitoh static void
   1458  1.1  msaitoh ixgbe_free_receive_ring(struct rx_ring *rxr)
   1459  1.1  msaitoh {
   1460  1.1  msaitoh 	struct ixgbe_rx_buf       *rxbuf;
   1461  1.1  msaitoh 
   1462  1.5  msaitoh 	for (int i = 0; i < rxr->num_desc; i++) {
   1463  1.1  msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1464  1.1  msaitoh 		if (rxbuf->buf != NULL) {
   1465  1.1  msaitoh 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1466  1.1  msaitoh 			    0, rxbuf->buf->m_pkthdr.len,
   1467  1.1  msaitoh 			    BUS_DMASYNC_POSTREAD);
   1468  1.1  msaitoh 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   1469  1.1  msaitoh 			rxbuf->buf->m_flags |= M_PKTHDR;
   1470  1.1  msaitoh 			m_freem(rxbuf->buf);
   1471  1.1  msaitoh 			rxbuf->buf = NULL;
   1472  1.1  msaitoh 			rxbuf->flags = 0;
   1473  1.1  msaitoh 		}
   1474  1.1  msaitoh 	}
   1475  1.1  msaitoh }
   1476  1.1  msaitoh 
   1477  1.1  msaitoh 
   1478  1.1  msaitoh /*********************************************************************
   1479  1.1  msaitoh  *
   1480  1.1  msaitoh  *  Initialize a receive ring and its buffers.
   1481  1.1  msaitoh  *
   1482  1.1  msaitoh  **********************************************************************/
   1483  1.1  msaitoh static int
   1484  1.1  msaitoh ixgbe_setup_receive_ring(struct rx_ring *rxr)
   1485  1.1  msaitoh {
   1486  1.1  msaitoh 	struct	adapter 	*adapter;
   1487  1.1  msaitoh 	struct ixgbe_rx_buf	*rxbuf;
   1488  1.1  msaitoh #ifdef LRO
   1489  1.1  msaitoh 	struct ifnet		*ifp;
   1490  1.1  msaitoh 	struct lro_ctrl		*lro = &rxr->lro;
   1491  1.1  msaitoh #endif /* LRO */
   1492  1.1  msaitoh 	int			rsize, error = 0;
   1493  1.1  msaitoh #ifdef DEV_NETMAP
   1494  1.1  msaitoh 	struct netmap_adapter *na = NA(rxr->adapter->ifp);
   1495  1.1  msaitoh 	struct netmap_slot *slot;
   1496  1.1  msaitoh #endif /* DEV_NETMAP */
   1497  1.1  msaitoh 
   1498  1.1  msaitoh 	adapter = rxr->adapter;
   1499  1.1  msaitoh #ifdef LRO
   1500  1.1  msaitoh 	ifp = adapter->ifp;
   1501  1.1  msaitoh #endif /* LRO */
   1502  1.1  msaitoh 
   1503  1.1  msaitoh 	/* Clear the ring contents */
   1504  1.1  msaitoh 	IXGBE_RX_LOCK(rxr);
   1505  1.1  msaitoh #ifdef DEV_NETMAP
   1506  1.1  msaitoh 	/* same as in ixgbe_setup_transmit_ring() */
   1507  1.1  msaitoh 	slot = netmap_reset(na, NR_RX, rxr->me, 0);
   1508  1.1  msaitoh #endif /* DEV_NETMAP */
   1509  1.1  msaitoh 	rsize = roundup2(adapter->num_rx_desc *
   1510  1.1  msaitoh 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   1511  1.1  msaitoh 	bzero((void *)rxr->rx_base, rsize);
   1512  1.1  msaitoh 	/* Cache the size */
   1513  1.1  msaitoh 	rxr->mbuf_sz = adapter->rx_mbuf_sz;
   1514  1.1  msaitoh 
   1515  1.1  msaitoh 	/* Free current RX buffer structs and their mbufs */
   1516  1.1  msaitoh 	ixgbe_free_receive_ring(rxr);
   1517  1.1  msaitoh 
   1518  1.1  msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1519  1.1  msaitoh 
   1520  1.1  msaitoh 	/* Now reinitialize our supply of jumbo mbufs.  The number
   1521  1.1  msaitoh 	 * or size of jumbo mbufs may have changed.
   1522  1.1  msaitoh 	 */
   1523  1.1  msaitoh 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
   1524  1.1  msaitoh 	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
   1525  1.1  msaitoh 
   1526  1.1  msaitoh 	IXGBE_RX_LOCK(rxr);
   1527  1.1  msaitoh 
   1528  1.1  msaitoh 	/* Now replenish the mbufs */
   1529  1.1  msaitoh 	for (int j = 0; j != rxr->num_desc; ++j) {
   1530  1.1  msaitoh 		struct mbuf	*mp;
   1531  1.1  msaitoh 
   1532  1.1  msaitoh 		rxbuf = &rxr->rx_buffers[j];
   1533  1.1  msaitoh #ifdef DEV_NETMAP
   1534  1.1  msaitoh 		/*
   1535  1.1  msaitoh 		 * In netmap mode, fill the map and set the buffer
   1536  1.1  msaitoh 		 * address in the NIC ring, considering the offset
   1537  1.1  msaitoh 		 * between the netmap and NIC rings (see comment in
   1538  1.1  msaitoh 		 * ixgbe_setup_transmit_ring() ). No need to allocate
   1539  1.1  msaitoh 		 * an mbuf, so end the block with a continue;
   1540  1.1  msaitoh 		 */
   1541  1.1  msaitoh 		if (slot) {
   1542  1.1  msaitoh 			int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
   1543  1.1  msaitoh 			uint64_t paddr;
   1544  1.1  msaitoh 			void *addr;
   1545  1.1  msaitoh 
   1546  1.1  msaitoh 			addr = PNMB(na, slot + sj, &paddr);
   1547  1.1  msaitoh 			netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
   1548  1.1  msaitoh 			/* Update descriptor and the cached value */
   1549  1.1  msaitoh 			rxr->rx_base[j].read.pkt_addr = htole64(paddr);
   1550  1.1  msaitoh 			rxbuf->addr = htole64(paddr);
   1551  1.1  msaitoh 			continue;
   1552  1.1  msaitoh 		}
   1553  1.1  msaitoh #endif /* DEV_NETMAP */
   1554  1.1  msaitoh 		rxbuf->flags = 0;
   1555  1.1  msaitoh 		rxbuf->buf = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
   1556  1.1  msaitoh 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   1557  1.1  msaitoh 		if (rxbuf->buf == NULL) {
   1558  1.1  msaitoh 			error = ENOBUFS;
   1559  1.1  msaitoh                         goto fail;
   1560  1.1  msaitoh 		}
   1561  1.1  msaitoh 		mp = rxbuf->buf;
   1562  1.1  msaitoh 		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1563  1.1  msaitoh 		/* Get the memory mapping */
   1564  1.1  msaitoh 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   1565  1.1  msaitoh 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   1566  1.1  msaitoh 		if (error != 0)
   1567  1.1  msaitoh                         goto fail;
   1568  1.1  msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1569  1.1  msaitoh 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   1570  1.1  msaitoh 		/* Update the descriptor and the cached value */
   1571  1.1  msaitoh 		rxr->rx_base[j].read.pkt_addr =
   1572  1.1  msaitoh 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1573  1.1  msaitoh 		rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1574  1.1  msaitoh 	}
   1575  1.1  msaitoh 
   1576  1.1  msaitoh 
   1577  1.1  msaitoh 	/* Setup our descriptor indices */
   1578  1.1  msaitoh 	rxr->next_to_check = 0;
   1579  1.1  msaitoh 	rxr->next_to_refresh = 0;
   1580  1.1  msaitoh 	rxr->lro_enabled = FALSE;
   1581  1.1  msaitoh 	rxr->rx_copies.ev_count = 0;
   1582  1.1  msaitoh 	rxr->rx_bytes.ev_count = 0;
   1583  1.1  msaitoh 	rxr->vtag_strip = FALSE;
   1584  1.1  msaitoh 
   1585  1.1  msaitoh 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1586  1.1  msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1587  1.1  msaitoh 
   1588  1.1  msaitoh 	/*
   1589  1.1  msaitoh 	** Now set up the LRO interface:
   1590  1.1  msaitoh 	*/
   1591  1.1  msaitoh 	if (ixgbe_rsc_enable)
   1592  1.1  msaitoh 		ixgbe_setup_hw_rsc(rxr);
   1593  1.1  msaitoh #ifdef LRO
   1594  1.1  msaitoh 	else if (ifp->if_capenable & IFCAP_LRO) {
   1595  1.1  msaitoh 		device_t dev = adapter->dev;
   1596  1.1  msaitoh 		int err = tcp_lro_init(lro);
   1597  1.1  msaitoh 		if (err) {
   1598  1.1  msaitoh 			device_printf(dev, "LRO Initialization failed!\n");
   1599  1.1  msaitoh 			goto fail;
   1600  1.1  msaitoh 		}
   1601  1.1  msaitoh 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   1602  1.1  msaitoh 		rxr->lro_enabled = TRUE;
   1603  1.1  msaitoh 		lro->ifp = adapter->ifp;
   1604  1.1  msaitoh 	}
   1605  1.1  msaitoh #endif /* LRO */
   1606  1.1  msaitoh 
   1607  1.1  msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1608  1.1  msaitoh 	return (0);
   1609  1.1  msaitoh 
   1610  1.1  msaitoh fail:
   1611  1.1  msaitoh 	ixgbe_free_receive_ring(rxr);
   1612  1.1  msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1613  1.1  msaitoh 	return (error);
   1614  1.1  msaitoh }
   1615  1.1  msaitoh 
   1616  1.1  msaitoh /*********************************************************************
   1617  1.1  msaitoh  *
   1618  1.1  msaitoh  *  Initialize all receive rings.
   1619  1.1  msaitoh  *
   1620  1.1  msaitoh  **********************************************************************/
   1621  1.1  msaitoh int
   1622  1.1  msaitoh ixgbe_setup_receive_structures(struct adapter *adapter)
   1623  1.1  msaitoh {
   1624  1.1  msaitoh 	struct rx_ring *rxr = adapter->rx_rings;
   1625  1.1  msaitoh 	int j;
   1626  1.1  msaitoh 
   1627  1.1  msaitoh 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   1628  1.1  msaitoh 		if (ixgbe_setup_receive_ring(rxr))
   1629  1.1  msaitoh 			goto fail;
   1630  1.1  msaitoh 
   1631  1.1  msaitoh 	return (0);
   1632  1.1  msaitoh fail:
   1633  1.1  msaitoh 	/*
   1634  1.1  msaitoh 	 * Free the RX buffers allocated so far; we only handle
   1635  1.1  msaitoh 	 * the rings that completed, since the failing ring will
   1636  1.1  msaitoh 	 * have cleaned up after itself. 'j' failed, so it is the terminus.
   1637  1.1  msaitoh 	 */
   1638  1.1  msaitoh 	for (int i = 0; i < j; ++i) {
   1639  1.1  msaitoh 		rxr = &adapter->rx_rings[i];
   1640  1.1  msaitoh 		ixgbe_free_receive_ring(rxr);
   1641  1.1  msaitoh 	}
   1642  1.1  msaitoh 
   1643  1.1  msaitoh 	return (ENOBUFS);
   1644  1.1  msaitoh }
   1645  1.1  msaitoh 
   1646  1.3  msaitoh 
   1647  1.1  msaitoh /*********************************************************************
   1648  1.1  msaitoh  *
   1649  1.1  msaitoh  *  Free all receive rings.
   1650  1.1  msaitoh  *
   1651  1.1  msaitoh  **********************************************************************/
   1652  1.1  msaitoh void
   1653  1.1  msaitoh ixgbe_free_receive_structures(struct adapter *adapter)
   1654  1.1  msaitoh {
   1655  1.1  msaitoh 	struct rx_ring *rxr = adapter->rx_rings;
   1656  1.1  msaitoh 
   1657  1.1  msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
   1658  1.1  msaitoh 
   1659  1.1  msaitoh 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1660  1.1  msaitoh #ifdef LRO
   1661  1.1  msaitoh 		struct lro_ctrl		*lro = &rxr->lro;
   1662  1.1  msaitoh #endif /* LRO */
   1663  1.1  msaitoh 		ixgbe_free_receive_buffers(rxr);
   1664  1.1  msaitoh #ifdef LRO
   1665  1.1  msaitoh 		/* Free LRO memory */
   1666  1.1  msaitoh 		tcp_lro_free(lro);
   1667  1.1  msaitoh #endif /* LRO */
   1668  1.1  msaitoh 		/* Free the ring memory as well */
   1669  1.1  msaitoh 		ixgbe_dma_free(adapter, &rxr->rxdma);
   1670  1.1  msaitoh 		IXGBE_RX_LOCK_DESTROY(rxr);
   1671  1.1  msaitoh 	}
   1672  1.1  msaitoh 
   1673  1.1  msaitoh 	free(adapter->rx_rings, M_DEVBUF);
   1674  1.1  msaitoh }
   1675  1.1  msaitoh 
   1676  1.1  msaitoh 
   1677  1.1  msaitoh /*********************************************************************
   1678  1.1  msaitoh  *
   1679  1.1  msaitoh  *  Free receive ring data structures
   1680  1.1  msaitoh  *
   1681  1.1  msaitoh  **********************************************************************/
   1682  1.1  msaitoh static void
   1683  1.1  msaitoh ixgbe_free_receive_buffers(struct rx_ring *rxr)
   1684  1.1  msaitoh {
   1685  1.1  msaitoh 	struct adapter		*adapter = rxr->adapter;
   1686  1.1  msaitoh 	struct ixgbe_rx_buf	*rxbuf;
   1687  1.1  msaitoh 
   1688  1.1  msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
   1689  1.1  msaitoh 
   1690  1.1  msaitoh 	/* Cleanup any existing buffers */
   1691  1.1  msaitoh 	if (rxr->rx_buffers != NULL) {
   1692  1.1  msaitoh 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   1693  1.1  msaitoh 			rxbuf = &rxr->rx_buffers[i];
   1694  1.1  msaitoh 			if (rxbuf->buf != NULL) {
   1695  1.1  msaitoh 				bus_dmamap_sync(rxr->ptag->dt_dmat,
   1696  1.1  msaitoh 				    rxbuf->pmap, 0, rxbuf->buf->m_pkthdr.len,
   1697  1.1  msaitoh 				    BUS_DMASYNC_POSTREAD);
   1698  1.1  msaitoh 				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   1699  1.1  msaitoh 				rxbuf->buf->m_flags |= M_PKTHDR;
   1700  1.1  msaitoh 				m_freem(rxbuf->buf);
   1701  1.1  msaitoh 			}
   1702  1.1  msaitoh 			rxbuf->buf = NULL;
   1703  1.1  msaitoh 			if (rxbuf->pmap != NULL) {
   1704  1.1  msaitoh 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   1705  1.1  msaitoh 				rxbuf->pmap = NULL;
   1706  1.1  msaitoh 			}
   1707  1.1  msaitoh 		}
   1708  1.1  msaitoh 		if (rxr->rx_buffers != NULL) {
   1709  1.1  msaitoh 			free(rxr->rx_buffers, M_DEVBUF);
   1710  1.1  msaitoh 			rxr->rx_buffers = NULL;
   1711  1.1  msaitoh 		}
   1712  1.1  msaitoh 	}
   1713  1.1  msaitoh 
   1714  1.1  msaitoh 	if (rxr->ptag != NULL) {
   1715  1.1  msaitoh 		ixgbe_dma_tag_destroy(rxr->ptag);
   1716  1.1  msaitoh 		rxr->ptag = NULL;
   1717  1.1  msaitoh 	}
   1718  1.1  msaitoh 
   1719  1.1  msaitoh 	return;
   1720  1.1  msaitoh }
   1721  1.1  msaitoh 
   1722  1.1  msaitoh static __inline void
   1723  1.1  msaitoh ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
   1724  1.1  msaitoh {
   1725  1.1  msaitoh 	int s;
   1726  1.1  msaitoh 
   1727  1.1  msaitoh #ifdef LRO
   1728  1.1  msaitoh 	struct adapter	*adapter = ifp->if_softc;
   1729  1.1  msaitoh 	struct ethercom *ec = &adapter->osdep.ec;
   1730  1.1  msaitoh 
   1731  1.1  msaitoh         /*
   1732  1.1  msaitoh          * At the moment LRO is only for IP/TCP packets whose TCP checksum
   1733  1.1  msaitoh          * was computed by hardware and that carry no VLAN tag in the
   1734  1.1  msaitoh          * Ethernet header.  For IPv6 we do not yet support extension headers.
   1735  1.1  msaitoh          */
   1736  1.1  msaitoh         if (rxr->lro_enabled &&
   1737  1.1  msaitoh             (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
   1738  1.1  msaitoh             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   1739  1.1  msaitoh             ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   1740  1.1  msaitoh             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
   1741  1.1  msaitoh             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   1742  1.1  msaitoh             (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
   1743  1.1  msaitoh             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
   1744  1.1  msaitoh             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
   1745  1.1  msaitoh                 /*
   1746  1.1  msaitoh                  * Send to the stack if:
   1747  1.1  msaitoh                  *  - LRO not enabled, or
   1748  1.1  msaitoh                  *  - no LRO resources, or
   1749  1.1  msaitoh                  *  - lro enqueue fails
   1750  1.1  msaitoh                  */
   1751  1.1  msaitoh                 if (rxr->lro.lro_cnt != 0)
   1752  1.1  msaitoh                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
   1753  1.1  msaitoh                                 return;
   1754  1.1  msaitoh         }
   1755  1.1  msaitoh #endif /* LRO */
   1756  1.1  msaitoh 
   1757  1.1  msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1758  1.1  msaitoh 
   1759  1.1  msaitoh 	s = splnet();
   1760  1.1  msaitoh 	/* Pass this up to any BPF listeners. */
   1761  1.1  msaitoh 	bpf_mtap(ifp, m);
   1762  1.1  msaitoh 	if_input(ifp, m);
   1763  1.1  msaitoh 	splx(s);
   1764  1.1  msaitoh 
   1765  1.1  msaitoh 	IXGBE_RX_LOCK(rxr);
   1766  1.1  msaitoh }
   1767  1.1  msaitoh 
   1768  1.1  msaitoh static __inline void
   1769  1.1  msaitoh ixgbe_rx_discard(struct rx_ring *rxr, int i)
   1770  1.1  msaitoh {
   1771  1.1  msaitoh 	struct ixgbe_rx_buf	*rbuf;
   1772  1.1  msaitoh 
   1773  1.1  msaitoh 	rbuf = &rxr->rx_buffers[i];
   1774  1.1  msaitoh 
   1775  1.1  msaitoh 
   1776  1.1  msaitoh 	/*
   1777  1.1  msaitoh 	** With advanced descriptors the writeback
   1778  1.1  msaitoh 	** clobbers the buffer addrs, so it's easier
   1779  1.1  msaitoh 	** to just free the existing mbufs and take
   1780  1.1  msaitoh 	** the normal refresh path to get new buffers
   1781  1.1  msaitoh 	** and mapping.
   1782  1.1  msaitoh 	*/
   1783  1.1  msaitoh 
   1784  1.1  msaitoh 	if (rbuf->fmp != NULL) {/* Partial chain ? */
   1785  1.1  msaitoh 		rbuf->fmp->m_flags |= M_PKTHDR;
   1786  1.1  msaitoh 		m_freem(rbuf->fmp);
   1787  1.1  msaitoh 		rbuf->fmp = NULL;
   1788  1.1  msaitoh 		rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
   1789  1.1  msaitoh 	} else if (rbuf->buf) {
   1790  1.1  msaitoh 		m_free(rbuf->buf);
   1791  1.1  msaitoh 		rbuf->buf = NULL;
   1792  1.1  msaitoh 	}
   1793  1.4  msaitoh 	ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
   1794  1.1  msaitoh 
   1795  1.1  msaitoh 	rbuf->flags = 0;
   1796  1.1  msaitoh 
   1797  1.1  msaitoh 	return;
   1798  1.1  msaitoh }
   1799  1.1  msaitoh 
   1800  1.1  msaitoh 
   1801  1.1  msaitoh /*********************************************************************
   1802  1.1  msaitoh  *
   1803  1.1  msaitoh  *  This routine executes in interrupt context. It replenishes
   1804  1.1  msaitoh  *  the mbufs in the descriptor ring and sends data which has been
   1805  1.1  msaitoh  *  DMA'ed into host memory to the upper layer.
   1806  1.1  msaitoh  *
   1807  1.1  msaitoh  *  Return TRUE for more work, FALSE for all clean.
   1808  1.1  msaitoh  *********************************************************************/
   1809  1.1  msaitoh bool
   1810  1.1  msaitoh ixgbe_rxeof(struct ix_queue *que)
   1811  1.1  msaitoh {
   1812  1.1  msaitoh 	struct adapter		*adapter = que->adapter;
   1813  1.1  msaitoh 	struct rx_ring		*rxr = que->rxr;
   1814  1.1  msaitoh 	struct ifnet		*ifp = adapter->ifp;
   1815  1.1  msaitoh #ifdef LRO
   1816  1.1  msaitoh 	struct lro_ctrl		*lro = &rxr->lro;
   1817  1.1  msaitoh 	struct lro_entry	*queued;
   1818  1.1  msaitoh #endif /* LRO */
   1819  1.1  msaitoh 	int			i, nextp, processed = 0;
   1820  1.1  msaitoh 	u32			staterr = 0;
   1821  1.1  msaitoh 	u16			count = rxr->process_limit;
   1822  1.1  msaitoh 	union ixgbe_adv_rx_desc	*cur;
   1823  1.1  msaitoh 	struct ixgbe_rx_buf	*rbuf, *nbuf;
   1824  1.1  msaitoh #ifdef RSS
   1825  1.1  msaitoh 	u16			pkt_info;
   1826  1.1  msaitoh #endif
   1827  1.1  msaitoh 
   1828  1.1  msaitoh 	IXGBE_RX_LOCK(rxr);
   1829  1.1  msaitoh 
   1830  1.1  msaitoh #ifdef DEV_NETMAP
   1831  1.1  msaitoh 	/* Same as the txeof routine: wakeup clients on intr. */
   1832  1.1  msaitoh 	if (netmap_rx_irq(ifp, rxr->me, &processed)) {
   1833  1.1  msaitoh 		IXGBE_RX_UNLOCK(rxr);
   1834  1.1  msaitoh 		return (FALSE);
   1835  1.1  msaitoh 	}
   1836  1.1  msaitoh #endif /* DEV_NETMAP */
   1837  1.1  msaitoh 
   1838  1.1  msaitoh 	for (i = rxr->next_to_check; count != 0;) {
   1839  1.1  msaitoh 		struct mbuf	*sendmp, *mp;
   1840  1.1  msaitoh 		u32		rsc, ptype;
   1841  1.1  msaitoh 		u16		len;
   1842  1.1  msaitoh 		u16		vtag = 0;
   1843  1.1  msaitoh 		bool		eop;
   1844  1.1  msaitoh 
   1845  1.1  msaitoh 		/* Sync the ring. */
   1846  1.1  msaitoh 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1847  1.1  msaitoh 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1848  1.1  msaitoh 
   1849  1.1  msaitoh 		cur = &rxr->rx_base[i];
   1850  1.1  msaitoh 		staterr = le32toh(cur->wb.upper.status_error);
   1851  1.1  msaitoh #ifdef RSS
   1852  1.1  msaitoh 		pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
   1853  1.1  msaitoh #endif
   1854  1.1  msaitoh 
   1855  1.1  msaitoh 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   1856  1.1  msaitoh 			break;
   1857  1.1  msaitoh 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   1858  1.1  msaitoh 			break;
   1859  1.1  msaitoh 
   1860  1.1  msaitoh 		count--;
   1861  1.1  msaitoh 		sendmp = NULL;
   1862  1.1  msaitoh 		nbuf = NULL;
   1863  1.1  msaitoh 		rsc = 0;
   1864  1.1  msaitoh 		cur->wb.upper.status_error = 0;
   1865  1.1  msaitoh 		rbuf = &rxr->rx_buffers[i];
   1866  1.1  msaitoh 		mp = rbuf->buf;
   1867  1.1  msaitoh 
   1868  1.1  msaitoh 		len = le16toh(cur->wb.upper.length);
   1869  1.1  msaitoh 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   1870  1.1  msaitoh 		    IXGBE_RXDADV_PKTTYPE_MASK;
   1871  1.1  msaitoh 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   1872  1.1  msaitoh 
   1873  1.1  msaitoh 		/* Make sure bad packets are discarded */
   1874  1.1  msaitoh 		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
   1875  1.3  msaitoh #if __FreeBSD_version >= 1100036
   1876  1.4  msaitoh 			if (IXGBE_IS_VF(adapter))
   1877  1.4  msaitoh 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
   1878  1.3  msaitoh #endif
   1879  1.1  msaitoh 			rxr->rx_discarded.ev_count++;
   1880  1.1  msaitoh 			ixgbe_rx_discard(rxr, i);
   1881  1.1  msaitoh 			goto next_desc;
   1882  1.1  msaitoh 		}
   1883  1.1  msaitoh 
   1884  1.1  msaitoh 		/*
   1885  1.1  msaitoh 		** On the 82599, which supports a hardware
   1886  1.1  msaitoh 		** LRO (called HW RSC), packets need not
   1887  1.1  msaitoh 		** be fragmented across sequential
   1888  1.1  msaitoh 		** descriptors; instead the next descriptor
   1889  1.1  msaitoh 		** is indicated in bits of this descriptor.
   1890  1.1  msaitoh 		** This also means that we might process
   1891  1.1  msaitoh 		** more than one packet at a time, something
   1892  1.1  msaitoh 		** that had never been true before.  This
   1893  1.1  msaitoh 		** required eliminating global chain pointers
   1894  1.1  msaitoh 		** in favor of what we are doing here.  -jfv
   1895  1.1  msaitoh 		*/
   1896  1.1  msaitoh 		if (!eop) {
   1897  1.1  msaitoh 			/*
   1898  1.1  msaitoh 			** Figure out the next descriptor
   1899  1.1  msaitoh 			** of this frame.
   1900  1.1  msaitoh 			*/
   1901  1.1  msaitoh 			if (rxr->hw_rsc == TRUE) {
   1902  1.1  msaitoh 				rsc = ixgbe_rsc_count(cur);
   1903  1.1  msaitoh 				rxr->rsc_num += (rsc - 1);
   1904  1.1  msaitoh 			}
   1905  1.1  msaitoh 			if (rsc) { /* Get hardware index */
   1906  1.1  msaitoh 				nextp = ((staterr &
   1907  1.1  msaitoh 				    IXGBE_RXDADV_NEXTP_MASK) >>
   1908  1.1  msaitoh 				    IXGBE_RXDADV_NEXTP_SHIFT);
   1909  1.1  msaitoh 			} else { /* Just sequential */
   1910  1.1  msaitoh 				nextp = i + 1;
   1911  1.1  msaitoh 				if (nextp == adapter->num_rx_desc)
   1912  1.1  msaitoh 					nextp = 0;
   1913  1.1  msaitoh 			}
   1914  1.1  msaitoh 			nbuf = &rxr->rx_buffers[nextp];
   1915  1.1  msaitoh 			prefetch(nbuf);
   1916  1.1  msaitoh 		}
   1917  1.1  msaitoh 		/*
   1918  1.1  msaitoh 		** Rather than using the fmp/lmp global pointers
   1919  1.1  msaitoh 		** we now keep the head of a packet chain in the
   1920  1.1  msaitoh 		** buffer struct and pass this along from one
   1921  1.1  msaitoh 		** descriptor to the next, until we get EOP.
   1922  1.1  msaitoh 		*/
   1923  1.1  msaitoh 		mp->m_len = len;
   1924  1.1  msaitoh 		/*
   1925  1.1  msaitoh 		** See if there is a stored head
   1926  1.1  msaitoh 		** that determines what we are
   1927  1.1  msaitoh 		*/
   1928  1.1  msaitoh 		sendmp = rbuf->fmp;
   1929  1.1  msaitoh 		if (sendmp != NULL) {  /* secondary frag */
   1930  1.1  msaitoh 			rbuf->buf = rbuf->fmp = NULL;
   1931  1.1  msaitoh 			mp->m_flags &= ~M_PKTHDR;
   1932  1.1  msaitoh 			sendmp->m_pkthdr.len += mp->m_len;
   1933  1.1  msaitoh 		} else {
   1934  1.1  msaitoh 			/*
   1935  1.1  msaitoh 			 * Optimize.  This might be a small packet,
   1936  1.1  msaitoh 			 * maybe just a TCP ACK.  Do a fast copy that
   1937  1.1  msaitoh 			 * is cache aligned into a new mbuf, and
   1938  1.1  msaitoh 			 * leave the old mbuf+cluster for re-use.
   1939  1.1  msaitoh 			 */
   1940  1.1  msaitoh 			if (eop && len <= IXGBE_RX_COPY_LEN) {
   1941  1.1  msaitoh 				sendmp = m_gethdr(M_NOWAIT, MT_DATA);
   1942  1.1  msaitoh 				if (sendmp != NULL) {
   1943  1.1  msaitoh 					sendmp->m_data +=
   1944  1.1  msaitoh 					    IXGBE_RX_COPY_ALIGN;
   1945  1.1  msaitoh 					ixgbe_bcopy(mp->m_data,
   1946  1.1  msaitoh 					    sendmp->m_data, len);
   1947  1.1  msaitoh 					sendmp->m_len = len;
   1948  1.1  msaitoh 					rxr->rx_copies.ev_count++;
   1949  1.1  msaitoh 					rbuf->flags |= IXGBE_RX_COPY;
   1950  1.1  msaitoh 				}
   1951  1.1  msaitoh 			}
   1952  1.1  msaitoh 			if (sendmp == NULL) {
   1953  1.1  msaitoh 				rbuf->buf = rbuf->fmp = NULL;
   1954  1.1  msaitoh 				sendmp = mp;
   1955  1.1  msaitoh 			}
   1956  1.1  msaitoh 
   1957  1.1  msaitoh 			/* first desc of a non-ps chain */
   1958  1.1  msaitoh 			sendmp->m_flags |= M_PKTHDR;
   1959  1.1  msaitoh 			sendmp->m_pkthdr.len = mp->m_len;
   1960  1.1  msaitoh 		}
   1961  1.1  msaitoh 		++processed;
   1962  1.1  msaitoh 
   1963  1.1  msaitoh 		/* Pass the head pointer on */
   1964  1.1  msaitoh 		if (eop == 0) {
   1965  1.1  msaitoh 			nbuf->fmp = sendmp;
   1966  1.1  msaitoh 			sendmp = NULL;
   1967  1.1  msaitoh 			mp->m_next = nbuf->buf;
   1968  1.1  msaitoh 		} else { /* Sending this frame */
   1969  1.1  msaitoh 			m_set_rcvif(sendmp, ifp);
   1970  1.1  msaitoh 			ifp->if_ipackets++;
   1971  1.1  msaitoh 			rxr->rx_packets.ev_count++;
   1972  1.1  msaitoh 			/* capture data for AIM */
   1973  1.1  msaitoh 			rxr->bytes += sendmp->m_pkthdr.len;
   1974  1.1  msaitoh 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   1975  1.1  msaitoh 			/* Process vlan info */
   1976  1.1  msaitoh 			if ((rxr->vtag_strip) &&
   1977  1.1  msaitoh 			    (staterr & IXGBE_RXD_STAT_VP))
   1978  1.1  msaitoh 				vtag = le16toh(cur->wb.upper.vlan);
   1979  1.1  msaitoh 			if (vtag) {
   1980  1.1  msaitoh 				VLAN_INPUT_TAG(ifp, sendmp, vtag,
   1981  1.1  msaitoh 				    printf("%s: could not apply VLAN "
   1982  1.1  msaitoh 					"tag", __func__));
   1983  1.1  msaitoh 			}
   1984  1.1  msaitoh 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   1985  1.1  msaitoh 				ixgbe_rx_checksum(staterr, sendmp, ptype,
   1986  1.3  msaitoh 				   &adapter->stats.pf);
   1987  1.1  msaitoh 			}
   1988  1.6  msaitoh #if 0 /* FreeBSD */
   1989  1.6  msaitoh                         /*
   1990  1.6  msaitoh                          * In case of multiqueue, we have RXCSUM.PCSD bit set
   1991  1.6  msaitoh                          * and never cleared. This means we have RSS hash
   1992  1.6  msaitoh                          * available to be used.
   1993  1.6  msaitoh                          */
   1994  1.6  msaitoh                         if (adapter->num_queues > 1) {
   1995  1.6  msaitoh                                 sendmp->m_pkthdr.flowid =
   1996  1.6  msaitoh                                     le32toh(cur->wb.lower.hi_dword.rss);
   1997  1.6  msaitoh                                 switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
   1998  1.6  msaitoh                                     case IXGBE_RXDADV_RSSTYPE_IPV4:
   1999  1.6  msaitoh                                         M_HASHTYPE_SET(sendmp,
   2000  1.6  msaitoh                                             M_HASHTYPE_RSS_IPV4);
   2001  1.6  msaitoh                                         break;
   2002  1.6  msaitoh                                     case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
   2003  1.6  msaitoh                                         M_HASHTYPE_SET(sendmp,
   2004  1.6  msaitoh                                             M_HASHTYPE_RSS_TCP_IPV4);
   2005  1.6  msaitoh                                         break;
   2006  1.6  msaitoh                                     case IXGBE_RXDADV_RSSTYPE_IPV6:
   2007  1.6  msaitoh                                         M_HASHTYPE_SET(sendmp,
   2008  1.6  msaitoh                                             M_HASHTYPE_RSS_IPV6);
   2009  1.6  msaitoh                                         break;
   2010  1.6  msaitoh                                     case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
   2011  1.6  msaitoh                                         M_HASHTYPE_SET(sendmp,
   2012  1.6  msaitoh                                             M_HASHTYPE_RSS_TCP_IPV6);
   2013  1.6  msaitoh                                         break;
   2014  1.6  msaitoh                                     case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
   2015  1.6  msaitoh                                         M_HASHTYPE_SET(sendmp,
   2016  1.6  msaitoh                                             M_HASHTYPE_RSS_IPV6_EX);
   2017  1.6  msaitoh                                         break;
   2018  1.6  msaitoh                                     case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
   2019  1.6  msaitoh                                         M_HASHTYPE_SET(sendmp,
   2020  1.6  msaitoh                                             M_HASHTYPE_RSS_TCP_IPV6_EX);
   2021  1.6  msaitoh                                         break;
   2022  1.6  msaitoh #if __FreeBSD_version > 1100000
   2023  1.6  msaitoh                                     case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
   2024  1.6  msaitoh                                         M_HASHTYPE_SET(sendmp,
   2025  1.6  msaitoh                                             M_HASHTYPE_RSS_UDP_IPV4);
   2026  1.6  msaitoh                                         break;
   2027  1.6  msaitoh                                     case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
   2028  1.6  msaitoh                                         M_HASHTYPE_SET(sendmp,
   2029  1.6  msaitoh                                             M_HASHTYPE_RSS_UDP_IPV6);
   2030  1.6  msaitoh                                         break;
   2031  1.6  msaitoh                                     case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
   2032  1.6  msaitoh                                         M_HASHTYPE_SET(sendmp,
   2033  1.6  msaitoh                                             M_HASHTYPE_RSS_UDP_IPV6_EX);
   2034  1.6  msaitoh                                         break;
   2035  1.6  msaitoh #endif
   2036  1.6  msaitoh                                     default:
   2037  1.6  msaitoh                                         M_HASHTYPE_SET(sendmp,
   2038  1.6  msaitoh                                             M_HASHTYPE_OPAQUE);
   2039  1.6  msaitoh                                 }
   2040  1.6  msaitoh                         } else {
   2041  1.6  msaitoh                                 sendmp->m_pkthdr.flowid = que->msix;
   2042  1.1  msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
   2043  1.1  msaitoh 			}
   2044  1.1  msaitoh #endif /* FreeBSD_version */
   2045  1.1  msaitoh 		}
   2046  1.1  msaitoh next_desc:
   2047  1.1  msaitoh 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   2048  1.1  msaitoh 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2049  1.1  msaitoh 
   2050  1.1  msaitoh 		/* Advance our pointers to the next descriptor. */
   2051  1.1  msaitoh 		if (++i == rxr->num_desc)
   2052  1.1  msaitoh 			i = 0;
   2053  1.1  msaitoh 
   2054  1.1  msaitoh 		/* Now send to the stack or do LRO */
   2055  1.1  msaitoh 		if (sendmp != NULL) {
   2056  1.1  msaitoh 			rxr->next_to_check = i;
   2057  1.1  msaitoh 			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
   2058  1.1  msaitoh 			i = rxr->next_to_check;
   2059  1.1  msaitoh 		}
   2060  1.1  msaitoh 
   2061  1.1  msaitoh 		/* Refresh mbufs after every 8 processed descriptors */
   2062  1.1  msaitoh 		if (processed == 8) {
   2063  1.1  msaitoh 			ixgbe_refresh_mbufs(rxr, i);
   2064  1.1  msaitoh 			processed = 0;
   2065  1.1  msaitoh 		}
   2066  1.1  msaitoh 	}
   2067  1.1  msaitoh 
   2068  1.1  msaitoh 	/* Refresh any remaining buf structs */
   2069  1.1  msaitoh 	if (ixgbe_rx_unrefreshed(rxr))
   2070  1.1  msaitoh 		ixgbe_refresh_mbufs(rxr, i);
   2071  1.1  msaitoh 
   2072  1.1  msaitoh 	rxr->next_to_check = i;
   2073  1.1  msaitoh 
   2074  1.1  msaitoh #ifdef LRO
   2075  1.1  msaitoh 	/*
   2076  1.1  msaitoh 	 * Flush any outstanding LRO work
   2077  1.1  msaitoh 	 */
   2078  1.1  msaitoh 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
   2079  1.1  msaitoh 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
   2080  1.1  msaitoh 		tcp_lro_flush(lro, queued);
   2081  1.1  msaitoh 	}
   2082  1.1  msaitoh #endif /* LRO */
   2083  1.1  msaitoh 
   2084  1.1  msaitoh 	IXGBE_RX_UNLOCK(rxr);
   2085  1.1  msaitoh 
   2086  1.1  msaitoh 	/*
   2087  1.1  msaitoh 	** Still have cleaning to do?
   2088  1.1  msaitoh 	*/
   2089  1.1  msaitoh 	if ((staterr & IXGBE_RXD_STAT_DD) != 0)
   2090  1.1  msaitoh 		return true;
   2091  1.1  msaitoh 	else
   2092  1.1  msaitoh 		return false;
   2093  1.1  msaitoh }
   2094  1.1  msaitoh 
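                      /*
                       * How the return value is meant to be consumed (a hedged sketch; the
                       * interrupt and softint handlers live elsewhere in the driver, and
                       * que->que_si / ixgbe_enable_queue() are assumptions about that code):
                       *
                       *	more = ixgbe_rxeof(que);
                       *	if (more)
                       *		softint_schedule(que->que_si);		// keep cleaning
                       *	else
                       *		ixgbe_enable_queue(adapter, que->msix);	// re-arm the intr
                       */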
   2095  1.1  msaitoh 
   2096  1.1  msaitoh /*********************************************************************
   2097  1.1  msaitoh  *
   2098  1.1  msaitoh  *  Verify that the hardware indicated that the checksum is valid.
   2099  1.1  msaitoh  *  Inform the stack about the status of checksum so that stack
   2100  1.1  msaitoh  *  Inform the stack about the status of the checksum so that the
   2101  1.1  msaitoh  *  stack doesn't spend time verifying it.
   2102  1.1  msaitoh  *********************************************************************/
   2103  1.1  msaitoh static void
   2104  1.1  msaitoh ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   2105  1.1  msaitoh     struct ixgbe_hw_stats *stats)
   2106  1.1  msaitoh {
   2107  1.1  msaitoh 	u16	status = (u16) staterr;
   2108  1.1  msaitoh 	u8	errors = (u8) (staterr >> 24);
   2109  1.1  msaitoh #if 0
   2110  1.1  msaitoh 	bool	sctp = FALSE;
   2111  1.1  msaitoh 
   2112  1.1  msaitoh 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   2113  1.1  msaitoh 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   2114  1.1  msaitoh 		sctp = TRUE;
   2115  1.1  msaitoh #endif
   2116  1.1  msaitoh 
   2117  1.1  msaitoh 	if (status & IXGBE_RXD_STAT_IPCS) {
   2118  1.1  msaitoh 		stats->ipcs.ev_count++;
   2119  1.1  msaitoh 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   2120  1.1  msaitoh 			/* IP Checksum Good */
   2121  1.1  msaitoh 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
   2122  1.1  msaitoh 
   2123  1.1  msaitoh 		} else {
   2124  1.1  msaitoh 			stats->ipcs_bad.ev_count++;
   2125  1.1  msaitoh 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   2126  1.1  msaitoh 		}
   2127  1.1  msaitoh 	}
   2128  1.1  msaitoh 	if (status & IXGBE_RXD_STAT_L4CS) {
   2129  1.1  msaitoh 		stats->l4cs.ev_count++;
   2130  1.1  msaitoh 		int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
   2131  1.1  msaitoh 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   2132  1.1  msaitoh 			mp->m_pkthdr.csum_flags |= type;
   2133  1.1  msaitoh 		} else {
   2134  1.1  msaitoh 			stats->l4cs_bad.ev_count++;
   2135  1.1  msaitoh 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   2136  1.1  msaitoh 		}
   2137  1.1  msaitoh 	}
   2138  1.1  msaitoh 	return;
   2139  1.1  msaitoh }
   2140  1.1  msaitoh 
   2141  1.1  msaitoh 
   2142  1.1  msaitoh /********************************************************************
   2143  1.1  msaitoh  * Manage DMA'able memory.
   2144  1.1  msaitoh  *******************************************************************/
   2145  1.1  msaitoh 
   2146  1.1  msaitoh int
   2147  1.1  msaitoh ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
   2148  1.1  msaitoh 		struct ixgbe_dma_alloc *dma, const int mapflags)
   2149  1.1  msaitoh {
   2150  1.1  msaitoh 	device_t dev = adapter->dev;
   2151  1.1  msaitoh 	int             r, rsegs;
   2152  1.1  msaitoh 
   2153  1.1  msaitoh 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2154  1.1  msaitoh 			       DBA_ALIGN, 0,	/* alignment, bounds */
   2155  1.1  msaitoh 			       size,	/* maxsize */
   2156  1.1  msaitoh 			       1,	/* nsegments */
   2157  1.1  msaitoh 			       size,	/* maxsegsize */
   2158  1.1  msaitoh 			       BUS_DMA_ALLOCNOW,	/* flags */
   2159  1.1  msaitoh 			       &dma->dma_tag);
   2160  1.1  msaitoh 	if (r != 0) {
   2161  1.1  msaitoh 		aprint_error_dev(dev,
   2162  1.1  msaitoh 		    "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, r);
   2163  1.1  msaitoh 		goto fail_0;
   2164  1.1  msaitoh 	}
   2165  1.1  msaitoh 
   2166  1.1  msaitoh 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
   2167  1.1  msaitoh 		size,
   2168  1.1  msaitoh 		dma->dma_tag->dt_alignment,
   2169  1.1  msaitoh 		dma->dma_tag->dt_boundary,
   2170  1.1  msaitoh 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2171  1.1  msaitoh 	if (r != 0) {
   2172  1.1  msaitoh 		aprint_error_dev(dev,
   2173  1.1  msaitoh 		    "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
   2174  1.1  msaitoh 		goto fail_1;
   2175  1.1  msaitoh 	}
   2176  1.1  msaitoh 
   2177  1.1  msaitoh 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2178  1.1  msaitoh 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2179  1.1  msaitoh 	if (r != 0) {
   2180  1.1  msaitoh 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2181  1.1  msaitoh 		    __func__, r);
   2182  1.1  msaitoh 		goto fail_2;
   2183  1.1  msaitoh 	}
   2184  1.1  msaitoh 
   2185  1.1  msaitoh 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2186  1.1  msaitoh 	if (r != 0) {
   2187  1.1  msaitoh 		aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n",
   2188  1.1  msaitoh 		    __func__, r);
   2189  1.1  msaitoh 		goto fail_3;
   2190  1.1  msaitoh 	}
   2191  1.1  msaitoh 
   2192  1.1  msaitoh 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
   2193  1.1  msaitoh 			    size,
   2194  1.1  msaitoh 			    NULL,
   2195  1.1  msaitoh 			    mapflags | BUS_DMA_NOWAIT);
   2196  1.1  msaitoh 	if (r != 0) {
   2197  1.1  msaitoh 		aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
   2198  1.1  msaitoh 		    __func__, r);
   2199  1.1  msaitoh 		goto fail_4;
   2200  1.1  msaitoh 	}
   2201  1.1  msaitoh 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2202  1.1  msaitoh 	dma->dma_size = size;
   2203  1.1  msaitoh 	return 0;
   2204  1.1  msaitoh fail_4:
   2205  1.1  msaitoh 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2206  1.1  msaitoh fail_3:
   2207  1.1  msaitoh 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2208  1.1  msaitoh fail_2:
   2209  1.1  msaitoh 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2210  1.1  msaitoh fail_1:
   2211  1.1  msaitoh 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2212  1.1  msaitoh fail_0:
   2213  1.1  msaitoh 	return r;
   2214  1.1  msaitoh }
   2215  1.1  msaitoh 
   2216  1.3  msaitoh void
   2217  1.1  msaitoh ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
   2218  1.1  msaitoh {
   2219  1.1  msaitoh 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
   2220  1.1  msaitoh 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2221  1.1  msaitoh 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
   2222  1.1  msaitoh 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
   2223  1.1  msaitoh 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2224  1.1  msaitoh }
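                      /*
                       * Typical pairing of the two helpers above (a sketch; the real callers
                       * are ixgbe_allocate_queues() below and the free paths earlier in this
                       * file):
                       *
                       *	struct ixgbe_dma_alloc dma;
                       *	int tsize = roundup2(adapter->num_tx_desc *
                       *	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
                       *
                       *	if (ixgbe_dma_malloc(adapter, tsize, &dma, BUS_DMA_NOWAIT) == 0) {
                       *		// descriptors live at dma.dma_vaddr / dma.dma_paddr
                       *		ixgbe_dma_free(adapter, &dma);
                       *	}
                       */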
   2225  1.1  msaitoh 
   2226  1.1  msaitoh 
   2227  1.1  msaitoh /*********************************************************************
   2228  1.1  msaitoh  *
   2229  1.1  msaitoh  *  Allocate memory for the transmit and receive rings, and then
   2230  1.1  msaitoh  *  the descriptors associated with each; called only once at attach.
   2231  1.1  msaitoh  *
   2232  1.1  msaitoh  **********************************************************************/
   2233  1.1  msaitoh int
   2234  1.1  msaitoh ixgbe_allocate_queues(struct adapter *adapter)
   2235  1.1  msaitoh {
   2236  1.1  msaitoh 	device_t	dev = adapter->dev;
   2237  1.1  msaitoh 	struct ix_queue	*que;
   2238  1.1  msaitoh 	struct tx_ring	*txr;
   2239  1.1  msaitoh 	struct rx_ring	*rxr;
   2240  1.1  msaitoh 	int rsize, tsize, error = IXGBE_SUCCESS;
   2241  1.1  msaitoh 	int txconf = 0, rxconf = 0;
   2242  1.5  msaitoh #ifdef PCI_IOV
   2243  1.5  msaitoh 	enum ixgbe_iov_mode iov_mode;
   2244  1.5  msaitoh #endif
   2245  1.1  msaitoh 
    2246  1.1  msaitoh 	/* First allocate the top level queue structs */
    2247  1.1  msaitoh 	if (!(adapter->queues =
    2248  1.1  msaitoh 	    (struct ix_queue *) malloc(sizeof(struct ix_queue) *
    2249  1.1  msaitoh 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
    2250  1.1  msaitoh 		aprint_error_dev(dev, "Unable to allocate queue memory\n");
    2251  1.1  msaitoh 		error = ENOMEM;
    2252  1.1  msaitoh 		goto fail;
    2253  1.1  msaitoh 	}
   2254  1.1  msaitoh 
    2255  1.1  msaitoh 	/* Next allocate the TX ring struct memory */
   2256  1.1  msaitoh 	if (!(adapter->tx_rings =
   2257  1.1  msaitoh 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
   2258  1.1  msaitoh 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2259  1.1  msaitoh 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2260  1.1  msaitoh 		error = ENOMEM;
   2261  1.1  msaitoh 		goto tx_fail;
   2262  1.1  msaitoh 	}
   2263  1.1  msaitoh 
    2264  1.1  msaitoh 	/* Then allocate the RX ring struct memory */
   2265  1.1  msaitoh 	if (!(adapter->rx_rings =
   2266  1.1  msaitoh 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
   2267  1.1  msaitoh 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2268  1.1  msaitoh 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2269  1.1  msaitoh 		error = ENOMEM;
   2270  1.1  msaitoh 		goto rx_fail;
   2271  1.1  msaitoh 	}
   2272  1.1  msaitoh 
   2273  1.1  msaitoh 	/* For the ring itself */
   2274  1.1  msaitoh 	tsize = roundup2(adapter->num_tx_desc *
   2275  1.1  msaitoh 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
   2276  1.1  msaitoh 
   2277  1.5  msaitoh #ifdef PCI_IOV
   2278  1.5  msaitoh 	iov_mode = ixgbe_get_iov_mode(adapter);
   2279  1.5  msaitoh 	adapter->pool = ixgbe_max_vfs(iov_mode);
   2280  1.5  msaitoh #else
   2281  1.5  msaitoh 	adapter->pool = 0;
   2282  1.5  msaitoh #endif
   2283  1.1  msaitoh 	/*
    2284  1.1  msaitoh 	 * Now set up the TX queues; txconf is needed to handle the
    2285  1.1  msaitoh 	 * possibility that things fail midcourse and we need to
    2286  1.1  msaitoh 	 * undo the allocations gracefully
   2287  1.1  msaitoh 	 */
   2288  1.1  msaitoh 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2289  1.1  msaitoh 		/* Set up some basics */
   2290  1.1  msaitoh 		txr = &adapter->tx_rings[i];
   2291  1.1  msaitoh 		txr->adapter = adapter;
   2292  1.5  msaitoh #ifdef PCI_IOV
   2293  1.5  msaitoh 		txr->me = ixgbe_pf_que_index(iov_mode, i);
   2294  1.5  msaitoh #else
   2295  1.1  msaitoh 		txr->me = i;
   2296  1.5  msaitoh #endif
   2297  1.1  msaitoh 		txr->num_desc = adapter->num_tx_desc;
   2298  1.1  msaitoh 
   2299  1.1  msaitoh 		/* Initialize the TX side lock */
   2300  1.1  msaitoh 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2301  1.1  msaitoh 		    device_xname(dev), txr->me);
   2302  1.1  msaitoh 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2303  1.1  msaitoh 
   2304  1.1  msaitoh 		if (ixgbe_dma_malloc(adapter, tsize,
   2305  1.1  msaitoh 			&txr->txdma, BUS_DMA_NOWAIT)) {
   2306  1.1  msaitoh 			aprint_error_dev(dev,
   2307  1.1  msaitoh 			    "Unable to allocate TX Descriptor memory\n");
   2308  1.1  msaitoh 			error = ENOMEM;
   2309  1.1  msaitoh 			goto err_tx_desc;
   2310  1.1  msaitoh 		}
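                       		/* The DMA area just allocated backs the TX descriptor ring; clear it before use. */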
   2311  1.1  msaitoh 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2312  1.1  msaitoh 		bzero((void *)txr->tx_base, tsize);
   2313  1.1  msaitoh 
    2314  1.1  msaitoh 		/* Now allocate transmit buffers for the ring */
    2315  1.1  msaitoh 		if (ixgbe_allocate_transmit_buffers(txr)) {
   2316  1.1  msaitoh 			aprint_error_dev(dev,
   2317  1.1  msaitoh 			    "Critical Failure setting up transmit buffers\n");
   2318  1.1  msaitoh 			error = ENOMEM;
   2319  1.1  msaitoh 			goto err_tx_desc;
    2320  1.1  msaitoh 		}
   2321  1.1  msaitoh #ifndef IXGBE_LEGACY_TX
   2322  1.1  msaitoh 		/* Allocate a buf ring */
   2323  1.1  msaitoh 		txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
   2324  1.1  msaitoh 		    M_WAITOK, &txr->tx_mtx);
   2325  1.1  msaitoh 		if (txr->br == NULL) {
   2326  1.1  msaitoh 			aprint_error_dev(dev,
   2327  1.1  msaitoh 			    "Critical Failure setting up buf ring\n");
   2328  1.1  msaitoh 			error = ENOMEM;
   2329  1.1  msaitoh 			goto err_tx_desc;
    2330  1.1  msaitoh 		}
   2331  1.1  msaitoh #endif
   2332  1.1  msaitoh 	}
   2333  1.1  msaitoh 
   2334  1.1  msaitoh 	/*
   2335  1.1  msaitoh 	 * Next the RX queues...
   2336  1.1  msaitoh 	 */
   2337  1.1  msaitoh 	rsize = roundup2(adapter->num_rx_desc *
   2338  1.1  msaitoh 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   2339  1.1  msaitoh 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2340  1.1  msaitoh 		rxr = &adapter->rx_rings[i];
   2341  1.1  msaitoh 		/* Set up some basics */
   2342  1.1  msaitoh 		rxr->adapter = adapter;
   2343  1.5  msaitoh #ifdef PCI_IOV
   2344  1.5  msaitoh 		rxr->me = ixgbe_pf_que_index(iov_mode, i);
   2345  1.5  msaitoh #else
   2346  1.1  msaitoh 		rxr->me = i;
   2347  1.5  msaitoh #endif
   2348  1.1  msaitoh 		rxr->num_desc = adapter->num_rx_desc;
   2349  1.1  msaitoh 
   2350  1.1  msaitoh 		/* Initialize the RX side lock */
   2351  1.1  msaitoh 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2352  1.1  msaitoh 		    device_xname(dev), rxr->me);
   2353  1.1  msaitoh 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2354  1.1  msaitoh 
   2355  1.1  msaitoh 		if (ixgbe_dma_malloc(adapter, rsize,
   2356  1.1  msaitoh 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
   2357  1.1  msaitoh 			aprint_error_dev(dev,
    2358  1.1  msaitoh 			    "Unable to allocate RX Descriptor memory\n");
   2359  1.1  msaitoh 			error = ENOMEM;
   2360  1.1  msaitoh 			goto err_rx_desc;
   2361  1.1  msaitoh 		}
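                       		/* Likewise, the new DMA area backs the RX descriptor ring; clear it before use. */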
   2362  1.1  msaitoh 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2363  1.1  msaitoh 		bzero((void *)rxr->rx_base, rsize);
   2364  1.1  msaitoh 
    2365  1.1  msaitoh 		/* Allocate receive buffers for the ring */
   2366  1.1  msaitoh 		if (ixgbe_allocate_receive_buffers(rxr)) {
   2367  1.1  msaitoh 			aprint_error_dev(dev,
   2368  1.1  msaitoh 			    "Critical Failure setting up receive buffers\n");
   2369  1.1  msaitoh 			error = ENOMEM;
   2370  1.1  msaitoh 			goto err_rx_desc;
   2371  1.1  msaitoh 		}
   2372  1.1  msaitoh 	}
   2373  1.1  msaitoh 
   2374  1.1  msaitoh 	/*
   2375  1.1  msaitoh 	** Finally set up the queue holding structs
   2376  1.1  msaitoh 	*/
   2377  1.1  msaitoh 	for (int i = 0; i < adapter->num_queues; i++) {
   2378  1.1  msaitoh 		que = &adapter->queues[i];
   2379  1.1  msaitoh 		que->adapter = adapter;
   2380  1.3  msaitoh 		que->me = i;
   2381  1.1  msaitoh 		que->txr = &adapter->tx_rings[i];
   2382  1.1  msaitoh 		que->rxr = &adapter->rx_rings[i];
   2383  1.1  msaitoh 	}
   2384  1.1  msaitoh 
   2385  1.1  msaitoh 	return (0);
   2386  1.1  msaitoh 
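                       /*
                        * Partial-failure unwind: txconf and rxconf count the rings that were
                        * completely set up, so only their descriptor areas are freed before
                        * the ring and queue arrays themselves are released.
                        */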
   2387  1.1  msaitoh err_rx_desc:
   2388  1.1  msaitoh 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2389  1.1  msaitoh 		ixgbe_dma_free(adapter, &rxr->rxdma);
   2390  1.1  msaitoh err_tx_desc:
   2391  1.1  msaitoh 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2392  1.1  msaitoh 		ixgbe_dma_free(adapter, &txr->txdma);
   2393  1.1  msaitoh 	free(adapter->rx_rings, M_DEVBUF);
   2394  1.1  msaitoh rx_fail:
   2395  1.1  msaitoh 	free(adapter->tx_rings, M_DEVBUF);
   2396  1.1  msaitoh tx_fail:
   2397  1.1  msaitoh 	free(adapter->queues, M_DEVBUF);
   2398  1.1  msaitoh fail:
   2399  1.1  msaitoh 	return (error);
   2400  1.1  msaitoh }
   2401  1.1  msaitoh 
   2402