/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe.c 279805 2015-03-09 10:29:15Z araujo $*/
/*$NetBSD: ix_txrx.c,v 1.1 2016/11/28 02:23:33 msaitoh Exp $*/

#include "ixgbe.h"

/*
** HW RSC control:
**  This feature only works with IPv4, and only on 82599
**  and later. It also breaks IP forwarding, and unlike
**  LRO that cannot be controlled by the stack. For all
**  these reasons it is best left off by default, with no
**  tunable interface; enabling it requires a compile-time
**  change.
*/
static bool ixgbe_rsc_enable = FALSE;
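
/*
 * A minimal sketch of what "compiled to enable" means here: flip the
 * initializer above and rebuild the kernel, e.g.
 *
 *	static bool ixgbe_rsc_enable = TRUE;
 *
 * There is deliberately no sysctl or ifconfig knob for this.
 */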

static void	ixgbe_setup_transmit_ring(struct tx_ring *);
static void	ixgbe_free_transmit_buffers(struct tx_ring *);
static int	ixgbe_setup_receive_ring(struct rx_ring *);
static void	ixgbe_free_receive_buffers(struct rx_ring *);

static void	ixgbe_rx_checksum(u32, struct mbuf *, u32,
		    struct ixgbe_hw_stats *);
static void	ixgbe_refresh_mbufs(struct rx_ring *, int);
static int	ixgbe_xmit(struct tx_ring *, struct mbuf *);
static int	ixgbe_tx_ctx_setup(struct tx_ring *,
		    struct mbuf *, u32 *, u32 *);
static int	ixgbe_tso_setup(struct tx_ring *,
		    struct mbuf *, u32 *, u32 *);
#ifdef IXGBE_FDIR
static void	ixgbe_atr(struct tx_ring *, struct mbuf *);
#endif
static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u32);

static void	ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
static void	ixgbe_setup_hw_rsc(struct rx_ring *);

#ifdef IXGBE_LEGACY_TX
/*********************************************************************
 *  Transmit entry point
 *
 *  ixgbe_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

void
ixgbe_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
	int rc;
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXGBE_TX_LOCK_ASSERT(txr);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
			break;

		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN)
			break;
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (rc == EFBIG) {
			struct mbuf *mtmp;

			if ((mtmp = m_defrag(m_head, M_NOWAIT)) != NULL) {
				m_head = mtmp;
				rc = ixgbe_xmit(txr, m_head);
				if (rc != 0)
					adapter->efbig2_tx_dma_setup.ev_count++;
			} else
				adapter->m_defrag_failed.ev_count++;
		}
		if (rc != 0) {
			m_freem(m_head);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, m_head);

		/* Set watchdog on */
		getmicrotime(&txr->watchdog_time);
		txr->queue_status = IXGBE_QUEUE_WORKING;
	}
	return;
}
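
/*
 * A note on the poll-then-dequeue pattern above: IFQ_POLL() peeks at
 * the head of if_snd without removing it, so when ixgbe_xmit() returns
 * EAGAIN (no descriptors available) the loop can simply break and the
 * untouched packet is retried on the next start call. Only once the
 * mbuf has a real chance of being sent is it committed with
 * IFQ_DEQUEUE(); from that point the driver owns it and must either
 * transmit it or m_freem() it, as the EFBIG/m_defrag() path does.
 */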

/*
 * Legacy TX start - called by the stack; this always
 * uses the first tx ring and should not be used with
 * multiqueue tx enabled.
 */
void
ixgbe_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;

	if (ifp->if_flags & IFF_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_start_locked(txr, ifp);
		IXGBE_TX_UNLOCK(txr);
	}
	return;
}

#else /* ! IXGBE_LEGACY_TX */

/*
** Multiqueue Transmit driver
**
*/
int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	int		i, err = 0;
#ifdef	RSS
	uint32_t bucket_id;
#endif

	/*
	 * Which queue to use:
	 *
	 * When doing RSS, map the flow to the same outbound
	 * queue as its incoming half would be mapped to.
	 *
	 * If everything is set up correctly, it should be the
	 * same bucket as the one the current CPU is bound to.
	 */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
#ifdef	RSS
		if (rss_hash2bucket(m->m_pkthdr.flowid,
		    M_HASHTYPE_GET(m), &bucket_id) == 0) {
			/* XXX TODO: spit out something if bucket_id > num_queues? */
			i = bucket_id % adapter->num_queues;
		} else {
#endif
			i = m->m_pkthdr.flowid % adapter->num_queues;
#ifdef	RSS
		}
#endif
	} else {
		i = curcpu % adapter->num_queues;
	}

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	err = drbr_enqueue(ifp, txr->br, m);
	if (err)
		return (err);
	if (IXGBE_TX_TRYLOCK(txr)) {
		ixgbe_mq_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	} else
		softint_schedule(txr->txq_si);

	return (0);
}
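
/*
 * Worked example of the queue selection above (hypothetical numbers):
 * with adapter->num_queues == 8 and a packet carrying flowid 43, the
 * modulo picks i = 43 % 8 = 3, so the packet is enqueued on
 * tx_rings[3]. Packets without a hash fall back to curcpu % 8, which
 * at least keeps any one CPU's traffic on a single ring.
 */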

int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct adapter  *adapter = txr->adapter;
	struct mbuf     *next;
	int             enqueued = 0, err = 0;

	if (((ifp->if_flags & IFF_RUNNING) == 0) ||
	    adapter->link_active == 0)
		return (ENETDOWN);

	/* Process the queue */
#if __FreeBSD_version < 901504
	next = drbr_dequeue(ifp, txr->br);
	while (next != NULL) {
		if ((err = ixgbe_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
#else
	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
		if ((err = ixgbe_xmit(txr, &next)) != 0) {
			if (next == NULL) {
				drbr_advance(ifp, txr->br);
			} else {
				drbr_putback(ifp, txr->br, next);
			}
#endif
			break;
		}
#if __FreeBSD_version >= 901504
		drbr_advance(ifp, txr->br);
#endif
		enqueued++;
		/* Send a copy of the frame to the BPF listener */
		bpf_mtap(ifp, next);
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
#if __FreeBSD_version < 901504
		next = drbr_dequeue(ifp, txr->br);
#endif
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->queue_status = IXGBE_QUEUE_WORKING;
		getmicrotime(&txr->watchdog_time);
	}

	if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD)
		ixgbe_txeof(txr);

	return (err);
}
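
/*
 * The #if dance above reflects a buf_ring API change: before FreeBSD
 * 901504 the only primitive was drbr_dequeue(), so a packet that
 * failed to map had to be re-enqueued, losing its place at the head.
 * Newer kernels provide drbr_peek(), which lets the driver look at
 * the head entry, attempt the transmit, and then either commit with
 * drbr_advance() or return it in place with drbr_putback().
 */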

/*
 * Called from a taskqueue to drain queued transmit packets.
 */
void
ixgbe_deferred_mq_start(void *arg, int pending)
{
	struct tx_ring *txr = arg;
	struct adapter *adapter = txr->adapter;
	struct ifnet *ifp = adapter->ifp;

	IXGBE_TX_LOCK(txr);
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);
}

/*
** Flush all ring buffers
*/
void
ixgbe_qflush(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;
	struct mbuf	*m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXGBE_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXGBE_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}
#endif /* IXGBE_LEGACY_TX */

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *	- return 0 on success, positive on failure
 *
 **********************************************************************/

static int
ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
{
	struct m_tag *mtag;
	struct adapter  *adapter = txr->adapter;
	struct ethercom *ec = &adapter->osdep.ec;
	u32		olinfo_status = 0, cmd_type_len;
	int             i, j, error;
	int		first;
	bus_dmamap_t	map;
	struct ixgbe_tx_buf *txbuf;
	union ixgbe_adv_tx_desc *txd = NULL;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * It is important to capture the first descriptor used,
	 * because it will contain the index of the one we tell
	 * the hardware to report back.
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
	    m_head, BUS_DMA_NOWAIT);

	if (__predict_false(error)) {
		switch (error) {
		case EAGAIN:
			adapter->eagain_tx_dma_setup.ev_count++;
			return EAGAIN;
		case ENOMEM:
			adapter->enomem_tx_dma_setup.ev_count++;
			return EAGAIN;
		case EFBIG:
			/*
			 * XXX Try it again?
			 * do m_defrag() and retry bus_dmamap_load_mbuf().
			 */
			adapter->efbig_tx_dma_setup.ev_count++;
			return error;
		case EINVAL:
			adapter->einval_tx_dma_setup.ev_count++;
			return error;
		default:
			adapter->other_tx_dma_setup.ev_count++;
			return error;
		}
	}

	/* Make certain there are enough descriptors */
	if (map->dm_nsegs > txr->tx_avail - 2) {
		txr->no_desc_avail.ev_count++;
		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
		return EAGAIN;
	}

	/*
	** Set up the appropriate offload context;
	** this will consume the first descriptor.
	*/
	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
	if (__predict_false(error)) {
		return (error);
	}

#ifdef IXGBE_FDIR
	/* Do the flow director magic */
	if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
		++txr->atr_count;
		if (txr->atr_count >= atr_sample_rate) {
			ixgbe_atr(txr, m_head);
			txr->atr_count = 0;
		}
	}
#endif

	i = txr->next_avail_desc;
	for (j = 0; j < map->dm_nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = map->dm_segs[j].ds_len;
		segaddr = htole64(map->dm_segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(txr->txd_cmd |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		if (++i == txr->num_desc)
			i = 0;
	}

	txd->read.cmd_type_len |=
	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= map->dm_nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	/*
	** Here we swap the map so the last descriptor,
	** which gets the completion interrupt, has the
	** real map, and the first descriptor gets the
	** unused map from this descriptor.
	*/
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);

	/* Set the EOP descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop = txd;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets.ev_count;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);

	return 0;
}
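
/*
 * Illustration of the map-swap above (hypothetical slot numbers): say
 * a packet's context descriptor lands in slot 10 and its two data
 * segments in slots 11 and 12. The dmamap that was actually loaded
 * came from tx_buffers[10], but the mbuf is parked at slot 12, the
 * EOP slot. The swap hands the loaded map to slot 12 and parks slot
 * 12's idle map back at slot 10, so when ixgbe_txeof() walks the
 * range and frees the mbuf it finds, the map it unloads is the one
 * that was really loaded.
 */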

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach; setup is done on every reset.
 *
 **********************************************************************/
int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	device_t dev = adapter->dev;
	struct ixgbe_tx_buf *txbuf;
	int error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
			       1, 0,		/* alignment, bounds */
			       IXGBE_TSO_SIZE,		/* maxsize */
			       adapter->num_segs,	/* nsegments */
			       PAGE_SIZE,		/* maxsegsize */
			       0,			/* flags */
			       &txr->txtag))) {
		aprint_error_dev(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	if (!(txr->tx_buffers =
	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			aprint_error_dev(dev,
			    "Unable to create TX DMA map (%d)\n", error);
			goto fail;
		}
	}

	return 0;
fail:
	/* Free everything; this also handles a partially-completed allocation */
	ixgbe_free_transmit_structures(adapter);
	return (error);
}

/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_tx_buf *txbuf;
	int i;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_slot *slot;
#endif /* DEV_NETMAP */

	/* Clear the old ring contents */
	IXGBE_TX_LOCK(txr);
#ifdef DEV_NETMAP
	/*
	 * (under lock): if in netmap mode, do some consistency
	 * checks and set slot to entry 0 of the netmap ring.
	 */
	slot = netmap_reset(na, NR_TX, txr->me, 0);
#endif /* DEV_NETMAP */
	bzero((void *)txr->tx_base,
	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (i = 0; i < txr->num_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
			    0, txbuf->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * Slots in the netmap ring (indexed by "si") are
		 * kring->nkr_hwofs positions "ahead" wrt the
		 * corresponding slot in the NIC ring. In some drivers
		 * (not here) nkr_hwofs can be negative. Function
		 * netmap_idx_n2k() handles wraparounds properly.
		 */
		if (slot) {
			int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
			netmap_load_map(na, txr->txtag, txbuf->map, NMB(na, slot + si));
		}
#endif /* DEV_NETMAP */
		/* Clear the EOP descriptor pointer */
		txbuf->eop = NULL;
	}

#ifdef IXGBE_FDIR
	/* Set the rate at which we sample packets */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB)
		txr->atr_sample = atr_sample_rate;
#endif

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXGBE_TX_UNLOCK(txr);
}

/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
int
ixgbe_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixgbe_setup_transmit_ring(txr);

	return (0);
}

/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
void
ixgbe_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		ixgbe_free_transmit_buffers(txr);
		ixgbe_dma_free(adapter, &txr->txdma);
		IXGBE_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
}

/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
static void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_tx_buf *tx_buffer;
	int             i;

	INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
			    0, tx_buffer->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				ixgbe_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
#ifndef IXGBE_LEGACY_TX
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
#endif
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		ixgbe_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 **********************************************************************/

static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct m_tag *mtag;
	struct adapter *adapter = txr->adapter;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header *eh;
	struct ip ip;
	struct ip6_hdr ip6;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	int	ehdrlen, ip_hlen = 0;
	u16	etype;
	u8	ipproto __diagused = 0;
	int	offload = TRUE;
	int	ctxd = txr->next_avail_desc;
	u16	vtag = 0;

	/* First check if TSO is to be used */
	if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6))
		return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));

	if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
		offload = FALSE;

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the context descriptor. Hence
	** we need to make one even if not doing offloads.
	*/
	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (offload == FALSE) /* ... no offload to do */
		return 0;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	switch (etype) {
	case ETHERTYPE_IP:
		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
		ip_hlen = ip.ip_hl << 2;
		ipproto = ip.ip_p;
#if 0
		ip.ip_sum = 0;
		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
#else
		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
		    ip.ip_sum == 0);
#endif
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
		ip_hlen = sizeof(ip6);
		/* XXX-BZ this will go badly in case of ext hdrs. */
		ipproto = ip6.ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
	default:
		break;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
		KASSERT(ipproto == IPPROTO_TCP);
	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
		KASSERT(ipproto == IPPROTO_UDP);
	}

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return 0;
}
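
/*
 * Worked example of vlan_macip_lens for an untagged TCP/IPv4 frame
 * with no IP options: ehdrlen = 14 (ETHER_HDR_LEN) and ip_hlen = 20,
 * so with the usual shift definitions (MACLEN shifted by 9) the field
 * becomes (14 << 9) | 20 = 0x1c14; a VLAN tag, when present, is OR'd
 * in above bit 16. The hardware decodes these lengths to locate the
 * L3 and L4 headers it has to checksum.
 */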

/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static int
ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct m_tag *mtag;
	struct adapter *adapter = txr->adapter;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_adv_tx_context_desc *TXD;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0, paylen;
	u16 vtag = 0, eh_type;
	int ctxd, ehdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
#ifdef INET
	struct ip *ip;
#endif
	struct tcphdr *th;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		eh_type = eh->evl_proto;
	} else {
		ehdrlen = ETHER_HDR_LEN;
		eh_type = eh->evl_encap_proto;
	}

	switch (ntohs(eh_type)) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (ENXIO);
		ip_hlen = sizeof(struct ip6_hdr);
		th = (struct tcphdr *)((char *)ip6 + ip_hlen);
		th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
		    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (ENXIO);
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		th = (struct tcphdr *)((char *)ip + ip_hlen);
		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
		break;
#endif
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(eh_type));
		break;
	}

	ctxd = txr->next_avail_desc;
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	tcp_hlen = th->th_off << 2;

	/* This is used in the transmit desc in encap */
	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);

	if (++ctxd == txr->num_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
	++txr->tso_tx.ev_count;
	return (0);
}
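
/*
 * Worked example of mss_l4len_idx: for a TSO frame with segsz (MSS)
 * 1460 and a 20-byte TCP header, and the usual shift definitions (MSS
 * in the upper half-word, L4LEN shifted by 8), the field comes out to
 * (1460 << 16) | (20 << 8) = 0x05b41400. The device uses the MSS to
 * slice the paylen bytes of TCP payload into wire-sized segments,
 * replicating the prototype headers and completing the pseudo-header
 * checksum seeded above.
 */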

/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
void
ixgbe_txeof(struct tx_ring *txr)
{
	struct adapter		*adapter = txr->adapter;
	struct ifnet		*ifp = adapter->ifp;
	u32			work, processed = 0;
	u16			limit = txr->process_limit;
	struct ixgbe_tx_buf	*buf;
	union ixgbe_adv_tx_desc *txd;
	struct timeval now, elapsed;

	KASSERT(mutex_owned(&txr->tx_mtx));

#ifdef DEV_NETMAP
	if (ifp->if_capenable & IFCAP_NETMAP) {
		struct netmap_adapter *na = NA(ifp);
		struct netmap_kring *kring = &na->tx_rings[txr->me];
		txd = txr->tx_base;
		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
		    BUS_DMASYNC_POSTREAD);
		/*
		 * In netmap mode, all the work is done in the context
		 * of the client thread. Interrupt handlers only wake up
		 * clients, which may be sleeping on individual rings
		 * or on a global resource for all rings.
		 * To implement tx interrupt mitigation, we wake up the client
		 * thread roughly every half ring, even if the NIC interrupts
		 * more frequently. This is implemented as follows:
		 * - ixgbe_txsync() sets kring->nr_kflags with the index of
		 *   the slot that should wake up the thread (nkr_num_slots
		 *   means the user thread should not be woken up);
		 * - the driver ignores tx interrupts unless netmap_mitigate=0
		 *   or the slot has the DD bit set.
		 */
		if (!netmap_mitigate ||
		    (kring->nr_kflags < kring->nkr_num_slots &&
		    txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
			netmap_tx_irq(ifp, txr->me);
		}
		return;
	}
#endif /* DEV_NETMAP */

	if (txr->tx_avail == txr->num_desc) {
		txr->queue_status = IXGBE_QUEUE_IDLE;
		return;
	}

	/* Get work starting point */
	work = txr->next_to_clean;
	buf = &txr->tx_buffers[work];
	txd = &txr->tx_base[work];
	work -= txr->num_desc; /* The distance to ring end */
	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);
	do {
		union ixgbe_adv_tx_desc *eop = buf->eop;
		if (eop == NULL) /* No work */
			break;

		if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
			break;	/* I/O not complete */

		if (buf->m_head) {
			txr->bytes +=
			    buf->m_head->m_pkthdr.len;
			bus_dmamap_sync(txr->txtag->dt_dmat,
			    buf->map,
			    0, buf->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag,
			    buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
			/*
			 * NetBSD: Don't override buf->map with NULL here.
			 * It'll panic when a ring runs one lap around.
			 */
		}
		buf->eop = NULL;
		++txr->tx_avail;

		/* We clean the range if multi segment */
		while (txd != eop) {
			++txd;
			++buf;
			++work;
			/* wrap the ring? */
			if (__predict_false(!work)) {
				work -= txr->num_desc;
				buf = txr->tx_buffers;
				txd = txr->tx_base;
			}
			if (buf->m_head) {
				txr->bytes +=
				    buf->m_head->m_pkthdr.len;
				bus_dmamap_sync(txr->txtag->dt_dmat,
				    buf->map,
				    0, buf->m_head->m_pkthdr.len,
				    BUS_DMASYNC_POSTWRITE);
				ixgbe_dmamap_unload(txr->txtag,
				    buf->map);
				m_freem(buf->m_head);
				buf->m_head = NULL;
				/*
				 * NetBSD: Don't override buf->map with NULL
				 * here. It'll panic when a ring runs one lap
				 * around.
				 */
			}
			++txr->tx_avail;
			buf->eop = NULL;
		}
		++txr->packets;
		++processed;
		++ifp->if_opackets;
		getmicrotime(&txr->watchdog_time);

		/* Try the next packet */
		++txd;
		++buf;
		++work;
		/* reset with a wrap */
		if (__predict_false(!work)) {
			work -= txr->num_desc;
			buf = txr->tx_buffers;
			txd = txr->tx_base;
		}
		prefetch(txd);
	} while (__predict_true(--limit));

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	work += txr->num_desc;
	txr->next_to_clean = work;

	/*
	** Watchdog calculation: we know there's work outstanding
	** or the first return would have been taken, so if nothing
	** has been processed for longer than IXGBE_WATCHDOG the
	** queue is presumed hung.
	*/
	getmicrotime(&now);
	timersub(&now, &txr->watchdog_time, &elapsed);
	if (!processed && tvtohz(&elapsed) > IXGBE_WATCHDOG)
		txr->queue_status = IXGBE_QUEUE_HUNG;

	if (txr->tx_avail == txr->num_desc)
		txr->queue_status = IXGBE_QUEUE_IDLE;

	return;
}
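
/*
 * The "work" index above uses a small trick worth spelling out: it is
 * biased downward by num_desc so that a single "!work" test detects
 * ring wrap. With (hypothetical) num_desc == 512 and next_to_clean ==
 * 510, work starts at -2; two increments bring it to 0, at which
 * point it is rewound by 512 and buf/txd are reset to the start of
 * the ring. Adding num_desc back at the end recovers the real
 * next_to_clean index.
 */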
   1100  1.1  msaitoh 
   1101  1.1  msaitoh #ifdef IXGBE_FDIR
   1102  1.1  msaitoh /*
   1103  1.1  msaitoh ** This routine parses packet headers so that Flow
   1104  1.1  msaitoh ** Director can make a hashed filter table entry
   1105  1.1  msaitoh ** allowing traffic flows to be identified and kept
   1106  1.1  msaitoh ** on the same cpu.  This would be a performance
   1107  1.1  msaitoh ** hit, but we only do it at IXGBE_FDIR_RATE of
   1108  1.1  msaitoh ** packets.
   1109  1.1  msaitoh */
   1110  1.1  msaitoh static void
   1111  1.1  msaitoh ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
   1112  1.1  msaitoh {
   1113  1.1  msaitoh 	struct adapter			*adapter = txr->adapter;
   1114  1.1  msaitoh 	struct ix_queue			*que;
   1115  1.1  msaitoh 	struct ip			*ip;
   1116  1.1  msaitoh 	struct tcphdr			*th;
   1117  1.1  msaitoh 	struct udphdr			*uh;
   1118  1.1  msaitoh 	struct ether_vlan_header	*eh;
   1119  1.1  msaitoh 	union ixgbe_atr_hash_dword	input = {.dword = 0};
   1120  1.1  msaitoh 	union ixgbe_atr_hash_dword	common = {.dword = 0};
   1121  1.1  msaitoh 	int  				ehdrlen, ip_hlen;
   1122  1.1  msaitoh 	u16				etype;
   1123  1.1  msaitoh 
   1124  1.1  msaitoh 	eh = mtod(mp, struct ether_vlan_header *);
   1125  1.1  msaitoh 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   1126  1.1  msaitoh 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   1127  1.1  msaitoh 		etype = eh->evl_proto;
   1128  1.1  msaitoh 	} else {
   1129  1.1  msaitoh 		ehdrlen = ETHER_HDR_LEN;
   1130  1.1  msaitoh 		etype = eh->evl_encap_proto;
   1131  1.1  msaitoh 	}
   1132  1.1  msaitoh 
   1133  1.1  msaitoh 	/* Only handling IPv4 */
   1134  1.1  msaitoh 	if (etype != htons(ETHERTYPE_IP))
   1135  1.1  msaitoh 		return;
   1136  1.1  msaitoh 
   1137  1.1  msaitoh 	ip = (struct ip *)(mp->m_data + ehdrlen);
   1138  1.1  msaitoh 	ip_hlen = ip->ip_hl << 2;
   1139  1.1  msaitoh 
   1140  1.1  msaitoh 	/* check if we're UDP or TCP */
   1141  1.1  msaitoh 	switch (ip->ip_p) {
   1142  1.1  msaitoh 	case IPPROTO_TCP:
   1143  1.1  msaitoh 		th = (struct tcphdr *)((char *)ip + ip_hlen);
   1144  1.1  msaitoh 		/* src and dst are inverted */
   1145  1.1  msaitoh 		common.port.dst ^= th->th_sport;
   1146  1.1  msaitoh 		common.port.src ^= th->th_dport;
   1147  1.1  msaitoh 		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
   1148  1.1  msaitoh 		break;
   1149  1.1  msaitoh 	case IPPROTO_UDP:
   1150  1.1  msaitoh 		uh = (struct udphdr *)((char *)ip + ip_hlen);
   1151  1.1  msaitoh 		/* src and dst are inverted */
   1152  1.1  msaitoh 		common.port.dst ^= uh->uh_sport;
   1153  1.1  msaitoh 		common.port.src ^= uh->uh_dport;
   1154  1.1  msaitoh 		input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
   1155  1.1  msaitoh 		break;
   1156  1.1  msaitoh 	default:
   1157  1.1  msaitoh 		return;
   1158  1.1  msaitoh 	}
   1159  1.1  msaitoh 
   1160  1.1  msaitoh 	input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
   1161  1.1  msaitoh 	if (mp->m_pkthdr.ether_vtag)
   1162  1.1  msaitoh 		common.flex_bytes ^= htons(ETHERTYPE_VLAN);
   1163  1.1  msaitoh 	else
   1164  1.1  msaitoh 		common.flex_bytes ^= etype;
   1165  1.1  msaitoh 	common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
   1166  1.1  msaitoh 
   1167  1.1  msaitoh 	que = &adapter->queues[txr->me];
   1168  1.1  msaitoh 	/*
   1169  1.1  msaitoh 	** This assumes the Rx queue and Tx
   1170  1.1  msaitoh 	** queue are bound to the same CPU
   1171  1.1  msaitoh 	*/
   1172  1.1  msaitoh 	ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
   1173  1.1  msaitoh 	    input, common, que->msix);
   1174  1.1  msaitoh }
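                       /*
                        * Editor's sketch of the signature above: the deliberate source/
                        * destination swap means the hash computed from this transmitted
                        * packet matches the one the hardware computes for the reverse
                        * (received) half of the same flow, so the filter steers the
                        * flow's RX traffic onto the queue its TX side uses (given the
                        * stated assumption that the RX and TX queues share a CPU).
                        */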
   1175  1.1  msaitoh #endif /* IXGBE_FDIR */
   1176  1.1  msaitoh 
   1177  1.1  msaitoh /*
   1178  1.1  msaitoh ** Used to detect a descriptor that has
   1179  1.1  msaitoh ** been merged by Hardware RSC.
   1180  1.1  msaitoh */
   1181  1.1  msaitoh static inline u32
   1182  1.1  msaitoh ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
   1183  1.1  msaitoh {
   1184  1.1  msaitoh 	return (le32toh(rx->wb.lower.lo_dword.data) &
   1185  1.1  msaitoh 	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
   1186  1.1  msaitoh }
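                       /*
                        * Example (editor's sketch): RSCCNT is a small bit field in the
                        * writeback lo_dword, so the decode above is a plain mask-and-
                        * shift.  For a descriptor into which hardware merged three
                        * frames, roughly:
                        *
                        *	rsc = ixgbe_rsc_count(cur);	/- rsc == 3 -/
                        *	rxr->rsc_num += (rsc - 1);	/- two extra descriptors -/
                        *
                        * ixgbe_rxeof() treats a nonzero count as the cue to take the
                        * next buffer index from the descriptor's NEXTP field rather than
                        * assuming sequential placement.
                        */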
   1187  1.1  msaitoh 
   1188  1.1  msaitoh /*********************************************************************
   1189  1.1  msaitoh  *
   1190  1.1  msaitoh  *  Initialize Hardware RSC (LRO) feature on 82599
   1191  1.1  msaitoh  *  for an RX ring, this is toggled by the LRO capability
   1192  1.1  msaitoh  *  even though it is transparent to the stack.
   1193  1.1  msaitoh  *
    1194  1.1  msaitoh  *  NOTE: since this HW feature only works with IPv4 and
    1195  1.1  msaitoh  *        our testing has shown soft LRO to be as effective,
    1196  1.1  msaitoh  *        I have decided to disable this by default.
   1197  1.1  msaitoh  *
   1198  1.1  msaitoh  **********************************************************************/
   1199  1.1  msaitoh static void
   1200  1.1  msaitoh ixgbe_setup_hw_rsc(struct rx_ring *rxr)
   1201  1.1  msaitoh {
   1202  1.1  msaitoh 	struct	adapter 	*adapter = rxr->adapter;
   1203  1.1  msaitoh 	struct	ixgbe_hw	*hw = &adapter->hw;
   1204  1.1  msaitoh 	u32			rscctrl, rdrxctl;
   1205  1.1  msaitoh 
   1206  1.1  msaitoh 	/* If turning LRO/RSC off we need to disable it */
    1207  1.1  msaitoh 	if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
    1208  1.1  msaitoh 		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
    1209  1.1  msaitoh 		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
                       		/* Write the cleared bit back; without this the disable is a no-op */
                       		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
    1210  1.1  msaitoh 		return;
    1211  1.1  msaitoh 	}
   1212  1.1  msaitoh 
   1213  1.1  msaitoh 	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
   1214  1.1  msaitoh 	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
   1215  1.1  msaitoh #ifdef DEV_NETMAP /* crcstrip is optional in netmap */
   1216  1.1  msaitoh 	if (adapter->ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
   1217  1.1  msaitoh #endif /* DEV_NETMAP */
   1218  1.1  msaitoh 	rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
   1219  1.1  msaitoh 	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
   1220  1.1  msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
   1221  1.1  msaitoh 
   1222  1.1  msaitoh 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
   1223  1.1  msaitoh 	rscctrl |= IXGBE_RSCCTL_RSCEN;
   1224  1.1  msaitoh 	/*
   1225  1.1  msaitoh 	** Limit the total number of descriptors that
   1226  1.1  msaitoh 	** can be combined, so it does not exceed 64K
   1227  1.1  msaitoh 	*/
   1228  1.1  msaitoh 	if (rxr->mbuf_sz == MCLBYTES)
   1229  1.1  msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
   1230  1.1  msaitoh 	else if (rxr->mbuf_sz == MJUMPAGESIZE)
   1231  1.1  msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
   1232  1.1  msaitoh 	else if (rxr->mbuf_sz == MJUM9BYTES)
   1233  1.1  msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
   1234  1.1  msaitoh 	else  /* Using 16K cluster */
   1235  1.1  msaitoh 		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
   1236  1.1  msaitoh 
   1237  1.1  msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
   1238  1.1  msaitoh 
   1239  1.1  msaitoh 	/* Enable TCP header recognition */
   1240  1.1  msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
   1241  1.1  msaitoh 	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
   1242  1.1  msaitoh 	    IXGBE_PSRTYPE_TCPHDR));
   1243  1.1  msaitoh 
   1244  1.1  msaitoh 	/* Disable RSC for ACK packets */
   1245  1.1  msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
   1246  1.1  msaitoh 	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
   1247  1.1  msaitoh 
   1248  1.1  msaitoh 	rxr->hw_rsc = TRUE;
   1249  1.1  msaitoh }
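
                       /*
                        * Worked numbers for the MAXDESC choice above (editor's note,
                        * assuming the usual cluster sizes and 4KB pages): each cap keeps
                        * one coalesced RSC under the 64K limit the comment mentions:
                        *
                        *	16 x  2KB (MCLBYTES)     = 32KB
                        *	 8 x  4KB (MJUMPAGESIZE) = 32KB
                        *	 4 x  9KB (MJUM9BYTES)   = 36KB
                        *	 1 x 16KB (MJUM16BYTES)  = 16KB
                        */
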
   1250  1.1  msaitoh /*********************************************************************
   1251  1.1  msaitoh  *
   1252  1.1  msaitoh  *  Refresh mbuf buffers for RX descriptor rings
    1253  1.1  msaitoh  *   - now keeps its own state, so discards due to resource
    1254  1.1  msaitoh  *     exhaustion are unnecessary; if an mbuf cannot be obtained
    1255  1.1  msaitoh  *     it just returns, keeping its placeholder, and can simply
    1256  1.1  msaitoh  *     be called again later to retry.
   1257  1.1  msaitoh  *
   1258  1.1  msaitoh  **********************************************************************/
   1259  1.1  msaitoh static void
   1260  1.1  msaitoh ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
   1261  1.1  msaitoh {
   1262  1.1  msaitoh 	struct adapter		*adapter = rxr->adapter;
   1263  1.1  msaitoh 	struct ixgbe_rx_buf	*rxbuf;
   1264  1.1  msaitoh 	struct mbuf		*mp;
   1265  1.1  msaitoh 	int			i, j, error;
   1266  1.1  msaitoh 	bool			refreshed = false;
   1267  1.1  msaitoh 
   1268  1.1  msaitoh 	i = j = rxr->next_to_refresh;
   1269  1.1  msaitoh 	/* Control the loop with one beyond */
   1270  1.1  msaitoh 	if (++j == rxr->num_desc)
   1271  1.1  msaitoh 		j = 0;
   1272  1.1  msaitoh 
   1273  1.1  msaitoh 	while (j != limit) {
   1274  1.1  msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1275  1.1  msaitoh 		if (rxbuf->buf == NULL) {
   1276  1.1  msaitoh 			mp = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
   1277  1.1  msaitoh 			    MT_DATA, M_PKTHDR, rxr->mbuf_sz);
   1278  1.1  msaitoh 			if (mp == NULL) {
   1279  1.1  msaitoh 				rxr->no_jmbuf.ev_count++;
   1280  1.1  msaitoh 				goto update;
   1281  1.1  msaitoh 			}
   1282  1.1  msaitoh 			if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
   1283  1.1  msaitoh 				m_adj(mp, ETHER_ALIGN);
   1284  1.1  msaitoh 		} else
   1285  1.1  msaitoh 			mp = rxbuf->buf;
   1286  1.1  msaitoh 
   1287  1.1  msaitoh 		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1288  1.1  msaitoh 
   1289  1.1  msaitoh 		/* If we're dealing with an mbuf that was copied rather
   1290  1.1  msaitoh 		 * than replaced, there's no need to go through busdma.
   1291  1.1  msaitoh 		 */
   1292  1.1  msaitoh 		if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
   1293  1.1  msaitoh 			/* Get the memory mapping */
   1294  1.1  msaitoh 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   1295  1.1  msaitoh 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   1296  1.1  msaitoh 			if (error != 0) {
   1297  1.1  msaitoh 				printf("Refresh mbufs: payload dmamap load"
   1298  1.1  msaitoh 				    " failure - %d\n", error);
   1299  1.1  msaitoh 				m_free(mp);
   1300  1.1  msaitoh 				rxbuf->buf = NULL;
   1301  1.1  msaitoh 				goto update;
   1302  1.1  msaitoh 			}
   1303  1.1  msaitoh 			rxbuf->buf = mp;
   1304  1.1  msaitoh 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1305  1.1  msaitoh 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
   1306  1.1  msaitoh 			rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
   1307  1.1  msaitoh 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1308  1.1  msaitoh 		} else {
   1309  1.1  msaitoh 			rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
   1310  1.1  msaitoh 			rxbuf->flags &= ~IXGBE_RX_COPY;
   1311  1.1  msaitoh 		}
   1312  1.1  msaitoh 
   1313  1.1  msaitoh 		refreshed = true;
   1314  1.1  msaitoh 		/* Next is precalculated */
   1315  1.1  msaitoh 		i = j;
   1316  1.1  msaitoh 		rxr->next_to_refresh = i;
   1317  1.1  msaitoh 		if (++j == rxr->num_desc)
   1318  1.1  msaitoh 			j = 0;
   1319  1.1  msaitoh 	}
   1320  1.1  msaitoh update:
   1321  1.1  msaitoh 	if (refreshed) /* Update hardware tail index */
   1322  1.1  msaitoh 		IXGBE_WRITE_REG(&adapter->hw,
   1323  1.1  msaitoh 		    IXGBE_RDT(rxr->me), rxr->next_to_refresh);
   1324  1.1  msaitoh 	return;
   1325  1.1  msaitoh }
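                       /*
                        * Loop-control example (editor's sketch): with num_desc = 8,
                        * next_to_refresh = 5 and limit = 1, the function starts at i = 5
                        * with j = 6 and refreshes descriptors 5, 6, 7 and 0, stopping
                        * once j wraps around to equal limit.  next_to_refresh therefore
                        * always trails the caller's limit by one slot, so the tail
                        * written to RDT never reaches the descriptor the cleaner is
                        * about to examine.
                        */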
   1326  1.1  msaitoh 
   1327  1.1  msaitoh /*********************************************************************
   1328  1.1  msaitoh  *
   1329  1.1  msaitoh  *  Allocate memory for rx_buffer structures. Since we use one
   1330  1.1  msaitoh  *  rx_buffer per received packet, the maximum number of rx_buffer's
   1331  1.1  msaitoh  *  that we'll need is equal to the number of receive descriptors
   1332  1.1  msaitoh  *  that we've allocated.
   1333  1.1  msaitoh  *
   1334  1.1  msaitoh  **********************************************************************/
   1335  1.1  msaitoh int
   1336  1.1  msaitoh ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
   1337  1.1  msaitoh {
   1338  1.1  msaitoh 	struct	adapter 	*adapter = rxr->adapter;
   1339  1.1  msaitoh 	device_t 		dev = adapter->dev;
   1340  1.1  msaitoh 	struct ixgbe_rx_buf 	*rxbuf;
   1341  1.1  msaitoh 	int             	i, bsize, error;
   1342  1.1  msaitoh 
   1343  1.1  msaitoh 	bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
   1344  1.1  msaitoh 	if (!(rxr->rx_buffers =
   1345  1.1  msaitoh 	    (struct ixgbe_rx_buf *) malloc(bsize,
   1346  1.1  msaitoh 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
   1347  1.1  msaitoh 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
   1348  1.1  msaitoh 		error = ENOMEM;
   1349  1.1  msaitoh 		goto fail;
   1350  1.1  msaitoh 	}
   1351  1.1  msaitoh 
   1352  1.1  msaitoh 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   1353  1.1  msaitoh 				   1, 0,	/* alignment, bounds */
   1354  1.1  msaitoh 				   MJUM16BYTES,		/* maxsize */
   1355  1.1  msaitoh 				   1,			/* nsegments */
   1356  1.1  msaitoh 				   MJUM16BYTES,		/* maxsegsize */
   1357  1.1  msaitoh 				   0,			/* flags */
   1358  1.1  msaitoh 				   &rxr->ptag))) {
   1359  1.1  msaitoh 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
   1360  1.1  msaitoh 		goto fail;
   1361  1.1  msaitoh 	}
   1362  1.1  msaitoh 
    1363  1.1  msaitoh 	for (i = 0; i < rxr->num_desc; i++) {
   1364  1.1  msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1365  1.1  msaitoh 		error = ixgbe_dmamap_create(rxr->ptag,
   1366  1.1  msaitoh 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
   1367  1.1  msaitoh 		if (error) {
   1368  1.1  msaitoh 			aprint_error_dev(dev, "Unable to create RX dma map\n");
   1369  1.1  msaitoh 			goto fail;
   1370  1.1  msaitoh 		}
   1371  1.1  msaitoh 	}
   1372  1.1  msaitoh 
   1373  1.1  msaitoh 	return (0);
   1374  1.1  msaitoh 
   1375  1.1  msaitoh fail:
   1376  1.1  msaitoh 	/* Frees all, but can handle partial completion */
   1377  1.1  msaitoh 	ixgbe_free_receive_structures(adapter);
   1378  1.1  msaitoh 	return (error);
   1379  1.1  msaitoh }
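                       /*
                        * Editor's note: the payload tag above is sized for the largest
                        * supported cluster (MJUM16BYTES) with a single segment, so one
                        * rx_buffer always maps one contiguous cluster no matter which
                        * mbuf_sz the MTU later selects; this is why
                        * ixgbe_setup_receive_ring() can change mbuf_sz without having to
                        * recreate the tag or the per-descriptor maps.
                        */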
   1380  1.1  msaitoh 
   1381  1.1  msaitoh static void
   1382  1.1  msaitoh ixgbe_free_receive_ring(struct rx_ring *rxr)
   1383  1.1  msaitoh {
   1384  1.1  msaitoh 	struct ixgbe_rx_buf       *rxbuf;
   1385  1.1  msaitoh 	int i;
   1386  1.1  msaitoh 
   1387  1.1  msaitoh 	for (i = 0; i < rxr->num_desc; i++) {
   1388  1.1  msaitoh 		rxbuf = &rxr->rx_buffers[i];
   1389  1.1  msaitoh 		if (rxbuf->buf != NULL) {
   1390  1.1  msaitoh 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1391  1.1  msaitoh 			    0, rxbuf->buf->m_pkthdr.len,
   1392  1.1  msaitoh 			    BUS_DMASYNC_POSTREAD);
   1393  1.1  msaitoh 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   1394  1.1  msaitoh 			rxbuf->buf->m_flags |= M_PKTHDR;
   1395  1.1  msaitoh 			m_freem(rxbuf->buf);
   1396  1.1  msaitoh 			rxbuf->buf = NULL;
   1397  1.1  msaitoh 			rxbuf->flags = 0;
   1398  1.1  msaitoh 		}
   1399  1.1  msaitoh 	}
   1400  1.1  msaitoh }
   1401  1.1  msaitoh 
   1402  1.1  msaitoh 
   1403  1.1  msaitoh /*********************************************************************
   1404  1.1  msaitoh  *
   1405  1.1  msaitoh  *  Initialize a receive ring and its buffers.
   1406  1.1  msaitoh  *
   1407  1.1  msaitoh  **********************************************************************/
   1408  1.1  msaitoh static int
   1409  1.1  msaitoh ixgbe_setup_receive_ring(struct rx_ring *rxr)
   1410  1.1  msaitoh {
   1411  1.1  msaitoh 	struct	adapter 	*adapter;
   1412  1.1  msaitoh 	struct ixgbe_rx_buf	*rxbuf;
   1413  1.1  msaitoh #ifdef LRO
   1414  1.1  msaitoh 	struct ifnet		*ifp;
   1415  1.1  msaitoh 	struct lro_ctrl		*lro = &rxr->lro;
   1416  1.1  msaitoh #endif /* LRO */
   1417  1.1  msaitoh 	int			rsize, error = 0;
   1418  1.1  msaitoh #ifdef DEV_NETMAP
   1419  1.1  msaitoh 	struct netmap_adapter *na = NA(rxr->adapter->ifp);
   1420  1.1  msaitoh 	struct netmap_slot *slot;
   1421  1.1  msaitoh #endif /* DEV_NETMAP */
   1422  1.1  msaitoh 
   1423  1.1  msaitoh 	adapter = rxr->adapter;
   1424  1.1  msaitoh #ifdef LRO
   1425  1.1  msaitoh 	ifp = adapter->ifp;
   1426  1.1  msaitoh #endif /* LRO */
   1427  1.1  msaitoh 
   1428  1.1  msaitoh 	/* Clear the ring contents */
   1429  1.1  msaitoh 	IXGBE_RX_LOCK(rxr);
   1430  1.1  msaitoh #ifdef DEV_NETMAP
   1431  1.1  msaitoh 	/* same as in ixgbe_setup_transmit_ring() */
   1432  1.1  msaitoh 	slot = netmap_reset(na, NR_RX, rxr->me, 0);
   1433  1.1  msaitoh #endif /* DEV_NETMAP */
   1434  1.1  msaitoh 	rsize = roundup2(adapter->num_rx_desc *
   1435  1.1  msaitoh 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   1436  1.1  msaitoh 	bzero((void *)rxr->rx_base, rsize);
   1437  1.1  msaitoh 	/* Cache the size */
   1438  1.1  msaitoh 	rxr->mbuf_sz = adapter->rx_mbuf_sz;
   1439  1.1  msaitoh 
   1440  1.1  msaitoh 	/* Free current RX buffer structs and their mbufs */
   1441  1.1  msaitoh 	ixgbe_free_receive_ring(rxr);
   1442  1.1  msaitoh 
   1443  1.1  msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1444  1.1  msaitoh 
   1445  1.1  msaitoh 	/* Now reinitialize our supply of jumbo mbufs.  The number
   1446  1.1  msaitoh 	 * or size of jumbo mbufs may have changed.
   1447  1.1  msaitoh 	 */
   1448  1.1  msaitoh 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
   1449  1.1  msaitoh 	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
   1450  1.1  msaitoh 
   1451  1.1  msaitoh 	IXGBE_RX_LOCK(rxr);
   1452  1.1  msaitoh 
   1453  1.1  msaitoh 	/* Now replenish the mbufs */
   1454  1.1  msaitoh 	for (int j = 0; j != rxr->num_desc; ++j) {
   1455  1.1  msaitoh 		struct mbuf	*mp;
   1456  1.1  msaitoh 
   1457  1.1  msaitoh 		rxbuf = &rxr->rx_buffers[j];
   1458  1.1  msaitoh #ifdef DEV_NETMAP
   1459  1.1  msaitoh 		/*
   1460  1.1  msaitoh 		 * In netmap mode, fill the map and set the buffer
   1461  1.1  msaitoh 		 * address in the NIC ring, considering the offset
   1462  1.1  msaitoh 		 * between the netmap and NIC rings (see comment in
   1463  1.1  msaitoh 		 * ixgbe_setup_transmit_ring() ). No need to allocate
   1464  1.1  msaitoh 		 * an mbuf, so end the block with a continue;
   1465  1.1  msaitoh 		 */
   1466  1.1  msaitoh 		if (slot) {
   1467  1.1  msaitoh 			int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
   1468  1.1  msaitoh 			uint64_t paddr;
   1469  1.1  msaitoh 			void *addr;
   1470  1.1  msaitoh 
   1471  1.1  msaitoh 			addr = PNMB(na, slot + sj, &paddr);
   1472  1.1  msaitoh 			netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
   1473  1.1  msaitoh 			/* Update descriptor and the cached value */
   1474  1.1  msaitoh 			rxr->rx_base[j].read.pkt_addr = htole64(paddr);
   1475  1.1  msaitoh 			rxbuf->addr = htole64(paddr);
   1476  1.1  msaitoh 			continue;
   1477  1.1  msaitoh 		}
   1478  1.1  msaitoh #endif /* DEV_NETMAP */
   1479  1.1  msaitoh 		rxbuf->flags = 0;
   1480  1.1  msaitoh 		rxbuf->buf = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
   1481  1.1  msaitoh 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
   1482  1.1  msaitoh 		if (rxbuf->buf == NULL) {
   1483  1.1  msaitoh 			error = ENOBUFS;
    1484  1.1  msaitoh 			goto fail;
   1485  1.1  msaitoh 		}
   1486  1.1  msaitoh 		mp = rxbuf->buf;
   1487  1.1  msaitoh 		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
   1488  1.1  msaitoh 		/* Get the memory mapping */
   1489  1.1  msaitoh 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
   1490  1.1  msaitoh 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
   1491  1.1  msaitoh 		if (error != 0)
    1492  1.1  msaitoh 			goto fail;
   1493  1.1  msaitoh 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
   1494  1.1  msaitoh 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
   1495  1.1  msaitoh 		/* Update the descriptor and the cached value */
   1496  1.1  msaitoh 		rxr->rx_base[j].read.pkt_addr =
   1497  1.1  msaitoh 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1498  1.1  msaitoh 		rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
   1499  1.1  msaitoh 	}
   1500  1.1  msaitoh 
   1501  1.1  msaitoh 
   1502  1.1  msaitoh 	/* Setup our descriptor indices */
   1503  1.1  msaitoh 	rxr->next_to_check = 0;
   1504  1.1  msaitoh 	rxr->next_to_refresh = 0;
   1505  1.1  msaitoh 	rxr->lro_enabled = FALSE;
   1506  1.1  msaitoh 	rxr->rx_copies.ev_count = 0;
   1507  1.1  msaitoh 	rxr->rx_bytes.ev_count = 0;
   1508  1.1  msaitoh 	rxr->vtag_strip = FALSE;
   1509  1.1  msaitoh 
   1510  1.1  msaitoh 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1511  1.1  msaitoh 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1512  1.1  msaitoh 
   1513  1.1  msaitoh 	/*
   1514  1.1  msaitoh 	** Now set up the LRO interface:
   1515  1.1  msaitoh 	*/
   1516  1.1  msaitoh 	if (ixgbe_rsc_enable)
   1517  1.1  msaitoh 		ixgbe_setup_hw_rsc(rxr);
   1518  1.1  msaitoh #ifdef LRO
   1519  1.1  msaitoh 	else if (ifp->if_capenable & IFCAP_LRO) {
   1520  1.1  msaitoh 		device_t dev = adapter->dev;
   1521  1.1  msaitoh 		int err = tcp_lro_init(lro);
   1522  1.1  msaitoh 		if (err) {
   1523  1.1  msaitoh 			device_printf(dev, "LRO Initialization failed!\n");
   1524  1.1  msaitoh 			goto fail;
   1525  1.1  msaitoh 		}
   1526  1.1  msaitoh 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
   1527  1.1  msaitoh 		rxr->lro_enabled = TRUE;
   1528  1.1  msaitoh 		lro->ifp = adapter->ifp;
   1529  1.1  msaitoh 	}
   1530  1.1  msaitoh #endif /* LRO */
   1531  1.1  msaitoh 
   1532  1.1  msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1533  1.1  msaitoh 	return (0);
   1534  1.1  msaitoh 
   1535  1.1  msaitoh fail:
   1536  1.1  msaitoh 	ixgbe_free_receive_ring(rxr);
   1537  1.1  msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1538  1.1  msaitoh 	return (error);
   1539  1.1  msaitoh }
   1540  1.1  msaitoh 
   1541  1.1  msaitoh /*********************************************************************
   1542  1.1  msaitoh  *
   1543  1.1  msaitoh  *  Initialize all receive rings.
   1544  1.1  msaitoh  *
   1545  1.1  msaitoh  **********************************************************************/
   1546  1.1  msaitoh int
   1547  1.1  msaitoh ixgbe_setup_receive_structures(struct adapter *adapter)
   1548  1.1  msaitoh {
   1549  1.1  msaitoh 	struct rx_ring *rxr = adapter->rx_rings;
   1550  1.1  msaitoh 	int j;
   1551  1.1  msaitoh 
   1552  1.1  msaitoh 	for (j = 0; j < adapter->num_queues; j++, rxr++)
   1553  1.1  msaitoh 		if (ixgbe_setup_receive_ring(rxr))
   1554  1.1  msaitoh 			goto fail;
   1555  1.1  msaitoh 
   1556  1.1  msaitoh 	return (0);
   1557  1.1  msaitoh fail:
    1558  1.1  msaitoh 	/*
    1559  1.1  msaitoh 	 * Free RX buffers allocated so far; we will only handle
    1560  1.1  msaitoh 	 * the rings that completed, since the failing case will
    1561  1.1  msaitoh 	 * have cleaned up for itself. 'j' failed, so it's the terminus.
    1562  1.1  msaitoh 	 */
   1563  1.1  msaitoh 	for (int i = 0; i < j; ++i) {
   1564  1.1  msaitoh 		rxr = &adapter->rx_rings[i];
   1565  1.1  msaitoh 		ixgbe_free_receive_ring(rxr);
   1566  1.1  msaitoh 	}
   1567  1.1  msaitoh 
   1568  1.1  msaitoh 	return (ENOBUFS);
   1569  1.1  msaitoh }
   1570  1.1  msaitoh 
   1571  1.1  msaitoh /*********************************************************************
   1572  1.1  msaitoh  *
   1573  1.1  msaitoh  *  Free all receive rings.
   1574  1.1  msaitoh  *
   1575  1.1  msaitoh  **********************************************************************/
   1576  1.1  msaitoh void
   1577  1.1  msaitoh ixgbe_free_receive_structures(struct adapter *adapter)
   1578  1.1  msaitoh {
   1579  1.1  msaitoh 	struct rx_ring *rxr = adapter->rx_rings;
   1580  1.1  msaitoh 
   1581  1.1  msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
   1582  1.1  msaitoh 
   1583  1.1  msaitoh 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1584  1.1  msaitoh #ifdef LRO
   1585  1.1  msaitoh 		struct lro_ctrl		*lro = &rxr->lro;
   1586  1.1  msaitoh #endif /* LRO */
   1587  1.1  msaitoh 		ixgbe_free_receive_buffers(rxr);
   1588  1.1  msaitoh #ifdef LRO
   1589  1.1  msaitoh 		/* Free LRO memory */
   1590  1.1  msaitoh 		tcp_lro_free(lro);
   1591  1.1  msaitoh #endif /* LRO */
   1592  1.1  msaitoh 		/* Free the ring memory as well */
   1593  1.1  msaitoh 		ixgbe_dma_free(adapter, &rxr->rxdma);
   1594  1.1  msaitoh 		IXGBE_RX_LOCK_DESTROY(rxr);
   1595  1.1  msaitoh 	}
   1596  1.1  msaitoh 
   1597  1.1  msaitoh 	free(adapter->rx_rings, M_DEVBUF);
   1598  1.1  msaitoh }
   1599  1.1  msaitoh 
   1600  1.1  msaitoh 
   1601  1.1  msaitoh /*********************************************************************
   1602  1.1  msaitoh  *
   1603  1.1  msaitoh  *  Free receive ring data structures
   1604  1.1  msaitoh  *
   1605  1.1  msaitoh  **********************************************************************/
   1606  1.1  msaitoh static void
   1607  1.1  msaitoh ixgbe_free_receive_buffers(struct rx_ring *rxr)
   1608  1.1  msaitoh {
   1609  1.1  msaitoh 	struct adapter		*adapter = rxr->adapter;
   1610  1.1  msaitoh 	struct ixgbe_rx_buf	*rxbuf;
   1611  1.1  msaitoh 
   1612  1.1  msaitoh 	INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
   1613  1.1  msaitoh 
   1614  1.1  msaitoh 	/* Cleanup any existing buffers */
   1615  1.1  msaitoh 	if (rxr->rx_buffers != NULL) {
   1616  1.1  msaitoh 		for (int i = 0; i < adapter->num_rx_desc; i++) {
   1617  1.1  msaitoh 			rxbuf = &rxr->rx_buffers[i];
   1618  1.1  msaitoh 			if (rxbuf->buf != NULL) {
   1619  1.1  msaitoh 				bus_dmamap_sync(rxr->ptag->dt_dmat,
   1620  1.1  msaitoh 				    rxbuf->pmap, 0, rxbuf->buf->m_pkthdr.len,
   1621  1.1  msaitoh 				    BUS_DMASYNC_POSTREAD);
   1622  1.1  msaitoh 				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
   1623  1.1  msaitoh 				rxbuf->buf->m_flags |= M_PKTHDR;
   1624  1.1  msaitoh 				m_freem(rxbuf->buf);
   1625  1.1  msaitoh 			}
   1626  1.1  msaitoh 			rxbuf->buf = NULL;
   1627  1.1  msaitoh 			if (rxbuf->pmap != NULL) {
   1628  1.1  msaitoh 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
   1629  1.1  msaitoh 				rxbuf->pmap = NULL;
   1630  1.1  msaitoh 			}
   1631  1.1  msaitoh 		}
    1632  1.1  msaitoh 		/* Non-NULL here thanks to the check above */
    1633  1.1  msaitoh 		free(rxr->rx_buffers, M_DEVBUF);
    1634  1.1  msaitoh 		rxr->rx_buffers = NULL;
   1636  1.1  msaitoh 	}
   1637  1.1  msaitoh 
   1638  1.1  msaitoh 	if (rxr->ptag != NULL) {
   1639  1.1  msaitoh 		ixgbe_dma_tag_destroy(rxr->ptag);
   1640  1.1  msaitoh 		rxr->ptag = NULL;
   1641  1.1  msaitoh 	}
   1642  1.1  msaitoh 
   1643  1.1  msaitoh 	return;
   1644  1.1  msaitoh }
   1645  1.1  msaitoh 
   1646  1.1  msaitoh static __inline void
   1647  1.1  msaitoh ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
   1648  1.1  msaitoh {
   1649  1.1  msaitoh 	int s;
   1650  1.1  msaitoh 
   1651  1.1  msaitoh #ifdef LRO
   1652  1.1  msaitoh 	struct adapter	*adapter = ifp->if_softc;
   1653  1.1  msaitoh 	struct ethercom *ec = &adapter->osdep.ec;
   1654  1.1  msaitoh 
    1655  1.1  msaitoh         /*
    1656  1.1  msaitoh          * At the moment LRO is only for IP/TCP packets whose TCP checksum
    1657  1.1  msaitoh          * has been verified by hardware, and there must be no VLAN tag in
    1658  1.1  msaitoh          * the ethernet header.  For IPv6 we do not yet support ext. hdrs.
    1659  1.1  msaitoh          */
   1660  1.1  msaitoh         if (rxr->lro_enabled &&
   1661  1.1  msaitoh             (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
   1662  1.1  msaitoh             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   1663  1.1  msaitoh             ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   1664  1.1  msaitoh             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
   1665  1.1  msaitoh             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
   1666  1.1  msaitoh             (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
   1667  1.1  msaitoh             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
   1668  1.1  msaitoh             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
   1669  1.1  msaitoh                 /*
   1670  1.1  msaitoh                  * Send to the stack if:
   1671  1.1  msaitoh                  **  - LRO not enabled, or
   1672  1.1  msaitoh                  **  - no LRO resources, or
   1673  1.1  msaitoh                  **  - lro enqueue fails
   1674  1.1  msaitoh                  */
   1675  1.1  msaitoh                 if (rxr->lro.lro_cnt != 0)
   1676  1.1  msaitoh                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
   1677  1.1  msaitoh                                 return;
   1678  1.1  msaitoh         }
   1679  1.1  msaitoh #endif /* LRO */
   1680  1.1  msaitoh 
   1681  1.1  msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1682  1.1  msaitoh 
   1683  1.1  msaitoh 	s = splnet();
   1684  1.1  msaitoh 	/* Pass this up to any BPF listeners. */
   1685  1.1  msaitoh 	bpf_mtap(ifp, m);
   1686  1.1  msaitoh 	if_input(ifp, m);
   1687  1.1  msaitoh 	splx(s);
   1688  1.1  msaitoh 
   1689  1.1  msaitoh 	IXGBE_RX_LOCK(rxr);
   1690  1.1  msaitoh }
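                       /*
                        * Editor's note: the unlock/if_input/lock sequence above is
                        * deliberate; the packet is handed to the stack without the RX
                        * ring lock held, so an upcall that re-enters the driver cannot
                        * deadlock against the ring, while splnet() brackets the legacy
                        * bpf_mtap()/if_input() path.
                        */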
   1691  1.1  msaitoh 
   1692  1.1  msaitoh static __inline void
   1693  1.1  msaitoh ixgbe_rx_discard(struct rx_ring *rxr, int i)
   1694  1.1  msaitoh {
   1695  1.1  msaitoh 	struct ixgbe_rx_buf	*rbuf;
   1696  1.1  msaitoh 
   1697  1.1  msaitoh 	rbuf = &rxr->rx_buffers[i];
   1698  1.1  msaitoh 
    1700  1.1  msaitoh 	/*
    1701  1.1  msaitoh 	** With advanced descriptors the writeback
    1702  1.1  msaitoh 	** clobbers the buffer addrs, so it's easier
    1703  1.1  msaitoh 	** to just free the existing mbufs and take
    1704  1.1  msaitoh 	** the normal refresh path to get new buffers
    1705  1.1  msaitoh 	** and mapping.
    1706  1.1  msaitoh 	*/
   1707  1.1  msaitoh 
    1708  1.1  msaitoh 	if (rbuf->fmp != NULL) {	/* Partial chain? Head lives in fmp */
    1709  1.1  msaitoh 		rbuf->fmp->m_flags |= M_PKTHDR;
    1710  1.1  msaitoh 		m_freem(rbuf->fmp);
    1711  1.1  msaitoh 		rbuf->fmp = NULL;
    1712  1.1  msaitoh 		rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
    1713  1.1  msaitoh 	} else if (rbuf->buf) {
    1714  1.1  msaitoh 		m_free(rbuf->buf);
    1715  1.1  msaitoh 		rbuf->buf = NULL;
    1716  1.1  msaitoh 	}
   1717  1.1  msaitoh 
   1718  1.1  msaitoh 	rbuf->flags = 0;
   1719  1.1  msaitoh 
   1720  1.1  msaitoh 	return;
   1721  1.1  msaitoh }
   1722  1.1  msaitoh 
   1723  1.1  msaitoh 
   1724  1.1  msaitoh /*********************************************************************
   1725  1.1  msaitoh  *
   1726  1.1  msaitoh  *  This routine executes in interrupt context. It replenishes
   1727  1.1  msaitoh  *  the mbufs in the descriptor and sends data which has been
   1728  1.1  msaitoh  *  dma'ed into host memory to upper layer.
   1729  1.1  msaitoh  *
   1730  1.1  msaitoh  *  We loop at most count times if count is > 0, or until done if
   1731  1.1  msaitoh  *  count < 0.
   1732  1.1  msaitoh  *
   1733  1.1  msaitoh  *  Return TRUE for more work, FALSE for all clean.
   1734  1.1  msaitoh  *********************************************************************/
   1735  1.1  msaitoh bool
   1736  1.1  msaitoh ixgbe_rxeof(struct ix_queue *que)
   1737  1.1  msaitoh {
   1738  1.1  msaitoh 	struct adapter		*adapter = que->adapter;
   1739  1.1  msaitoh 	struct rx_ring		*rxr = que->rxr;
   1740  1.1  msaitoh 	struct ifnet		*ifp = adapter->ifp;
   1741  1.1  msaitoh #ifdef LRO
   1742  1.1  msaitoh 	struct lro_ctrl		*lro = &rxr->lro;
   1743  1.1  msaitoh 	struct lro_entry	*queued;
   1744  1.1  msaitoh #endif /* LRO */
   1745  1.1  msaitoh 	int			i, nextp, processed = 0;
   1746  1.1  msaitoh 	u32			staterr = 0;
   1747  1.1  msaitoh 	u16			count = rxr->process_limit;
   1748  1.1  msaitoh 	union ixgbe_adv_rx_desc	*cur;
   1749  1.1  msaitoh 	struct ixgbe_rx_buf	*rbuf, *nbuf;
   1750  1.1  msaitoh #ifdef RSS
   1751  1.1  msaitoh 	u16			pkt_info;
   1752  1.1  msaitoh #endif
   1753  1.1  msaitoh 
   1754  1.1  msaitoh 	IXGBE_RX_LOCK(rxr);
   1755  1.1  msaitoh 
   1756  1.1  msaitoh #ifdef DEV_NETMAP
   1757  1.1  msaitoh 	/* Same as the txeof routine: wakeup clients on intr. */
   1758  1.1  msaitoh 	if (netmap_rx_irq(ifp, rxr->me, &processed)) {
   1759  1.1  msaitoh 		IXGBE_RX_UNLOCK(rxr);
   1760  1.1  msaitoh 		return (FALSE);
   1761  1.1  msaitoh 	}
   1762  1.1  msaitoh #endif /* DEV_NETMAP */
   1763  1.1  msaitoh 
   1764  1.1  msaitoh 	for (i = rxr->next_to_check; count != 0;) {
   1765  1.1  msaitoh 		struct mbuf	*sendmp, *mp;
   1766  1.1  msaitoh 		u32		rsc, ptype;
   1767  1.1  msaitoh 		u16		len;
   1768  1.1  msaitoh 		u16		vtag = 0;
   1769  1.1  msaitoh 		bool		eop;
   1770  1.1  msaitoh 
   1771  1.1  msaitoh 		/* Sync the ring. */
   1772  1.1  msaitoh 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1773  1.1  msaitoh 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1774  1.1  msaitoh 
   1775  1.1  msaitoh 		cur = &rxr->rx_base[i];
   1776  1.1  msaitoh 		staterr = le32toh(cur->wb.upper.status_error);
   1777  1.1  msaitoh #ifdef RSS
   1778  1.1  msaitoh 		pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
   1779  1.1  msaitoh #endif
   1780  1.1  msaitoh 
   1781  1.1  msaitoh 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
   1782  1.1  msaitoh 			break;
   1783  1.1  msaitoh 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   1784  1.1  msaitoh 			break;
   1785  1.1  msaitoh 
   1786  1.1  msaitoh 		count--;
   1787  1.1  msaitoh 		sendmp = NULL;
   1788  1.1  msaitoh 		nbuf = NULL;
   1789  1.1  msaitoh 		rsc = 0;
   1790  1.1  msaitoh 		cur->wb.upper.status_error = 0;
   1791  1.1  msaitoh 		rbuf = &rxr->rx_buffers[i];
   1792  1.1  msaitoh 		mp = rbuf->buf;
   1793  1.1  msaitoh 
   1794  1.1  msaitoh 		len = le16toh(cur->wb.upper.length);
   1795  1.1  msaitoh 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
   1796  1.1  msaitoh 		    IXGBE_RXDADV_PKTTYPE_MASK;
   1797  1.1  msaitoh 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
   1798  1.1  msaitoh 
   1799  1.1  msaitoh 		/* Make sure bad packets are discarded */
   1800  1.1  msaitoh 		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
   1801  1.1  msaitoh 			rxr->rx_discarded.ev_count++;
   1802  1.1  msaitoh 			ixgbe_rx_discard(rxr, i);
   1803  1.1  msaitoh 			goto next_desc;
   1804  1.1  msaitoh 		}
   1805  1.1  msaitoh 
    1806  1.1  msaitoh 		/*
    1807  1.1  msaitoh 		** On the 82599, which supports a hardware
    1808  1.1  msaitoh 		** LRO (called HW RSC), packets need
    1809  1.1  msaitoh 		** not be fragmented across sequential
    1810  1.1  msaitoh 		** descriptors; rather, the next descriptor
    1811  1.1  msaitoh 		** is indicated in bits of the descriptor.
    1812  1.1  msaitoh 		** This also means that we might process
    1813  1.1  msaitoh 		** more than one packet at a time, something
    1814  1.1  msaitoh 		** that has never been true before; it
    1815  1.1  msaitoh 		** required eliminating global chain pointers
    1816  1.1  msaitoh 		** in favor of what we are doing here.  -jfv
    1817  1.1  msaitoh 		*/
   1818  1.1  msaitoh 		if (!eop) {
   1819  1.1  msaitoh 			/*
   1820  1.1  msaitoh 			** Figure out the next descriptor
   1821  1.1  msaitoh 			** of this frame.
   1822  1.1  msaitoh 			*/
   1823  1.1  msaitoh 			if (rxr->hw_rsc == TRUE) {
   1824  1.1  msaitoh 				rsc = ixgbe_rsc_count(cur);
   1825  1.1  msaitoh 				rxr->rsc_num += (rsc - 1);
   1826  1.1  msaitoh 			}
   1827  1.1  msaitoh 			if (rsc) { /* Get hardware index */
   1828  1.1  msaitoh 				nextp = ((staterr &
   1829  1.1  msaitoh 				    IXGBE_RXDADV_NEXTP_MASK) >>
   1830  1.1  msaitoh 				    IXGBE_RXDADV_NEXTP_SHIFT);
   1831  1.1  msaitoh 			} else { /* Just sequential */
   1832  1.1  msaitoh 				nextp = i + 1;
   1833  1.1  msaitoh 				if (nextp == adapter->num_rx_desc)
   1834  1.1  msaitoh 					nextp = 0;
   1835  1.1  msaitoh 			}
   1836  1.1  msaitoh 			nbuf = &rxr->rx_buffers[nextp];
   1837  1.1  msaitoh 			prefetch(nbuf);
   1838  1.1  msaitoh 		}
   1839  1.1  msaitoh 		/*
   1840  1.1  msaitoh 		** Rather than using the fmp/lmp global pointers
   1841  1.1  msaitoh 		** we now keep the head of a packet chain in the
   1842  1.1  msaitoh 		** buffer struct and pass this along from one
   1843  1.1  msaitoh 		** descriptor to the next, until we get EOP.
   1844  1.1  msaitoh 		*/
   1845  1.1  msaitoh 		mp->m_len = len;
   1846  1.1  msaitoh 		/*
   1847  1.1  msaitoh 		** See if there is a stored head
   1848  1.1  msaitoh 		** that determines what we are
   1849  1.1  msaitoh 		*/
   1850  1.1  msaitoh 		sendmp = rbuf->fmp;
   1851  1.1  msaitoh 		if (sendmp != NULL) {  /* secondary frag */
   1852  1.1  msaitoh 			rbuf->buf = rbuf->fmp = NULL;
   1853  1.1  msaitoh 			mp->m_flags &= ~M_PKTHDR;
   1854  1.1  msaitoh 			sendmp->m_pkthdr.len += mp->m_len;
   1855  1.1  msaitoh 		} else {
   1856  1.1  msaitoh 			/*
   1857  1.1  msaitoh 			 * Optimize.  This might be a small packet,
   1858  1.1  msaitoh 			 * maybe just a TCP ACK.  Do a fast copy that
   1859  1.1  msaitoh 			 * is cache aligned into a new mbuf, and
   1860  1.1  msaitoh 			 * leave the old mbuf+cluster for re-use.
   1861  1.1  msaitoh 			 */
   1862  1.1  msaitoh 			if (eop && len <= IXGBE_RX_COPY_LEN) {
   1863  1.1  msaitoh 				sendmp = m_gethdr(M_NOWAIT, MT_DATA);
   1864  1.1  msaitoh 				if (sendmp != NULL) {
   1865  1.1  msaitoh 					sendmp->m_data +=
   1866  1.1  msaitoh 					    IXGBE_RX_COPY_ALIGN;
   1867  1.1  msaitoh 					ixgbe_bcopy(mp->m_data,
   1868  1.1  msaitoh 					    sendmp->m_data, len);
   1869  1.1  msaitoh 					sendmp->m_len = len;
   1870  1.1  msaitoh 					rxr->rx_copies.ev_count++;
   1871  1.1  msaitoh 					rbuf->flags |= IXGBE_RX_COPY;
   1872  1.1  msaitoh 				}
   1873  1.1  msaitoh 			}
   1874  1.1  msaitoh 			if (sendmp == NULL) {
   1875  1.1  msaitoh 				rbuf->buf = rbuf->fmp = NULL;
   1876  1.1  msaitoh 				sendmp = mp;
   1877  1.1  msaitoh 			}
   1878  1.1  msaitoh 
   1879  1.1  msaitoh 			/* first desc of a non-ps chain */
   1880  1.1  msaitoh 			sendmp->m_flags |= M_PKTHDR;
   1881  1.1  msaitoh 			sendmp->m_pkthdr.len = mp->m_len;
   1882  1.1  msaitoh 		}
   1883  1.1  msaitoh 		++processed;
   1884  1.1  msaitoh 
   1885  1.1  msaitoh 		/* Pass the head pointer on */
   1886  1.1  msaitoh 		if (eop == 0) {
   1887  1.1  msaitoh 			nbuf->fmp = sendmp;
   1888  1.1  msaitoh 			sendmp = NULL;
   1889  1.1  msaitoh 			mp->m_next = nbuf->buf;
   1890  1.1  msaitoh 		} else { /* Sending this frame */
   1891  1.1  msaitoh 			m_set_rcvif(sendmp, ifp);
   1892  1.1  msaitoh 			ifp->if_ipackets++;
   1893  1.1  msaitoh 			rxr->rx_packets.ev_count++;
   1894  1.1  msaitoh 			/* capture data for AIM */
   1895  1.1  msaitoh 			rxr->bytes += sendmp->m_pkthdr.len;
   1896  1.1  msaitoh 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
   1897  1.1  msaitoh 			/* Process vlan info */
   1898  1.1  msaitoh 			if ((rxr->vtag_strip) &&
   1899  1.1  msaitoh 			    (staterr & IXGBE_RXD_STAT_VP))
   1900  1.1  msaitoh 				vtag = le16toh(cur->wb.upper.vlan);
   1901  1.1  msaitoh 			if (vtag) {
   1902  1.1  msaitoh 				VLAN_INPUT_TAG(ifp, sendmp, vtag,
   1903  1.1  msaitoh 				    printf("%s: could not apply VLAN "
   1904  1.1  msaitoh 					"tag", __func__));
   1905  1.1  msaitoh 			}
   1906  1.1  msaitoh 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
   1907  1.1  msaitoh 				ixgbe_rx_checksum(staterr, sendmp, ptype,
   1908  1.1  msaitoh 				   &adapter->stats);
   1909  1.1  msaitoh 			}
   1910  1.1  msaitoh #if __FreeBSD_version >= 800000
   1911  1.1  msaitoh #ifdef RSS
   1912  1.1  msaitoh 			sendmp->m_pkthdr.flowid =
   1913  1.1  msaitoh 			    le32toh(cur->wb.lower.hi_dword.rss);
   1914  1.1  msaitoh 			switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
   1915  1.1  msaitoh 			case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
   1916  1.1  msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV4);
   1917  1.1  msaitoh 				break;
   1918  1.1  msaitoh 			case IXGBE_RXDADV_RSSTYPE_IPV4:
   1919  1.1  msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV4);
   1920  1.1  msaitoh 				break;
   1921  1.1  msaitoh 			case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
   1922  1.1  msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV6);
   1923  1.1  msaitoh 				break;
   1924  1.1  msaitoh 			case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
   1925  1.1  msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV6_EX);
   1926  1.1  msaitoh 				break;
   1927  1.1  msaitoh 			case IXGBE_RXDADV_RSSTYPE_IPV6:
   1928  1.1  msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV6);
   1929  1.1  msaitoh 				break;
   1930  1.1  msaitoh 			case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
   1931  1.1  msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV6_EX);
   1932  1.1  msaitoh 				break;
   1933  1.1  msaitoh 			case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
   1934  1.1  msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV4);
   1935  1.1  msaitoh 				break;
   1936  1.1  msaitoh 			case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
   1937  1.1  msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV6);
   1938  1.1  msaitoh 				break;
   1939  1.1  msaitoh 			case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
   1940  1.1  msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV6_EX);
   1941  1.1  msaitoh 				break;
   1942  1.1  msaitoh 			default:
    1943  1.1  msaitoh 			/* Unknown RSS type: mark the hash opaque */
   1944  1.1  msaitoh 				M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
   1945  1.1  msaitoh 				break;
   1946  1.1  msaitoh 			}
   1947  1.1  msaitoh #else /* RSS */
   1948  1.1  msaitoh 			sendmp->m_pkthdr.flowid = que->msix;
   1949  1.1  msaitoh 			M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
   1950  1.1  msaitoh #endif /* RSS */
   1951  1.1  msaitoh #endif /* FreeBSD_version */
   1952  1.1  msaitoh 		}
   1953  1.1  msaitoh next_desc:
   1954  1.1  msaitoh 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
   1955  1.1  msaitoh 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1956  1.1  msaitoh 
   1957  1.1  msaitoh 		/* Advance our pointers to the next descriptor. */
   1958  1.1  msaitoh 		if (++i == rxr->num_desc)
   1959  1.1  msaitoh 			i = 0;
   1960  1.1  msaitoh 
   1961  1.1  msaitoh 		/* Now send to the stack or do LRO */
   1962  1.1  msaitoh 		if (sendmp != NULL) {
   1963  1.1  msaitoh 			rxr->next_to_check = i;
   1964  1.1  msaitoh 			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
   1965  1.1  msaitoh 			i = rxr->next_to_check;
   1966  1.1  msaitoh 		}
   1967  1.1  msaitoh 
    1968  1.1  msaitoh 		/* Every 8 descriptors we go to refresh mbufs */
   1969  1.1  msaitoh 		if (processed == 8) {
   1970  1.1  msaitoh 			ixgbe_refresh_mbufs(rxr, i);
   1971  1.1  msaitoh 			processed = 0;
   1972  1.1  msaitoh 		}
   1973  1.1  msaitoh 	}
   1974  1.1  msaitoh 
   1975  1.1  msaitoh 	/* Refresh any remaining buf structs */
   1976  1.1  msaitoh 	if (ixgbe_rx_unrefreshed(rxr))
   1977  1.1  msaitoh 		ixgbe_refresh_mbufs(rxr, i);
   1978  1.1  msaitoh 
   1979  1.1  msaitoh 	rxr->next_to_check = i;
   1980  1.1  msaitoh 
   1981  1.1  msaitoh #ifdef LRO
   1982  1.1  msaitoh 	/*
   1983  1.1  msaitoh 	 * Flush any outstanding LRO work
   1984  1.1  msaitoh 	 */
   1985  1.1  msaitoh 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
   1986  1.1  msaitoh 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
   1987  1.1  msaitoh 		tcp_lro_flush(lro, queued);
   1988  1.1  msaitoh 	}
   1989  1.1  msaitoh #endif /* LRO */
   1990  1.1  msaitoh 
   1991  1.1  msaitoh 	IXGBE_RX_UNLOCK(rxr);
   1992  1.1  msaitoh 
   1993  1.1  msaitoh 	/*
   1994  1.1  msaitoh 	** Still have cleaning to do?
   1995  1.1  msaitoh 	*/
   1996  1.1  msaitoh 	if ((staterr & IXGBE_RXD_STAT_DD) != 0)
   1997  1.1  msaitoh 		return true;
   1998  1.1  msaitoh 	else
   1999  1.1  msaitoh 		return false;
   2000  1.1  msaitoh }
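                       /*
                        * Chain-building sketch for the rxeof loop above (editor's note):
                        * for a frame spanning descriptors d0..d2, the head travels in the
                        * buffer structs instead of global fmp/lmp pointers, roughly:
                        *
                        *	d0: sendmp = mp0; nbuf(d1)->fmp = mp0; mp0->m_next = d1's buf
                        *	d1: sendmp = fmp (mp0); pkthdr.len += m_len; nbuf(d2)->fmp = mp0
                        *	d2: EOP set, so sendmp (mp0) goes up via ixgbe_rx_input()
                        *
                        * Because the partial chain rides with the descriptors themselves,
                        * hardware RSC can point NEXTP at a non-sequential descriptor and
                        * nothing breaks.
                        */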
   2001  1.1  msaitoh 
   2002  1.1  msaitoh 
   2003  1.1  msaitoh /*********************************************************************
   2004  1.1  msaitoh  *
   2005  1.1  msaitoh  *  Verify that the hardware indicated that the checksum is valid.
   2006  1.1  msaitoh  *  Inform the stack about the status of checksum so that stack
   2007  1.1  msaitoh  *  doesn't spend time verifying the checksum.
   2008  1.1  msaitoh  *
   2009  1.1  msaitoh  *********************************************************************/
   2010  1.1  msaitoh static void
   2011  1.1  msaitoh ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
   2012  1.1  msaitoh     struct ixgbe_hw_stats *stats)
   2013  1.1  msaitoh {
   2014  1.1  msaitoh 	u16	status = (u16) staterr;
   2015  1.1  msaitoh 	u8	errors = (u8) (staterr >> 24);
   2016  1.1  msaitoh #if 0
   2017  1.1  msaitoh 	bool	sctp = FALSE;
   2018  1.1  msaitoh 
   2019  1.1  msaitoh 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
   2020  1.1  msaitoh 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
   2021  1.1  msaitoh 		sctp = TRUE;
   2022  1.1  msaitoh #endif
   2023  1.1  msaitoh 
   2024  1.1  msaitoh 	if (status & IXGBE_RXD_STAT_IPCS) {
   2025  1.1  msaitoh 		stats->ipcs.ev_count++;
   2026  1.1  msaitoh 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
   2027  1.1  msaitoh 			/* IP Checksum Good */
   2028  1.1  msaitoh 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
   2029  1.1  msaitoh 
   2030  1.1  msaitoh 		} else {
   2031  1.1  msaitoh 			stats->ipcs_bad.ev_count++;
   2032  1.1  msaitoh 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
   2033  1.1  msaitoh 		}
   2034  1.1  msaitoh 	}
   2035  1.1  msaitoh 	if (status & IXGBE_RXD_STAT_L4CS) {
   2036  1.1  msaitoh 		stats->l4cs.ev_count++;
   2037  1.1  msaitoh 		int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
   2038  1.1  msaitoh 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
   2039  1.1  msaitoh 			mp->m_pkthdr.csum_flags |= type;
   2040  1.1  msaitoh 		} else {
   2041  1.1  msaitoh 			stats->l4cs_bad.ev_count++;
   2042  1.1  msaitoh 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
   2043  1.1  msaitoh 		}
   2044  1.1  msaitoh 	}
   2045  1.1  msaitoh 	return;
   2046  1.1  msaitoh }
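                       /*
                        * Example (editor's note): the two casts above rely on the
                        * advanced descriptor packing status in the low 16 bits of
                        * staterr and the error bits in bits 24-31.  A frame with a good
                        * IP header checksum but a bad TCP checksum therefore leaves here
                        * with csum_flags = M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_TCPv6 |
                        * M_CSUM_UDPv4 | M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD, and the stack
                        * treats the checksum as failed without recomputing it.
                        */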
   2047  1.1  msaitoh 
   2048  1.1  msaitoh 
   2049  1.1  msaitoh /********************************************************************
   2050  1.1  msaitoh  * Manage DMA'able memory.
   2051  1.1  msaitoh  *******************************************************************/
   2052  1.1  msaitoh 
   2053  1.1  msaitoh int
   2054  1.1  msaitoh ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
   2055  1.1  msaitoh 		struct ixgbe_dma_alloc *dma, const int mapflags)
   2056  1.1  msaitoh {
   2057  1.1  msaitoh 	device_t dev = adapter->dev;
   2058  1.1  msaitoh 	int             r, rsegs;
   2059  1.1  msaitoh 
   2060  1.1  msaitoh 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
   2061  1.1  msaitoh 			       DBA_ALIGN, 0,	/* alignment, bounds */
   2062  1.1  msaitoh 			       size,	/* maxsize */
   2063  1.1  msaitoh 			       1,	/* nsegments */
   2064  1.1  msaitoh 			       size,	/* maxsegsize */
   2065  1.1  msaitoh 			       BUS_DMA_ALLOCNOW,	/* flags */
   2066  1.1  msaitoh 			       &dma->dma_tag);
   2067  1.1  msaitoh 	if (r != 0) {
   2068  1.1  msaitoh 		aprint_error_dev(dev,
   2069  1.1  msaitoh 		    "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, r);
   2070  1.1  msaitoh 		goto fail_0;
   2071  1.1  msaitoh 	}
   2072  1.1  msaitoh 
   2073  1.1  msaitoh 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
   2074  1.1  msaitoh 		size,
   2075  1.1  msaitoh 		dma->dma_tag->dt_alignment,
   2076  1.1  msaitoh 		dma->dma_tag->dt_boundary,
   2077  1.1  msaitoh 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
   2078  1.1  msaitoh 	if (r != 0) {
   2079  1.1  msaitoh 		aprint_error_dev(dev,
   2080  1.1  msaitoh 		    "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
   2081  1.1  msaitoh 		goto fail_1;
   2082  1.1  msaitoh 	}
   2083  1.1  msaitoh 
   2084  1.1  msaitoh 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
   2085  1.1  msaitoh 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
   2086  1.1  msaitoh 	if (r != 0) {
   2087  1.1  msaitoh 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
   2088  1.1  msaitoh 		    __func__, r);
   2089  1.1  msaitoh 		goto fail_2;
   2090  1.1  msaitoh 	}
   2091  1.1  msaitoh 
   2092  1.1  msaitoh 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
   2093  1.1  msaitoh 	if (r != 0) {
    2094  1.1  msaitoh 		aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n",
   2095  1.1  msaitoh 		    __func__, r);
   2096  1.1  msaitoh 		goto fail_3;
   2097  1.1  msaitoh 	}
   2098  1.1  msaitoh 
   2099  1.1  msaitoh 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
   2100  1.1  msaitoh 			    size,
   2101  1.1  msaitoh 			    NULL,
   2102  1.1  msaitoh 			    mapflags | BUS_DMA_NOWAIT);
   2103  1.1  msaitoh 	if (r != 0) {
   2104  1.1  msaitoh 		aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
   2105  1.1  msaitoh 		    __func__, r);
   2106  1.1  msaitoh 		goto fail_4;
   2107  1.1  msaitoh 	}
   2108  1.1  msaitoh 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
   2109  1.1  msaitoh 	dma->dma_size = size;
   2110  1.1  msaitoh 	return 0;
   2111  1.1  msaitoh fail_4:
   2112  1.1  msaitoh 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
   2113  1.1  msaitoh fail_3:
   2114  1.1  msaitoh 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
   2115  1.1  msaitoh fail_2:
   2116  1.1  msaitoh 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
   2117  1.1  msaitoh fail_1:
   2118  1.1  msaitoh 	ixgbe_dma_tag_destroy(dma->dma_tag);
   2119  1.1  msaitoh fail_0:
   2120  1.1  msaitoh 	return r;
   2121  1.1  msaitoh }
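                       /*
                        * Usage sketch (editor's note), matching the ring setup below:
                        *
                        *	tsize = roundup2(adapter->num_tx_desc *
                        *	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
                        *	if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
                        *	    BUS_DMA_NOWAIT))
                        *		... bail out ...
                        *	txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
                        *
                        * On success dma_vaddr is the kernel mapping and dma_paddr the bus
                        * address programmed into the hardware ring registers.
                        */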
   2122  1.1  msaitoh 
    2123  1.1  msaitoh static void
    2124  1.1  msaitoh ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
    2125  1.1  msaitoh {
    2126  1.1  msaitoh 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
    2127  1.1  msaitoh 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    2128  1.1  msaitoh 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
                       	/*
                       	 * Mirror the fail_4..fail_1 unwind in ixgbe_dma_malloc():
                       	 * also destroy the map and unmap the KVA, otherwise both
                       	 * are leaked on every ring teardown.
                       	 */
                       	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
                       	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, dma->dma_size);
    2129  1.1  msaitoh 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
    2130  1.1  msaitoh 	ixgbe_dma_tag_destroy(dma->dma_tag);
    2131  1.1  msaitoh }
   2132  1.1  msaitoh 
   2133  1.1  msaitoh 
   2134  1.1  msaitoh /*********************************************************************
   2135  1.1  msaitoh  *
   2136  1.1  msaitoh  *  Allocate memory for the transmit and receive rings, and then
   2137  1.1  msaitoh  *  the descriptors associated with each, called only once at attach.
   2138  1.1  msaitoh  *
   2139  1.1  msaitoh  **********************************************************************/
   2140  1.1  msaitoh int
   2141  1.1  msaitoh ixgbe_allocate_queues(struct adapter *adapter)
   2142  1.1  msaitoh {
   2143  1.1  msaitoh 	device_t	dev = adapter->dev;
   2144  1.1  msaitoh 	struct ix_queue	*que;
   2145  1.1  msaitoh 	struct tx_ring	*txr;
   2146  1.1  msaitoh 	struct rx_ring	*rxr;
   2147  1.1  msaitoh 	int rsize, tsize, error = IXGBE_SUCCESS;
   2148  1.1  msaitoh 	int txconf = 0, rxconf = 0;
   2149  1.1  msaitoh 
    2150  1.1  msaitoh 	/* First allocate the top level queue structs */
    2151  1.1  msaitoh 	if (!(adapter->queues =
    2152  1.1  msaitoh 	    (struct ix_queue *) malloc(sizeof(struct ix_queue) *
    2153  1.1  msaitoh 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
    2154  1.1  msaitoh 		aprint_error_dev(dev, "Unable to allocate queue memory\n");
    2155  1.1  msaitoh 		error = ENOMEM;
    2156  1.1  msaitoh 		goto fail;
    2157  1.1  msaitoh 	}
   2158  1.1  msaitoh 
    2159  1.1  msaitoh 	/* Next allocate the TX ring struct memory */
   2160  1.1  msaitoh 	if (!(adapter->tx_rings =
   2161  1.1  msaitoh 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
   2162  1.1  msaitoh 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2163  1.1  msaitoh 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
   2164  1.1  msaitoh 		error = ENOMEM;
   2165  1.1  msaitoh 		goto tx_fail;
   2166  1.1  msaitoh 	}
   2167  1.1  msaitoh 
   2168  1.1  msaitoh 	/* Next allocate the RX */
   2169  1.1  msaitoh 	if (!(adapter->rx_rings =
   2170  1.1  msaitoh 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
   2171  1.1  msaitoh 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
   2172  1.1  msaitoh 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
   2173  1.1  msaitoh 		error = ENOMEM;
   2174  1.1  msaitoh 		goto rx_fail;
   2175  1.1  msaitoh 	}
   2176  1.1  msaitoh 
   2177  1.1  msaitoh 	/* For the ring itself */
   2178  1.1  msaitoh 	tsize = roundup2(adapter->num_tx_desc *
   2179  1.1  msaitoh 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
   2180  1.1  msaitoh 
   2181  1.1  msaitoh 	/*
   2182  1.1  msaitoh 	 * Now set up the TX queues, txconf is needed to handle the
   2183  1.1  msaitoh 	 * possibility that things fail midcourse and we need to
   2184  1.1  msaitoh 	 * undo memory gracefully
   2185  1.1  msaitoh 	 */
   2186  1.1  msaitoh 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
   2187  1.1  msaitoh 		/* Set up some basics */
   2188  1.1  msaitoh 		txr = &adapter->tx_rings[i];
   2189  1.1  msaitoh 		txr->adapter = adapter;
   2190  1.1  msaitoh 		txr->me = i;
   2191  1.1  msaitoh 		txr->num_desc = adapter->num_tx_desc;
   2192  1.1  msaitoh 
   2193  1.1  msaitoh 		/* Initialize the TX side lock */
   2194  1.1  msaitoh 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
   2195  1.1  msaitoh 		    device_xname(dev), txr->me);
   2196  1.1  msaitoh 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
   2197  1.1  msaitoh 
   2198  1.1  msaitoh 		if (ixgbe_dma_malloc(adapter, tsize,
   2199  1.1  msaitoh 			&txr->txdma, BUS_DMA_NOWAIT)) {
   2200  1.1  msaitoh 			aprint_error_dev(dev,
   2201  1.1  msaitoh 			    "Unable to allocate TX Descriptor memory\n");
   2202  1.1  msaitoh 			error = ENOMEM;
   2203  1.1  msaitoh 			goto err_tx_desc;
   2204  1.1  msaitoh 		}
   2205  1.1  msaitoh 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
   2206  1.1  msaitoh 		bzero((void *)txr->tx_base, tsize);
   2207  1.1  msaitoh 
   2208  1.1  msaitoh         	/* Now allocate transmit buffers for the ring */
   2209  1.1  msaitoh         	if (ixgbe_allocate_transmit_buffers(txr)) {
   2210  1.1  msaitoh 			aprint_error_dev(dev,
   2211  1.1  msaitoh 			    "Critical Failure setting up transmit buffers\n");
   2212  1.1  msaitoh 			error = ENOMEM;
   2213  1.1  msaitoh 			goto err_tx_desc;
   2214  1.1  msaitoh         	}
   2215  1.1  msaitoh #ifndef IXGBE_LEGACY_TX
   2216  1.1  msaitoh 		/* Allocate a buf ring */
   2217  1.1  msaitoh 		txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
   2218  1.1  msaitoh 		    M_WAITOK, &txr->tx_mtx);
   2219  1.1  msaitoh 		if (txr->br == NULL) {
   2220  1.1  msaitoh 			aprint_error_dev(dev,
   2221  1.1  msaitoh 			    "Critical Failure setting up buf ring\n");
   2222  1.1  msaitoh 			error = ENOMEM;
   2223  1.1  msaitoh 			goto err_tx_desc;
   2224  1.1  msaitoh         	}
   2225  1.1  msaitoh #endif
   2226  1.1  msaitoh 	}
   2227  1.1  msaitoh 
   2228  1.1  msaitoh 	/*
   2229  1.1  msaitoh 	 * Next the RX queues...
   2230  1.1  msaitoh 	 */
   2231  1.1  msaitoh 	rsize = roundup2(adapter->num_rx_desc *
   2232  1.1  msaitoh 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
   2233  1.1  msaitoh 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
   2234  1.1  msaitoh 		rxr = &adapter->rx_rings[i];
   2235  1.1  msaitoh 		/* Set up some basics */
   2236  1.1  msaitoh 		rxr->adapter = adapter;
   2237  1.1  msaitoh 		rxr->me = i;
   2238  1.1  msaitoh 		rxr->num_desc = adapter->num_rx_desc;
   2239  1.1  msaitoh 
   2240  1.1  msaitoh 		/* Initialize the RX side lock */
   2241  1.1  msaitoh 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
   2242  1.1  msaitoh 		    device_xname(dev), rxr->me);
   2243  1.1  msaitoh 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
   2244  1.1  msaitoh 
   2245  1.1  msaitoh 		if (ixgbe_dma_malloc(adapter, rsize,
   2246  1.1  msaitoh 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
   2247  1.1  msaitoh 			aprint_error_dev(dev,
   2248  1.1  msaitoh 			    "Unable to allocate RxDescriptor memory\n");
   2249  1.1  msaitoh 			error = ENOMEM;
   2250  1.1  msaitoh 			goto err_rx_desc;
   2251  1.1  msaitoh 		}
   2252  1.1  msaitoh 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
   2253  1.1  msaitoh 		bzero((void *)rxr->rx_base, rsize);
   2254  1.1  msaitoh 
   2255  1.1  msaitoh         	/* Allocate receive buffers for the ring*/
   2256  1.1  msaitoh 		if (ixgbe_allocate_receive_buffers(rxr)) {
   2257  1.1  msaitoh 			aprint_error_dev(dev,
   2258  1.1  msaitoh 			    "Critical Failure setting up receive buffers\n");
   2259  1.1  msaitoh 			error = ENOMEM;
   2260  1.1  msaitoh 			goto err_rx_desc;
   2261  1.1  msaitoh 		}
   2262  1.1  msaitoh 	}
   2263  1.1  msaitoh 
   2264  1.1  msaitoh 	/*
   2265  1.1  msaitoh 	** Finally set up the queue holding structs
   2266  1.1  msaitoh 	*/
   2267  1.1  msaitoh 	for (int i = 0; i < adapter->num_queues; i++) {
   2268  1.1  msaitoh 		que = &adapter->queues[i];
   2269  1.1  msaitoh 		que->adapter = adapter;
   2270  1.1  msaitoh 		que->txr = &adapter->tx_rings[i];
   2271  1.1  msaitoh 		que->rxr = &adapter->rx_rings[i];
   2272  1.1  msaitoh 	}
   2273  1.1  msaitoh 
   2274  1.1  msaitoh 	return (0);
   2275  1.1  msaitoh 
   2276  1.1  msaitoh err_rx_desc:
   2277  1.1  msaitoh 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
   2278  1.1  msaitoh 		ixgbe_dma_free(adapter, &rxr->rxdma);
   2279  1.1  msaitoh err_tx_desc:
   2280  1.1  msaitoh 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
   2281  1.1  msaitoh 		ixgbe_dma_free(adapter, &txr->txdma);
   2282  1.1  msaitoh 	free(adapter->rx_rings, M_DEVBUF);
   2283  1.1  msaitoh rx_fail:
   2284  1.1  msaitoh 	free(adapter->tx_rings, M_DEVBUF);
   2285  1.1  msaitoh tx_fail:
   2286  1.1  msaitoh 	free(adapter->queues, M_DEVBUF);
   2287  1.1  msaitoh fail:
   2288  1.1  msaitoh 	return (error);
   2289  1.1  msaitoh }
   2290  1.1  msaitoh 
   2291