/* $NetBSD: ixgbe_netmap.c,v 1.3 2021/04/30 06:41:36 msaitoh Exp $ */
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_netmap.c 320688 2017-07-05 17:27:03Z erj $*/

/*
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: head/sys/dev/ixgbe/ixgbe_netmap.c 320688 2017-07-05 17:27:03Z erj $
 *
 * netmap support for: ixgbe
 *
 * This file is meant to be a reference on how to implement
 * netmap support for a network driver.
 * It contains only static or inline functions used by a single
 * driver; to avoid replicating code it is simply #included near
 * the beginning of the standard driver.
 */
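
/*
 * A hedged illustration (not part of the original file): per the note
 * above, the main driver typically pulls this code in with a guarded
 * #include near the top of its own source file.  The header name below
 * is only an example of that pattern, not a guaranteed path:
 *
 *	#ifdef DEV_NETMAP
 *	#include <dev/netmap/ixgbe_netmap.h>
 *	#endif
 */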

#ifdef DEV_NETMAP
/*
 * Some drivers may need the following headers. Others
 * already include them by default

#include <vm/vm.h>
#include <vm/pmap.h>

 */
#include "ixgbe.h"

/*
 * device-specific sysctl variables:
 *
 * ix_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
 *	During regular operations the CRC is stripped, but on some
 *	hardware reception of frames whose length is not a multiple
 *	of 64 bytes is slower, so using crcstrip=0 helps in benchmarks.
 *
 * ix_rx_miss, ix_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
static int ix_rx_miss, ix_rx_miss_bufs;
int ix_crcstrip;
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_crcstrip,
    CTLFLAG_RW, &ix_crcstrip, 0, "strip CRC on rx frames");
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss,
    CTLFLAG_RW, &ix_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss_bufs,
    CTLFLAG_RW, &ix_rx_miss_bufs, 0, "potentially missed rx intr bufs");
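
/*
 * Usage note (illustrative, assuming the FreeBSD-style sysctl macros
 * above are in effect): the knobs then appear under the dev.netmap
 * tree and can be tweaked or read from userspace, e.g.
 *
 *	sysctl dev.netmap.ix_crcstrip=0		# keep the CRC on rx frames
 *	sysctl dev.netmap.ix_rx_miss		# read the miss counter
 */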


static void
set_crcstrip(struct ixgbe_hw *hw, int onoff)
{
	/* crc stripping is set in two places:
	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
	 * IXGBE_RDRXCTL (set by the original driver in
	 *	ixgbe_setup_hw_rsc() called in init_locked.
	 *	We disable the setting when netmap is compiled in).
	 * We update the values here, but also in ixgbe.c because
	 * init_locked sometimes is called outside our control.
	 */
	uint32_t hl, rxc;

	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	if (netmap_verbose)
		D("%s read  HLREG 0x%x rxc 0x%x",
			onoff ? "enter" : "exit", hl, rxc);
	/* hw requirements ... */
	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
	rxc |= IXGBE_RDRXCTL_RSCACKC;
	if (onoff && !ix_crcstrip) {
		/* keep the crc. Fast rx */
		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
	} else {
		/* reset default mode */
		hl |= IXGBE_HLREG0_RXCRCSTRP;
		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
	}
	if (netmap_verbose)
		D("%s write HLREG 0x%x rxc 0x%x",
			onoff ? "enter" : "exit", hl, rxc);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
}


/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
ixgbe_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	adapter->stop_locked(adapter);

	set_crcstrip(&adapter->hw, onoff);
	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	adapter->init_locked(adapter);	/* also enables intr */
	set_crcstrip(&adapter->hw, onoff); // XXX why twice ?
	IXGBE_CORE_UNLOCK(adapter);
	return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}
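
/*
 * A hedged sketch (not part of the original file): a netmap-aware
 * driver also short-circuits its tx/rx cleanup paths, so that in
 * netmap mode an interrupt only wakes up the user process instead of
 * touching the rings.  The guards below show the usual shape of those
 * hooks at the top of the driver's txeof/rxeof routines; the queue
 * index fields (txr->me, rxr->me) are illustrative of the ixgbe layout.
 */
#if 0	/* illustrative only, the real hooks live in the main driver */
	/* tx cleanup path */
	if (netmap_tx_irq(ifp, txr->me))
		return;			/* netmap handled this ring */

	/* rx cleanup path */
	if (netmap_rx_irq(ifp, rxr->me, &rx_done))
		return;			/* netmap handled this ring */
#endif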


/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmission.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
ixgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;

	/* device-specific */
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = &adapter->tx_rings[kring->ring_id];
	int reclaim_tx;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap ring,
	 * nic_i is the corresponding index in the NIC ring.
	 * The two numbers differ because upon a *_init() we reset
	 * the NIC ring but leave the netmap ring unchanged.
	 * For the transmit ring, we have
	 *
	 *		nm_i = kring->nr_hwcur
	 *		nic_i = IXGBE_TDT (not tracked in the driver)
	 * and
	 *		nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * In this driver kring->nkr_hwofs >= 0, but for other
	 * drivers it might be negative as well.
	 */

	/*
	 * If we have packets to send (kring->nr_hwcur != kring->rhead)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so only do it
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txr->tx_buffers[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			/* device-specific */
			union ixgbe_adv_tx_desc *curr = &txr->tx_base[nic_i];
			struct ixgbe_tx_buf *txbuf = &txr->tx_buffers[nic_i];
			int flags = (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				IXGBE_TXD_CMD_RS : 0;

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txr->tx_buffers[nic_i + 1]);

			NM_CHECK_ADDR_LEN(na, addr, len);

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, txr->txtag, txbuf->map, addr);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);

			/* Fill the slot in the NIC ring. */
			/* Use legacy descriptors? They may be faster. */
			curr->read.buffer_addr = htole64(paddr);
			curr->read.olinfo_status = 0;
			curr->read.cmd_type_len = htole32(len | flags |
				IXGBE_ADVTXD_DCMD_IFCS | IXGBE_TXD_CMD_EOP);

			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(txr->txtag, txbuf->map,
				BUS_DMASYNC_PREWRITE);

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		IXGBE_WRITE_REG(&adapter->hw, txr->tail, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 * Because this is expensive (we read a NIC register etc.)
	 * we only do it in specific cases (see below).
	 */
	if (flags & NAF_FORCE_RECLAIM) {
		reclaim_tx = 1; /* forced reclaim */
	} else if (!nm_kr_txempty(kring)) {
		reclaim_tx = 0; /* have buffers, no reclaim */
	} else {
		/*
		 * No buffers available. Locate previous slot with
		 * REPORT_STATUS set.
		 * If the slot has DD set, we can reclaim space,
		 * otherwise wait for the next interrupt.
		 * This enables interrupt moderation on the tx
		 * side though it might reduce throughput.
		 */
		struct ixgbe_legacy_tx_desc *txd =
		    (struct ixgbe_legacy_tx_desc *)txr->tx_base;

		nic_i = txr->next_to_clean + report_frequency;
		if (nic_i > lim)
			nic_i -= lim + 1;
		// round to the closest with dd set
		nic_i = (nic_i < kring->nkr_num_slots / 4 ||
			 nic_i >= kring->nkr_num_slots*3/4) ?
			0 : report_frequency;
		reclaim_tx = txd[nic_i].upper.fields.status & IXGBE_TXD_STAT_DD;	// XXX cpu_to_le32 ?
	}
	if (reclaim_tx) {
		/*
		 * Record completed transmissions.
		 * We (re)use the driver's txr->next_to_clean to keep
		 * track of the most recently completed transmission.
		 *
		 * The datasheet discourages the use of TDH to find
		 * out the number of sent packets, but we only set
		 * REPORT_STATUS in a few slots so TDH is the only
		 * good way.
		 */
		nic_i = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(kring->ring_id));
		if (nic_i >= kring->nkr_num_slots) { /* XXX can it happen ? */
			D("TDH wrap %d", nic_i);
			nic_i -= kring->nkr_num_slots;
		}
		if (nic_i != txr->next_to_clean) {
			/* some tx completed, increment avail */
			txr->next_to_clean = nic_i;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}

	return 0;
}


/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
ixgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	/* device-specific */
	struct adapter *adapter = ifp->if_softc;
	struct rx_ring *rxr = &adapter->rx_rings[kring->ring_id];

	if (head > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring,
	 * and they may differ if if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = rxr->next_to_check;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * rxr->next_to_check is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		int crclen = (ix_crcstrip) ? 0 : 4;

		nic_i = rxr->next_to_check; // or also k2n(kring->nr_hwtail)
		nm_i = netmap_idx_n2k(kring, nic_i);

		for (n = 0; ; n++) {
			union ixgbe_adv_rx_desc *curr = &rxr->rx_base[nic_i];
			uint32_t staterr = le32toh(curr->wb.upper.status_error);

			if ((staterr & IXGBE_RXD_STAT_DD) == 0)
				break;
			ring->slot[nm_i].len = le16toh(curr->wb.upper.length) - crclen;
			ring->slot[nm_i].flags = 0;
			bus_dmamap_sync(rxr->ptag,
			    rxr->rx_buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		if (n) { /* update the state variables */
			if (netmap_no_pendintr && !force_update) {
				/* diagnostics */
				ix_rx_miss ++;
				ix_rx_miss_bufs += n;
			}
			rxr->next_to_check = nic_i;
			kring->nr_hwtail = nm_i;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/*
	 * Second part: skip past packets that userspace has released.
	 * (kring->nr_hwcur to kring->rhead excluded),
	 * and make the buffers available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			union ixgbe_adv_rx_desc *curr = &rxr->rx_base[nic_i];
			struct ixgbe_rx_buf *rxbuf = &rxr->rx_buffers[nic_i];

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				goto ring_reset;

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, rxr->ptag, rxbuf->pmap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			curr->wb.upper.status_error = 0;
			curr->read.pkt_addr = htole64(paddr);
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_PREREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * IMPORTANT: we must leave one free slot in the ring,
		 * so move nic_i back by one unit
		 */
		nic_i = nm_prev(nic_i, lim);
		IXGBE_WRITE_REG(&adapter->hw, rxr->tail, nic_i);
	}

	return 0;

ring_reset:
	return netmap_ring_reinit(kring);
}


/*
 * The attach routine, called near the end of ixgbe_attach(),
 * fills the parameters for netmap_attach() and calls it.
 * It cannot fail; in the worst case (such as no memory)
 * netmap mode will be disabled and the driver will only
 * operate in standard mode.
 */
void
ixgbe_netmap_attach(struct adapter *adapter)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = adapter->ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	na.num_tx_desc = adapter->num_tx_desc;
	na.num_rx_desc = adapter->num_rx_desc;
	na.nm_txsync = ixgbe_netmap_txsync;
	na.nm_rxsync = ixgbe_netmap_rxsync;
	na.nm_register = ixgbe_netmap_reg;
	na.num_tx_rings = na.num_rx_rings = adapter->num_queues;
	netmap_attach(&na);
}
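
/*
 * A hedged sketch (not part of the original file): the matching
 * teardown lives in the main driver's detach path and is a single
 * guarded call, roughly:
 *
 *	#ifdef DEV_NETMAP
 *		netmap_detach(adapter->ifp);
 *	#endif
 */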

#endif /* DEV_NETMAP */

/* end of file */