/src/sys/dev/pci/ixgbe/
ix_txrx.c
   143  ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
   147  struct ixgbe_softc *sc = txr->sc;
   149  IXGBE_TX_LOCK_ASSERT(txr);
   156  ixgbe_drain(ifp, txr);
   161  if (txr->txr_no_space)
   165  if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
   172  if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
   198  struct tx_ring *txr = sc->tx_rings;  (local)
   201  IXGBE_TX_LOCK(txr);
   202  ixgbe_legacy_start_locked(ifp, txr);
   216  struct tx_ring *txr;  (local)
   358  struct tx_ring *txr = arg;  (local)
   376  struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);  (local)
   395  struct tx_ring *txr = que->txr;  (local)
   743  struct tx_ring *txr = sc->tx_rings;  (local)
   757  struct tx_ring *txr = sc->tx_rings;  (local)
  2360  struct tx_ring *txr;  (local)
  [all...]
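The ix_txrx.c hits above trace the locked legacy-start path: assert the TX lock, bail out while txr_no_space is set, stop dequeuing once tx_avail drops to IXGBE_QUEUE_MIN_FREE, and requeue the packet when ixgbe_xmit() returns EAGAIN. Below is a minimal user-space sketch of that control flow; every toy_-prefixed name, the three-packet queue, and the four-descriptors-per-packet cost are invented stand-ins, not the driver's real types or constants.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented stand-ins: struct pkt models struct mbuf, toy_txr models
 * the handful of tx_ring fields the start path looks at. */
struct pkt { int id; };

struct toy_txr {
        bool no_space;          /* models txr->txr_no_space */
        unsigned int avail;     /* models txr->tx_avail     */
};

#define TOY_QUEUE_MIN_FREE 2    /* models IXGBE_QUEUE_MIN_FREE */
#define TOY_EAGAIN 35
#define TOY_DESC_PER_PKT 4      /* pretend cost of one packet  */

/* A tiny interface queue standing in for the ifnet send queue. */
static struct pkt pkts[3] = { { 1 }, { 2 }, { 3 } };
static int qhead;

static struct pkt *
toy_ifq_dequeue(void)
{
        return qhead < 3 ? &pkts[qhead++] : NULL;
}

static void
toy_ifq_requeue(struct pkt *p)
{
        (void)p;
        qhead--;                /* put the packet back at the head */
}

/* Models ixgbe_xmit(): consume descriptors or report EAGAIN. */
static int
toy_xmit(struct toy_txr *txr, struct pkt *p)
{
        if (txr->avail < TOY_DESC_PER_PKT)
                return TOY_EAGAIN;
        txr->avail -= TOY_DESC_PER_PKT;
        printf("sent packet %d, %u descriptors left\n", p->id, txr->avail);
        return 0;
}

/* Models ixgbe_legacy_start_locked(); the caller holds the TX lock
 * (IXGBE_TX_LOCK_ASSERT in the real function). */
static void
toy_start_locked(struct toy_txr *txr)
{
        struct pkt *p;

        if (txr->no_space)      /* wait for completions to reclaim */
                return;

        while (txr->avail > TOY_QUEUE_MIN_FREE &&
            (p = toy_ifq_dequeue()) != NULL) {
                if (toy_xmit(txr, p) == TOY_EAGAIN) {
                        toy_ifq_requeue(p);     /* retry it later  */
                        txr->no_space = true;   /* stall the queue */
                        break;
                }
        }
}

int
main(void)
{
        struct toy_txr txr = { .no_space = false, .avail = 7 };

        toy_start_locked(&txr);
        printf("no_space=%d, %d packet(s) still queued\n",
            txr.no_space, 3 - qhead);
        return 0;
}

With seven free descriptors the sketch sends one packet, hits the EAGAIN branch on the second, requeues it, and latches no_space, which is the state the completion interrupt clears in the real driver.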
ixgbe_netmap.c
   211  struct tx_ring *txr = &sc->tx_rings[kring->ring_id];  (local)
   214  bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   249  * to prefetch the next slot and txr entry.
   257  __builtin_prefetch(&txr->tx_buffers[nic_i]);
   266  union ixgbe_adv_tx_desc *curr = &txr->tx_base[nic_i];
   267  struct ixgbe_tx_buf *txbuf = &txr->tx_buffers[nic_i];
   274  __builtin_prefetch(&txr->tx_buffers[nic_i + 1]);
   280  netmap_reload_map(na, txr->txtag, txbuf->map, addr);
   292  bus_dmamap_sync(txr->txtag, txbuf->map
  [all...]
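The ixgbe_netmap.c hits show the netmap TX-sync loop prefetching the tx_buffers entry for slot nic_i + 1 while it is still filling descriptor nic_i, hiding the memory latency of the next iteration. A tiny runnable model of that prefetch-ahead idiom, using the GCC/Clang __builtin_prefetch hint; the ring size and buffer type are invented.

#include <stdio.h>

#define TOY_NDESC 8

struct toy_txbuf { unsigned long paddr; };

int
main(void)
{
        struct toy_txbuf bufs[TOY_NDESC] = { { 0 } };
        unsigned long sum = 0;
        int i;

        for (i = 0; i < TOY_NDESC; i++) {
                /* Pull slot i + 1 toward the cache while we work on
                 * slot i; purely a hint, correctness is unaffected. */
                if (i + 1 < TOY_NDESC)
                        __builtin_prefetch(&bufs[i + 1]);
                sum += bufs[i].paddr;   /* stands in for filling desc i */
        }
        printf("%lu\n", sum);
        return 0;
}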
if_fdir.c
    86  ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
    88  struct ixgbe_softc *sc = txr->sc;
   142  que = &sc->queues[txr->me];
   161  ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
   163  UNREFERENCED_2PARAMETER(txr, mp);
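if_fdir.c defines ixgbe_atr() twice: the real Flow Director version at line 86 and, when the feature is compiled out, a stub at line 161 whose body is just UNREFERENCED_2PARAMETER(txr, mp). A sketch of that conditional-stub pattern; TOY_FDIR and both types are invented, and plain (void) casts stand in for the macro. Build with -DTOY_FDIR to get the non-stub path.

#include <stdio.h>

struct toy_txr { int me; };
struct toy_pkt { int len; };

#ifdef TOY_FDIR
/* "Real" version: inspect the packet and program a filter. */
static void
toy_atr(struct toy_txr *txr, struct toy_pkt *mp)
{
        printf("filter programmed on queue %d, len %d\n",
            txr->me, mp->len);
}
#else
/* Compiled-out stub: silence unused-parameter warnings only,
 * mirroring UNREFERENCED_2PARAMETER(txr, mp). */
static void
toy_atr(struct toy_txr *txr, struct toy_pkt *mp)
{
        (void)txr;
        (void)mp;
}
#endif

int
main(void)
{
        struct toy_txr txr = { .me = 0 };
        struct toy_pkt pkt = { .len = 64 };

        toy_atr(&txr, &pkt);
        printf("done\n");
        return 0;
}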
|
ixv.c
   607  struct tx_ring *txr = sc->tx_rings;  (local)
   652  txr = sc->tx_rings;
   653  for (int i = 0; i < sc->num_queues; i++, rxr++, txr++) {
   657  evcnt_detach(&txr->total_packets);
   659  evcnt_detach(&txr->pcq_drops);
   661  evcnt_detach(&txr->no_desc_avail);
   662  evcnt_detach(&txr->tso_tx);
   886  struct tx_ring *txr = que->txr;  (local)
   901  IXGBE_TX_LOCK(txr);
  1306  struct tx_ring *txr = que->txr;  (local)
  1557  struct tx_ring *txr = sc->tx_rings;  (local)
  1724  struct tx_ring *txr = sc->tx_rings;  (local)
  1994  struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;  (local)
  2014  struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;  (local)
  2625  struct tx_ring *txr = sc->tx_rings;  (local)
  2814  struct tx_ring *txr = sc->tx_rings;  (local)
  3303  struct tx_ring *txr = que->txr;  (local)
  3366  struct tx_ring *txr = sc->tx_rings;  (local)
  [all...]
ixgbe.c
   670  struct tx_ring *txr = sc->tx_rings;  (local)
   677  for (i = 0; i < sc->num_queues; i++, txr++) {
   678  u64 tdba = txr->txdma.dma_paddr;
   683  int j = txr->me;
   709  txr->tail = IXGBE_TDT(j);
   711  txr->txr_no_space = false;
  1812  struct tx_ring *txr = sc->tx_rings;  (local)
  1885  for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
  1917  ixgbe_sysctl_tdh_handler, 0, (void *)txr,
  1924  ixgbe_sysctl_tdt_handler, 0, (void *)txr,
  2141  struct tx_ring *txr = sc->tx_rings;  (local)
  2281  struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;  (local)
  2306  struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;  (local)
  2820  struct tx_ring *txr = que->txr;  (local)
  3649  struct tx_ring *txr = sc->tx_rings;  (local)
  3702  struct tx_ring *txr = sc->tx_rings;  (local)
  4054  struct tx_ring *txr;  (local)
  4400  struct tx_ring *txr = &sc->tx_rings[i];  (local)
  4660  struct tx_ring *txr = que->txr;  (local)
  5300  struct tx_ring *txr = sc->tx_rings;  (local)
  6693  struct tx_ring *txr = que->txr;  (local)
  6752  struct tx_ring *txr = sc->tx_rings;  (local)
  6861  struct tx_ring *txr = sc->tx_rings;  (local)
  [all...]
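The ixgbe.c hits around line 678 load the ring's 64-bit DMA base address (tdba) before programming the queue registers. The hardware base registers are 32 bits wide, so the address must be split into low and high halves. A minimal sketch of that split; TDBAL/TDBAH are the customary ixgbe names for the two halves, used here only as labels.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t tdba = 0x0000000fedcba000ULL;  /* example DMA address */
        uint32_t tdbal = (uint32_t)(tdba & 0xffffffffULL); /* low  32 */
        uint32_t tdbah = (uint32_t)(tdba >> 32);           /* high 32 */

        printf("TDBAL=0x%08x TDBAH=0x%08x\n", tdbal, tdbah);
        return 0;
}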
ixgbe.h
   331  struct tx_ring *txr;  (member in struct ix_queue)
|
/src/sys/dev/hyperv/
if_hvn.c
  1104  hvn_transmit_common(struct ifnet *ifp, struct hvn_tx_ring *txr,
  1111  KASSERT(mutex_owned(&txr->txr_lock));
  1115  if (txr->txr_oactive)
  1117  if (txr->txr_suspended)
  1121  if (!hvn_txd_peek(txr)) {
  1123  txr->txr_oactive = 1;
  1124  txr->txr_evnodesc.ev_count++;
  1129  m = pcq_get(txr->txr_interq);
  1146  txd = hvn_txd_get(txr);
  1147  if (hvn_encap(txr, txd, m, l2hlen))
  1177  struct hvn_tx_ring *txr = &sc->sc_txr[0];  (local)
  1199  struct hvn_tx_ring *txr;  (local)
  1222  struct hvn_tx_ring *txr = arg;  (local)
  1927  struct hvn_tx_ring *txr;  (local)
  2041  struct hvn_tx_ring *txr;  (local)
  2107  struct hvn_tx_ring *txr;  (local)
  2164  struct hvn_tx_ring *txr;  (local)
  2402  struct hvn_tx_ring *txr;  (local)
  2592  struct hvn_tx_ring *txr;  (local)
  2946  struct hvn_tx_ring *txr;  (local)
  3020  struct hvn_tx_ring *txr;  (local)
  3035  struct hvn_tx_ring *txr;  (local)
  3626  struct hvn_tx_ring *txr = rxr->rxr_txr;  (local)
  [all...]
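The if_hvn.c hits lay out hvn_transmit_common()'s backpressure scheme: under txr_lock, return early when the ring is marked oactive or suspended; when hvn_txd_peek() finds no free transmit descriptor, set txr_oactive, bump the txr_evnodesc event counter, and let the completion path restart transmission; otherwise pull a packet with pcq_get(), take a descriptor with hvn_txd_get(), and encapsulate. A condensed runnable model of that loop; all fields and counts are simplified stand-ins for the driver's real state.

#include <stdbool.h>
#include <stdio.h>

struct toy_txring {
        bool oactive;           /* output stalled: no descriptors */
        bool suspended;         /* ring quiesced (e.g. detach)    */
        int free_txd;           /* free transmit descriptors      */
        int queued_pkts;        /* packets waiting in the pcq     */
        unsigned long evnodesc; /* "no descriptor" event counter  */
};

/* Models hvn_transmit_common(); caller holds txr->txr_lock. */
static void
toy_transmit_locked(struct toy_txring *txr)
{
        if (txr->oactive || txr->suspended)
                return;

        while (txr->queued_pkts > 0) {
                if (txr->free_txd == 0) {
                        /* No descriptor: mark the ring blocked and
                         * count the event; the completion path
                         * clears oactive and restarts us. */
                        txr->oactive = true;
                        txr->evnodesc++;
                        return;
                }
                txr->free_txd--;        /* hvn_txd_get()           */
                txr->queued_pkts--;     /* pcq_get() + hvn_encap() */
        }
}

int
main(void)
{
        struct toy_txring txr = { .free_txd = 2, .queued_pkts = 5 };

        toy_transmit_locked(&txr);
        printf("sent %d, stalled=%d, nodesc events=%lu\n",
            5 - txr.queued_pkts, txr.oactive, txr.evnodesc);
        return 0;
}

Checking the descriptor count before dequeuing, as the real code does with hvn_txd_peek(), matters: the packet stays queued instead of being dropped when the ring stalls.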
/src/sys/dev/pci/igc/
if_igc.c
   270  igc_txdesc_sync(struct tx_ring *txr, int id, int ops)
   273  bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
   659  struct tx_ring *txr = &sc->tx_rings[iq];  (local)
   664  txr->sc = sc;
   665  txr->txr_igcq = &sc->queues[iq];
   666  txr->me = iq;
   667  if (igc_dma_malloc(sc, tsize, &txr->txdma)) {
   672  txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr
   777  struct tx_ring *txr = &sc->tx_rings[iq];  (local)
  1637  struct tx_ring *txr = &sc->tx_rings[0];  /* queue 0 */  (local)
  1656  struct tx_ring *txr = &sc->tx_rings[qid];  (local)
  1914  struct tx_ring *txr = &sc->tx_rings[iq];  (local)
  2760  struct tx_ring *txr = iq->txr;  (local)
  2794  struct tx_ring *txr = iq->txr;  (local)
  2864  struct tx_ring *txr = iq->txr;  (local)
  2981  struct tx_ring *txr = &sc->tx_rings[iq];  (local)
  3039  struct tx_ring *txr = &sc->tx_rings[iq];  (local)
  3087  struct tx_ring *txr = &sc->tx_rings[iq];  (local)
  [all...]
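igc_txdesc_sync() at line 270 narrows the bus_dmamap_sync() to a single descriptor rather than the whole ring, which only takes the descriptor's byte offset and length within the DMA map. A sketch of that offset arithmetic, assuming the usual 16-byte advanced TX descriptor; the union layout below is illustrative, not the real union igc_adv_tx_desc.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative 16-byte descriptor, sized like an advanced TX
 * descriptor; only its size matters here. */
union toy_adv_tx_desc {
        struct {
                uint64_t buffer_addr;
                uint32_t cmd_type_len;
                uint32_t olinfo_status;
        } read;
        uint8_t raw[16];
};

/* Models the arithmetic behind igc_txdesc_sync(): syncing one
 * descriptor means syncing sizeof(desc) bytes starting at
 * id * sizeof(desc) within the ring's DMA map. */
static void
toy_txdesc_sync(int id, size_t *offp, size_t *lenp)
{
        *offp = (size_t)id * sizeof(union toy_adv_tx_desc);
        *lenp = sizeof(union toy_adv_tx_desc);
}

int
main(void)
{
        size_t off, len;

        toy_txdesc_sync(42, &off, &len);
        printf("descriptor 42: offset %zu, length %zu\n", off, len);
        return 0;
}

Per-descriptor syncing keeps the cache-maintenance cost proportional to the work actually done instead of the ring size.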
if_igc.h
   266  struct tx_ring *txr;  (member in struct igc_queue)
|
/src/sys/dev/pci/
if_iavf.c
  1099  struct iavf_tx_ring *txr;  (local)
  1110  txr = sc->sc_qps[i].qp_txr;
  1113  txr->txr_watchdog = IAVF_WATCHDOG_STOP;
  1155  struct iavf_tx_ring *txr;  (local)
  1184  txr = sc->sc_qps[i].qp_txr;
  1190  mutex_enter(&txr->txr_lock);
  1191  iavf_txr_clean(sc, txr);
  1192  mutex_exit(&txr->txr_lock);
  1211  iavf_watchdog(struct iavf_tx_ring *txr)
  1215  sc = txr->txr_sc
  2217  struct iavf_tx_ring *txr;  (local)
  3090  struct iavf_tx_ring *txr;  (local)
  3125  struct iavf_tx_ring *txr;  (local)
  3150  struct iavf_tx_ring *txr;  (local)
  3181  struct iavf_tx_ring *txr;  (local)
  3231  struct iavf_tx_ring *txr;  (local)
  3284  struct iavf_tx_ring *txr;  (local)
  3330  struct iavf_tx_ring *txr;  (local)
  4638  struct iavf_tx_ring *txr;  (local)
  4705  struct iavf_tx_ring *txr;  (local)
  [all...]
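The if_iavf.c hits show the per-ring TX watchdog: txr_watchdog is parked at IAVF_WATCHDOG_STOP while the ring is idle, and iavf_watchdog() presumably counts an armed timer down toward a hang declaration. A runnable model of that tick-down pattern; the constants and the arm/tick split are assumptions, not the driver's actual values.

#include <stdbool.h>
#include <stdio.h>

#define TOY_WATCHDOG_STOP  (-1)  /* models IAVF_WATCHDOG_STOP    */
#define TOY_WATCHDOG_TICKS 5     /* ticks until we call it hung  */

struct toy_txring {
        int watchdog;            /* models txr->txr_watchdog     */
};

/* Arm the timer when packets are handed to hardware; completions
 * would re-arm or stop it. */
static void
toy_watchdog_arm(struct toy_txring *txr)
{
        txr->watchdog = TOY_WATCHDOG_TICKS;
}

/* Periodic tick; returns true when the ring is declared hung. */
static bool
toy_watchdog_tick(struct toy_txring *txr)
{
        if (txr->watchdog == TOY_WATCHDOG_STOP)
                return false;           /* no TX pending: timer off */
        if (--txr->watchdog > 0)
                return false;           /* still waiting            */
        txr->watchdog = TOY_WATCHDOG_STOP;
        return true;                    /* timed out: reset ring    */
}

int
main(void)
{
        struct toy_txring txr = { .watchdog = TOY_WATCHDOG_STOP };
        int tick;

        toy_watchdog_arm(&txr);
        for (tick = 1; tick <= 6; tick++) {
                if (toy_watchdog_tick(&txr))
                        printf("tick %d: TX ring hung\n", tick);
        }
        return 0;
}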
if_ixl.c
  1967  struct ixl_tx_ring *txr;  (local)
  1980  txr = sc->sc_qps[i].qp_txr;
  1983  ixl_txr_config(sc, txr);
  1991  txr = sc->sc_qps[i].qp_txr;
  1998  ixl_wr(sc, txr->txr_tail, txr->txr_prod);
  2012  ixl_txr_qdis(sc, txr, 1);
  2018  if (ixl_txr_enabled(sc, txr) != 0)
  2180  struct ixl_tx_ring *txr;  (local)
  2190  txr = sc->sc_qps[i].qp_txr
  2359  struct ixl_tx_ring *txr = NULL;  (local)
  2863  struct ixl_tx_ring *txr;  (local)
  2887  struct ixl_tx_ring *txr;  (local)
  2925  struct ixl_tx_ring *txr = xtxr;  (local)
  3377  struct ixl_tx_ring *txr = qp->qp_txr;  (local)
  3411  struct ixl_tx_ring *txr;  (local)
  3457  struct ixl_tx_ring *txr = qp->qp_txr;  (local)
  3498  struct ixl_tx_ring *txr = qp->qp_txr;  (local)
  6043  struct ixl_tx_ring *txr;  (local)
  6202  struct ixl_tx_ring *txr;  (local)
  [all...]
if_vmx.c
   537  vmxnet3_txring_avail(struct vmxnet3_txring *txr)
   539  int avail = txr->vxtxr_next - txr->vxtxr_head - 1;
   540  return (avail < 0 ? (int)txr->vxtxr_ndesc + avail : avail);
  1160  struct vmxnet3_txring *txr;  (local)
  1163  txr = &txq->vxtxq_cmd_ring;
  1180  txr->vxtxr_ndesc = sc->vmx_ntxdescs;
  1181  txr->vxtxr_txbuf = kmem_zalloc(txr->vxtxr_ndesc *
  1265  struct vmxnet3_txring *txr;  (local)
  1386  struct vmxnet3_txring *txr;  (local)
  1439  struct vmxnet3_txring *txr;  (local)
  2121  struct vmxnet3_txring *txr;  (local)
  2678  struct vmxnet3_txring *txr;  (local)
  2801  struct vmxnet3_txring *txr;  (local)
  3105  struct vmxnet3_txring *txr;  (local)
  3211  struct vmxnet3_txring *txr;  (local)
  [all...]
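vmxnet3_txring_avail() at lines 537-540 is the classic free-slot computation for a circular descriptor ring: one index minus the other minus one, folded back by the ring size when the subtraction goes negative; the reserved slot keeps "full" distinguishable from "empty". A self-contained model with the same arithmetic; the struct is a stand-in, and treating head as producer and next as consumer is an assumption.

#include <assert.h>
#include <stdio.h>

struct toy_txring {
        unsigned int head;   /* assumed producer index */
        unsigned int next;   /* assumed consumer index */
        unsigned int ndesc;  /* descriptors in the ring */
};

/* Free slots between producer and consumer, keeping one slot
 * unused so that head == next always means "empty", never "full". */
static int
toy_txring_avail(const struct toy_txring *txr)
{
        int avail = (int)txr->next - (int)txr->head - 1;

        /* Negative means the producer has wrapped past the consumer;
         * adding ndesc folds the result back into [0, ndesc - 1]. */
        return avail < 0 ? (int)txr->ndesc + avail : avail;
}

int
main(void)
{
        struct toy_txring r = { .head = 0, .next = 0, .ndesc = 512 };

        assert(toy_txring_avail(&r) == 511);    /* empty ring  */
        r.head = 510;
        assert(toy_txring_avail(&r) == 1);      /* nearly full */
        r.head = 511;
        assert(toy_txring_avail(&r) == 0);      /* full        */
        printf("ok\n");
        return 0;
}

Sacrificing one descriptor avoids a separate count field or full/empty flag, so both indices can be updated by different contexts without extra synchronization on a shared counter.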
if_ena.c
   539  struct ena_ring *txr, *rxr;  (local)
   546  txr = &adapter->tx_ring[i];
   550  ena_init_io_rings_common(adapter, txr, i);
   554  txr->ring_size = adapter->tx_ring_size;
   555  txr->tx_max_header_size = ena_dev->tx_max_header_size;
   556  txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
   557  txr->smoothed_interval =
   561  txr->br = buf_ring_alloc(ena_buf_ring_size, M_DEVBUF,
   562  M_WAITOK, &txr->ring_mtx);
   565  ena_alloc_counters_tx(adapter, &txr->tx_stats, i)
   600  struct ena_ring *txr = &adapter->tx_ring[qid];  (local)
  [all...]
if_aq.c
  5917  struct aq_txring *txr = &sc->sc_queue[i].txring;  (local)
  5920  mutex_enter(&txr->txr_mutex);
  5921  txr->txr_stopping = false;
  5922  mutex_exit(&txr->txr_mutex);
  5940  struct aq_txring *txr = &sc->sc_queue[i].txring;  (local)
  5943  mutex_enter(&txr->txr_mutex);
  5944  txr->txr_stopping = true;
  5945  mutex_exit(&txr->txr_mutex);
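The if_aq.c hits show init and stop toggling txr_stopping strictly under txr_mutex, so any transmit or completion path that holds the mutex sees a stable value and can bail out cleanly. A small pthread model of that convention; all names are invented.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* The stop flag is only read or written with the ring mutex held,
 * mirroring the aq(4) pattern above. */
struct toy_txring {
        pthread_mutex_t mtx;
        bool stopping;
};

static void
toy_ring_stop(struct toy_txring *txr)
{
        pthread_mutex_lock(&txr->mtx);
        txr->stopping = true;    /* models txr->txr_stopping = true */
        pthread_mutex_unlock(&txr->mtx);
}

static bool
toy_ring_transmit(struct toy_txring *txr)
{
        bool sent = false;

        pthread_mutex_lock(&txr->mtx);
        if (!txr->stopping) {
                /* ... queue descriptors here ... */
                sent = true;
        }
        pthread_mutex_unlock(&txr->mtx);
        return sent;
}

int
main(void)
{
        struct toy_txring txr = {
                .mtx = PTHREAD_MUTEX_INITIALIZER, .stopping = false,
        };

        printf("transmit before stop: %d\n", toy_ring_transmit(&txr));
        toy_ring_stop(&txr);
        printf("transmit after stop:  %d\n", toy_ring_transmit(&txr));
        return 0;
}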
|
/src/sys/dev/ic/
dwc_eqos.c
  1683  struct eqos_ring *txr = &sc->sc_tx;  (local)
  1696  NULL, 0, &txr->cur,
  1702  NULL, 0, &txr->next,
  1708  NULL, 0, &txr->queued,
  1714  eqos_sysctl_tx_cur_handler, 0, (void *)txr,
  1720  eqos_sysctl_tx_end_handler, 0, (void *)txr,
|