/src/sys/dev/pci/ixgbe/
ixgbe_netmap.c
    211 struct tx_ring *txr = &sc->tx_rings[kring->ring_id];  local in function:ixgbe_netmap_txsync
    214 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
    249 * to prefetch the next slot and txr entry.
    257 __builtin_prefetch(&txr->tx_buffers[nic_i]);
    266 union ixgbe_adv_tx_desc *curr = &txr->tx_base[nic_i];
    267 struct ixgbe_tx_buf *txbuf = &txr->tx_buffers[nic_i];
    274 __builtin_prefetch(&txr->tx_buffers[nic_i + 1]);
    280 netmap_reload_map(na, txr->txtag, txbuf->map, addr);
    292 bus_dmamap_sync(txr->txtag, txbuf->map
    [all...]

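The ixgbe_netmap_txsync matches above (lines 257, 266, 274) show the slot-ahead prefetch idiom: touch the current ring entry while asking the CPU to pull in the next one. A minimal stand-alone sketch of that idiom follows; the descriptor/buffer types, NDESC, and the modulo wrap are invented stand-ins, only __builtin_prefetch and the walk order come from the listing.

#include <stdint.h>

/* hypothetical stand-ins for union ixgbe_adv_tx_desc / struct ixgbe_tx_buf */
struct desc { uint64_t addr; uint64_t cmd; };
struct buf  { void *map; };

#define NDESC 512	/* assumed ring size */

static void
walk_ring(struct desc *base, struct buf *bufs, unsigned head, unsigned n)
{
	unsigned i = head;

	/* warm the cache for the first slot before entering the loop */
	__builtin_prefetch(&bufs[i]);
	while (n-- > 0) {
		struct desc *curr = &base[i];
		struct buf *b = &bufs[i];

		/* fetch the next slot while we fill this one */
		__builtin_prefetch(&bufs[(i + 1) % NDESC]);

		curr->addr = (uint64_t)(uintptr_t)b->map;	/* placeholder for real encap work */
		i = (i + 1) % NDESC;
	}
}

int main(void)
{
	struct desc d[NDESC] = {0};
	struct buf  b[NDESC] = {0};
	walk_ring(d, b, 500, 8);	/* wraps past the end of the ring */
	return 0;
}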
ix_txrx.c
    143 ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
    147 struct ixgbe_softc *sc = txr->sc;
    149 IXGBE_TX_LOCK_ASSERT(txr);
    156 ixgbe_drain(ifp, txr);
    161 if (txr->txr_no_space)
    165 if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
    172 if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
    198 struct tx_ring *txr = sc->tx_rings;  local in function:ixgbe_legacy_start
    201 IXGBE_TX_LOCK(txr);
    202 ixgbe_legacy_start_locked(ifp, txr);
    216 struct tx_ring *txr;  local in function:ixgbe_mq_start
    358 struct tx_ring *txr = arg;  local in function:ixgbe_deferred_mq_start
    376 struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);  local in function:ixgbe_deferred_mq_start_work
    395 struct tx_ring *txr = que->txr;  local in function:ixgbe_drain_all
    743 struct tx_ring *txr = sc->tx_rings;  local in function:ixgbe_setup_transmit_structures
    757 struct tx_ring *txr = sc->tx_rings;  local in function:ixgbe_free_transmit_structures
    2360 struct tx_ring *txr;  local in function:ixgbe_allocate_queues
    [all...]

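Lines 161-172 outline the backpressure pattern in ixgbe_legacy_start_locked: stop dequeuing when the ring reports no space, and when ixgbe_xmit returns EAGAIN put the packet back and retry later. A toy user-space version of that control flow, where the queue and xmit helpers are stubs rather than the driver's:

#include <errno.h>
#include <stddef.h>

struct pkt { struct pkt *next; };

static struct pkt *queue_head;		/* stand-in for the interface send queue */

static struct pkt *dequeue(void)
{
	struct pkt *m = queue_head;
	if (m != NULL)
		queue_head = m->next;
	return m;
}

static void requeue(struct pkt *m) { m->next = queue_head; queue_head = m; }

static int ring_space = 0;		/* free descriptors; stub state */

static int xmit(struct pkt *m)		/* stand-in for ixgbe_xmit() */
{
	(void)m;
	if (ring_space == 0)
		return EAGAIN;		/* ring full */
	ring_space--;
	return 0;
}

static void start_locked(void)
{
	struct pkt *m;
	while ((m = dequeue()) != NULL) {
		if (xmit(m) == EAGAIN) {	/* no room: put it back, try later */
			requeue(m);
			break;
		}
	}
}

int main(void)
{
	struct pkt a = { NULL }, b = { NULL };
	requeue(&a); requeue(&b);
	ring_space = 1;
	start_locked();		/* sends one packet, requeues the other */
	return 0;
}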
ixv.c
    607 struct tx_ring *txr = sc->tx_rings;  local in function:ixv_detach
    652 txr = sc->tx_rings;
    653 for (int i = 0; i < sc->num_queues; i++, rxr++, txr++) {
    657 evcnt_detach(&txr->total_packets);
    659 evcnt_detach(&txr->pcq_drops);
    661 evcnt_detach(&txr->no_desc_avail);
    662 evcnt_detach(&txr->tso_tx);
    886 struct tx_ring *txr = que->txr;  local in function:ixv_msix_que
    901 IXGBE_TX_LOCK(txr);
    1306 struct tx_ring *txr = que->txr;  local in function:ixv_handle_timer
    1557 struct tx_ring *txr = sc->tx_rings;  local in function:ixv_free_deferred_handlers
    1724 struct tx_ring *txr = sc->tx_rings;  local in function:ixv_initialize_transmit_units
    1994 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;  local in function:ixv_sysctl_tdh_handler
    2014 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;  local in function:ixv_sysctl_tdt_handler
    2625 struct tx_ring *txr = sc->tx_rings;  local in function:ixv_add_stats_sysctls
    2814 struct tx_ring *txr = sc->tx_rings;  local in function:ixv_clear_evcnt
    3303 struct tx_ring *txr = que->txr;  local in function:ixv_handle_que
    3366 struct tx_ring *txr = sc->tx_rings;  local in function:ixv_allocate_msix
    [all...]

ixgbe.c
    670 struct tx_ring *txr = sc->tx_rings;  local in function:ixgbe_initialize_transmit_units
    677 for (i = 0; i < sc->num_queues; i++, txr++) {
    678 u64 tdba = txr->txdma.dma_paddr;
    683 int j = txr->me;
    709 txr->tail = IXGBE_TDT(j);
    711 txr->txr_no_space = false;
    1812 struct tx_ring *txr = sc->tx_rings;  local in function:ixgbe_add_hw_stats
    1885 for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
    1917 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
    1924 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
    2141 struct tx_ring *txr = sc->tx_rings;  local in function:ixgbe_clear_evcnt
    2281 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;  local in function:ixgbe_sysctl_tdh_handler
    2306 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;  local in function:ixgbe_sysctl_tdt_handler
    2820 struct tx_ring *txr = que->txr;  local in function:ixgbe_msix_que
    3649 struct tx_ring *txr = sc->tx_rings;  local in function:ixgbe_free_deferred_handlers
    3702 struct tx_ring *txr = sc->tx_rings;  local in function:ixgbe_detach
    4054 struct tx_ring *txr;  local in function:ixgbe_init_locked
    4400 struct tx_ring *txr = &sc->tx_rings[i];  local in function:ixgbe_configure_ivars
    4660 struct tx_ring *txr = que->txr;  local in function:ixgbe_handle_timer
    5300 struct tx_ring *txr = sc->tx_rings;  local in function:ixgbe_legacy_irq
    6694 struct tx_ring *txr = que->txr;  local in function:ixgbe_handle_que
    6753 struct tx_ring *txr = sc->tx_rings;  local in function:ixgbe_allocate_legacy
    6862 struct tx_ring *txr = sc->tx_rings;  local in function:ixgbe_allocate_msix
    [all...]

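Lines 677-683 and 709 of ixgbe_initialize_transmit_units walk every queue and program its descriptor base and tail from the ring's DMA physical address. A sketch of the usual split of a 64-bit base into low/high 32-bit register writes; the register offsets and the wr32 helper here are invented placeholders (the driver uses IXGBE_TDBAL(j)/IXGBE_TDBAH(j)/IXGBE_TDT(j) register macros), only the per-queue loop and the 64-bit split mirror the listing.

#include <stdint.h>
#include <stdio.h>

#define NQUEUES 4
#define NDESC   1024

/* invented offsets, for illustration only */
#define TDBAL(j) (0x6000u + 0x40u * (j))
#define TDBAH(j) (0x6004u + 0x40u * (j))
#define TDLEN(j) (0x6008u + 0x40u * (j))

static void wr32(uint32_t reg, uint32_t val)
{
	/* stub register write: a real driver writes through the BAR mapping */
	printf("reg %#x <- %#x\n", (unsigned)reg, (unsigned)val);
}

int main(void)
{
	for (unsigned j = 0; j < NQUEUES; j++) {
		uint64_t tdba = 0x100000000ULL + j * 0x10000;	/* fake DMA paddr */
		wr32(TDBAL(j), (uint32_t)(tdba & 0xffffffffu));	/* low 32 bits */
		wr32(TDBAH(j), (uint32_t)(tdba >> 32));		/* high 32 bits */
		wr32(TDLEN(j), NDESC * 16u);	/* advanced descriptors are 16 bytes */
	}
	return 0;
}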
ixgbe.h
    331 struct tx_ring *txr;  member in struct:ix_queue
|
/src/sys/dev/pci/igc/
if_igc.h
    266 struct tx_ring *txr;  member in struct:igc_queue
|
if_igc.c
    269 igc_txdesc_sync(struct tx_ring *txr, int id, int ops)
    272 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
    657 struct tx_ring *txr = &sc->tx_rings[iq];  local in function:igc_allocate_queues
    662 txr->sc = sc;
    663 txr->txr_igcq = &sc->queues[iq];
    664 txr->me = iq;
    665 if (igc_dma_malloc(sc, tsize, &txr->txdma)) {
    670 txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr
    724 for (struct tx_ring *txr = sc->tx_rings; txconf > 0; txr++, txconf--)  local in function:igc_allocate_queues
    775 struct tx_ring *txr = &sc->tx_rings[iq];  local in function:igc_free_queues
    1635 struct tx_ring *txr = &sc->tx_rings[0]; /* queue 0 */  local in function:igc_start
    1654 struct tx_ring *txr = &sc->tx_rings[qid];  local in function:igc_transmit
    1912 struct tx_ring *txr = &sc->tx_rings[iq];  local in function:igc_stop_locked
    2758 struct tx_ring *txr = iq->txr;  local in function:igc_intr_queue
    2792 struct tx_ring *txr = iq->txr;  local in function:igc_intr
    2862 struct tx_ring *txr = iq->txr;  local in function:igc_handle_queue
    2979 struct tx_ring *txr = &sc->tx_rings[iq];  local in function:igc_setup_transmit_structures
    3037 struct tx_ring *txr = &sc->tx_rings[iq];  local in function:igc_initialize_transmit_unit
    3085 struct tx_ring *txr = &sc->tx_rings[iq];  local in function:igc_free_transmit_structures
    [all...]

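igc_txdesc_sync (line 269) narrows bus_dmamap_sync to one descriptor instead of the whole map, which keeps cache maintenance proportional to the work done. The offset/length arguments are cut off in the listing, so this stand-alone analogue assumes the obvious math (index times descriptor size); dmamap_sync is a stub, not the kernel API.

#include <stdint.h>
#include <stdio.h>

/* rough 16-byte shape of union igc_adv_tx_desc, for sizing only */
union tx_desc {
	struct {
		uint64_t buffer_addr;
		uint32_t cmd_type_len;
		uint32_t olinfo_status;
	} read;
};

static void dmamap_sync(size_t off, size_t len, const char *ops)
{
	/* stands in for bus_dmamap_sync(tag, map, off, len, ops) */
	printf("sync %zu bytes at offset %zu (%s)\n", len, off, ops);
}

/* sync only descriptor `id`, assuming offset = id * sizeof(descriptor) */
static void txdesc_sync(int id, const char *ops)
{
	dmamap_sync((size_t)id * sizeof(union tx_desc),
	    sizeof(union tx_desc), ops);
}

int main(void)
{
	txdesc_sync(3, "POSTREAD|POSTWRITE");
	return 0;
}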
/src/sys/dev/ic/
dwc_eqos.c
    1685 struct eqos_ring *txr = &sc->sc_tx;  local in function:eqos_init_sysctls
    1698 NULL, 0, &txr->cur,
    1704 NULL, 0, &txr->next,
    1710 NULL, 0, &txr->queued,
    1716 eqos_sysctl_tx_cur_handler, 0, (void *)txr,
    1722 eqos_sysctl_tx_end_handler, 0, (void *)txr,
|
/src/sys/dev/pci/
if_ena.c
    539 struct ena_ring *txr, *rxr;  local in function:ena_init_io_rings
    546 txr = &adapter->tx_ring[i];
    550 ena_init_io_rings_common(adapter, txr, i);
    554 txr->ring_size = adapter->tx_ring_size;
    555 txr->tx_max_header_size = ena_dev->tx_max_header_size;
    556 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
    557 txr->smoothed_interval =
    561 txr->br = buf_ring_alloc(ena_buf_ring_size, M_DEVBUF,
    562 M_WAITOK, &txr->ring_mtx);
    565 ena_alloc_counters_tx(adapter, &txr->tx_stats, i)
    600 struct ena_ring *txr = &adapter->tx_ring[qid];  local in function:ena_free_io_ring_resources
    [all...]

if_vmx.c
    537 vmxnet3_txring_avail(struct vmxnet3_txring *txr)
    539 int avail = txr->vxtxr_next - txr->vxtxr_head - 1;
    540 return (avail < 0 ? (int)txr->vxtxr_ndesc + avail : avail);
    1160 struct vmxnet3_txring *txr;  local in function:vmxnet3_init_txq
    1163 txr = &txq->vxtxq_cmd_ring;
    1180 txr->vxtxr_ndesc = sc->vmx_ntxdescs;
    1181 txr->vxtxr_txbuf = kmem_zalloc(txr->vxtxr_ndesc *
    1265 struct vmxnet3_txring *txr;  local in function:vmxnet3_destroy_txq
    1386 struct vmxnet3_txring *txr;  local in function:vmxnet3_alloc_txq_data
    1439 struct vmxnet3_txring *txr;  local in function:vmxnet3_free_txq_data
    2121 struct vmxnet3_txring *txr;  local in function:vmxnet3_txq_eof
    2678 struct vmxnet3_txring *txr;  local in function:vmxnet3_txstop
    2801 struct vmxnet3_txring *txr;  local in function:vmxnet3_txinit
    3105 struct vmxnet3_txring *txr;  local in function:vmxnet3_txq_encap
    3211 struct vmxnet3_txring *txr;  local in function:vmxnet3_tx_common_locked
    [all...]

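Lines 537-540 quote the whole free-slot computation of vmxnet3_txring_avail: one slot is always kept empty so that head == next can only mean "empty", never "full". A minimal user-space sketch of that arithmetic; the struct name and field names are stand-ins, the math is taken directly from the listing.

#include <assert.h>

/* minimal ring state, mirroring vxtxr_head/vxtxr_next/vxtxr_ndesc */
struct txring {
	unsigned head;	/* producer index */
	unsigned next;	/* consumer index */
	unsigned ndesc;	/* ring size */
};

/* free-slot count for a circular ring, as in vmxnet3_txring_avail */
static int
ring_avail(const struct txring *r)
{
	int avail = (int)r->next - (int)r->head - 1;
	return (avail < 0 ? (int)r->ndesc + avail : avail);
}

int main(void)
{
	struct txring r = { .head = 0, .next = 0, .ndesc = 8 };
	assert(ring_avail(&r) == 7);	/* empty ring: ndesc - 1 free */
	r.head = 5; r.next = 2;
	assert(ring_avail(&r) == 4);	/* wrapped case */
	return 0;
}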
if_iavf.c
    1099 struct iavf_tx_ring *txr;  local in function:iavf_reinit
    1110 txr = sc->sc_qps[i].qp_txr;
    1113 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
    1155 struct iavf_tx_ring *txr;  local in function:iavf_stop_locked
    1184 txr = sc->sc_qps[i].qp_txr;
    1190 mutex_enter(&txr->txr_lock);
    1191 iavf_txr_clean(sc, txr);
    1192 mutex_exit(&txr->txr_lock);
    1211 iavf_watchdog(struct iavf_tx_ring *txr)
    1215 sc = txr->txr_sc
    2217 struct iavf_tx_ring *txr;  local in function:iavf_txr_alloc
    3090 struct iavf_tx_ring *txr;  local in function:iavf_handle_queue_common
    3125 struct iavf_tx_ring *txr;  local in function:iavf_start
    3150 struct iavf_tx_ring *txr;  local in function:iavf_transmit
    3181 struct iavf_tx_ring *txr;  local in function:iavf_deferred_transmit
    3231 struct iavf_tx_ring *txr;  local in function:iavf_intr
    3284 struct iavf_tx_ring *txr;  local in function:iavf_queue_intr
    3330 struct iavf_tx_ring *txr;  local in function:iavf_handle_queue
    4638 struct iavf_tx_ring *txr;  local in function:iavf_config_irq_map
    4705 struct iavf_tx_ring *txr;  local in function:iavf_config_vsi_queues
    [all...]

if_ixl.c
    1967 struct ixl_tx_ring *txr;  local in function:ixl_reinit
    1980 txr = sc->sc_qps[i].qp_txr;
    1983 ixl_txr_config(sc, txr);
    1991 txr = sc->sc_qps[i].qp_txr;
    1998 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
    2012 ixl_txr_qdis(sc, txr, 1);
    2018 if (ixl_txr_enabled(sc, txr) != 0)
    2180 struct ixl_tx_ring *txr;  local in function:ixl_stop_locked
    2190 txr = sc->sc_qps[i].qp_txr
    2359 struct ixl_tx_ring *txr = NULL;  local in function:ixl_txr_alloc
    2863 struct ixl_tx_ring *txr;  local in function:ixl_start
    2887 struct ixl_tx_ring *txr;  local in function:ixl_transmit
    2925 struct ixl_tx_ring *txr = xtxr;  local in function:ixl_deferred_transmit
    3377 struct ixl_tx_ring *txr = qp->qp_txr;  local in function:ixl_handle_queue_common
    3411 struct ixl_tx_ring *txr;  local in function:ixl_intr
    3457 struct ixl_tx_ring *txr = qp->qp_txr;  local in function:ixl_queue_intr
    3498 struct ixl_tx_ring *txr = qp->qp_txr;  local in function:ixl_handle_queue
    6043 struct ixl_tx_ring *txr;  local in function:ixl_setup_stats
    6202 struct ixl_tx_ring *txr;  local in function:ixl_teardown_stats
    [all...]

/src/sys/dev/hyperv/
if_hvn.c
    1104 hvn_transmit_common(struct ifnet *ifp, struct hvn_tx_ring *txr,
    1111 KASSERT(mutex_owned(&txr->txr_lock));
    1115 if (txr->txr_oactive)
    1117 if (txr->txr_suspended)
    1121 if (!hvn_txd_peek(txr)) {
    1123 txr->txr_oactive = 1;
    1124 txr->txr_evnodesc.ev_count++;
    1129 m = pcq_get(txr->txr_interq);
    1146 txd = hvn_txd_get(txr);
    1147 if (hvn_encap(txr, txd, m, l2hlen))
    1177 struct hvn_tx_ring *txr = &sc->sc_txr[0];  local in function:hvn_start
    1199 struct hvn_tx_ring *txr;  local in function:hvn_transmit
    1222 struct hvn_tx_ring *txr = arg;  local in function:hvn_deferred_transmit
    1927 struct hvn_tx_ring *txr;  local in function:hvn_tx_ring_create
    2041 struct hvn_tx_ring *txr;  local in function:hvn_tx_ring_destroy
    2107 struct hvn_tx_ring *txr;  local in function:hvn_set_chim_size
    2164 struct hvn_tx_ring *txr;  local in function:hvn_fixup_tx_data
    2402 struct hvn_tx_ring *txr;  local in function:hvn_channel_attach
    2592 struct hvn_tx_ring *txr;  local in function:hvn_set_txagg
    2946 struct hvn_tx_ring *txr;  local in function:hvn_suspend_data
    3020 struct hvn_tx_ring *txr;  local in function:hvn_resume_tx
    3035 struct hvn_tx_ring *txr;  local in function:hvn_resume_data
    3626 struct hvn_tx_ring *txr = rxr->rxr_txr;  local in function:hvn_nvs_intr1
    [all...]

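Lines 1111-1147 sketch the shape of hvn_transmit_common: assert the ring lock, bail out early when the ring is flagged busy or suspended, mark it oactive when descriptors run out, and otherwise keep pulling packets off the per-ring pcq. A toy skeleton of that control flow under the same checks; the state struct and stubs are invented, only the ordering of the tests follows the listing.

#include <stdbool.h>
#include <stddef.h>

/* toy state mirroring txr_oactive/txr_suspended and the descriptor pool */
struct txr {
	bool oactive;		/* ring already flagged full */
	bool suspended;		/* device paused */
	int free_txd;		/* free transmit descriptors */
};

static void *pcq_get_stub(void) { return NULL; }	/* stands in for pcq_get() */

static void
transmit_common(struct txr *t)
{
	if (t->oactive || t->suspended)
		return;

	for (;;) {
		if (t->free_txd == 0) {
			t->oactive = true;	/* the driver also bumps txr_evnodesc */
			break;
		}
		void *m = pcq_get_stub();
		if (m == NULL)
			break;			/* queue drained */
		t->free_txd--;			/* hvn_txd_get() + hvn_encap() */
	}
}

int main(void)
{
	struct txr t = { .oactive = false, .suspended = false, .free_txd = 4 };
	transmit_common(&t);	/* queue stub is empty, so this returns at once */
	return 0;
}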