Lines matching refs:txr — cross-reference hits for the transmit-ring pointer "txr" in the igc(4) Ethernet driver; the leading number on each hit is the source line.
270 igc_txdesc_sync(struct tx_ring *txr, int id, int ops)
273 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
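
The helper at line 270 wraps a partial bus_dmamap_sync() over one descriptor slot. A minimal reconstruction, assuming the ring is a single contiguous DMA mapping of num_tx_desc union igc_adv_tx_desc entries:

    static void
    igc_txdesc_sync(struct tx_ring *txr, int id, int ops)
    {
        /* Sync only the one descriptor at slot "id", not the whole ring. */
        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            id * sizeof(union igc_adv_tx_desc),
            sizeof(union igc_adv_tx_desc), ops);
    }
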
658 struct tx_ring *txr = &sc->tx_rings[iq];
663 txr->sc = sc;
664 txr->txr_igcq = &sc->queues[iq];
665 txr->me = iq;
666 if (igc_dma_malloc(sc, tsize, &txr->txdma)) {
671 txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr;
672 memset(txr->tx_base, 0, tsize);
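
Lines 658-672 are the per-queue ring allocation. A rough sketch of that loop as a fragment, assuming tsize is the ring size in bytes (the IGC_DBA_ALIGN rounding constant and the sc_nqueues field name are assumptions; the unwind on failure shown at lines 725-726 is only noted in a comment):

    /* tsize: descriptor ring size in bytes; alignment constant assumed */
    tsize = roundup2(sc->num_tx_desc * sizeof(union igc_adv_tx_desc),
        IGC_DBA_ALIGN);

    for (int iq = 0; iq < sc->sc_nqueues; iq++) {
        struct tx_ring *txr = &sc->tx_rings[iq];

        txr->sc = sc;
        txr->txr_igcq = &sc->queues[iq];
        txr->me = iq;
        if (igc_dma_malloc(sc, tsize, &txr->txdma)) {
            /* on failure the earlier rings are unwound with
             * igc_dma_free(), as at lines 725-726 */
            return ENOMEM;
        }
        txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr;
        memset(txr->tx_base, 0, tsize);
    }
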
716 q->txr = &sc->tx_rings[iq];
725 for (struct tx_ring *txr = sc->tx_rings; txconf > 0; txr++, txconf--)
726 igc_dma_free(sc, &txr->txdma);
776 struct tx_ring *txr = &sc->tx_rings[iq];
778 igc_dma_free(sc, &txr->txdma);
1636 struct tx_ring *txr = &sc->tx_rings[0]; /* queue 0 */
1637 mutex_enter(&txr->txr_lock);
1638 igc_tx_common_locked(ifp, txr, IGC_TX_START);
1639 mutex_exit(&txr->txr_lock);
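
Lines 1636-1639 are the ifnet if_start hook: the legacy start path always services queue 0 under that ring's lock. A minimal sketch, omitting the usual link-state and IFF_RUNNING checks:

    static void
    igc_start(struct ifnet *ifp)
    {
        struct igc_softc *sc = ifp->if_softc;
        struct tx_ring *txr = &sc->tx_rings[0];   /* queue 0 only */

        mutex_enter(&txr->txr_lock);
        igc_tx_common_locked(ifp, txr, IGC_TX_START);
        mutex_exit(&txr->txr_lock);
    }
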
1655 struct tx_ring *txr = &sc->tx_rings[qid];
1656 struct igc_queue *q = txr->txr_igcq;
1658 if (__predict_false(!pcq_put(txr->txr_interq, m))) {
1664 mutex_enter(&txr->txr_lock);
1665 igc_tx_common_locked(ifp, txr, IGC_TX_TRANSMIT);
1666 mutex_exit(&txr->txr_lock);
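
Lines 1655-1666 are the multi-queue if_transmit hook: the mbuf is staged on the per-ring pcq and the ring is then serviced under its lock. A sketch under the assumption that the queue is picked from the current CPU index (the exact mapping and the per-queue drop accounting are assumptions):

    static int
    igc_transmit(struct ifnet *ifp, struct mbuf *m)
    {
        struct igc_softc *sc = ifp->if_softc;
        const u_int qid = cpu_index(curcpu()) % sc->sc_nqueues; /* assumed */
        struct tx_ring *txr = &sc->tx_rings[qid];

        if (__predict_false(!pcq_put(txr->txr_interq, m))) {
            /* pcq full: the real code also bumps a per-queue drop counter */
            m_freem(m);
            return ENOBUFS;
        }

        mutex_enter(&txr->txr_lock);
        igc_tx_common_locked(ifp, txr, IGC_TX_TRANSMIT);
        mutex_exit(&txr->txr_lock);

        return 0;
    }
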
1672 igc_tx_common_locked(struct ifnet *ifp, struct tx_ring *txr, int caller)
1675 struct igc_queue *q = txr->txr_igcq;
1680 prod = txr->next_avail_desc;
1681 free = txr->next_to_clean;
1688 txr->me, prod, txr->next_to_clean, free);
1699 m = pcq_get(txr->txr_interq);
1705 struct igc_tx_buf *txbuf = &txr->tx_buffers[prod];
1709 igc_load_mbuf(q, txr->txdma.dma_tag, map, m))) {
1718 if (igc_tx_ctx_setup(txr, m, prod, &ctx_cmd_type_len,
1726 bus_dmamap_sync(txr->txdma.dma_tag, map, 0,
1730 union igc_adv_tx_desc *txdesc = &txr->tx_base[prod];
1740 igc_txdesc_sync(txr, prod,
1746 igc_txdesc_sync(txr, prod,
1769 txr->next_avail_desc = prod;
1770 IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), prod);
1775 txr->me, prod, txr->next_to_clean, free);
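
Lines 1672-1775 are the core transmit routine shared by the start and transmit paths. A condensed reconstruction follows; it assumes struct igc_tx_buf carries map/m_head/eop_index fields, simplifies the descriptor command-flag handling, and only approximates the "free" accounting printed at lines 1688/1775:

    static void
    igc_tx_common_locked(struct ifnet *ifp, struct tx_ring *txr, int caller)
    {
        struct igc_softc *sc = txr->sc;
        struct igc_queue *q = txr->txr_igcq;
        int prod, free, last = -1;

        prod = txr->next_avail_desc;
        free = txr->next_to_clean;
        if (free <= prod)
            free += sc->num_tx_desc;
        free -= prod;

        for (;;) {
            struct mbuf *m;

            /* if_transmit packets come from the pcq, if_start from if_snd */
            if (caller == IGC_TX_TRANSMIT)
                m = pcq_get(txr->txr_interq);
            else
                IFQ_DEQUEUE(&ifp->if_snd, m);
            if (m == NULL)
                break;

            struct igc_tx_buf *txbuf = &txr->tx_buffers[prod];
            bus_dmamap_t map = txbuf->map;

            if (__predict_false(
                igc_load_mbuf(q, txr->txdma.dma_tag, map, m))) {
                m_freem(m);
                continue;
            }

            if (free <= map->dm_nsegs + 1) {
                /* ring full: the real code requeues/stops instead */
                bus_dmamap_unload(txr->txdma.dma_tag, map);
                m_freem(m);
                break;
            }
            free -= map->dm_nsegs + 1;

            uint32_t ctx_cmd_type_len = 0, olinfo_status = 0;
            if (igc_tx_ctx_setup(txr, m, prod, &ctx_cmd_type_len,
                &olinfo_status)) {
                /* a context descriptor was written into slot prod */
                prod = (prod + 1) % sc->num_tx_desc;
            }

            bus_dmamap_sync(txr->txdma.dma_tag, map, 0,
                map->dm_mapsize, BUS_DMASYNC_PREWRITE);

            for (int i = 0; i < map->dm_nsegs; i++) {
                union igc_adv_tx_desc *txdesc = &txr->tx_base[prod];

                igc_txdesc_sync(txr, prod,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                txdesc->read.buffer_addr =
                    htole64(map->dm_segs[i].ds_addr);
                /* EOP/IFCS/DEXT command bits omitted for brevity */
                txdesc->read.cmd_type_len =
                    htole32(ctx_cmd_type_len | map->dm_segs[i].ds_len);
                txdesc->read.olinfo_status = htole32(olinfo_status);
                igc_txdesc_sync(txr, prod,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

                last = prod;
                prod = (prod + 1) % sc->num_tx_desc;
            }

            txbuf->m_head = m;
            txbuf->eop_index = last;  /* field name assumed */
        }

        txr->next_avail_desc = prod;
        /* notify hardware of the new tail */
        IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), prod);
    }
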
1781 igc_txeof(struct tx_ring *txr, u_int limit)
1783 struct igc_softc *sc = txr->sc;
1788 prod = txr->next_avail_desc;
1789 cons = txr->next_to_clean;
1793 txr->me, cons, prod);
1798 struct igc_tx_buf *txbuf = &txr->tx_buffers[cons];
1804 union igc_adv_tx_desc *txdesc = &txr->tx_base[last];
1805 igc_txdesc_sync(txr, last, BUS_DMASYNC_POSTREAD);
1807 igc_txdesc_sync(txr, last, BUS_DMASYNC_PREREAD);
1817 txr->me, cons, last, prod, status);
1824 txr->me, cons, last, prod, status);
1829 bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1831 bus_dmamap_unload(txr->txdma.dma_tag, map);
1840 txr->next_to_clean = cons;
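
Lines 1781-1840 are the transmit-completion scan. A sketch of the cleanup loop, assuming igc_txeof() returns whether work remains after the limit is exhausted and that igc_tx_buf has m_head/eop_index fields:

    static bool
    igc_txeof(struct tx_ring *txr, u_int limit)
    {
        struct igc_softc *sc = txr->sc;
        int prod, cons;
        bool more = false;

        prod = txr->next_avail_desc;
        cons = txr->next_to_clean;

        while (cons != prod) {
            if (limit-- == 0) {
                more = true;    /* finish later (e.g. via softint) */
                break;
            }

            struct igc_tx_buf *txbuf = &txr->tx_buffers[cons];
            const int last = txbuf->eop_index;  /* field name assumed */

            /* peek at the write-back status of the last descriptor */
            union igc_adv_tx_desc *txdesc = &txr->tx_base[last];
            igc_txdesc_sync(txr, last, BUS_DMASYNC_POSTREAD);
            const uint32_t status = le32toh(txdesc->wb.status);
            igc_txdesc_sync(txr, last, BUS_DMASYNC_PREREAD);

            if (!(status & IGC_TXD_STAT_DD))
                break;  /* hardware has not finished this packet yet */

            bus_dmamap_t map = txbuf->map;
            bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(txr->txdma.dma_tag, map);
            if (txbuf->m_head != NULL) {
                m_freem(txbuf->m_head);
                txbuf->m_head = NULL;
            }

            cons = (last + 1) % sc->num_tx_desc;
        }

        txr->next_to_clean = cons;
        return more;
    }
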
1913 struct tx_ring *txr = &sc->tx_rings[iq];
1915 igc_withdraw_transmit_packets(txr, false);
2759 struct tx_ring *txr = iq->txr;
2769 mutex_enter(&txr->txr_lock);
2770 txmore = igc_txeof(txr, txlimit);
2771 mutex_exit(&txr->txr_lock);
2793 struct tx_ring *txr = iq->txr;
2831 mutex_enter(&txr->txr_lock);
2832 txmore = igc_txeof(txr, txlimit);
2833 mutex_exit(&txr->txr_lock);
2863 struct tx_ring *txr = iq->txr;
2871 mutex_enter(&txr->txr_lock);
2872 txmore = igc_txeof(txr, txlimit);
2874 if (txr->me == 0) {
2877 igc_tx_common_locked(ifp, txr, IGC_TX_START);
2879 mutex_exit(&txr->txr_lock);
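
Lines 2759-2879 show the same pattern from three interrupt/softint paths: take the ring lock, run igc_txeof() under a work limit, and (in the handler that also owns the if_start path) restart transmission on queue 0. A fragment-level sketch of that shared shape; txlimit/txmore are locals of the handler and the rx half and reschedule decision are only indicated in comments:

    mutex_enter(&txr->txr_lock);
    txmore = igc_txeof(txr, txlimit);
    if (txr->me == 0) {
        /* the if_start path only feeds queue 0, so drain it here
         * while the ring lock is still held */
        igc_tx_common_locked(ifp, txr, IGC_TX_START);
    }
    mutex_exit(&txr->txr_lock);

    /* rx processing follows the same pattern under the rx ring lock;
     * txmore/rxmore then decide between rescheduling the softint and
     * re-enabling the queue interrupt */
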
2939 igc_allocate_transmit_buffers(struct tx_ring *txr)
2941 struct igc_softc *sc = txr->sc;
2944 txr->tx_buffers =
2946 txr->txtag = txr->txdma.dma_tag;
2950 struct igc_tx_buf *txbuf = &txr->tx_buffers[id];
2952 error = bus_dmamap_create(txr->txdma.dma_tag,
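
Lines 2939-2952 allocate the per-slot bookkeeping and DMA maps. A sketch under the assumption that the buffer array is kmem-allocated and one map is created per descriptor; IGC_TSO_SIZE and IGC_MAX_SCATTER are hypothetical stand-ins for the driver's own size/segment limits:

    static int
    igc_allocate_transmit_buffers(struct tx_ring *txr)
    {
        struct igc_softc *sc = txr->sc;
        int error;

        txr->tx_buffers = kmem_zalloc(
            sc->num_tx_desc * sizeof(struct igc_tx_buf), KM_SLEEP);
        txr->txtag = txr->txdma.dma_tag;

        for (int id = 0; id < sc->num_tx_desc; id++) {
            struct igc_tx_buf *txbuf = &txr->tx_buffers[id];

            /* IGC_TSO_SIZE / IGC_MAX_SCATTER are assumed limits */
            error = bus_dmamap_create(txr->txdma.dma_tag,
                IGC_TSO_SIZE, IGC_MAX_SCATTER, PAGE_SIZE, 0,
                BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txbuf->map);
            if (error != 0)
                return error;   /* caller tears the ring down */
        }

        return 0;
    }
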
2980 struct tx_ring *txr = &sc->tx_rings[iq];
2982 if (igc_setup_transmit_ring(txr))
2998 igc_setup_transmit_ring(struct tx_ring *txr)
3000 struct igc_softc *sc = txr->sc;
3003 if (igc_allocate_transmit_buffers(txr))
3007 memset(txr->tx_base, 0,
3011 txr->next_avail_desc = 0;
3012 txr->next_to_clean = 0;
3014 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
3015 txr->txdma.dma_map->dm_mapsize,
3018 txr->txr_interq = pcq_create(sc->num_tx_desc, KM_SLEEP);
3020 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
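
Lines 2998-3020 reset one ring for use: allocate the buffers, zero the descriptors and indices, push the cleared ring to the device, then create the pcq and the ring lock. A close reconstruction:

    static int
    igc_setup_transmit_ring(struct tx_ring *txr)
    {
        struct igc_softc *sc = txr->sc;

        if (igc_allocate_transmit_buffers(txr))
            return ENOMEM;

        /* Clear all descriptors and reset the producer/consumer indices. */
        memset(txr->tx_base, 0,
            sc->num_tx_desc * sizeof(union igc_adv_tx_desc));
        txr->next_avail_desc = 0;
        txr->next_to_clean = 0;

        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
            txr->txdma.dma_map->dm_mapsize,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        txr->txr_interq = pcq_create(sc->num_tx_desc, KM_SLEEP);
        mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);

        return 0;
    }
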
3038 struct tx_ring *txr = &sc->tx_rings[iq];
3040 txr->txdma.dma_map->dm_segs[0].ds_addr;
3049 IGC_WRITE_REG(hw, IGC_TDT(iq), 0 /* XXX txr->next_avail_desc */);
3052 txr->watchdog_timer = 0;
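
Lines 3038-3052 belong to the per-queue body of the transmit-unit initialization: the ring's bus address, length, and head/tail are programmed into the hardware. A fragment-level sketch, with hw = &sc->hw; the IGC_TDBAL/TDBAH/TDLEN/TDH register macros follow the usual igc/e1000 naming and are assumptions here:

    struct tx_ring *txr = &sc->tx_rings[iq];
    const uint64_t bus_addr = txr->txdma.dma_map->dm_segs[0].ds_addr;

    /* Program ring base address, length, and head/tail pointers. */
    IGC_WRITE_REG(hw, IGC_TDLEN(iq),
        sc->num_tx_desc * sizeof(union igc_adv_tx_desc));
    IGC_WRITE_REG(hw, IGC_TDBAH(iq), (uint32_t)(bus_addr >> 32));
    IGC_WRITE_REG(hw, IGC_TDBAL(iq), (uint32_t)bus_addr);

    IGC_WRITE_REG(hw, IGC_TDH(iq), 0);
    IGC_WRITE_REG(hw, IGC_TDT(iq), 0 /* XXX txr->next_avail_desc */);

    txr->watchdog_timer = 0;
    /* the queue itself is then enabled via IGC_TXDCTL(iq); the threshold
     * bits are omitted here */
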
3086 struct tx_ring *txr = &sc->tx_rings[iq];
3088 igc_free_transmit_buffers(txr);
3098 igc_free_transmit_buffers(struct tx_ring *txr)
3100 struct igc_softc *sc = txr->sc;
3102 if (txr->tx_buffers == NULL)
3105 igc_withdraw_transmit_packets(txr, true);
3107 kmem_free(txr->tx_buffers,
3109 txr->tx_buffers = NULL;
3110 txr->txtag = NULL;
3112 pcq_destroy(txr->txr_interq);
3113 mutex_destroy(&txr->txr_lock);
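
Lines 3098-3113 are the teardown counterpart of the setup path: withdraw any in-flight packets (destroying the DMA maps), free the buffer array, and destroy the pcq and lock. A close reconstruction:

    static void
    igc_free_transmit_buffers(struct tx_ring *txr)
    {
        struct igc_softc *sc = txr->sc;

        if (txr->tx_buffers == NULL)
            return;

        /* Unload and destroy the per-slot DMA maps, freeing any mbufs. */
        igc_withdraw_transmit_packets(txr, true);

        kmem_free(txr->tx_buffers,
            sc->num_tx_desc * sizeof(struct igc_tx_buf));
        txr->tx_buffers = NULL;
        txr->txtag = NULL;

        pcq_destroy(txr->txr_interq);
        mutex_destroy(&txr->txr_lock);
    }
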
3122 igc_withdraw_transmit_packets(struct tx_ring *txr, bool destroy)
3124 struct igc_softc *sc = txr->sc;
3125 struct igc_queue *q = txr->txr_igcq;
3127 mutex_enter(&txr->txr_lock);
3130 union igc_adv_tx_desc *txdesc = &txr->tx_base[id];
3132 igc_txdesc_sync(txr, id,
3137 igc_txdesc_sync(txr, id,
3140 struct igc_tx_buf *txbuf = &txr->tx_buffers[id];
3144 bus_dmamap_sync(txr->txdma.dma_tag, map,
3146 bus_dmamap_unload(txr->txdma.dma_tag, map);
3151 bus_dmamap_destroy(txr->txdma.dma_tag, map);
3156 txr->next_avail_desc = 0;
3157 txr->next_to_clean = 0;
3161 while ((m = pcq_get(txr->txr_interq)) != NULL) {
3166 mutex_exit(&txr->txr_lock);
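
Lines 3122-3166 discard everything queued on a ring, optionally destroying the DMA maps (destroy == true on final teardown, false on igc_stop as at line 1915). A sketch, again assuming the m_head/map field names:

    static void
    igc_withdraw_transmit_packets(struct tx_ring *txr, bool destroy)
    {
        struct igc_softc *sc = txr->sc;
        struct mbuf *m;

        mutex_enter(&txr->txr_lock);

        for (int id = 0; id < sc->num_tx_desc; id++) {
            union igc_adv_tx_desc *txdesc = &txr->tx_base[id];

            /* Clear the descriptor itself. */
            igc_txdesc_sync(txr, id,
                BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
            memset(txdesc, 0, sizeof(*txdesc));
            igc_txdesc_sync(txr, id,
                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

            struct igc_tx_buf *txbuf = &txr->tx_buffers[id];
            bus_dmamap_t map = txbuf->map;

            if (txbuf->m_head != NULL) {
                bus_dmamap_sync(txr->txdma.dma_tag, map,
                    0, map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(txr->txdma.dma_tag, map);
                m_freem(txbuf->m_head);
                txbuf->m_head = NULL;
            }
            if (destroy && map != NULL) {
                bus_dmamap_destroy(txr->txdma.dma_tag, map);
                txbuf->map = NULL;
            }
        }

        txr->next_avail_desc = 0;
        txr->next_to_clean = 0;

        /* Drop anything still staged on the if_transmit pcq. */
        while ((m = pcq_get(txr->txr_interq)) != NULL)
            m_freem(m);

        mutex_exit(&txr->txr_lock);
    }
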
3177 igc_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, int prod,
3306 (struct igc_adv_tx_context_desc *)&txr->tx_base[prod];
3308 igc_txdesc_sync(txr, prod,
3317 igc_txdesc_sync(txr, prod,
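
Lines 3177-3317 build the offload context descriptor: the slot at prod is reinterpreted as struct igc_adv_tx_context_desc and filled from the mbuf's checksum/VLAN metadata. A fragment-level sketch of the final write-out; vlan_macip_lens and type_tucmd_mlhl are locals computed earlier in the function, and the context-descriptor field names follow the usual igc layout and are assumptions:

    struct igc_adv_tx_context_desc *txdesc =
        (struct igc_adv_tx_context_desc *)&txr->tx_base[prod];

    igc_txdesc_sync(txr, prod,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    txdesc->vlan_macip_lens = htole32(vlan_macip_lens);
    txdesc->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
    txdesc->mss_l4len_idx = htole32(0);
    igc_txdesc_sync(txr, prod,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /* the caller then advances prod past this context descriptor before
     * writing the data descriptors */
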