
Lines Matching defs:txr

269 igc_txdesc_sync(struct tx_ring *txr, int id, int ops)
272 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
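
The matches at 269 and 272 are the body of igc_txdesc_sync(). A minimal sketch of the helper, reconstructed from those two fragments (the arithmetic converting the descriptor index into a byte offset is the obvious reading, not taken verbatim from the source):

static inline void
igc_txdesc_sync(struct tx_ring *txr, int id, int ops)
{

	/* Sync only the one descriptor slot being touched, not the whole ring. */
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    id * sizeof(union igc_adv_tx_desc), sizeof(union igc_adv_tx_desc),
	    ops);
}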
657 struct tx_ring *txr = &sc->tx_rings[iq];
662 txr->sc = sc;
663 txr->txr_igcq = &sc->queues[iq];
664 txr->me = iq;
665 if (igc_dma_malloc(sc, tsize, &txr->txdma)) {
670 txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr;
671 memset(txr->tx_base, 0, tsize);
715 q->txr = &sc->tx_rings[iq];
724 for (struct tx_ring *txr = sc->tx_rings; txconf > 0; txr++, txconf--)
725 igc_dma_free(sc, &txr->txdma);
775 struct tx_ring *txr = &sc->tx_rings[iq];
777 igc_dma_free(sc, &txr->txdma);
1635 struct tx_ring *txr = &sc->tx_rings[0]; /* queue 0 */
1636 mutex_enter(&txr->txr_lock);
1637 igc_tx_common_locked(ifp, txr, IGC_TX_START);
1638 mutex_exit(&txr->txr_lock);
1654 struct tx_ring *txr = &sc->tx_rings[qid];
1655 struct igc_queue *q = txr->txr_igcq;
1657 if (__predict_false(!pcq_put(txr->txr_interq, m))) {
1663 mutex_enter(&txr->txr_lock);
1664 igc_tx_common_locked(ifp, txr, IGC_TX_TRANSMIT);
1665 mutex_exit(&txr->txr_lock);
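
Matches 1635-1638 and 1654-1665 are the two transmit entry points: the legacy if_start path always uses ring 0 and calls igc_tx_common_locked() with IGC_TX_START, while the multiqueue if_transmit path enqueues the mbuf on the ring's pcq and then drains it under txr_lock with IGC_TX_TRANSMIT. A hedged sketch of the if_transmit side, with queue selection and the per-queue counters (the igc_queue at 1655) elided:

static int
igc_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct igc_softc *sc = ifp->if_softc;
	const u_int qid = 0;	/* placeholder: real queue selection not shown above */
	struct tx_ring *txr = &sc->tx_rings[qid];

	/* Park the packet on the ring's software queue; drop it if full. */
	if (__predict_false(!pcq_put(txr->txr_interq, m))) {
		m_freem(m);
		return ENOBUFS;
	}

	mutex_enter(&txr->txr_lock);
	igc_tx_common_locked(ifp, txr, IGC_TX_TRANSMIT);
	mutex_exit(&txr->txr_lock);

	return 0;
}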
1671 igc_tx_common_locked(struct ifnet *ifp, struct tx_ring *txr, int caller)
1674 struct igc_queue *q = txr->txr_igcq;
1679 prod = txr->next_avail_desc;
1680 free = txr->next_to_clean;
1687 txr->me, prod, txr->next_to_clean, free);
1698 m = pcq_get(txr->txr_interq);
1704 struct igc_tx_buf *txbuf = &txr->tx_buffers[prod];
1708 igc_load_mbuf(q, txr->txdma.dma_tag, map, m))) {
1717 if (igc_tx_ctx_setup(txr, m, prod, &ctx_cmd_type_len,
1725 bus_dmamap_sync(txr->txdma.dma_tag, map, 0,
1729 union igc_adv_tx_desc *txdesc = &txr->tx_base[prod];
1739 igc_txdesc_sync(txr, prod,
1745 igc_txdesc_sync(txr, prod,
1768 txr->next_avail_desc = prod;
1769 IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), prod);
1774 txr->me, prod, txr->next_to_clean, free);
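
Matches 1671-1774 are the encapsulation loop itself, igc_tx_common_locked(): dequeue a packet, DMA-map it, optionally emit an offload context descriptor (1717), fill data descriptors with bracketing syncs (1739/1745), then publish the new producer index to the tail register (1768/1769). The skeleton below is a simplified view only, handling just the pcq-fed IGC_TX_TRANSMIT path with no offload context, no free-descriptor accounting, and the command/status bits left out:

static void
igc_tx_common_locked(struct ifnet *ifp, struct tx_ring *txr, int caller)
{
	struct igc_softc *sc = txr->sc;
	int prod = txr->next_avail_desc;
	struct mbuf *m;

	while (caller == IGC_TX_TRANSMIT &&
	    (m = pcq_get(txr->txr_interq)) != NULL) {
		struct igc_tx_buf *txbuf = &txr->tx_buffers[prod];
		bus_dmamap_t map = txbuf->map;

		/* DMA-map the mbuf chain; give up on this packet on failure. */
		if (igc_load_mbuf(txr->txr_igcq, txr->txdma.dma_tag, map, m)) {
			m_freem(m);
			continue;
		}

		bus_dmamap_sync(txr->txdma.dma_tag, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* One advanced data descriptor per DMA segment. */
		for (int i = 0; i < map->dm_nsegs; i++) {
			union igc_adv_tx_desc *txdesc = &txr->tx_base[prod];

			igc_txdesc_sync(txr, prod, BUS_DMASYNC_POSTWRITE);
			txdesc->read.buffer_addr =
			    htole64(map->dm_segs[i].ds_addr);
			txdesc->read.cmd_type_len =
			    htole32(map->dm_segs[i].ds_len);	/* cmd bits omitted */
			igc_txdesc_sync(txr, prod, BUS_DMASYNC_PREWRITE);

			if (++prod == sc->num_tx_desc)
				prod = 0;
		}

		txbuf->m_head = m;	/* remembered for igc_txeof() */
	}

	/* Publish the new producer index to the hardware tail register. */
	txr->next_avail_desc = prod;
	IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), prod);
}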
1780 igc_txeof(struct tx_ring *txr, u_int limit)
1782 struct igc_softc *sc = txr->sc;
1787 prod = txr->next_avail_desc;
1788 cons = txr->next_to_clean;
1792 txr->me, cons, prod);
1797 struct igc_tx_buf *txbuf = &txr->tx_buffers[cons];
1803 union igc_adv_tx_desc *txdesc = &txr->tx_base[last];
1804 igc_txdesc_sync(txr, last, BUS_DMASYNC_POSTREAD);
1806 igc_txdesc_sync(txr, last, BUS_DMASYNC_PREREAD);
1816 txr->me, cons, last, prod, status);
1823 txr->me, cons, last, prod, status);
1828 bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1830 bus_dmamap_unload(txr->txdma.dma_tag, map);
1839 txr->next_to_clean = cons;
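
Matches 1780-1839 form the completion routine igc_txeof(). Read together, the loop peeks at the write-back status of each packet's last descriptor, stops when the DD bit is not yet set, and otherwise unloads the DMA map and frees the mbuf. A sketch with statistics and watchdog handling left out; the eop_index and m_head field names are assumptions inferred from the fragments:

static int
igc_txeof(struct tx_ring *txr, u_int limit)
{
	struct igc_softc *sc = txr->sc;
	const int prod = txr->next_avail_desc;
	int cons = txr->next_to_clean;

	while (cons != prod && limit-- > 0) {
		struct igc_tx_buf *txbuf = &txr->tx_buffers[cons];
		const int last = txbuf->eop_index;	/* assumed field name */

		/* Peek at the write-back status of the packet's last descriptor. */
		union igc_adv_tx_desc *txdesc = &txr->tx_base[last];
		igc_txdesc_sync(txr, last, BUS_DMASYNC_POSTREAD);
		const uint32_t status = le32toh(txdesc->wb.status);
		igc_txdesc_sync(txr, last, BUS_DMASYNC_PREREAD);

		if ((status & IGC_TXD_STAT_DD) == 0)
			break;		/* hardware has not finished this packet */

		/* Done: tear down the DMA mapping and release the mbuf. */
		bus_dmamap_sync(txr->txdma.dma_tag, txbuf->map, 0,
		    txbuf->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
		m_freem(txbuf->m_head);
		txbuf->m_head = NULL;

		cons = last + 1;
		if (cons == sc->num_tx_desc)
			cons = 0;
	}

	txr->next_to_clean = cons;

	/* Nonzero means descriptors remain and the caller should poll again. */
	return cons != prod;
}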
1912 struct tx_ring *txr = &sc->tx_rings[iq];
1914 igc_withdraw_transmit_packets(txr, false);
2758 struct tx_ring *txr = iq->txr;
2768 mutex_enter(&txr->txr_lock);
2769 txmore = igc_txeof(txr, txlimit);
2770 mutex_exit(&txr->txr_lock);
2792 struct tx_ring *txr = iq->txr;
2830 mutex_enter(&txr->txr_lock);
2831 txmore = igc_txeof(txr, txlimit);
2832 mutex_exit(&txr->txr_lock);
2862 struct tx_ring *txr = iq->txr;
2870 mutex_enter(&txr->txr_lock);
2871 txmore = igc_txeof(txr, txlimit);
2873 if (txr->me == 0) {
2876 igc_tx_common_locked(ifp, txr, IGC_TX_START);
2878 mutex_exit(&txr->txr_lock);
2938 igc_allocate_transmit_buffers(struct tx_ring *txr)
2940 struct igc_softc *sc = txr->sc;
2943 txr->tx_buffers =
2945 txr->txtag = txr->txdma.dma_tag;
2949 struct igc_tx_buf *txbuf = &txr->tx_buffers[id];
2951 error = bus_dmamap_create(txr->txdma.dma_tag,
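
Matches 2938-2951 are igc_allocate_transmit_buffers(): the per-ring tx_buffers array is allocated and each slot gets its own DMA map. A sketch of that shape; the map size and segment-count constants (IGC_TSO_SIZE, IGC_MAX_SCATTER) are assumptions, not taken from the matched lines:

static int
igc_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct igc_softc *sc = txr->sc;
	int error;

	/* One software tx buffer per descriptor slot. */
	txr->tx_buffers = kmem_zalloc(
	    sc->num_tx_desc * sizeof(struct igc_tx_buf), KM_SLEEP);
	txr->txtag = txr->txdma.dma_tag;

	for (int id = 0; id < sc->num_tx_desc; id++) {
		struct igc_tx_buf *txbuf = &txr->tx_buffers[id];

		/* Size/segment limits below are assumed, not from the source. */
		error = bus_dmamap_create(txr->txdma.dma_tag,
		    IGC_TSO_SIZE, IGC_MAX_SCATTER, PAGE_SIZE, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txbuf->map);
		if (error)
			return error;
	}

	return 0;
}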
2979 struct tx_ring *txr = &sc->tx_rings[iq];
2981 if (igc_setup_transmit_ring(txr))
2997 igc_setup_transmit_ring(struct tx_ring *txr)
2999 struct igc_softc *sc = txr->sc;
3002 if (igc_allocate_transmit_buffers(txr))
3006 memset(txr->tx_base, 0,
3010 txr->next_avail_desc = 0;
3011 txr->next_to_clean = 0;
3013 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
3014 txr->txdma.dma_map->dm_mapsize,
3017 txr->txr_interq = pcq_create(sc->num_tx_desc, KM_SLEEP);
3019 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
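
Matches 2997-3019 cover igc_setup_transmit_ring(), which readies one ring for use: the per-buffer DMA maps are allocated, the descriptor area is cleared, both indices restart at zero, the whole map is pre-synced, and the per-ring pcq and mutex are created. A compact sketch, assuming the descriptor area is num_tx_desc descriptors long:

static int
igc_setup_transmit_ring(struct tx_ring *txr)
{
	struct igc_softc *sc = txr->sc;

	if (igc_allocate_transmit_buffers(txr))
		return ENOMEM;

	/* Clear the descriptor ring and start both indices at slot 0. */
	memset(txr->tx_base, 0,
	    sc->num_tx_desc * sizeof(union igc_adv_tx_desc));
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Per-ring software queue for the if_transmit path, plus its lock. */
	txr->txr_interq = pcq_create(sc->num_tx_desc, KM_SLEEP);
	mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);

	return 0;
}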
3037 struct tx_ring *txr = &sc->tx_rings[iq];
3039 txr->txdma.dma_map->dm_segs[0].ds_addr;
3048 IGC_WRITE_REG(hw, IGC_TDT(iq), 0 /* XXX txr->next_avail_desc */);
3051 txr->watchdog_timer = 0;
3085 struct tx_ring *txr = &sc->tx_rings[iq];
3087 igc_free_transmit_buffers(txr);
3097 igc_free_transmit_buffers(struct tx_ring *txr)
3099 struct igc_softc *sc = txr->sc;
3101 if (txr->tx_buffers == NULL)
3104 igc_withdraw_transmit_packets(txr, true);
3106 kmem_free(txr->tx_buffers,
3108 txr->tx_buffers = NULL;
3109 txr->txtag = NULL;
3111 pcq_destroy(txr->txr_interq);
3112 mutex_destroy(&txr->txr_lock);
3121 igc_withdraw_transmit_packets(struct tx_ring *txr, bool destroy)
3123 struct igc_softc *sc = txr->sc;
3124 struct igc_queue *q = txr->txr_igcq;
3126 mutex_enter(&txr->txr_lock);
3129 union igc_adv_tx_desc *txdesc = &txr->tx_base[id];
3131 igc_txdesc_sync(txr, id,
3136 igc_txdesc_sync(txr, id,
3139 struct igc_tx_buf *txbuf = &txr->tx_buffers[id];
3143 bus_dmamap_sync(txr->txdma.dma_tag, map,
3145 bus_dmamap_unload(txr->txdma.dma_tag, map);
3150 bus_dmamap_destroy(txr->txdma.dma_tag, map);
3155 txr->next_avail_desc = 0;
3156 txr->next_to_clean = 0;
3160 txr->txr_interq)) != NULL) {
3165 mutex_exit(&txr->txr_lock);
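
The longest group, matches 3121-3165, is igc_withdraw_transmit_packets(), used both when the interface stops (destroy == false) and when the ring is torn down for good (destroy == true). A sketch assembled from the matched lines, with the counters and the exact descriptor-clearing details abbreviated; the m_head field name is assumed:

static void
igc_withdraw_transmit_packets(struct tx_ring *txr, bool destroy)
{
	struct igc_softc *sc = txr->sc;
	struct mbuf *m;

	mutex_enter(&txr->txr_lock);

	for (int id = 0; id < sc->num_tx_desc; id++) {
		union igc_adv_tx_desc *txdesc = &txr->tx_base[id];
		struct igc_tx_buf *txbuf = &txr->tx_buffers[id];
		bus_dmamap_t map = txbuf->map;

		/* Clear the descriptor between bracketing syncs. */
		igc_txdesc_sync(txr, id,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memset(txdesc, 0, sizeof(*txdesc));
		igc_txdesc_sync(txr, id,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Unmap and free any packet still owned by this slot. */
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txdma.dma_tag, map,
			    0, map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txdma.dma_tag, map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}

		/* On final teardown, the DMA maps go away as well. */
		if (destroy && map != NULL) {
			bus_dmamap_destroy(txr->txdma.dma_tag, map);
			txbuf->map = NULL;
		}
	}

	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Also drop anything still parked on the software queue. */
	while ((m = pcq_get(txr->txr_interq)) != NULL)
		m_freem(m);

	mutex_exit(&txr->txr_lock);
}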
3176 igc_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, int prod,
3305 (struct igc_adv_tx_context_desc *)&txr->tx_base[prod];
3307 igc_txdesc_sync(txr, prod,
3316 igc_txdesc_sync(txr, prod,