Lines Matching defs:txr
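
What follows is a cross-reference listing over the NetBSD iavf(4) Ethernet virtual-function driver: every source line that defines or uses a per-queue transmit ring, struct iavf_tx_ring, conventionally held in a local named txr. The number at the start of each match is the driver's own source line number. Read top to bottom, the matches trace the ring's life cycle: watchdog arming and cancellation, ring allocation and teardown, the descriptor consumer (iavf_txeof) and producer (iavf_tx_common_locked), the if_transmit/softint path, interrupt handling, and finally the queue configuration handed to the parent physical function (PF).
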
1099 struct iavf_tx_ring *txr;
1110 txr = sc->sc_qps[i].qp_txr;
1113 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
1155 struct iavf_tx_ring *txr;
1184 txr = sc->sc_qps[i].qp_txr;
1190 mutex_enter(&txr->txr_lock);
1191 iavf_txr_clean(sc, txr);
1192 mutex_exit(&txr->txr_lock);
1211 iavf_watchdog(struct iavf_tx_ring *txr)
1215 sc = txr->txr_sc;
1217 mutex_enter(&txr->txr_lock);
1219 if (txr->txr_watchdog == IAVF_WATCHDOG_STOP
1220 || --txr->txr_watchdog > 0) {
1221 mutex_exit(&txr->txr_lock);
1225 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
1226 txr->txr_watchdogto.ev_count++;
1227 mutex_exit(&txr->txr_lock);
1230 txr->txr_qid);
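
The fragment at 1211-1230 is the per-ring transmit watchdog. txr_watchdog is armed to IAVF_WATCHDOG_TICKS whenever descriptors are handed to hardware (3081) and disarmed to IAVF_WATCHDOG_STOP when the ring drains (2869) or the queue is quiesced (1113); each tick decrements it, and only when it reaches zero is the timeout counter bumped and the queue reported hung. A minimal sketch of that countdown, with illustrative constant values (the driver's real values are not shown in this listing):

    /*
     * Countdown-watchdog sketch. Constant values are assumptions for
     * illustration; only the STOP/decrement logic mirrors the driver.
     * The driver protects the counter with txr_lock, as above.
     */
    #include <stdbool.h>

    #define WD_STOP  0          /* ring idle: watchdog disarmed */
    #define WD_TICKS 5          /* assumed ticks before declaring a hang */

    struct wd {
        unsigned wd_count;
    };

    /* Arm when descriptors are posted; disarm when the ring drains. */
    static void wd_arm(struct wd *w)    { w->wd_count = WD_TICKS; }
    static void wd_disarm(struct wd *w) { w->wd_count = WD_STOP; }

    /* Periodic tick: returns true exactly once per hang. */
    static bool
    wd_tick(struct wd *w)
    {
        if (w->wd_count == WD_STOP || --w->wd_count > 0)
            return false;           /* idle, or still counting down */
        w->wd_count = WD_STOP;      /* one report per hang, cf. 1225 */
        return true;                /* caller logs and resets the queue */
    }
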
2217 struct iavf_tx_ring *txr;
2222 txr = kmem_zalloc(sizeof(*txr), KM_NOSLEEP);
2223 if (txr == NULL)
2231 if (iavf_dmamem_alloc(sc->sc_dmat, &txr->txr_mem,
2245 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2246 if (txr->txr_intrq == NULL)
2249 txr->txr_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
2250 iavf_deferred_transmit, txr);
2251 if (txr->txr_si == NULL)
2254 snprintf(txr->txr_name, sizeof(txr->txr_name), "%s-tx%d",
2257 iavf_evcnt_attach(&txr->txr_defragged,
2258 txr->txr_name, "m_defrag succeeded");
2259 iavf_evcnt_attach(&txr->txr_defrag_failed,
2260 txr->txr_name, "m_defrag failed");
2261 iavf_evcnt_attach(&txr->txr_pcqdrop,
2262 txr->txr_name, "Dropped in pcq");
2263 iavf_evcnt_attach(&txr->txr_transmitdef,
2264 txr->txr_name, "Deferred transmit");
2265 iavf_evcnt_attach(&txr->txr_watchdogto,
2266 txr->txr_name, "Watchdog timed out on queue");
2267 iavf_evcnt_attach(&txr->txr_defer,
2268 txr->txr_name, "Handled queue in softint/workqueue");
2270 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR, NULL,
2271 txr->txr_name, "Interrupt on queue");
2273 txr->txr_qid = qid;
2274 txr->txr_sc = sc;
2275 txr->txr_maps = maps;
2276 txr->txr_prod = txr->txr_cons = 0;
2277 txr->txr_tail = I40E_QTX_TAIL1(qid);
2278 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2280 return txr;
2282 pcq_destroy(txr->txr_intrq);
2290 iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
2294 kmem_free(txr, sizeof(*txr));
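
The allocator at 2217-2294 acquires its resources in a fixed order (ring structure, DMA descriptor memory, per-queue pcq, softint, event counters, ring lock), and the labels reached from 2282 onward unwind in reverse, so a mid-way failure releases exactly what had been set up. A reduced sketch of the same goto-unwind idiom, with made-up names standing in for the driver's:

    /*
     * goto-unwind allocation sketch using NetBSD's kmem(9), pcq(9),
     * softint(9) and mutex(9). Types and names are illustrative.
     */
    #include <sys/kmem.h>
    #include <sys/pcq.h>
    #include <sys/intr.h>
    #include <sys/mutex.h>

    #define NDESCS 1024                 /* assumed ring depth */

    struct ring {
        pcq_t    *r_q;                  /* per-queue packet queue */
        void     *r_si;                 /* deferred-transmit softint */
        kmutex_t  r_lock;
    };

    static void ring_softintr(void *);

    static struct ring *
    ring_alloc(void)
    {
        struct ring *r;

        r = kmem_zalloc(sizeof(*r), KM_NOSLEEP);
        if (r == NULL)
            return NULL;

        r->r_q = pcq_create(NDESCS, KM_NOSLEEP);
        if (r->r_q == NULL)
            goto free_ring;

        r->r_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
            ring_softintr, r);
        if (r->r_si == NULL)
            goto destroy_q;

        mutex_init(&r->r_lock, MUTEX_DEFAULT, IPL_NET);
        return r;

    destroy_q:
        pcq_destroy(r->r_q);
    free_ring:
        kmem_free(r, sizeof(*r));
        return NULL;
    }
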
2299 iavf_txr_free(struct iavf_softc *sc, struct iavf_tx_ring *txr)
2304 maps = txr->txr_maps;
2311 kmem_free(txr->txr_maps,
2313 txr->txr_maps = NULL;
2316 evcnt_detach(&txr->txr_defragged);
2317 evcnt_detach(&txr->txr_defrag_failed);
2318 evcnt_detach(&txr->txr_pcqdrop);
2319 evcnt_detach(&txr->txr_transmitdef);
2320 evcnt_detach(&txr->txr_watchdogto);
2321 evcnt_detach(&txr->txr_defer);
2322 evcnt_detach(&txr->txr_intr);
2324 iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
2325 softint_disestablish(txr->txr_si);
2326 pcq_destroy(txr->txr_intrq);
2327 mutex_destroy(&txr->txr_lock);
2328 kmem_free(txr, sizeof(*txr));
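
iavf_txr_free (2299-2328) releases those resources in roughly the reverse order: the map array and event counters, then the DMA ring, the softint, the pcq, the ring lock, and finally the structure itself, pairing each create/init above with its destroy/free.
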
2786 iavf_txeof(struct iavf_softc *sc, struct iavf_tx_ring *txr, u_int txlimit,
2799 KASSERT(mutex_owned(&txr->txr_lock));
2801 prod = txr->txr_prod;
2802 cons = txr->txr_cons;
2807 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2808 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2810 ring = IXL_DMA_KVA(&txr->txr_mem);
2821 txm = &txr->txr_maps[cons];
2854 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2855 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2857 txr->txr_cons = cons;
2861 softint_schedule(txr->txr_si);
2862 if (txr->txr_qid == 0) {
2868 if (txr->txr_cons == txr->txr_prod) {
2869 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
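
iavf_txeof (2786-2869) is the consumer half of the ring protocol. With txr_lock held (asserted at 2799) it syncs the descriptor memory POSTREAD, walks txr_cons toward txr_prod reclaiming completed slots up to txlimit, syncs PREREAD again, stores the new txr_cons, kicks the deferred-transmit softint so queued packets can use the freed space (2861), and disarms the watchdog once cons has caught up with prod (2868-2869). The index arithmetic is a bounded modular walk, sketched below; slot_completed() and free_slot() are hypothetical stand-ins for the descriptor done-bit test and the DMA-map/mbuf teardown:

    /*
     * Consumer-walk sketch for a ring of r_ndescs slots; assumes
     * r_ndescs is a power of two so wrap-around can use a mask.
     */
    #include <stdbool.h>

    struct reap_ring {
        unsigned r_prod, r_cons, r_ndescs;
    };

    static bool slot_completed(struct reap_ring *, unsigned);
    static void free_slot(struct reap_ring *, unsigned);

    static unsigned
    ring_reap(struct reap_ring *r, unsigned limit)
    {
        unsigned cons = r->r_cons, done = 0;

        while (cons != r->r_prod && done < limit) {
            if (!slot_completed(r, cons))   /* hardware not done yet */
                break;
            free_slot(r, cons);             /* unload map, free mbuf */
            cons = (cons + 1) & (r->r_ndescs - 1);
            done++;
        }
        r->r_cons = cons;
        return done;        /* == limit hints more work remains */
    }
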
2877 struct iavf_tx_ring *txr)
2882 KASSERT(mutex_owned(&txr->txr_lock));
2894 txr->txr_defragged.ev_count++;
2898 txr->txr_defrag_failed.ev_count++;
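
Lines 2894-2898 sit inside iavf_load_mbuf: when an mbuf chain needs more DMA segments than the descriptor layout allows, the driver compacts it with m_defrag(9) and retries the load, counting the outcome in txr_defragged or txr_defrag_failed. A hedged sketch of that fallback, using the conventional bus_dma(9) idiom rather than the driver's exact code:

    /*
     * Load-with-defrag fallback sketch; the EFBIG trigger and the
     * flags are the conventional idiom, not copied from the driver.
     */
    #include <sys/bus.h>
    #include <sys/mbuf.h>
    #include <sys/errno.h>

    static int
    load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0)
    {
        struct mbuf *m;
        int error;

        error = bus_dmamap_load_mbuf(dmat, map, *m0,
            BUS_DMA_WRITE | BUS_DMA_NOWAIT);
        if (error != EFBIG)
            return error;               /* success, or a hard failure */

        m = m_defrag(*m0, M_NOWAIT);    /* compact into fewer segments */
        if (m == NULL)
            return ENOBUFS;             /* -> txr_defrag_failed */
        *m0 = m;                        /* -> txr_defragged */

        return bus_dmamap_load_mbuf(dmat, map, *m0,
            BUS_DMA_WRITE | BUS_DMA_NOWAIT);
    }
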
2971 iavf_tx_common_locked(struct ifnet *ifp, struct iavf_tx_ring *txr,
2984 KASSERT(mutex_owned(&txr->txr_lock));
2995 prod = txr->txr_prod;
2996 free = txr->txr_cons;
3002 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
3003 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
3005 ring = IXL_DMA_KVA(&txr->txr_mem);
3019 m = pcq_get(txr->txr_intrq);
3026 txm = &txr->txr_maps[prod];
3029 if (iavf_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
3075 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
3076 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
3079 txr->txr_prod = prod;
3080 iavf_wr(sc, txr->txr_tail, prod);
3081 txr->txr_watchdog = IAVF_WATCHDOG_TICKS;
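
iavf_tx_common_locked (2971-3081) is the producer half: it snapshots prod and cons to compute free descriptors (2995-2996), syncs the ring POSTWRITE, dequeues packets (pcq_get at 3019, or the ifnet send queue on the if_start path at 3131), loads each mbuf and fills descriptors, then syncs PREWRITE, publishes the new producer index to the queue tail register (iavf_wr at 3080, using the I40E_QTX_TAIL1 offset stored at 2277), and arms the watchdog (3081). The free-space computation is the classic wrap-aware subtraction:

    /*
     * Free-space sketch matching the prod/cons snapshot at 2995-2996:
     * slots the producer may fill before running into the consumer.
     * Callers keep per-packet headroom so prod never wraps onto cons.
     */
    static unsigned
    ring_space(unsigned prod, unsigned cons, unsigned ndescs)
    {
        unsigned free = cons;

        if (free <= prod)
            free += ndescs;     /* consumer is "behind" after wrap */
        return free - prod;
    }
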
3090 struct iavf_tx_ring *txr;
3095 txr = qp->qp_txr;
3098 mutex_enter(&txr->txr_lock);
3099 txmore = iavf_txeof(sc, txr, txlimit, txevcnt);
3100 mutex_exit(&txr->txr_lock);
3125 struct iavf_tx_ring *txr;
3128 txr = sc->sc_qps[0].qp_txr;
3130 mutex_enter(&txr->txr_lock);
3131 iavf_tx_common_locked(ifp, txr, false);
3132 mutex_exit(&txr->txr_lock);
3150 struct iavf_tx_ring *txr;
3156 txr = sc->sc_qps[qid].qp_txr;
3158 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
3159 mutex_enter(&txr->txr_lock);
3160 txr->txr_pcqdrop.ev_count++;
3161 mutex_exit(&txr->txr_lock);
3167 if (mutex_tryenter(&txr->txr_lock)) {
3168 iavf_tx_common_locked(ifp, txr, true);
3169 mutex_exit(&txr->txr_lock);
3172 softint_schedule(txr->txr_si);
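
iavf_transmit (3150-3172) is the MP-safe if_transmit hook. The packet is first pushed onto the selected queue's pcq, a lock-free producer/consumer queue, with txr_pcqdrop counting overflow; the ring lock is then tried opportunistically, draining inline on success and otherwise scheduling the softint, whose handler iavf_deferred_transmit (3181-3193) drains the pcq under the lock. A sketch of that try-lock-or-defer shape; drain_locked() stands in for iavf_tx_common_locked():

    /*
     * Try-lock-or-defer transmit sketch over pcq(9), mutex(9) and
     * softint(9); struct ring is the sketch type from above.
     */
    static void drain_locked(struct ring *);

    static int
    ring_transmit(struct ring *r, struct mbuf *m)
    {
        if (__predict_false(!pcq_put(r->r_q, m))) {
            m_freem(m);                     /* queue full: drop + count */
            return ENOBUFS;
        }
        if (mutex_tryenter(&r->r_lock)) {
            drain_locked(r);                /* uncontended fast path */
            mutex_exit(&r->r_lock);
        } else {
            softint_schedule(r->r_si);      /* contended: defer */
        }
        return 0;
    }
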
3181 struct iavf_tx_ring *txr;
3185 txr = xtxr;
3186 sc = txr->txr_sc;
3189 mutex_enter(&txr->txr_lock);
3190 txr->txr_transmitdef.ev_count++;
3191 if (pcq_peek(txr->txr_intrq) != NULL)
3192 iavf_tx_common_locked(ifp, txr, true);
3193 mutex_exit(&txr->txr_lock);
3197 iavf_txr_clean(struct iavf_softc *sc, struct iavf_tx_ring *txr)
3203 KASSERT(mutex_owned(&txr->txr_lock));
3205 maps = txr->txr_maps;
3221 memset(IXL_DMA_KVA(&txr->txr_mem), 0, IXL_DMA_LEN(&txr->txr_mem));
3222 txr->txr_prod = txr->txr_cons = 0;
3231 struct iavf_tx_ring *txr;
3259 txr = sc->sc_qps[i].qp_txr;
3268 mutex_enter(&txr->txr_lock);
3269 while (iavf_txeof(sc, txr, UINT_MAX,
3270 &txr->txr_intr) != 0) {
3273 mutex_exit(&txr->txr_lock);
3284 struct iavf_tx_ring *txr;
3291 txr = qp->qp_txr;
3293 sc = txr->txr_sc;
3294 qid = txr->txr_qid;
3301 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3307 if (txr->txr_qid == 0)
3309 softint_schedule(txr->txr_si);
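
The per-queue MSI-X handler (3284-3309) bounds the work done in hard interrupt context: it runs the shared iavf_handle_queue_common with txlimit/rxlimit budgets, charging the work to txr_intr/rxr_intr (3301), and pushes anything left over to softint context instead of looping in the handler. The shape, reusing ring_reap() from the consumer sketch:

    /*
     * Bounded-work interrupt sketch; R_INTR_LIMIT is an assumed
     * per-interrupt budget, ring_reap() is the sketch above, and
     * struct ring is assumed to embed a struct reap_ring r_reap.
     */
    #define R_INTR_LIMIT 256

    static int
    ring_intr(void *arg)
    {
        struct ring *r = arg;
        unsigned done;

        mutex_enter(&r->r_lock);
        done = ring_reap(&r->r_reap, R_INTR_LIMIT);
        mutex_exit(&r->r_lock);

        if (done == R_INTR_LIMIT)           /* budget spent: likely more */
            softint_schedule(r->r_si);      /* finish off-interrupt */
        return 1;                           /* interrupt claimed */
    }
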
3330 struct iavf_tx_ring *txr;
3337 txr = qp->qp_txr;
3339 sc = txr->txr_sc;
3340 qid = txr->txr_qid;
3346 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
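
The softint/workqueue variant at 3330-3346 runs the same common code with one difference in accounting: work done directly from the MSI-X vector is charged to txr_intr ("Interrupt on queue", 3301), while work finished later is charged to txr_defer ("Handled queue in softint/workqueue", 3346), so the evcnt output shows how often the interrupt budget was exhausted.
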
4638 struct iavf_tx_ring *txr;
4658 txr = sc->sc_qps[num_vec].qp_txr;
4663 vec[num_vec].txq_map = htole16(__BIT(txr->txr_qid));
4705 struct iavf_tx_ring *txr;
4724 txr = sc->sc_qps[i].qp_txr;
4728 txq->queue_id = htole16(txr->txr_qid);
4731 txq->dma_ring_addr = htole64(IXL_DMA_DVA(&txr->txr_mem));
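
The final matches (4638-4731) build the configuration handed to the parent PF: the interrupt map encodes each transmit queue as a bit in txq_map (4663), and the per-queue record carries the queue id and the bus address of the descriptor ring, converted to the device's little-endian layout with htole16/htole64 (4728, 4731). In miniature:

    /*
     * Little-endian queue-config sketch, assuming <sys/endian.h>;
     * the record layout is illustrative, not the virtchnl wire format.
     */
    #include <sys/types.h>
    #include <sys/endian.h>

    struct txq_cfg {
        uint16_t queue_id;          /* LE queue index */
        uint64_t dma_ring_addr;     /* LE bus address of the ring */
    };

    static void
    txq_fill(struct txq_cfg *c, uint16_t qid, uint64_t ring_dva)
    {
        c->queue_id = htole16(qid);
        c->dma_ring_addr = htole64(ring_dva);
    }
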