
Lines Matching defs:txr

1967 struct ixl_tx_ring *txr;
1980 txr = sc->sc_qps[i].qp_txr;
1983 ixl_txr_config(sc, txr);
1991 txr = sc->sc_qps[i].qp_txr;
1998 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
2012 ixl_txr_qdis(sc, txr, 1);
2018 if (ixl_txr_enabled(sc, txr) != 0)
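
The matches between source lines 1967 and 2018 come from the init path: each queue pair's TX ring gets its HMC context programmed, its tail register primed, and queue-disable lifted before the driver waits for the hardware to report the queue enabled. A minimal sketch of that ordering follows, assuming the driver's own declarations are in scope; the queue-pair count field (sc_nqueue_pairs here) and the errno-style return are assumptions, while the helper names and call order come from the listing.

static int
example_txr_bringup(struct ixl_softc *sc)
{
        struct ixl_tx_ring *txr;
        unsigned int i;

        /* Program the HMC transmit-queue context for every ring. */
        for (i = 0; i < sc->sc_nqueue_pairs; i++) {
                txr = sc->sc_qps[i].qp_txr;
                ixl_txr_config(sc, txr);
        }

        /* Prime each tail register with the current producer index. */
        for (i = 0; i < sc->sc_nqueue_pairs; i++) {
                txr = sc->sc_qps[i].qp_txr;
                ixl_wr(sc, txr->txr_tail, txr->txr_prod);
        }

        /* Lift queue-disable, then wait for the hardware to report ENA. */
        for (i = 0; i < sc->sc_nqueue_pairs; i++) {
                txr = sc->sc_qps[i].qp_txr;
                ixl_txr_qdis(sc, txr, 1);
                if (ixl_txr_enabled(sc, txr) != 0)
                        return EIO;     /* assumption: caller unwinds */
        }

        return 0;
}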
2180 struct ixl_tx_ring *txr;
2190 txr = sc->sc_qps[i].qp_txr;
2195 mutex_enter(&txr->txr_lock);
2196 ixl_txr_qdis(sc, txr, 0);
2197 mutex_exit(&txr->txr_lock);
2205 txr = sc->sc_qps[i].qp_txr;
2208 mutex_enter(&txr->txr_lock);
2212 mutex_exit(&txr->txr_lock);
2226 txr = sc->sc_qps[i].qp_txr;
2229 mutex_enter(&txr->txr_lock);
2230 if (ixl_txr_disabled(sc, txr) != 0) {
2231 mutex_exit(&txr->txr_lock);
2234 mutex_exit(&txr->txr_lock);
2251 txr = sc->sc_qps[i].qp_txr;
2254 mutex_enter(&txr->txr_lock);
2255 ixl_txr_unconfig(sc, txr);
2256 mutex_exit(&txr->txr_lock);
2262 ixl_txr_clean(sc, txr);
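
The matches from source lines 2180 through 2262 are the stop path, which reverses the bring-up under txr_lock: request queue-disable, wait for the hardware to confirm it, then tear down the HMC context and reclaim outstanding mbufs. A hedged sketch, with the same sc_nqueue_pairs assumption and with error handling reduced to skipping a queue that refuses to disable:

static void
example_txr_teardown(struct ixl_softc *sc)
{
        struct ixl_tx_ring *txr;
        unsigned int i;

        /* Ask the hardware to stop fetching descriptors on every queue. */
        for (i = 0; i < sc->sc_nqueue_pairs; i++) {
                txr = sc->sc_qps[i].qp_txr;
                mutex_enter(&txr->txr_lock);
                ixl_txr_qdis(sc, txr, 0);
                mutex_exit(&txr->txr_lock);
        }

        /* Wait until each queue actually reports disabled. */
        for (i = 0; i < sc->sc_nqueue_pairs; i++) {
                txr = sc->sc_qps[i].qp_txr;
                mutex_enter(&txr->txr_lock);
                if (ixl_txr_disabled(sc, txr) != 0) {
                        mutex_exit(&txr->txr_lock);
                        continue;       /* simplification: real code bails out */
                }
                mutex_exit(&txr->txr_lock);
        }

        /* Tear down the HMC context and reclaim any queued mbufs. */
        for (i = 0; i < sc->sc_nqueue_pairs; i++) {
                txr = sc->sc_qps[i].qp_txr;
                mutex_enter(&txr->txr_lock);
                ixl_txr_unconfig(sc, txr);
                mutex_exit(&txr->txr_lock);
                ixl_txr_clean(sc, txr);
        }
}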
2359 struct ixl_tx_ring *txr = NULL;
2363 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2367 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2384 txr->txr_cons = txr->txr_prod = 0;
2385 txr->txr_maps = maps;
2387 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2388 if (txr->txr_intrq == NULL)
2391 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2392 ixl_deferred_transmit, txr);
2393 if (txr->txr_si == NULL)
2396 txr->txr_tail = I40E_QTX_TAIL(qid);
2397 txr->txr_qid = qid;
2398 txr->txr_sc = sc;
2399 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2401 return txr;
2404 pcq_destroy(txr->txr_intrq);
2415 ixl_dmamem_free(sc, &txr->txr_mem);
2418 kmem_free(txr, sizeof(*txr));
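
Source lines 2359 to 2418 are ixl_txr_alloc(). Assembled from the fragments, the per-ring allocation pairs a lockless pcq(9) intake queue with a deferred-transmit soft interrupt and an IPL_NET mutex. The sketch below elides the descriptor DMA memory (ixl_dmamem_alloc) and the per-slot dmamaps in txr_maps, since their setup is not fully shown:

static struct ixl_tx_ring *
example_txr_alloc(struct ixl_softc *sc, unsigned int qid)
{
        struct ixl_tx_ring *txr;

        txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);

        /* ... allocate descriptor DMA memory and txr_maps here (elided) ... */

        txr->txr_cons = txr->txr_prod = 0;

        /* Lockless queue that if_transmit feeds before taking txr_lock. */
        txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
        if (txr->txr_intrq == NULL)
                goto free;

        /* Soft interrupt that drains txr_intrq when the TX lock is busy. */
        txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
            ixl_deferred_transmit, txr);
        if (txr->txr_si == NULL)
                goto destroy;

        txr->txr_tail = I40E_QTX_TAIL(qid);
        txr->txr_qid = qid;
        txr->txr_sc = sc;
        mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);

        return txr;

destroy:
        pcq_destroy(txr->txr_intrq);
free:
        kmem_free(txr, sizeof(*txr));
        return NULL;
}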
2424 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2430 qid = txr->txr_qid + sc->sc_base_queue;
2443 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2450 txq.head = htole16(txr->txr_cons);
2452 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2460 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2467 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2471 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2473 txr->txr_cons = txr->txr_prod = 0;
2477 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2483 maps = txr->txr_maps;
2501 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2503 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2519 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2521 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2525 KASSERT(mutex_owned(&txr->txr_lock));
2539 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2545 softint_disestablish(txr->txr_si);
2547 maps = txr->txr_maps;
2554 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2556 pcq_destroy(txr->txr_intrq);
2558 ixl_dmamem_free(sc, &txr->txr_mem);
2559 mutex_destroy(&txr->txr_lock);
2561 kmem_free(txr, sizeof(*txr));
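
ixl_txr_free() (source lines 2539-2561) releases those resources in reverse order, draining whatever the packet path left in the pcq. A hedged sketch, again with the txr_maps teardown elided:

static void
example_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{
        struct mbuf *m;

        /* No more deferred-transmit callbacks after this point. */
        softint_disestablish(txr->txr_si);

        /* ... unload and destroy the dmamaps in txr_maps here (elided) ... */

        /* Free anything if_transmit queued but never pushed to the ring. */
        while ((m = pcq_get(txr->txr_intrq)) != NULL)
                m_freem(m);
        pcq_destroy(txr->txr_intrq);

        ixl_dmamem_free(sc, &txr->txr_mem);
        mutex_destroy(&txr->txr_lock);

        kmem_free(txr, sizeof(*txr));
}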
2566 struct ixl_tx_ring *txr)
2571 KASSERT(mutex_owned(&txr->txr_lock));
2583 txr->txr_defragged.ev_count++;
2588 txr->txr_defrag_failed.ev_count++;
2661 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2674 KASSERT(mutex_owned(&txr->txr_lock));
2683 prod = txr->txr_prod;
2684 free = txr->txr_cons;
2689 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2690 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2692 ring = IXL_DMA_KVA(&txr->txr_mem);
2706 m = pcq_get(txr->txr_intrq);
2713 txm = &txr->txr_maps[prod];
2716 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2766 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2767 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2770 txr->txr_prod = prod;
2771 ixl_wr(sc, txr->txr_tail, prod);
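
ixl_tx_common_locked() (source lines 2661-2771) is the only place descriptors are written, always under txr_lock. The sketch below keeps just the shape the fragments show: a POSTWRITE/PREWRITE sync bracket around the descriptor updates, mbufs pulled from the pcq, and a single tail-register write once txr_prod is advanced. Descriptor layout, dmamap loading, and free-slot accounting are elided.

static void
example_tx_locked(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{
        struct mbuf *m;
        unsigned int prod;

        KASSERT(mutex_owned(&txr->txr_lock));

        prod = txr->txr_prod;

        /* Pick up the device's latest descriptor writebacks. */
        bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
            0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);

        while ((m = pcq_get(txr->txr_intrq)) != NULL) {
                /*
                 * Load m into txr->txr_maps[prod], fill the descriptor(s)
                 * at prod, and keep the mbuf attached to the slot until
                 * ixl_txeof() reclaims it (all elided here).
                 */
                prod = (prod + 1) % sc->sc_tx_ring_ndescs;
        }

        /* Flush the new descriptors before the hardware can fetch them. */
        bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
            0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);

        txr->txr_prod = prod;
        ixl_wr(sc, txr->txr_tail, prod);
}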
2776 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2788 KASSERT(mutex_owned(&txr->txr_lock));
2790 prod = txr->txr_prod;
2791 cons = txr->txr_cons;
2796 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2797 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2799 ring = IXL_DMA_KVA(&txr->txr_mem);
2810 txm = &txr->txr_maps[cons];
2843 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2844 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2846 txr->txr_cons = cons;
2849 softint_schedule(txr->txr_si);
2850 if (txr->txr_qid == 0) {
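
ixl_txeof() (source lines 2776-2850) reclaims completed descriptors between txr_cons and txr_prod under a caller-supplied limit. The sketch keeps the sync bracketing, the index walk, and the deferred-transmit kick; the descriptor done test and the mbuf unload/free are elided because the descriptor layout is not part of the listing.

static int
example_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
{
        unsigned int prod, cons;

        KASSERT(mutex_owned(&txr->txr_lock));

        prod = txr->txr_prod;
        cons = txr->txr_cons;
        if (cons == prod)
                return 0;               /* nothing outstanding */

        /* See the device's status writebacks before reading descriptors. */
        bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
            0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);

        while (cons != prod && txlimit-- > 0) {
                /*
                 * Stop if the descriptor at cons is not done; otherwise
                 * unload txr->txr_maps[cons], free its mbuf, and advance
                 * (details elided).
                 */
                cons = (cons + 1) % sc->sc_tx_ring_ndescs;
        }

        bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
            0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);

        txr->txr_cons = cons;

        /* Freed slots may let the deferred-transmit path make progress. */
        softint_schedule(txr->txr_si);

        /* Nonzero means the limit was hit and more work remains. */
        return cons != prod;
}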
2863 struct ixl_tx_ring *txr;
2866 txr = sc->sc_qps[0].qp_txr;
2868 mutex_enter(&txr->txr_lock);
2869 ixl_tx_common_locked(ifp, txr, false);
2870 mutex_exit(&txr->txr_lock);
2887 struct ixl_tx_ring *txr;
2893 txr = sc->sc_qps[qid].qp_txr;
2895 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2896 mutex_enter(&txr->txr_lock);
2897 txr->txr_pcqdrop.ev_count++;
2898 mutex_exit(&txr->txr_lock);
2906 softint_schedule(txr->txr_si);
2909 if (mutex_tryenter(&txr->txr_lock)) {
2910 ixl_tx_common_locked(ifp, txr, true);
2911 mutex_exit(&txr->txr_lock);
2914 softint_schedule(txr->txr_si);
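
ixl_transmit() (source lines 2887-2914) shows the lock-avoidance pattern on the packet path: every mbuf goes through the pcq first, the ring lock is only tried, and contention falls back to the deferred-transmit soft interrupt. A hedged sketch; queue selection is reduced to queue 0 and the drop handling (m_freem/ENOBUFS) is an assumption:

static int
example_transmit(struct ifnet *ifp, struct mbuf *m)
{
        struct ixl_softc *sc = ifp->if_softc;
        struct ixl_tx_ring *txr = sc->sc_qps[0].qp_txr; /* queue 0 for brevity */

        if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
                /* The pcq is full: count the drop and free the packet. */
                mutex_enter(&txr->txr_lock);
                txr->txr_pcqdrop.ev_count++;
                mutex_exit(&txr->txr_lock);
                m_freem(m);
                return ENOBUFS;
        }

        if (mutex_tryenter(&txr->txr_lock)) {
                /* Uncontended: push the queued packets to the ring now. */
                ixl_tx_common_locked(ifp, txr, true);
                mutex_exit(&txr->txr_lock);
        } else {
                /* Contended: let the soft interrupt drain the pcq later. */
                softint_schedule(txr->txr_si);
        }

        return 0;
}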
2925 struct ixl_tx_ring *txr = xtxr;
2926 struct ixl_softc *sc = txr->txr_sc;
2929 mutex_enter(&txr->txr_lock);
2930 txr->txr_transmitdef.ev_count++;
2931 if (pcq_peek(txr->txr_intrq) != NULL)
2932 ixl_tx_common_locked(ifp, txr, true);
2933 mutex_exit(&txr->txr_lock);
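
The handler that backs that fallback, ixl_deferred_transmit() (source lines 2925-2933), assembled from the fragments; only the ifnet lookup is an assumption, since the listing does not show where the handler obtains its struct ifnet:

static void
example_deferred_transmit(void *xtxr)
{
        struct ixl_tx_ring *txr = xtxr;
        struct ixl_softc *sc = txr->txr_sc;
        struct ifnet *ifp = &sc->sc_ec.ec_if;   /* assumption: ethercom field */

        mutex_enter(&txr->txr_lock);
        txr->txr_transmitdef.ev_count++;        /* count deferred runs */
        if (pcq_peek(txr->txr_intrq) != NULL)
                ixl_tx_common_locked(ifp, txr, true);
        mutex_exit(&txr->txr_lock);
}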
3377 struct ixl_tx_ring *txr = qp->qp_txr;
3382 mutex_enter(&txr->txr_lock);
3384 txmore = ixl_txeof(sc, txr, txlimit);
3385 mutex_exit(&txr->txr_lock);
3411 struct ixl_tx_ring *txr;
3440 txr = sc->sc_qps[i].qp_txr;
3444 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3457 struct ixl_tx_ring *txr = qp->qp_txr;
3468 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3474 if (txr->txr_qid == 0)
3476 softint_schedule(txr->txr_si);
3498 struct ixl_tx_ring *txr = qp->qp_txr;
3507 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
6043 struct ixl_tx_ring *txr;
6050 txr = qp->qp_txr;
6053 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
6055 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
6057 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
6059 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
6061 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
6063 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
6202 struct ixl_tx_ring *txr;
6208 txr = sc->sc_qps[i].qp_txr;
6211 evcnt_detach(&txr->txr_defragged);
6212 evcnt_detach(&txr->txr_defrag_failed);
6213 evcnt_detach(&txr->txr_pcqdrop);
6214 evcnt_detach(&txr->txr_transmitdef);
6215 evcnt_detach(&txr->txr_intr);
6216 evcnt_detach(&txr->txr_defer);
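
Source lines 6043-6216 attach and detach one evcnt(9) counter per statistic per ring; the counters themselves are bumped directly (txr_pcqdrop, txr_transmitdef, ...) under txr_lock elsewhere in the listing. A hedged sketch of the registration pattern; the group and name strings below are placeholders, and note that evcnt_attach_dynamic(9) stores the string pointers rather than copying them, so they must outlive the counters:

static void
example_txr_evcnt_attach(struct ixl_tx_ring *txr, const char *qname)
{
        /* qname must remain valid until example_txr_evcnt_detach(). */
        evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
            NULL, qname, "pcq drops");
        evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
            NULL, qname, "deferred transmits");
        evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
            NULL, qname, "tx interrupts");
}

static void
example_txr_evcnt_detach(struct ixl_tx_ring *txr)
{
        /* Detach in any order; each counter is independent. */
        evcnt_detach(&txr->txr_pcqdrop);
        evcnt_detach(&txr->txr_transmitdef);
        evcnt_detach(&txr->txr_intr);
}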