
Lines Matching defs:txr (per-ring transmit state in the NetBSD Hyper-V network driver, hvn(4))

1104 hvn_transmit_common(struct ifnet *ifp, struct hvn_tx_ring *txr,
1111 KASSERT(mutex_owned(&txr->txr_lock));
1115 if (txr->txr_oactive)
1117 if (txr->txr_suspended)
1121 if (!hvn_txd_peek(txr)) {
1123 txr->txr_oactive = 1;
1124 txr->txr_evnodesc.ev_count++;
1129 m = pcq_get(txr->txr_interq);
1146 txd = hvn_txd_get(txr);
1147 if (hvn_encap(txr, txd, m, l2hlen)) {
1150 hvn_txd_put(txr, txd);
1155 if (txr->txr_agg_pktleft == 0) {
1156 if (txr->txr_agg_txd != NULL) {
1157 hvn_flush_txagg(txr);
1159 if (hvn_txpkt(txr, txd)) {
1169 if (txr->txr_agg_txd != NULL)
1170 hvn_flush_txagg(txr);
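
The matched lines from hvn_transmit_common() (1104-1170) show the shape of the transmit loop: with txr_lock held it returns early while the ring is throttled (txr_oactive) or suspended, marks the ring oactive and counts a descriptor shortage when hvn_txd_peek() fails, then dequeues mbufs from the per-ring pcq, encapsulates each into a tx descriptor, and sends either through the aggregation path or standalone, flushing any leftover aggregate before returning. A minimal userland model of that control flow; every type and helper here is an illustrative stand-in, not the driver's:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pkt { int len; };

struct txring {
    bool oactive, suspended;
    int  avail;              /* free descriptors, cf. hvn_txd_peek() */
    struct pkt *queue[8];    /* models the per-ring pcq */
    int  qhead, qtail;
    bool agg_open;           /* models txr_agg_txd != NULL */
    int  agg_pktleft;
};

static struct pkt *dequeue(struct txring *r)
{
    return (r->qhead == r->qtail) ? NULL : r->queue[r->qhead++ % 8];
}
static bool encap(struct txring *r, struct pkt *p) { r->avail--; return true; }
static void send_one(struct txring *r) { puts("send"); }
static void flush_agg(struct txring *r) { r->agg_open = false; puts("flush"); }

static void transmit_common(struct txring *r)
{
    struct pkt *p;

    /* 1115-1117: bail while throttled or suspended. */
    if (r->oactive || r->suspended)
        return;

    for (;;) {
        /* 1121-1124: no free descriptors -> set oactive and stop. */
        if (r->avail == 0) {
            r->oactive = true;
            break;
        }
        if ((p = dequeue(r)) == NULL)       /* 1129: queue drained */
            break;
        if (!encap(r, p))                   /* 1146-1150: drop on failure */
            continue;
        /* 1155-1159: aggregate full -> flush it, else send standalone. */
        if (r->agg_pktleft == 0) {
            if (r->agg_open)
                flush_agg(r);
            else
                send_one(r);
        }
    }
    /* 1169-1170: push out any partially filled aggregate. */
    if (r->agg_open)
        flush_agg(r);
}

int main(void)
{
    static struct pkt p = { 64 };
    struct txring r = { .avail = 2, .queue = { &p, &p }, .qtail = 2 };
    transmit_common(&r);
    return 0;
}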
1177 struct hvn_tx_ring *txr = &sc->sc_txr[0];
1179 mutex_enter(&txr->txr_lock);
1180 hvn_transmit_common(ifp, txr, false);
1181 mutex_exit(&txr->txr_lock);
1199 struct hvn_tx_ring *txr;
1203 txr = &sc->sc_txr[qid];
1205 if (__predict_false(!pcq_put(txr->txr_interq, m))) {
1206 mutex_enter(&txr->txr_lock);
1207 txr->txr_evpcqdrop.ev_count++;
1208 mutex_exit(&txr->txr_lock);
1214 softint_schedule(txr->txr_si);
1222 struct hvn_tx_ring *txr = arg;
1223 struct hvn_softc *sc = txr->txr_softc;
1226 mutex_enter(&txr->txr_lock);
1227 txr->txr_evtransmitdefer.ev_count++;
1228 hvn_transmit_common(ifp, txr, true);
1229 mutex_exit(&txr->txr_lock);
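
Lines 1199-1229 show the two halves of the deferred transmit path: hvn_transmit() enqueues the mbuf on the ring's pcq without taking the ring lock (counting a drop under the lock if the queue is full) and schedules a softint; hvn_deferred_transmit() then runs at softint level, takes txr_lock, bumps txr_evtransmitdefer and drains via hvn_transmit_common(). A userland model of that hand-off, with a worker thread standing in for the softint; note the real pcq_put() is lock-free, while this model serializes on one mutex for brevity:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define QLEN 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  kick = PTHREAD_COND_INITIALIZER;
static int  queue[QLEN], qhead, qtail;
static bool scheduled, done;

/* Models pcq_put(): returns false when the queue is full (the driver
 * then bumps txr_evpcqdrop and frees the mbuf). */
static bool queue_put(int m)
{
    pthread_mutex_lock(&lock);
    bool ok = (qtail - qhead) < QLEN;
    if (ok)
        queue[qtail++ % QLEN] = m;
    pthread_mutex_unlock(&lock);
    return ok;
}

/* Models softint_schedule(): cheap, callable from the hot path. */
static void schedule(void)
{
    pthread_mutex_lock(&lock);
    scheduled = true;
    pthread_cond_signal(&kick);
    pthread_mutex_unlock(&lock);
}

/* Plays hvn_deferred_transmit(): wake, take the "ring lock", drain. */
static void *deferred_transmit(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!done) {
        while (!scheduled && !done)
            pthread_cond_wait(&kick, &lock);
        scheduled = false;
        while (qhead != qtail)          /* cf. hvn_transmit_common() */
            printf("tx %d\n", queue[qhead++ % QLEN]);
    }
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, deferred_transmit, NULL);
    for (int i = 0; i < 5; i++)
        if (queue_put(i))
            schedule();
    usleep(100 * 1000);                 /* let the worker drain */
    pthread_mutex_lock(&lock);
    done = true;
    pthread_cond_signal(&kick);
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    return 0;
}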
1352 hvn_flush_txagg(struct hvn_tx_ring *txr)
1354 struct hvn_softc *sc = txr->txr_softc;
1360 txd = txr->txr_agg_txd;
1368 pkts = txr->txr_stat_pkts;
1376 error = hvn_txpkt(txr, txd);
1380 txr->txr_evflushfailed.ev_count++;
1385 txr->txr_agg_txd = NULL;
1386 txr->txr_agg_szleft = 0;
1387 txr->txr_agg_pktleft = 0;
1388 txr->txr_agg_prevpkt = NULL;
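
hvn_flush_txagg() (1352-1388) sends the pending aggregate descriptor through hvn_txpkt(), counting a failure in txr_evflushfailed, and then unconditionally clears the four aggregation fields. A tiny sketch of that reset contract; the struct shape is assumed from the field names in the matched lines:

#include <stdio.h>

struct txdesc { int id; };           /* placeholder descriptor type */

struct agg {                         /* assumed shape of the agg fields */
    struct txdesc *txd, *prevpkt;
    int szleft, pktleft;
};

/* Mirrors the tail of hvn_flush_txagg(): whether or not the send
 * succeeded (failure only bumps txr_evflushfailed), all four
 * aggregation fields are cleared so the next packet starts fresh. */
static void agg_reset(struct agg *a)
{
    a->txd = NULL;
    a->szleft = 0;
    a->pktleft = 0;
    a->prevpkt = NULL;
}

int main(void)
{
    struct txdesc d = { 0 };
    struct agg a = { &d, &d, 100, 2 };
    agg_reset(&a);
    printf("txd=%p pktleft=%d\n", (void *)a.txd, a.pktleft);
    return 0;
}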
1394 hvn_try_txagg(struct hvn_tx_ring *txr, struct hvn_tx_desc *txd, int pktsz)
1396 struct hvn_softc *sc = txr->txr_softc;
1402 if (txr->txr_agg_txd != NULL) {
1403 if (txr->txr_agg_pktleft > 0 && txr->txr_agg_szleft > pktsz) {
1404 agg_txd = txr->txr_agg_txd;
1405 pkt = txr->txr_agg_prevpkt;
1418 pkt->rm_len = roundup2(olen, txr->txr_agg_align);
1426 txr->txr_agg_prevpkt = chim;
1428 txr->txr_agg_pktleft--;
1429 txr->txr_agg_szleft -= pktsz;
1430 if (txr->txr_agg_szleft <=
1431 HVN_PKTSIZE_MIN(txr->txr_agg_align)) {
1436 txr->txr_agg_pktleft = 0;
1442 hvn_flush_txagg(txr);
1445 txr->txr_evchimneytried.ev_count++;
1449 txr->txr_evchimney.ev_count++;
1453 if (txr->txr_agg_pktmax > 1 &&
1454 txr->txr_agg_szmax > pktsz + HVN_PKTSIZE_MIN(txr->txr_agg_align)) {
1455 txr->txr_agg_txd = txd;
1456 txr->txr_agg_pktleft = txr->txr_agg_pktmax - 1;
1457 txr->txr_agg_szleft = txr->txr_agg_szmax - pktsz;
1458 txr->txr_agg_prevpkt = chim;
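
hvn_try_txagg() (1394-1458) appends to an open aggregate while both a packet slot (txr_agg_pktleft) and enough chimney space (txr_agg_szleft) remain, rounding the previous RNDIS packet's rm_len up to txr_agg_align; once the remaining space drops to HVN_PKTSIZE_MIN() or below, the aggregate is closed. Otherwise it flushes the old aggregate and, if the negotiated limits allow, opens a new one seeded with the current descriptor. A runnable model of that bookkeeping; the limits and size floor are illustrative numbers, not the driver's negotiated values:

#include <stdio.h>

#define ROUNDUP2(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))  /* a: power of 2 */

/* Illustrative stand-ins for the limits negotiated with the host. */
#define AGG_ALIGN   8
#define AGG_SZMAX   4096
#define AGG_PKTMAX  4
#define PKTSIZE_MIN(align)  ROUNDUP2(64, (align))  /* assumed floor */

struct agg { int open, pktleft, szleft; };

/* Returns 1 if pktsz was absorbed into the chimney/aggregate path,
 * 0 if the caller must send it standalone.  (The real function
 * returns a chimney-buffer pointer rather than a flag.) */
static int try_agg(struct agg *a, int pktsz)
{
    if (a->open && a->pktleft > 0 && a->szleft > pktsz) {
        /* Append.  The driver also pads the previous packet's rm_len
         * up to the alignment with roundup2() here. */
        a->pktleft--;
        a->szleft -= pktsz;
        if (a->szleft <= PKTSIZE_MIN(AGG_ALIGN))
            a->pktleft = 0;            /* too little room left: close */
        return 1;
    }
    if (a->open)
        a->open = 0;                   /* models hvn_flush_txagg() */
    if (AGG_PKTMAX > 1 &&
        AGG_SZMAX > pktsz + PKTSIZE_MIN(AGG_ALIGN)) {
        /* Open a fresh aggregate seeded with this packet. */
        a->open = 1;
        a->pktleft = AGG_PKTMAX - 1;
        a->szleft = AGG_SZMAX - pktsz;
        return 1;
    }
    return 0;
}

int main(void)
{
    struct agg a = { 0, 0, 0 };
    printf("%d pktleft=%d szleft=%d\n", try_agg(&a, 1000), a.pktleft, a.szleft);
    printf("%d pktleft=%d szleft=%d\n", try_agg(&a, 1000), a.pktleft, a.szleft);
    return 0;
}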
1465 hvn_encap(struct hvn_tx_ring *txr, struct hvn_tx_desc *txd, struct mbuf *m,
1470 struct hvn_softc *sc = txr->txr_softc;
1516 txr->txr_evvlanfixup.ev_count++;
1521 pktsize = HVN_PKTSIZE(m, txr->txr_agg_align);
1522 if (pktsize < txr->txr_chim_size) {
1523 chim = hvn_try_txagg(txr, txd, pktsize);
1527 if (txr->txr_agg_txd != NULL)
1528 hvn_flush_txagg(txr);
1539 if (txr->txr_flags & HVN_TXR_FLAG_UDP_HASH) {
1549 memcpy(cp, &txr->txr_id, HVN_NDIS_HASH_VALUE_SIZE);
1563 txr->txr_evvlanhwtagging.ev_count++;
1566 if (m->m_pkthdr.csum_flags & txr->txr_csum_assist) {
1605 tgt_txd = (txr->txr_agg_txd != NULL) ? txr->txr_agg_txd : txd;
1615 txr->txr_sendpkt = hvn_rndis_output_chim;
1619 KASSERTMSG(txr->txr_agg_txd == NULL, "aggregating sglist txdesc");
1631 txr->txr_evdefrag.ev_count++;
1639 txr->txr_evdmafailed.ev_count++;
1661 txr->txr_sendpkt = hvn_rndis_output_sgl;
1666 txr->txr_stat_pkts++;
1667 txr->txr_stat_size += m->m_pkthdr.len;
1669 txr->txr_stat_mcasts++;
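
hvn_encap() (1465-1669) computes the RNDIS packet size with HVN_PKTSIZE() and picks one of two send paths: packets under txr_chim_size try the "chimney" (copied into a preallocated host-shared buffer, possibly aggregated), while larger packets flush any open aggregate and are DMA-mapped as a scatter/gather list, with one defrag retry (txr_evdefrag) before giving up (txr_evdmafailed). A sketch of just the path selection; the threshold is an illustrative constant:

#include <stdio.h>

#define CHIM_SIZE 2048   /* illustrative; the driver's txr_chim_size
                          * comes from NVS negotiation */

enum path { PATH_CHIM, PATH_SGL };

/* Small packets are copied into the preallocated host-shared chimney
 * buffer (no per-packet DMA mapping); larger ones are DMA-mapped and
 * sent as a scatter/gather list. */
static enum path pick_path(int pktsize)
{
    return (pktsize < CHIM_SIZE) ? PATH_CHIM : PATH_SGL;
}

int main(void)
{
    printf("512:  %s\n", pick_path(512)  == PATH_CHIM ? "chimney" : "sgl");
    printf("9000: %s\n", pick_path(9000) == PATH_CHIM ? "chimney" : "sgl");
    return 0;
}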
1675 hvn_bpf_mtap(struct hvn_tx_ring *txr, struct mbuf *m, u_int direction)
1677 struct hvn_softc *sc = txr->txr_softc;
1690 txr->txr_evvlantap.ev_count++;
1715 hvn_txpkt(struct hvn_tx_ring *txr, struct hvn_tx_desc *txd)
1717 struct hvn_softc *sc = txr->txr_softc;
1728 error = (*txr->txr_sendpkt)(txr, txd);
1730 hvn_bpf_mtap(txr, txd->txd_buf, BPF_D_OUT);
1732 hvn_bpf_mtap(txr, tmp_txd->txd_buf, BPF_D_OUT);
1734 if_statadd(ifp, if_opackets, txr->txr_stat_pkts);
1735 if_statadd(ifp, if_obytes, txr->txr_stat_size);
1736 if (txr->txr_stat_mcasts != 0)
1737 if_statadd(ifp, if_omcasts, txr->txr_stat_mcasts);
1738 txr->txr_evpkts.ev_count += txr->txr_stat_pkts;
1739 txr->txr_evsends.ev_count++;
1742 hvn_txd_put(txr, txd);
1752 hvn_txd_put(txr, txd);
1756 txr->txr_stat_pkts = 0;
1757 txr->txr_stat_size = 0;
1758 txr->txr_stat_mcasts = 0;
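
hvn_txpkt() (1715-1758) calls through txr_sendpkt, bpf-taps every mbuf in the burst, and on success folds the accumulated txr_stat_pkts/size/mcasts into the interface counters with if_statadd() before zeroing them; on failure the descriptor goes back via hvn_txd_put(). A small model of that batch accounting, with invented counter names:

#include <stdio.h>

struct batch  { unsigned pkts, bytes, mcasts; };  /* cf. txr_stat_* */
struct ifstat { unsigned opackets, obytes, omcasts; };

/* Mirrors the success path of hvn_txpkt(): one addition per counter
 * for the whole burst (the driver uses if_statadd()), then the
 * accumulator is zeroed so the next burst starts from scratch. */
static void fold_stats(struct ifstat *s, struct batch *b)
{
    s->opackets += b->pkts;
    s->obytes   += b->bytes;
    if (b->mcasts != 0)
        s->omcasts += b->mcasts;
    b->pkts = b->bytes = b->mcasts = 0;
}

int main(void)
{
    struct ifstat s = { 0, 0, 0 };
    struct batch  b = { 3, 4200, 1 };
    fold_stats(&s, &b);
    printf("opackets=%u obytes=%u omcasts=%u\n",
        s.opackets, s.obytes, s.omcasts);
    return 0;
}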
1764 hvn_txeof(struct hvn_tx_ring *txr, uint64_t tid)
1766 struct hvn_softc *sc = txr->txr_softc;
1779 txd = &txr->txr_desc[id];
1784 hvn_txd_put(txr, txd);
1927 struct hvn_tx_ring *txr;
1947 sc->sc_txr = kmem_zalloc(sizeof(*txr) * ring_cnt, KM_SLEEP);
1953 txr = &sc->sc_txr[j];
1954 txr->txr_softc = sc;
1955 txr->txr_id = j;
1957 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
1958 txr->txr_interq = pcq_create(HVN_TX_DESC, KM_SLEEP);
1960 snprintf(txr->txr_name, sizeof(txr->txr_name),
1962 evcnt_attach_dynamic(&txr->txr_evpkts, EVCNT_TYPE_MISC,
1963 NULL, txr->txr_name, "packets transmit");
1964 evcnt_attach_dynamic(&txr->txr_evsends, EVCNT_TYPE_MISC,
1965 NULL, txr->txr_name, "sends");
1966 evcnt_attach_dynamic(&txr->txr_evnodesc, EVCNT_TYPE_MISC,
1967 NULL, txr->txr_name, "descriptor shortage");
1968 evcnt_attach_dynamic(&txr->txr_evdmafailed, EVCNT_TYPE_MISC,
1969 NULL, txr->txr_name, "DMA failure");
1970 evcnt_attach_dynamic(&txr->txr_evdefrag, EVCNT_TYPE_MISC,
1971 NULL, txr->txr_name, "mbuf defraged");
1972 evcnt_attach_dynamic(&txr->txr_evpcqdrop, EVCNT_TYPE_MISC,
1973 NULL, txr->txr_name, "dropped in pcq");
1974 evcnt_attach_dynamic(&txr->txr_evtransmitdefer, EVCNT_TYPE_MISC,
1975 NULL, txr->txr_name, "deferred transmit");
1976 evcnt_attach_dynamic(&txr->txr_evflushfailed, EVCNT_TYPE_MISC,
1977 NULL, txr->txr_name, "aggregation flush failure");
1978 evcnt_attach_dynamic(&txr->txr_evchimneytried, EVCNT_TYPE_MISC,
1979 NULL, txr->txr_name, "chimney send tried");
1980 evcnt_attach_dynamic(&txr->txr_evchimney, EVCNT_TYPE_MISC,
1981 NULL, txr->txr_name, "chimney send");
1982 evcnt_attach_dynamic(&txr->txr_evvlanfixup, EVCNT_TYPE_MISC,
1983 NULL, txr->txr_name, "VLAN fixup");
1984 evcnt_attach_dynamic(&txr->txr_evvlanhwtagging, EVCNT_TYPE_MISC,
1985 NULL, txr->txr_name, "VLAN H/W tagging");
1986 evcnt_attach_dynamic(&txr->txr_evvlantap, EVCNT_TYPE_MISC,
1987 NULL, txr->txr_name, "VLAN bpf_mtap fixup");
1989 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1990 hvn_deferred_transmit, txr);
1991 if (txr->txr_si == NULL) {
1998 txr->txr_msgs = hyperv_dma_alloc(sc->sc_dmat, &txr->txr_dma,
2000 if (txr->txr_msgs == NULL) {
2006 TAILQ_INIT(&txr->txr_list);
2008 txd = &txr->txr_desc[i];
2019 seg = &txr->txr_dma.map->dm_segs[0];
2024 txd->txd_req = (void *)(txr->txr_msgs + (msgsize * i));
2026 TAILQ_INSERT_TAIL(&txr->txr_list, txd, txd_entry);
2028 txr->txr_avail = HVN_TX_DESC;
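
The allocation loop (1927-2028) builds each ring: mutex and pcq, a block of per-ring event counters named after txr_name, a softint for deferred transmit, one DMA-safe message area carved into fixed-stride per-descriptor request slots (txd_req = txr_msgs + msgsize * i), and a TAILQ freelist primed with all HVN_TX_DESC descriptors. A runnable model of the slot carving; the counts and sizes are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NDESC   4     /* illustrative; the driver uses HVN_TX_DESC */
#define MSGSIZE 128   /* illustrative per-descriptor request size */

int main(void)
{
    /* One backing allocation (hyperv_dma_alloc() in the driver). */
    uint8_t *msgs = calloc(NDESC, MSGSIZE);
    void *req[NDESC];

    if (msgs == NULL)
        return 1;

    /* Fixed-stride slots: txd_req = txr_msgs + msgsize * i. */
    for (int i = 0; i < NDESC; i++)
        req[i] = msgs + (size_t)MSGSIZE * i;

    for (int i = 0; i < NDESC; i++)
        printf("txd %d -> offset %td\n", i, (uint8_t *)req[i] - msgs);

    free(msgs);
    return 0;
}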
2041 struct hvn_tx_ring *txr;
2047 txr = &sc->sc_txr[j];
2049 mutex_enter(&txr->txr_lock);
2051 txd = &txr->txr_desc[i];
2052 hvn_txd_gc(txr, txd);
2054 mutex_exit(&txr->txr_lock);
2056 txd = &txr->txr_desc[i];
2063 if (txr->txr_msgs != NULL) {
2064 hyperv_dma_free(sc->sc_dmat, &txr->txr_dma);
2065 txr->txr_msgs = NULL;
2067 if (txr->txr_si != NULL) {
2068 softint_disestablish(txr->txr_si);
2069 txr->txr_si = NULL;
2071 if (txr->txr_interq != NULL) {
2072 hvn_tx_ring_qflush(sc, txr);
2073 pcq_destroy(txr->txr_interq);
2074 txr->txr_interq = NULL;
2077 evcnt_detach(&txr->txr_evpkts);
2078 evcnt_detach(&txr->txr_evsends);
2079 evcnt_detach(&txr->txr_evnodesc);
2080 evcnt_detach(&txr->txr_evdmafailed);
2081 evcnt_detach(&txr->txr_evdefrag);
2082 evcnt_detach(&txr->txr_evpcqdrop);
2083 evcnt_detach(&txr->txr_evtransmitdefer);
2084 evcnt_detach(&txr->txr_evflushfailed);
2085 evcnt_detach(&txr->txr_evchimneytried);
2086 evcnt_detach(&txr->txr_evchimney);
2087 evcnt_detach(&txr->txr_evvlanfixup);
2088 evcnt_detach(&txr->txr_evvlanhwtagging);
2089 evcnt_detach(&txr->txr_evvlantap);
2091 mutex_destroy(&txr->txr_lock);
2094 kmem_free(sc->sc_txr, sizeof(*txr) * sc->sc_ntxr);
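
The free path (2041-2094) undoes allocation in reverse: gc outstanding descriptors under the lock, release DMA resources, disestablish the softint, flush and destroy the pcq, detach every event counter, destroy the mutex, and finally free the ring array. Each resource is NULL-checked and the pointer cleared after release, so the function also cleans up after a partially failed attach. A minimal model of that idiom; free() stands in for the various kernel release routines:

#include <stdio.h>
#include <stdlib.h>

/* free() plays hyperv_dma_free(), softint_disestablish() and
 * pcq_destroy(); the shape of the idiom is what matters. */
struct ring { void *msgs; void *si; void *interq; };

static void ring_free(struct ring *r)
{
    if (r->msgs != NULL)   { free(r->msgs);   r->msgs = NULL; }
    if (r->si != NULL)     { free(r->si);     r->si = NULL; }
    if (r->interq != NULL) { free(r->interq); r->interq = NULL; }
}

int main(void)
{
    /* A partially constructed ring: the softint was never established. */
    struct ring r = { malloc(8), NULL, malloc(8) };

    ring_free(&r);   /* tolerates the missing member */
    ring_free(&r);   /* and is harmless to repeat */
    puts("freed");
    return 0;
}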
2107 struct hvn_tx_ring *txr;
2111 txr = &sc->sc_txr[i];
2112 txr->txr_chim_size = chim_size;
2164 struct hvn_tx_ring *txr;
2199 txr = &sc->sc_txr[i];
2200 txr->txr_caps_assist = caps_assist;
2201 txr->txr_csum_assist = csum_assist;
2206 txr = &sc->sc_txr[i];
2207 txr->txr_flags |= HVN_TXR_FLAG_UDP_HASH;
2213 hvn_txd_peek(struct hvn_tx_ring *txr)
2216 KASSERT(mutex_owned(&txr->txr_lock));
2218 return txr->txr_avail;
2222 hvn_txd_get(struct hvn_tx_ring *txr)
2226 KASSERT(mutex_owned(&txr->txr_lock));
2228 txd = TAILQ_FIRST(&txr->txr_list);
2230 TAILQ_REMOVE(&txr->txr_list, txd, txd_entry);
2231 txr->txr_avail--;
2239 hvn_txd_put(struct hvn_tx_ring *txr, struct hvn_tx_desc *txd)
2241 struct hvn_softc *sc = txr->txr_softc;
2244 KASSERT(mutex_owned(&txr->txr_lock));
2269 hvn_txd_put(txr, tmp_txd);
2290 TAILQ_INSERT_TAIL(&txr->txr_list, txd, txd_entry);
2291 txr->txr_avail++;
2292 txr->txr_oactive = 0;
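
hvn_txd_peek/get/put (2213-2292) implement the descriptor freelist: peek reports txr_avail, get pops the TAILQ head and decrements it, and put pushes the descriptor back, increments txr_avail, and clears txr_oactive, which is what lets a ring throttled by descriptor shortage resume. A runnable userland model of the pair:

#include <sys/queue.h>
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct txd { TAILQ_ENTRY(txd) entry; };
TAILQ_HEAD(txdlist, txd);

struct ring {
    struct txdlist freelist;
    int  avail;       /* cf. txr_avail / hvn_txd_peek() */
    bool oactive;
};

/* Models hvn_txd_get(): pop the freelist head. */
static struct txd *txd_get(struct ring *r)
{
    struct txd *d = TAILQ_FIRST(&r->freelist);
    if (d != NULL) {
        TAILQ_REMOVE(&r->freelist, d, entry);
        r->avail--;
    }
    return d;
}

/* Models the tail of hvn_txd_put(): besides returning the descriptor,
 * it clears oactive, which is what lets a ring throttled by
 * "descriptor shortage" transmit again. */
static void txd_put(struct ring *r, struct txd *d)
{
    TAILQ_INSERT_TAIL(&r->freelist, d, entry);
    r->avail++;
    r->oactive = false;
}

int main(void)
{
    static struct txd d0, d1;
    struct ring r = { .avail = 0, .oactive = false };

    TAILQ_INIT(&r.freelist);
    txd_put(&r, &d0);
    txd_put(&r, &d1);

    struct txd *d = txd_get(&r);
    assert(d == &d0 && r.avail == 1);
    r.oactive = true;              /* pretend the ring ran dry */
    txd_put(&r, d);
    printf("avail=%d oactive=%d\n", r.avail, (int)r.oactive);
    return 0;
}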
2296 hvn_txd_gc(struct hvn_tx_ring *txr, struct hvn_tx_desc *txd)
2304 hvn_txd_put(txr, txd);
2333 hvn_tx_ring_pending(struct hvn_tx_ring *txr)
2337 mutex_enter(&txr->txr_lock);
2338 if (hvn_txd_peek(txr) != HVN_TX_DESC)
2340 mutex_exit(&txr->txr_lock);
2346 hvn_tx_ring_qflush(struct hvn_softc *sc, struct hvn_tx_ring *txr)
2350 while ((m = pcq_get(txr->txr_interq)) != NULL)
2402 struct hvn_tx_ring *txr;
2416 txr = &sc->sc_txr[idx];
2417 txr->txr_chan = chan;
2592 struct hvn_tx_ring *txr;
2657 txr = &sc->sc_txr[i];
2659 mutex_enter(&txr->txr_lock);
2660 txr->txr_agg_szmax = size;
2661 txr->txr_agg_pktmax = pkts;
2662 txr->txr_agg_align = sc->sc_rndis_agg_align;
2663 mutex_exit(&txr->txr_lock);
2946 struct hvn_tx_ring *txr;
2953 txr = &sc->sc_txr[i];
2955 mutex_enter(&txr->txr_lock);
2956 txr->txr_suspended = 1;
2957 mutex_exit(&txr->txr_lock);
2967 while (hvn_tx_ring_pending(txr) &&
2971 hvn_nvs_intr1(txr->txr_rxr, sc->sc_tx_process_limit,
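
The suspend path (2946-2971) sets txr_suspended under the lock, which makes hvn_transmit_common() bail early, then polls hvn_tx_ring_pending() and drives completions by hand through hvn_nvs_intr1() until every descriptor has returned (the real loop is also bounded, per the && at 2967). A model of that drain shape with stand-in helpers:

#include <stdbool.h>
#include <stdio.h>

#define NDESC 4    /* illustrative ring size (HVN_TX_DESC in the driver) */

struct ring { bool suspended; int avail; };

/* Models hvn_tx_ring_pending(): quiet once every descriptor is home. */
static bool pending(const struct ring *r)
{
    return r->avail != NDESC;
}

/* Stand-in for hvn_nvs_intr1(): process one pending completion. */
static void process_completion(struct ring *r)
{
    r->avail++;
}

static void suspend_and_drain(struct ring *r)
{
    r->suspended = true;           /* new transmits now bail out early */
    while (pending(r))             /* the driver's loop is also bounded */
        process_completion(r);     /* poll completions by hand */
    printf("drained, avail=%d\n", r->avail);
}

int main(void)
{
    struct ring r = { false, 1 }; /* three sends still outstanding */
    suspend_and_drain(&r);
    return 0;
}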
3020 struct hvn_tx_ring *txr;
3024 txr = &sc->sc_txr[i];
3025 mutex_enter(&txr->txr_lock);
3026 txr->txr_suspended = 0;
3027 mutex_exit(&txr->txr_lock);
3035 struct hvn_tx_ring *txr;
3059 txr = &sc->sc_txr[i];
3060 mutex_enter(&txr->txr_lock);
3061 txr->txr_oactive = 0;
3064 if (txr->txr_id == 0)
3066 softint_schedule(txr->txr_si);
3067 mutex_exit(&txr->txr_lock);
3626 struct hvn_tx_ring *txr = rxr->rxr_txr;
3633 if ((result & HVN_HANDLE_RING_DOTX) && txr != NULL) {
3634 mutex_enter(&txr->txr_lock);
3636 if (txr->txr_id == 0) {
3639 softint_schedule(txr->txr_si);
3640 mutex_exit(&txr->txr_lock);
4885 hvn_rndis_output_sgl(struct hvn_tx_ring *txr, struct hvn_tx_desc *txd)
4887 struct hvn_softc *sc = txr->txr_softc;
4891 rv = vmbus_channel_send_sgl(txr->txr_chan, txd->txd_sgl, txd->txd_nsge,
4902 hvn_rndis_output_chim(struct hvn_tx_ring *txr, struct hvn_tx_desc *txd)
4914 rv = vmbus_channel_send(txr->txr_chan, &rndis, sizeof(rndis),
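
The two txr_sendpkt backends (4885-4914): hvn_rndis_output_sgl() passes the DMA-built scatter/gather list to vmbus_channel_send_sgl(), while hvn_rndis_output_chim() sends only a small RNDIS message with vmbus_channel_send(), the payload having already been copied into the chimney buffer. A model of the function-pointer dispatch hvn_encap() sets up; the stand-ins just record which path ran:

#include <stdio.h>

struct ring;                        /* completed below */
struct txd { const char *how; };    /* records which path ran */

typedef int (*sendpkt_fn)(struct ring *, struct txd *);

/* Stand-in for hvn_rndis_output_sgl(): the driver hands the DMA-built
 * scatter/gather list to vmbus_channel_send_sgl(). */
static int output_sgl(struct ring *r, struct txd *d)
{
    (void)r;
    d->how = "sgl";
    return 0;
}

/* Stand-in for hvn_rndis_output_chim(): the driver sends a small
 * RNDIS message via vmbus_channel_send(); the payload is already in
 * the chimney buffer. */
static int output_chim(struct ring *r, struct txd *d)
{
    (void)r;
    d->how = "chim";
    return 0;
}

struct ring { sendpkt_fn sendpkt; };

int main(void)
{
    struct ring r;
    struct txd d = { NULL };

    r.sendpkt = output_chim;        /* hvn_encap(): small, copied path */
    (*r.sendpkt)(&r, &d);
    printf("sent via %s\n", d.how);

    r.sendpkt = output_sgl;         /* hvn_encap(): large, mapped path */
    (*r.sendpkt)(&r, &d);
    printf("sent via %s\n", d.how);
    return 0;
}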