
Lines Matching defs:txr

143 ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
147 struct ixgbe_softc *sc = txr->sc;
149 IXGBE_TX_LOCK_ASSERT(txr);
156 ixgbe_drain(ifp, txr);
161 if (txr->txr_no_space)
165 if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
172 if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
198 struct tx_ring *txr = sc->tx_rings;
201 IXGBE_TX_LOCK(txr);
202 ixgbe_legacy_start_locked(ifp, txr);
203 IXGBE_TX_UNLOCK(txr);
216 struct tx_ring *txr;
251 txr = &sc->tx_rings[i];
253 if (__predict_false(!pcq_put(txr->txr_interq, m))) {
255 IXGBE_EVC_ADD(&txr->pcq_drops, 1);
260 softint_schedule(txr->txr_si);
263 if (IXGBE_TX_TRYLOCK(txr)) {
264 ixgbe_mq_start_locked(ifp, txr);
265 IXGBE_TX_UNLOCK(txr);
283 &txr->wq_cookie, curcpu());
288 softint_schedule(txr->txr_si);
301 ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
306 if (txr->sc->link_active != LINK_STATE_UP) {
311 ixgbe_drain(ifp, txr);
316 if (txr->txr_no_space)
320 while ((next = pcq_get(txr->txr_interq)) != NULL) {
321 if ((err = ixgbe_xmit(txr, next)) != 0) {
333 if ((txr->sc->feat_en & IXGBE_FEATURE_VF) &&
343 if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->sc))
344 ixgbe_txeof(txr);
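
The ixgbe_mq_start()/ixgbe_mq_start_locked() matches above show the multiqueue hand-off: the packet is placed on the per-ring pcq, and the ring is drained immediately only if the TX lock can be taken, otherwise the drain is deferred to the ring's softint or workqueue. Below is a minimal userland sketch of that pattern, not the driver's code: the fixed-size queue, the pthread mutex, and the drain_locked()/defer_drain() stubs stand in for pcq(9), the tx_mtx, ixgbe_mq_start_locked(), and softint_schedule()/workqueue_enqueue().

    /*
     * Userland sketch of the enqueue-then-trylock hand-off; all types
     * here are illustrative stand-ins, and unlike pcq(9) this little
     * queue is not itself MP-safe.
     */
    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct pkt;                              /* stand-in for struct mbuf */

    struct txq {
        pthread_mutex_t lock;                /* role of txr->tx_mtx */
        struct pkt     *slots[256];          /* role of txr->txr_interq */
        unsigned        head, tail;
        unsigned long   drops;               /* role of the pcq_drops event */
    };

    static bool
    q_put(struct txq *q, struct pkt *p)
    {
        unsigned next = (q->tail + 1) % 256;
        if (next == q->head)
            return false;                    /* full, like pcq_put() failing */
        q->slots[q->tail] = p;
        q->tail = next;
        return true;
    }

    static void
    drain_locked(struct txq *q)              /* role of ixgbe_mq_start_locked() */
    {
        (void)q;                             /* would dequeue and transmit here */
    }

    static void
    defer_drain(struct txq *q)               /* role of softint_schedule(txr->txr_si) */
    {
        (void)q;
    }

    int
    mq_start(struct txq *q, struct pkt *p)
    {
        if (!q_put(q, p)) {
            q->drops++;                      /* IXGBE_EVC_ADD(&txr->pcq_drops, 1) */
            return ENOBUFS;
        }
        if (pthread_mutex_trylock(&q->lock) == 0) {   /* IXGBE_TX_TRYLOCK */
            drain_locked(q);
            pthread_mutex_unlock(&q->lock);
        } else {
            defer_drain(q);                  /* another CPU already owns the ring */
        }
        return 0;
    }

Deferring instead of blocking keeps the enqueue path cheap in the common case where another CPU is already transmitting on the same ring.
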
358 struct tx_ring *txr = arg;
359 struct ixgbe_softc *sc = txr->sc;
362 IXGBE_TX_LOCK(txr);
363 if (pcq_peek(txr->txr_interq) != NULL)
364 ixgbe_mq_start_locked(ifp, txr);
365 IXGBE_TX_UNLOCK(txr);
376 struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);
377 struct ixgbe_softc *sc = txr->sc;
382 ixgbe_deferred_mq_start(txr);
395 struct tx_ring *txr = que->txr;
397 IXGBE_TX_LOCK(txr);
398 ixgbe_drain(ifp, txr);
399 IXGBE_TX_UNLOCK(txr);
412 ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
414 struct ixgbe_softc *sc = txr->sc;
436 first = txr->next_avail_desc;
437 txbuf = &txr->tx_buffers[first];
444 error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head,
452 txr->q_eagain_tx_dma_setup++;
455 txr->q_enomem_tx_dma_setup++;
465 txr->q_efbig_tx_dma_setup++;
468 txr->q_mbuf_defrag_failed++;
474 txr->q_efbig2_tx_dma_setup++;
478 txr->q_einval_tx_dma_setup++;
481 txr->q_other_tx_dma_setup++;
487 if (txr->tx_avail < (map->dm_nsegs + 2)) {
488 txr->txr_no_space = true;
489 IXGBE_EVC_ADD(&txr->no_desc_avail, 1);
490 ixgbe_dmamap_unload(txr->txtag, txbuf->map);
498 error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
506 (txr->atr_sample) && (!sc->fdir_reinit)) {
507 ++txr->atr_count;
508 if (txr->atr_count >= atr_sample_rate) {
509 ixgbe_atr(txr, m_head);
510 txr->atr_count = 0;
516 i = txr->next_avail_desc;
521 txbuf = &txr->tx_buffers[i];
522 txd = &txr->tx_base[i];
530 if (++i == txr->num_desc)
535 txr->tx_avail -= map->dm_nsegs;
536 txr->next_avail_desc = i;
545 txr->tx_buffers[first].map = txbuf->map;
547 bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
551 txbuf = &txr->tx_buffers[first];
554 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
560 IXGBE_EVC_ADD(&txr->total_packets, 1);
561 IXGBE_WRITE_REG(&sc->hw, txr->tail, i);
570 if (txr->busy == 0)
571 txr->busy = 1;
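
The ixgbe_xmit() matches above do the per-packet descriptor bookkeeping: refuse the packet (and set txr_no_space) unless the DMA map's segment count plus two spare slots fits in tx_avail, write one descriptor per segment while wrapping the index at num_desc, then charge the segments against tx_avail and record the new next_avail_desc before the tail register is written. A simplified sketch of just that index arithmetic, with illustrative types rather than the driver's; the spare slots presumably leave room for the optional context descriptor set up by ixgbe_tx_ctx_setup().

    #include <stdbool.h>
    #include <stdint.h>

    #define RING_MIN_FREE 2                 /* the "+ 2" in the tx_avail check */

    struct desc { uint64_t addr; uint32_t cmd; uint32_t len; };
    struct seg  { uint64_t addr; uint32_t len; };

    struct ring {
        struct desc *base;                  /* txr->tx_base */
        unsigned     num_desc;              /* txr->num_desc */
        unsigned     next_avail;            /* txr->next_avail_desc */
        unsigned     avail;                 /* txr->tx_avail */
        bool         no_space;              /* txr->txr_no_space */
    };

    /* Returns true and advances the ring, or false if there is no room. */
    bool
    ring_enqueue(struct ring *r, const struct seg *segs, unsigned nsegs)
    {
        if (r->avail < nsegs + RING_MIN_FREE) {
            r->no_space = true;             /* cleared again by the txeof path */
            return false;
        }

        unsigned i = r->next_avail;
        for (unsigned s = 0; s < nsegs; s++) {
            r->base[i].addr = segs[s].addr;
            r->base[i].len  = segs[s].len;
            r->base[i].cmd  = 0;            /* real code ORs in cmd_type_len */
            if (++i == r->num_desc)         /* wrap at the end of the ring */
                i = 0;
        }

        r->avail      -= nsegs;
        r->next_avail  = i;                 /* real code then writes txr->tail */
        return true;
    }
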
580 ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr)
584 IXGBE_TX_LOCK_ASSERT(txr);
586 if (txr->me == 0) {
594 while ((m = pcq_get(txr->txr_interq)) != NULL) {
596 IXGBE_EVC_ADD(&txr->pcq_drops, 1);
608 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
610 struct ixgbe_softc *sc = txr->sc;
626 &txr->txtag);
632 txr->tx_buffers = kmem_zalloc(sizeof(struct ixgbe_tx_buf) *
636 txbuf = txr->tx_buffers;
638 error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
652 ixgbe_free_transmit_buffers(txr);
661 ixgbe_setup_transmit_ring(struct tx_ring *txr)
663 struct ixgbe_softc *sc = txr->sc;
671 IXGBE_TX_LOCK(txr);
679 slot = netmap_reset(na, NR_TX, txr->me, 0);
683 bzero((void *)txr->tx_base,
686 txr->next_avail_desc = 0;
687 txr->next_to_clean = 0;
690 txbuf = txr->tx_buffers;
691 for (int i = 0; i < txr->num_desc; i++, txbuf++) {
693 bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
696 ixgbe_dmamap_unload(txr->txtag, txbuf->map);
713 int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
714 netmap_load_map(na, txr->txtag,
726 txr->atr_sample = atr_sample_rate;
730 txr->tx_avail = sc->num_tx_desc;
732 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
734 IXGBE_TX_UNLOCK(txr);
743 struct tx_ring *txr = sc->tx_rings;
745 for (int i = 0; i < sc->num_queues; i++, txr++)
746 ixgbe_setup_transmit_ring(txr);
757 struct tx_ring *txr = sc->tx_rings;
759 for (int i = 0; i < sc->num_queues; i++, txr++) {
760 ixgbe_free_transmit_buffers(txr);
761 ixgbe_dma_free(sc, &txr->txdma);
762 IXGBE_TX_LOCK_DESTROY(txr);
773 ixgbe_free_transmit_buffers(struct tx_ring *txr)
775 struct ixgbe_softc *sc = txr->sc;
781 if (txr->tx_buffers == NULL)
784 tx_buffer = txr->tx_buffers;
787 bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
790 ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
794 ixgbe_dmamap_destroy(txr->txtag,
799 ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
800 ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
804 if (txr->txr_interq != NULL) {
807 while ((m = pcq_get(txr->txr_interq)) != NULL)
809 pcq_destroy(txr->txr_interq);
811 if (txr->tx_buffers != NULL) {
812 kmem_free(txr->tx_buffers,
814 txr->tx_buffers = NULL;
816 if (txr->txtag != NULL) {
817 ixgbe_dma_tag_destroy(txr->txtag);
818 txr->txtag = NULL;
828 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
831 struct ixgbe_softc *sc = txr->sc;
842 int ctxd = txr->next_avail_desc;
852 int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);
873 } else if (!(txr->sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
968 TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
977 if (++ctxd == txr->num_desc)
979 txr->next_avail_desc = ctxd;
980 --txr->tx_avail;
992 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
1059 ctxd = txr->next_avail_desc;
1060 TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
1089 if (++ctxd == txr->num_desc)
1092 txr->tx_avail--;
1093 txr->next_avail_desc = ctxd;
1097 IXGBE_EVC_ADD(&txr->tso_tx, 1);
1111 ixgbe_txeof(struct tx_ring *txr)
1113 struct ixgbe_softc *sc = txr->sc;
1121 KASSERT(mutex_owned(&txr->tx_mtx));
1127 struct netmap_kring *kring = na->tx_rings[txr->me];
1128 txd = txr->tx_base;
1129 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1147 netmap_tx_irq(ifp, txr->me);
1153 if (txr->tx_avail == txr->num_desc) {
1154 txr->busy = 0;
1159 work = txr->next_to_clean;
1160 buf = &txr->tx_buffers[work];
1161 txd = &txr->tx_base[work];
1162 work -= txr->num_desc; /* The distance to ring end */
1163 avail = txr->tx_avail;
1164 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1176 txr->bytes += buf->m_head->m_pkthdr.len;
1177 bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
1180 ixgbe_dmamap_unload(txr->txtag, buf->map);
1194 work -= txr->num_desc;
1195 buf = txr->tx_buffers;
1196 txd = txr->tx_base;
1199 txr->bytes +=
1201 bus_dmamap_sync(txr->txtag->dt_dmat,
1205 ixgbe_dmamap_unload(txr->txtag,
1222 work -= txr->num_desc;
1223 buf = txr->tx_buffers;
1224 txd = txr->tx_base;
1229 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1232 work += txr->num_desc;
1233 txr->next_to_clean = work;
1235 txr->tx_avail = avail;
1236 txr->txr_no_space = false;
1237 txr->packets += processed;
1249 if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
1250 ++txr->busy;
1256 txr->busy = 1;
1258 if (txr->tx_avail == txr->num_desc)
1259 txr->busy = 0;
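
The ixgbe_txeof() matches above reclaim completed descriptors. The loop uses a small index trick: work is biased downward by num_desc ("the distance to ring end"), so the wrap check reduces to the biased index reaching zero after the increment, and the real next_to_clean comes back by adding num_desc at the end. A standalone sketch of just that arithmetic, with descriptor inspection and DMA unmapping elided:

    #include <stdio.h>

    struct clean_state {
        int next_to_clean;                  /* txr->next_to_clean */
        int num_desc;                       /* txr->num_desc */
        int avail;                          /* txr->tx_avail */
    };

    /* Reclaim 'done' completed descriptors, honouring ring wrap. */
    static void
    reclaim(struct clean_state *st, int done)
    {
        int work = st->next_to_clean;
        work -= st->num_desc;               /* now in [-num_desc, 0) */

        while (done-- > 0) {
            /* ... real code checks the DD bit, unmaps and frees the mbuf ... */
            st->avail++;
            if (++work == 0)                /* reached the end of the ring */
                work -= st->num_desc;       /* wrap: also resets buf/txd pointers */
        }

        work += st->num_desc;               /* back to a real ring index */
        st->next_to_clean = work;
    }

    int
    main(void)
    {
        struct clean_state st = { .next_to_clean = 1022, .num_desc = 1024, .avail = 0 };
        reclaim(&st, 5);                    /* wraps past the end of the ring */
        printf("next_to_clean=%d avail=%d\n", st.next_to_clean, st.avail);
        return 0;
    }

Starting at next_to_clean = 1022 on a 1024-entry ring, reclaiming five descriptors covers 1022, 1023, 0, 1, 2 and leaves next_to_clean at 3 with avail raised by 5.
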
2360 struct tx_ring *txr;
2388 txr = &sc->tx_rings[i];
2389 txr->sc = sc;
2390 txr->txr_interq = NULL;
2393 txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
2396 txr->me = i;
2398 txr->num_desc = sc->num_tx_desc;
2401 mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
2403 if (ixgbe_dma_malloc(sc, tsize, &txr->txdma,
2410 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2411 bzero((void *)txr->tx_base, tsize);
2414 if (ixgbe_allocate_transmit_buffers(txr)) {
2422 txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP);
2423 if (txr->txr_interq == NULL) {
2479 que->txr = &sc->tx_rings[i];
2492 for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
2493 ixgbe_dma_free(sc, &txr->txdma);
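
The ixgbe_allocate_queues() matches above set up per-ring state and, on failure, walk back over only the rings that were already set up (the txconf counter) to release their descriptor DMA memory. A compact sketch of that allocate-then-unwind convention; alloc_ring_dma()/free_ring_dma() are illustrative stand-ins for ixgbe_dma_malloc()/ixgbe_dma_free().

    #include <stdlib.h>

    struct ring_dma { void *vaddr; };
    struct ring     { struct ring_dma dma; };

    static int
    alloc_ring_dma(struct ring_dma *d, size_t sz)
    {
        d->vaddr = calloc(1, sz);           /* stands in for ixgbe_dma_malloc() */
        return d->vaddr == NULL ? -1 : 0;
    }

    static void
    free_ring_dma(struct ring_dma *d)
    {
        free(d->vaddr);                     /* stands in for ixgbe_dma_free() */
        d->vaddr = NULL;
    }

    int
    setup_rings(struct ring *rings, int nrings, size_t tsize)
    {
        int txconf = 0;                     /* how many rings were fully set up */

        for (int i = 0; i < nrings; i++) {
            if (alloc_ring_dma(&rings[i].dma, tsize) != 0)
                goto err_tx_desc;
            txconf++;
        }
        return 0;

    err_tx_desc:
        /* Unwind exactly the rings that succeeded, mirroring
         * "for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)". */
        for (struct ring *r = rings; txconf > 0; r++, txconf--)
            free_ring_dma(&r->dma);
        return -1;
    }
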