
Lines Matching defs:tx_ring

179 static int	ena_check_and_collapse_mbuf(struct ena_ring *tx_ring,
546 txr = &adapter->tx_ring[i];
587 que->tx_ring = txr;
600 struct ena_ring *txr = &adapter->tx_ring[qid];
681 struct ena_ring *tx_ring = que->tx_ring;
684 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
685 tx_ring->tx_buffer_info = kmem_zalloc(size, KM_SLEEP);
687 size = sizeof(uint16_t) * tx_ring->ring_size;
688 tx_ring->free_tx_ids = kmem_zalloc(size, KM_SLEEP);
691 for (i = 0; i < tx_ring->ring_size; i++)
692 tx_ring->free_tx_ids[i] = i;
695 ena_reset_counters((struct evcnt *)&tx_ring->tx_stats,
696 sizeof(tx_ring->tx_stats),
699 tx_ring->next_to_use = 0;
700 tx_ring->next_to_clean = 0;
702 tx_ring->br = pcq_create(ENA_DEFAULT_RING_SIZE, KM_SLEEP);
705 for (i = 0; i < tx_ring->ring_size; i++) {
709 &tx_ring->tx_buffer_info[i].map);
718 int rc = workqueue_create(&tx_ring->enqueue_tq, "ena_tx_enq",
719 ena_deferred_mq_start, tx_ring, 0, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
723 i = tx_ring->ring_size;
731 tx_ring->tx_buffer_info[i].map);
733 size = sizeof(uint16_t) * tx_ring->ring_size;
734 kmem_free(tx_ring->free_tx_ids, size);
735 tx_ring->free_tx_ids = NULL;
736 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
737 kmem_free(tx_ring->tx_buffer_info, size);
738 tx_ring->tx_buffer_info = NULL;
753 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
756 workqueue_wait(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
757 workqueue_destroy(tx_ring->enqueue_tq);
758 tx_ring->enqueue_tq = NULL;
761 while ((m = pcq_get(tx_ring->br)) != NULL)
763 pcq_destroy(tx_ring->br);
764 tx_ring->br = NULL;
767 for (int i = 0; i < tx_ring->ring_size; i++) {
769 tx_ring->tx_buffer_info[i].map);
771 tx_ring->tx_buffer_info[i].map);
772 m_freem(tx_ring->tx_buffer_info[i].mbuf);
773 tx_ring->tx_buffer_info[i].mbuf = NULL;
777 kmem_free(tx_ring->tx_buffer_info,
778 sizeof(struct ena_tx_buffer) * tx_ring->ring_size);
779 tx_ring->tx_buffer_info = NULL;
781 kmem_free(tx_ring->free_tx_ids, sizeof(uint16_t) * tx_ring->ring_size);
782 tx_ring->free_tx_ids = NULL;
1211 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
1213 for (int i = 0; i < tx_ring->ring_size; i++) {
1214 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1276 validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id)
1278 struct ena_adapter *adapter = tx_ring->adapter;
1280 KASSERT(ENA_RING_MTX_OWNED(tx_ring));
1282 if (likely(req_id < tx_ring->ring_size)) {
1283 tx_info = &tx_ring->tx_buffer_info[req_id];
1294 counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
1324 ring = &adapter->tx_ring[i];
1381 * @tx_ring: ring for which we want to clean packets
1391 ena_tx_cleanup(struct ena_ring *tx_ring)
1404 KASSERT(ENA_RING_MTX_OWNED(tx_ring));
1406 adapter = tx_ring->que->adapter;
1407 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
1409 next_to_clean = tx_ring->next_to_clean;
1419 rc = validate_tx_req_id(tx_ring, req_id);
1423 tx_info = &tx_ring->tx_buffer_info[req_id];
1436 tx_ring->qid, mbuf);
1442 tx_ring->free_tx_ids[next_to_clean] = req_id;
1444 tx_ring->ring_size);
1449 tx_ring->next_to_clean = next_to_clean;
1461 tx_ring->qid, work_done);
1465 tx_ring->next_to_clean = next_to_clean;
1471 if (atomic_cas_uint(&tx_ring->task_pending, 0, 1) == 0)
1472 workqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL);
1866 struct ena_ring *tx_ring = que->tx_ring;
1899 ENA_RING_MTX_LOCK(tx_ring);
1900 if (tx_ring->stopping) {
1901 ENA_RING_MTX_UNLOCK(tx_ring);
1904 txc = ena_tx_cleanup(tx_ring);
1905 ENA_RING_MTX_UNLOCK(tx_ring);
2612 ring = adapter->que[i].tx_ring;
2765 ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
2771 adapter = tx_ring->adapter;
2777 counter_u64_add(tx_ring->tx_stats.collapse, 1);
2782 counter_u64_add(tx_ring->tx_stats.collapse_err, 1);
2793 ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
2809 KASSERT(ENA_RING_MTX_OWNED(tx_ring));
2811 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
2812 adapter = tx_ring->que->adapter;
2816 rc = ena_check_and_collapse_mbuf(tx_ring, mbuf);
2823 next_to_use = tx_ring->next_to_use;
2824 req_id = tx_ring->free_tx_ids[next_to_use];
2825 tx_info = &tx_ring->tx_buffer_info[req_id];
2851 counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1);
2879 counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
2884 counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
2885 counter_u64_add_protected(tx_ring->tx_stats.bytes,
2897 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2898 tx_ring->ring_size);
2913 ena_start_xmit(struct ena_ring *tx_ring)
2916 struct ena_adapter *adapter = tx_ring->adapter;
2923 KASSERT(ENA_RING_MTX_OWNED(tx_ring));
2932 ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
2941 if ((mbuf = pcq_get(tx_ring->br)) == NULL)
2948 if (likely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) == 0)) {
2980 counter_u64_add(tx_ring->tx_stats.doorbells, 1);
2991 counter_u64_add(tx_ring->tx_stats.doorbells, 1);
2995 ena_tx_cleanup(tx_ring);
3001 struct ena_ring *tx_ring = (struct ena_ring *)arg;
3002 struct ifnet *ifp = tx_ring->adapter->ifp;
3004 atomic_swap_uint(&tx_ring->task_pending, 0);
3006 while (pcq_peek(tx_ring->br) != NULL &&
3008 ENA_RING_MTX_LOCK(tx_ring);
3009 if (tx_ring->stopping) {
3010 ENA_RING_MTX_UNLOCK(tx_ring);
3013 ena_start_xmit(tx_ring);
3014 ENA_RING_MTX_UNLOCK(tx_ring);
3022 struct ena_ring *tx_ring;
3038 tx_ring = &adapter->tx_ring[i];
3041 is_drbr_empty = pcq_peek(tx_ring->br);
3042 ret = pcq_put(tx_ring->br, m);
3045 counter_u64_add(tx_ring->tx_stats.pcq_drops, 1);
3046 if (atomic_cas_uint(&tx_ring->task_pending, 0, 1) == 0)
3047 workqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
3052 if ((is_drbr_empty != NULL) && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) {
3053 if (!tx_ring->stopping)
3054 ena_start_xmit(tx_ring);
3055 ENA_RING_MTX_UNLOCK(tx_ring);
3057 if (atomic_cas_uint(&tx_ring->task_pending, 0, 1) == 0)
3058 workqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
3070 struct ena_ring *tx_ring = adapter->tx_ring;
3073 for(i = 0; i < adapter->num_queues; ++i, ++tx_ring)
3074 if (!drbr_empty(ifp, tx_ring->br)) {
3075 ENA_RING_MTX_LOCK(tx_ring);
3076 drbr_flush(ifp, tx_ring->br);
3077 ENA_RING_MTX_UNLOCK(tx_ring);
3413 struct ena_ring *tx_ring)
3422 for (i = 0; i < tx_ring->ring_size; i++) {
3423 tx_buf = &tx_ring->tx_buffer_info[i];
3437 tx_ring->qid, i);
3441 counter_u64_add(tx_ring->tx_stats.missing_tx_comp, 1);
3471 struct ena_ring *tx_ring;
3489 tx_ring = &adapter->tx_ring[i];
3491 rc = check_missing_comp_in_queue(adapter, tx_ring);