Lines Matching defs:rx_ring
547 rxr = &adapter->rx_ring[i];
588 que->rx_ring = rxr;
601 struct ena_ring *rxr = &adapter->rx_ring[qid];
830 validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
832 if (likely(req_id < rx_ring->ring_size))
835 device_printf(rx_ring->adapter->pdev, "Invalid rx req_id: %hu\n",
837 counter_u64_add(rx_ring->rx_stats.bad_req_id, 1);
840 rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
841 ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, rx_ring->adapter);
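
The fragments at source lines 830-841 are the body of validate_rx_req_id(): an out-of-range request id is logged, counted in rx_stats.bad_req_id, and answered with a device reset request. Below is a minimal user-space model of that bounds check; the struct and field names are stand-ins of mine, not the driver's types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver state touched by the fragments above. */
struct rx_ring_model {
        uint16_t ring_size;      /* number of descriptors in the ring */
        uint64_t bad_req_id;     /* models rx_stats.bad_req_id */
        bool     trigger_reset;  /* models ENA_FLAG_TRIGGER_RESET */
};

/* Returns 0 for a valid id; otherwise counts the error and asks for a reset. */
static int
validate_rx_req_id_model(struct rx_ring_model *rx, uint16_t req_id)
{
        if (req_id < rx->ring_size)
                return 0;

        printf("Invalid rx req_id: %hu\n", req_id);
        rx->bad_req_id++;
        rx->trigger_reset = true;       /* a device reset is the only recovery */
        return -1;
}

int
main(void)
{
        struct rx_ring_model rx = { .ring_size = 256 };

        printf("%d\n", validate_rx_req_id_model(&rx, 10));     /* prints 0 */
        printf("%d\n", validate_rx_req_id_model(&rx, 1000));   /* prints -1 */
        return 0;
}
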
857 struct ena_ring *rx_ring = que->rx_ring;
860 size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
868 rx_ring->rx_buffer_info = kmem_zalloc(size, KM_SLEEP);
870 size = sizeof(uint16_t) * rx_ring->ring_size;
871 rx_ring->free_rx_ids = kmem_zalloc(size, KM_SLEEP);
873 for (i = 0; i < rx_ring->ring_size; i++)
874 rx_ring->free_rx_ids[i] = i;
877 ena_reset_counters((struct evcnt *)&rx_ring->rx_stats,
878 sizeof(rx_ring->rx_stats),
881 rx_ring->next_to_clean = 0;
882 rx_ring->next_to_use = 0;
885 for (i = 0; i < rx_ring->ring_size; i++) {
889 &(rx_ring->rx_buffer_info[i].map));
900 int err = tcp_lro_init(&rx_ring->lro);
907 rx_ring->lro.ifp = adapter->ifp;
913 int rc = workqueue_create(&rx_ring->cleanup_tq, "ena_rx_comp",
926 rx_ring->rx_buffer_info[i].map);
929 size = sizeof(uint16_t) * rx_ring->ring_size;
930 kmem_free(rx_ring->free_rx_ids, size);
931 rx_ring->free_rx_ids = NULL;
932 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
933 kmem_free(rx_ring->rx_buffer_info, size);
934 rx_ring->rx_buffer_info = NULL;
948 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
950 workqueue_wait(rx_ring->cleanup_tq, &rx_ring->cleanup_task);
951 workqueue_destroy(rx_ring->cleanup_tq);
952 rx_ring->cleanup_tq = NULL;
955 for (int i = 0; i < rx_ring->ring_size; i++) {
957 rx_ring->rx_buffer_info[i].map);
959 rx_ring->rx_buffer_info[i].map);
960 m_freem(rx_ring->rx_buffer_info[i].mbuf);
961 rx_ring->rx_buffer_info[i].mbuf = NULL;
966 tcp_lro_free(&rx_ring->lro);
970 kmem_free(rx_ring->rx_buffer_info,
971 sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1));
972 rx_ring->rx_buffer_info = NULL;
974 kmem_free(rx_ring->free_rx_ids, sizeof(uint16_t) * rx_ring->ring_size);
975 rx_ring->free_rx_ids = NULL;
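
Source lines 857-934 and 948-975 show the per-queue bookkeeping that ena_setup_rx_resources() allocates and ena_free_rx_resources() tears down: an rx_buffer_info array plus a free_rx_ids ring that starts out as the identity mapping. (The free paths pass ring_size + 1 rx_buffer_info elements, so the matching allocation presumably reserves one spare element on a line this search did not match.) The sketch below models only that bookkeeping, with calloc/free standing in for kmem_zalloc/kmem_free and the DMA map, LRO, and workqueue setup omitted.

#include <stdint.h>
#include <stdlib.h>

struct rx_buffer_model {
        void *mbuf;     /* stands in for the attached mbuf */
        void *map;      /* stands in for the bus_dmamap */
};

struct rx_bookkeeping {
        uint16_t ring_size;
        struct rx_buffer_model *rx_buffer_info; /* ring_size + 1 entries */
        uint16_t *free_rx_ids;                  /* ring_size entries */
};

static int
rx_bookkeeping_setup(struct rx_bookkeeping *rx, uint16_t ring_size)
{
        rx->ring_size = ring_size;

        /* One spare rx_buffer_info entry, mirroring the ring_size + 1 free. */
        rx->rx_buffer_info = calloc(ring_size + 1, sizeof(*rx->rx_buffer_info));
        if (rx->rx_buffer_info == NULL)
                return -1;

        rx->free_rx_ids = calloc(ring_size, sizeof(*rx->free_rx_ids));
        if (rx->free_rx_ids == NULL) {
                free(rx->rx_buffer_info);
                rx->rx_buffer_info = NULL;
                return -1;
        }

        /* Initially every request id is free, in order. */
        for (uint16_t i = 0; i < ring_size; i++)
                rx->free_rx_ids[i] = i;

        return 0;
}

static void
rx_bookkeeping_free(struct rx_bookkeeping *rx)
{
        free(rx->free_rx_ids);
        rx->free_rx_ids = NULL;
        free(rx->rx_buffer_info);
        rx->rx_buffer_info = NULL;
}

int
main(void)
{
        struct rx_bookkeeping rx;

        if (rx_bookkeeping_setup(&rx, 256) != 0)
                return 1;
        rx_bookkeeping_free(&rx);
        return 0;
}

kmem_free() must be given the same size that was passed to kmem_zalloc(), which is why both the error path at lines 929-933 and the teardown at lines 970-974 recompute the sizes instead of storing them.
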
1023 struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
1035 counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
1052 counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
1077 ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
1093 * @rx_ring: the ring which we want to feed with free descriptors
1098 ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
1100 struct ena_adapter *adapter = rx_ring->adapter;
1106 rx_ring->qid);
1108 next_to_use = rx_ring->next_to_use;
1116 req_id = rx_ring->free_rx_ids[next_to_use];
1117 rc = validate_rx_req_id(rx_ring, req_id);
1121 rx_info = &rx_ring->rx_buffer_info[req_id];
1123 rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
1127 rx_ring->qid);
1130 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1135 rx_ring->qid);
1139 rx_ring->ring_size);
1143 counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
1146 rx_ring->qid, i, num);
1151 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1153 rx_ring->next_to_use = next_to_use;
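
Source lines 1098-1153 outline ena_refill_rx_bufs(): for each requested buffer, take the next request id from free_rx_ids[next_to_use], validate it, attach a freshly allocated mbuf to rx_buffer_info[req_id], submit the descriptor with ena_com_add_single_rx_desc(), and advance next_to_use modulo the ring size; a short refill bumps refil_partial and a single doorbell covers the whole batch. The condensed user-space model below follows that shape; the *_stub helpers replace mbuf allocation, descriptor submission, and the doorbell, and error handling is simplified.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_IDX_NEXT(idx, size)        (((idx) + 1) % (size))

struct refill_ring_model {
        uint16_t ring_size;
        uint16_t next_to_use;
        uint16_t *free_rx_ids;          /* identity-initialized id ring */
        void **rx_buffer_info;          /* buffer attached per request id */
        uint64_t refil_partial;         /* models rx_stats.refil_partial */
};

/* Stand-ins for mbuf allocation, descriptor submission and the doorbell. */
static void *alloc_rx_buf_stub(void) { return malloc(2048); }
static int add_rx_desc_stub(uint16_t req_id) { (void)req_id; return 0; }
static void write_doorbell_stub(void) { }

static uint32_t
refill_rx_bufs_model(struct refill_ring_model *rx, uint32_t num)
{
        uint16_t next_to_use = rx->next_to_use;
        uint32_t i;

        for (i = 0; i < num; i++) {
                uint16_t req_id = rx->free_rx_ids[next_to_use];

                if (req_id >= rx->ring_size)    /* validate_rx_req_id() */
                        break;

                void *buf = alloc_rx_buf_stub();
                if (buf == NULL)                /* buffer allocation failed */
                        break;
                rx->rx_buffer_info[req_id] = buf;

                if (add_rx_desc_stub(req_id) != 0) {
                        free(buf);
                        rx->rx_buffer_info[req_id] = NULL;
                        break;
                }

                next_to_use = RING_IDX_NEXT(next_to_use, rx->ring_size);
        }

        if (i < num) {
                rx->refil_partial++;
                printf("refilled only %u of %u buffers\n",
                    (unsigned)i, (unsigned)num);
        }

        write_doorbell_stub();          /* one doorbell for the whole batch */
        rx->next_to_use = next_to_use;
        return i;
}

int
main(void)
{
        uint16_t ids[8];
        void *bufs[8] = { NULL };
        struct refill_ring_model rx = {
                .ring_size = 8, .free_rx_ids = ids, .rx_buffer_info = bufs,
        };

        for (uint16_t i = 0; i < rx.ring_size; i++)
                ids[i] = i;

        printf("refilled %u buffers\n", (unsigned)refill_rx_bufs_model(&rx, 7));
        for (int i = 0; i < 8; i++)
                free(bufs[i]);
        return 0;
}
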
1160 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1163 for (i = 0; i < rx_ring->ring_size; i++) {
1164 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1167 ena_free_rx_mbuf(adapter, rx_ring, rx_info);
1179 struct ena_ring *rx_ring;
1183 rx_ring = &adapter->rx_ring[i];
1184 bufs_num = rx_ring->ring_size - 1;
1185 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
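
Source line 1184 posts only ring_size - 1 buffers per ring, and line 3533 treats ring_size - 1 free descriptors as "completely empty", which is consistent with the common one-slot-free ring convention in which a fully used ring would otherwise be indistinguishable from an empty one. The toy program below is my own illustration of that convention, not driver code.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE       8u      /* deliberately tiny ring for the demo */

/* Head/tail ring that keeps one slot free so full and empty are distinct. */
static int
ring_is_full(uint16_t head, uint16_t tail)
{
        return (uint16_t)((tail + 1) % RING_SIZE) == head;
}

int
main(void)
{
        uint16_t head = 0, tail = 0, posted = 0;

        while (!ring_is_full(head, tail)) {
                tail = (tail + 1) % RING_SIZE;
                posted++;
        }
        /* A ring of RING_SIZE slots accepts at most RING_SIZE - 1 buffers. */
        printf("posted %u of %u slots\n", (unsigned)posted, RING_SIZE);
        return 0;
}
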
1353 ring = &adapter->rx_ring[i];
1479 ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
1482 struct ena_adapter *adapter = rx_ring->adapter;
1525 mbuf->m_pkthdr.flowid = rx_ring->qid;
1533 * @rx_ring: ring for which we want to clean packets
1540 ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
1550 adapter = rx_ring->adapter;
1554 rx_info = &rx_ring->rx_buffer_info[req_id];
1567 m_set_rcvif(mbuf, rx_ring->que->adapter->ifp);
1571 ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf);
1578 bus_dmamap_unload(rx_ring->adapter->sc_dmat, rx_info->map);
1581 rx_ring->free_rx_ids[ntc] = req_id;
1582 ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size);
1592 rx_info = &rx_ring->rx_buffer_info[req_id];
1611 counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
1620 bus_dmamap_unload(rx_ring->adapter->sc_dmat, rx_info->map);
1624 rx_ring->free_rx_ids[ntc] = req_id;
1625 ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size);
1637 ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
1647 counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
1659 counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
1673 counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
1685 ena_rx_cleanup(struct ena_ring *rx_ring)
1702 adapter = rx_ring->que->adapter;
1704 qid = rx_ring->que->id;
1708 next_to_clean = rx_ring->next_to_clean;
1713 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1726 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
1730 mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs,
1736 rx_ring->free_rx_ids[next_to_clean] =
1737 rx_ring->ena_bufs[i].req_id;
1740 rx_ring->ring_size);
1752 ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf);
1756 counter_u64_add_protected(rx_ring->rx_stats.bytes,
1776 if ((rx_ring->lro.lro_cnt != 0) &&
1777 (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0))
1788 counter_u64_add_protected(rx_ring->rx_stats.cnt, 1);
1793 rx_ring->next_to_clean = next_to_clean;
1796 refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;
1799 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
1800 ena_refill_rx_bufs(rx_ring, refill_required);
1804 tcp_lro_flush_all(&rx_ring->lro);
1810 counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1);
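
Source lines 1685-1810 are the completion path, ena_rx_cleanup(): completed descriptors are walked from next_to_clean, their request ids are recycled into free_rx_ids, byte and packet counters are updated, and once the number of descriptors the device could accept exceeds ring_size / ENA_RX_REFILL_THRESH_DIVIDER the ring is refilled in one batch (line 1796). The snippet below models only that refill-threshold decision; the divider value 8 is an assumption for illustration, not necessarily the driver's constant.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed value for illustration; the driver defines its own divider. */
#define RX_REFILL_THRESH_DIVIDER        8

/*
 * Refill only once enough descriptors have been consumed to make the
 * batched allocation and single doorbell write worthwhile.
 */
static bool
should_refill(uint32_t refill_required, uint16_t ring_size)
{
        uint32_t refill_threshold = ring_size / RX_REFILL_THRESH_DIVIDER;

        return refill_required > refill_threshold;
}

int
main(void)
{
        uint16_t ring_size = 1024;

        printf("%d\n", should_refill(64, ring_size));   /* 0: below 128 */
        printf("%d\n", should_refill(200, ring_size));  /* 1: above 128 */
        return 0;
}
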
1844 struct ena_ring *rx_ring = queue->rx_ring;
1846 ENA_RING_MTX_LOCK(rx_ring);
1847 if (unlikely(rx_ring->stopping)) {
1848 ENA_RING_MTX_UNLOCK(rx_ring);
1852 if (atomic_cas_uint(&rx_ring->task_pending, 0, 1) == 0)
1853 workqueue_enqueue(rx_ring->cleanup_tq, &rx_ring->cleanup_task,
1856 ENA_RING_MTX_UNLOCK(rx_ring);
1867 struct ena_ring *rx_ring = que->rx_ring;
1873 atomic_swap_uint(&rx_ring->task_pending, 0);
1890 ENA_RING_MTX_LOCK(rx_ring);
1891 if (rx_ring->stopping) {
1892 ENA_RING_MTX_UNLOCK(rx_ring);
1895 ENA_RING_MTX_UNLOCK(rx_ring);
1896 rxc = ena_rx_cleanup(rx_ring);
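
Source lines 1844-1896 show how the interrupt handler and the per-ring workqueue task coordinate: the handler enqueues cleanup work only when it wins atomic_cas_uint(&task_pending, 0, 1), and the task clears the flag with atomic_swap_uint(..., 0) before draining, so at most one cleanup item is ever outstanding per ring. The model below reproduces that single-outstanding-work guard with C11 atomics; the enqueue itself is a stub.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint task_pending;

/* Stand-in for workqueue_enqueue(). */
static void
enqueue_cleanup_stub(void)
{
        printf("cleanup work enqueued\n");
}

/* Interrupt side: enqueue only if no cleanup work is already pending. */
static void
rx_intr_model(void)
{
        unsigned expected = 0;

        if (atomic_compare_exchange_strong(&task_pending, &expected, 1))
                enqueue_cleanup_stub();
        else
                printf("cleanup already pending, skipping enqueue\n");
}

/* Workqueue side: clear the flag first, then drain completions. */
static void
cleanup_task_model(void)
{
        atomic_exchange(&task_pending, 0);
        /* ... the equivalent of ena_rx_cleanup() would run here ... */
}

int
main(void)
{
        rx_intr_model();        /* enqueues */
        rx_intr_model();        /* skipped: still pending */
        cleanup_task_model();   /* flag cleared by the task */
        rx_intr_model();        /* enqueues again */
        return 0;
}

Clearing the flag before the drain rather than after, as line 1873 does, lets an interrupt that arrives mid-cleanup queue one more pass, so completions racing with the drain are not lost.
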
2618 ring = adapter->que[i].rx_ring;
2642 struct ena_ring *rx_ring = adapter->que[i].rx_ring;
2643 workqueue_wait(rx_ring->cleanup_tq,
2644 &rx_ring->cleanup_task);
3520 struct ena_ring *rx_ring;
3530 rx_ring = &adapter->rx_ring[i];
3532 refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
3533 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3534 rx_ring->empty_rx_queue++;
3536 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3537 counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3543 ENA_RING_MTX_LOCK(rx_ring);
3544 if (rx_ring->stopping) {
3545 ENA_RING_MTX_UNLOCK(rx_ring);
3548 if (atomic_cas_uint(&rx_ring->task_pending, 0, 1) == 0)
3549 workqueue_enqueue(rx_ring->cleanup_tq,
3550 &rx_ring->cleanup_task, curcpu());
3551 ENA_RING_MTX_UNLOCK(rx_ring);
3552 rx_ring->empty_rx_queue = 0;
3555 rx_ring->empty_rx_queue = 0;
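
Source lines 3520-3555 are the watchdog check for a stalled RX ring: when ena_com_free_desc() reports ring_size - 1 free descriptors the ring holds no buffers at all; after EMPTY_RX_REFILL consecutive empty observations the watchdog bumps rx_stats.empty_rx_ring and kicks the cleanup task (again gated on task_pending) to force a refill, then resets its counter. The model below keeps just that counting logic; the threshold value is an assumption for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EMPTY_RX_REFILL_MODEL   4       /* assumed threshold for the demo */

struct empty_check_model {
        uint16_t ring_size;
        uint32_t empty_rx_queue;        /* consecutive "ring empty" checks */
        uint64_t empty_rx_ring;         /* models rx_stats.empty_rx_ring */
};

/* Returns true when the watchdog should kick the cleanup/refill task. */
static bool
check_for_empty_rx_ring_model(struct empty_check_model *rx, uint32_t free_desc)
{
        /* ring_size - 1 free descriptors means nothing is posted at all. */
        if (free_desc != (uint32_t)(rx->ring_size - 1)) {
                rx->empty_rx_queue = 0;
                return false;
        }

        if (++rx->empty_rx_queue < EMPTY_RX_REFILL_MODEL)
                return false;

        rx->empty_rx_ring++;
        rx->empty_rx_queue = 0;
        return true;
}

int
main(void)
{
        struct empty_check_model rx = { .ring_size = 256 };

        for (int pass = 1; pass <= 5; pass++) {
                if (check_for_empty_rx_ring_model(&rx, rx.ring_size - 1))
                        printf("pass %d: kick cleanup task to refill\n", pass);
        }
        return 0;
}
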