Lines matching refs: ena_dev. Each entry below is a source line number followed by the matched line.
106 static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
110 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
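The check at 110 rejects DMA addresses wider than the device-reported width. A minimal sketch of the whole helper, assuming the ena_common_mem_addr layout (32-bit low part, 16-bit high part) from the ENA com-layer headers, with tracing dropped:

static inline int mem_addr_set_sketch(struct ena_com_dev *ena_dev,
				      struct ena_common_mem_addr *ena_addr,
				      dma_addr_t addr)
{
	/* Any bit above dma_addr_bits-1 means the address is out of range. */
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr)
		return ENA_COM_INVAL;

	/* Split the bus address into the descriptor's low/high fields. */
	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}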
342 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
357 io_sq->bus = ena_dev->bus;
360 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
368 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
383 io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;
389 ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
395 io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
402 memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));
420 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
436 io_cq->bus = ena_dev->bus;
438 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
446 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
601 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
604 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
712 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
714 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
728 return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
744 ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
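The matches at 712-744 are the readless register read path. A hedged sketch of the flow; request-id handling is simplified, and the real code bounds the poll with a timeout and serializes readers with a lock:

static u32 reg_bar_read32_sketch(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *resp =
		mmio_read->read_resp;
	u32 req;

	/* Plain MMIO read when the device does not support readless. */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

	/* Post a read request: register offset plus a sequence/request id. */
	mmio_read->seq_num++;
	req = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
	      ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	req |= mmio_read->seq_num & ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
	ENA_REG_WRITE32(ena_dev->bus, req,
			ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	/* The device DMAs the value into the pre-registered response buffer;
	 * a real implementation bounds this poll with a timeout. */
	while (resp->req_id != mmio_read->seq_num)
		;

	return resp->reg_val;
}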
793 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
796 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
828 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
837 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
849 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
860 ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer, size);
865 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
871 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
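A sketch of wait_for_reset_state() built around the match at 871, assuming the timeout argument is counted in the 100 ms units the reset flow at 2033+ derives from the CAPS register:

static int wait_for_reset_state_sketch(struct ena_com_dev *ena_dev,
				       u32 timeout, u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT))
			return ENA_COM_TIMER_EXPIRED;

		/* Done once the RESET_IN_PROGRESS bit matches expectation. */
		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) == exp_state)
			return 0;

		ENA_MSLEEP(100);
	}

	return ENA_COM_TIMER_EXPIRED;
}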
889 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
896 !(ena_dev->supported_features & feature_mask))
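The test at 889-896 is one bit per feature id; a sketch, with the always-supported special cases (the extra condition paired with the match at 896) elided:

static bool feature_supported_sketch(struct ena_com_dev *ena_dev,
				     enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* supported_features is captured from the device attributes
	 * (see the match at 1883). */
	return !!(ena_dev->supported_features & feature_mask);
}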
902 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
912 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
918 admin_queue = &ena_dev->admin_queue;
928 ret = ena_com_mem_addr_set(ena_dev,
955 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
959 return ena_com_get_feature_ex(ena_dev,
966 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
968 struct ena_rss *rss = &ena_dev->rss;
970 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
982 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
984 struct ena_rss *rss = &ena_dev->rss;
987 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
995 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
997 struct ena_rss *rss = &ena_dev->rss;
999 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1011 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1013 struct ena_rss *rss = &ena_dev->rss;
1016 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1024 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1027 struct ena_rss *rss = &ena_dev->rss;
1032 ret = ena_com_get_feature(ena_dev, &get_resp,
1049 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1059 ENA_MEM_ALLOC(ena_dev->dmadev, rss->host_rss_ind_tbl_size);
1071 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
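The allocations at 1049 and 1059 size two tables from one log2 value: a DMA-coherent table in device format and a plain host shadow of queue ids. A sketch of the arithmetic, assuming the device entry type from the admin defs:

static void ind_tbl_sizes_sketch(u16 log_size, size_t *dev_tbl_size,
				 size_t *host_tbl_size)
{
	/* Device-format table: 2^log_size entries, allocated coherent. */
	*dev_tbl_size = (1ULL << log_size) *
			sizeof(struct ena_admin_rss_ind_table_entry);

	/* Host shadow table: one host queue id (u16) per entry. */
	*host_tbl_size = (1ULL << log_size) * sizeof(u16);
}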
1082 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1084 struct ena_rss *rss = &ena_dev->rss;
1089 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1097 ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl,
1102 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1105 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1138 ret = ena_com_mem_addr_set(ena_dev,
1159 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1163 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1167 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1176 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1178 struct ena_rss *rss = &ena_dev->rss;
1188 io_sq = &ena_dev->io_sq_queues[qid];
1199 static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
1202 struct ena_rss *rss = &ena_dev->rss;
1207 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
1223 static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
1229 ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
1230 if (!ena_dev->intr_moder_tbl)
1233 ena_com_config_default_interrupt_moderation_table(ena_dev);
1238 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1241 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
1248 ena_dev->intr_delay_resolution = intr_delay_resolution;
1255 ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
1296 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1299 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1316 ret = ena_com_mem_addr_set(ena_dev,
1336 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1341 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1346 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1354 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1364 *io_sq = &ena_dev->io_sq_queues[qid];
1365 *io_cq = &ena_dev->io_cq_queues[qid];
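ena_com_get_io_handlers() (1354-1365) is a bounds-checked lookup; a sketch with tracing dropped:

static int get_io_handlers_sketch(struct ena_com_dev *ena_dev, u16 qid,
				  struct ena_com_io_sq **io_sq,
				  struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES)
		return ENA_COM_INVAL;

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}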
1370 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1372 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1390 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1392 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1410 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1413 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1435 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1437 return ena_dev->admin_queue.running_state;
1440 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1442 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1446 ena_dev->admin_queue.running_state = state;
1450 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1452 u16 depth = ena_dev->aenq.q_depth;
1454 ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1459 ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1462 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1470 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
1484 admin_queue = &ena_dev->admin_queue;
1503 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1505 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1523 ena_dev->dma_addr_bits = width;
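A sketch of ena_com_get_dma_width() around 1503-1523, assuming the CAPS field names from the regs defs; the resulting width is what bounds the GENMASK_ULL check at 110:

static int get_dma_width_sketch(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	u32 width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT))
		return ENA_COM_TIMER_EXPIRED;

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	/* Sanity-bound the width before trusting it in GENMASK_ULL(). */
	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS))
		return ENA_COM_INVAL;

	ena_dev->dma_addr_bits = width;
	return width;
}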
1528 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1537 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1538 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1580 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1582 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1585 struct ena_com_aenq *aenq = &ena_dev->aenq;
1601 ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx, s);
1606 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
1612 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
1617 if (ena_dev->aenq.entries)
1618 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
1623 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1630 ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1631 ena_dev->admin_queue.polling = polling;
1634 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1636 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1639 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1647 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1656 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1658 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1663 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1665 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1667 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1668 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1670 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1681 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1683 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1689 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1690 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1693 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1697 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1705 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1719 admin_queue->bus = ena_dev->bus;
1720 admin_queue->q_dmadev = ena_dev->dmadev;
1741 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1747 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1748 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1753 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1754 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1768 ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1769 ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1770 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1778 ena_com_admin_destroy(ena_dev);
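The writes at 1747-1769 program the admin queues' base addresses and capabilities. A hedged sketch of just the caps encoding (depth in the low bits, entry size shifted above it); the mask and entry-type names are assumed from the ENA defs headers:

static void admin_caps_sketch(struct ena_com_admin_queue *admin_queue,
			      u32 *aq_caps, u32 *acq_caps)
{
	/* Submission queue: depth plus entry size, packed into one register. */
	*aq_caps = (admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK) |
		   ((sizeof(struct ena_admin_aq_entry) <<
		     ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		    ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK);

	/* Completion queue: same layout with the ACQ masks. */
	*acq_caps = (admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK) |
		    ((sizeof(struct ena_admin_acq_entry) <<
		      ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK);
}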
1783 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1796 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1797 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1818 ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
1820 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1823 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1827 ret = ena_com_create_io_cq(ena_dev, io_cq);
1831 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1838 ena_com_destroy_io_cq(ena_dev, io_cq);
1840 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1844 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1855 io_sq = &ena_dev->io_sq_queues[qid];
1856 io_cq = &ena_dev->io_cq_queues[qid];
1858 ena_com_destroy_io_sq(ena_dev, io_sq);
1859 ena_com_destroy_io_cq(ena_dev, io_cq);
1861 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1864 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1867 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
1870 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1876 rc = ena_com_get_feature(ena_dev, &get_resp,
1883 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1885 rc = ena_com_get_feature(ena_dev, &get_resp,
1892 ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
1894 rc = ena_com_get_feature(ena_dev, &get_resp,
1902 rc = ena_com_get_feature(ena_dev, &get_resp,
1913 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
1923 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
1935 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1937 ena_com_handle_admin_completion(&ena_dev->admin_queue);
2017 int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
2025 ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
2026 ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);
2033 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2039 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2040 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2064 ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2067 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2069 rc = wait_for_reset_state(ena_dev, timeout,
2077 ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2078 rc = wait_for_reset_state(ena_dev, timeout, 0);
2088 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2090 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2095 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2104 admin_queue = &ena_dev->admin_queue;
2122 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2129 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2138 int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
2148 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
2155 ret = ena_com_mem_addr_set(ena_dev,
2164 get_cmd->device_id = ena_dev->stats_func;
2165 get_cmd->queue_idx = ena_dev->stats_queue;
2167 ret = ena_get_dev_stats(ena_dev, &ctx,
2175 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
2182 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2189 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2195 admin_queue = &ena_dev->admin_queue;
2214 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2220 ret = ena_com_get_feature(ena_dev, &resp,
2232 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2234 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2235 struct ena_rss *rss = &ena_dev->rss;
2241 if (!ena_com_check_supported_feature_id(ena_dev,
2249 ret = ena_com_get_feature(ena_dev, &get_resp,
2269 ret = ena_com_mem_addr_set(ena_dev,
2293 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2297 struct ena_rss *rss = &ena_dev->rss;
2307 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2339 rc = ena_com_set_hash_function(ena_dev);
2343 ena_com_get_hash_function(ena_dev, NULL, NULL);
2348 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2352 struct ena_rss *rss = &ena_dev->rss;
2358 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2375 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2379 struct ena_rss *rss = &ena_dev->rss;
2383 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2396 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2398 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2399 struct ena_rss *rss = &ena_dev->rss;
2405 if (!ena_com_check_supported_feature_id(ena_dev,
2422 ret = ena_com_mem_addr_set(ena_dev,
2442 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2444 struct ena_rss *rss = &ena_dev->rss;
2451 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2494 rc = ena_com_set_hash_ctrl(ena_dev);
2498 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2503 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2507 struct ena_rss *rss = &ena_dev->rss;
2518 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2531 rc = ena_com_set_hash_ctrl(ena_dev);
2535 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2540 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2543 struct ena_rss *rss = &ena_dev->rss;
2556 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2558 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2559 struct ena_rss *rss = &ena_dev->rss;
2564 if (!ena_com_check_supported_feature_id(ena_dev,
2571 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2586 ret = ena_com_mem_addr_set(ena_dev,
2609 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2611 struct ena_rss *rss = &ena_dev->rss;
2619 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2629 rc = ena_com_ind_tbl_convert_from_device(ena_dev);
2639 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2643 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2645 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2649 rc = ena_com_hash_key_allocate(ena_dev);
2653 rc = ena_com_hash_ctrl_init(ena_dev);
2660 ena_com_hash_key_destroy(ena_dev);
2662 ena_com_indirect_table_destroy(ena_dev);
2668 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2670 ena_com_indirect_table_destroy(ena_dev);
2671 ena_com_hash_key_destroy(ena_dev);
2672 ena_com_hash_ctrl_destroy(ena_dev);
2674 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2677 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2679 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2681 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2692 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2695 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2697 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2712 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2714 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2717 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2726 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2728 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2731 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2740 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2742 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2754 admin_queue = &ena_dev->admin_queue;
2759 ret = ena_com_mem_addr_set(ena_dev,
2767 ret = ena_com_mem_addr_set(ena_dev,
2790 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2792 return ena_com_check_supported_feature_id(ena_dev,
2796 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2799 if (!ena_dev->intr_delay_resolution) {
2804 ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
2805 ena_dev->intr_delay_resolution;
2810 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2813 if (!ena_dev->intr_delay_resolution) {
2821 ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2822 rx_coalesce_usecs / ena_dev->intr_delay_resolution;
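Both interval setters (2796-2822) apply the same conversion: user microseconds divided by the device's delay resolution, guarded against a zero (uninitialized) resolution. Sketch:

static int usecs_to_intr_units_sketch(struct ena_com_dev *ena_dev,
				      u32 coalesce_usecs, u32 *out)
{
	/* A zero resolution means interrupt moderation was never set up. */
	if (!ena_dev->intr_delay_resolution)
		return ENA_COM_FAULT;

	*out = coalesce_usecs / ena_dev->intr_delay_resolution;
	return 0;
}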
2827 void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
2832 if (ena_dev->intr_moder_tbl)
2833 ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl, size);
2834 ena_dev->intr_moder_tbl = NULL;
2837 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2843 rc = ena_com_get_feature(ena_dev, &get_resp,
2857 ena_com_disable_adaptive_moderation(ena_dev);
2861 rc = ena_com_init_interrupt_moderation_table(ena_dev);
2867 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2868 ena_com_enable_adaptive_moderation(ena_dev);
2872 ena_com_destroy_interrupt_moderation(ena_dev);
2876 void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
2878 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2919 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2921 return ena_dev->intr_moder_tx_interval;
2924 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2926 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2934 void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
2938 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2944 if (ena_dev->intr_delay_resolution)
2946 ena_dev->intr_delay_resolution;
2954 void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
2958 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2964 if (ena_dev->intr_delay_resolution)
2965 entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
2971 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2978 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2982 rc = ena_com_config_llq_info(ena_dev, llq);
2987 size = ena_dev->tx_max_header_size;
2988 size += ena_dev->llq_info.descs_num_before_header *
2991 if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
2996 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
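The final matches (2971-2996) gate LLQ placement on a fit check: one descriptor-list entry must hold the pushed header plus the descriptors written before it, otherwise the driver stays in host placement. A sketch, assuming the TX descriptor type from the eth-io defs:

static bool llq_entry_fits_sketch(struct ena_com_dev *ena_dev)
{
	u32 size = ena_dev->tx_max_header_size +
		   ena_dev->llq_info.descs_num_before_header *
		   sizeof(struct ena_eth_io_tx_desc);

	/* If this fails, tx_mem_queue_type remains
	 * ENA_ADMIN_PLACEMENT_POLICY_HOST. */
	return ena_dev->llq_info.desc_list_entry_size >= size;
}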