Home | History | Annotate | Line # | Download | only in ena-com
ena_com.h revision 1.1
      1 /*-
      2  * BSD LICENSE
      3  *
      4  * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  *
     11  * * Redistributions of source code must retain the above copyright
     12  * notice, this list of conditions and the following disclaimer.
     13  * * Redistributions in binary form must reproduce the above copyright
     14  * notice, this list of conditions and the following disclaimer in
     15  * the documentation and/or other materials provided with the
     16  * distribution.
     17  * * Neither the name of copyright holder nor the names of its
     18  * contributors may be used to endorse or promote products derived
     19  * from this software without specific prior written permission.
     20  *
     21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     24  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     25  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     27  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     31  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     32  */
     33 
     34 #ifndef ENA_COM
     35 #define ENA_COM
     36 
     37 #ifndef ENA_INTERNAL
     38 #include "ena_plat.h"
     39 #else
     40 #include "ena_plat.h"
     41 #include "ena_includes.h"
     42 #endif
     43 
      44 #define ENA_MAX_NUM_IO_QUEUES		128U
      45 /* We need two queues for each IO (one for Tx and one for Rx) */
      46 #define ENA_TOTAL_NUM_QUEUES		(2 * (ENA_MAX_NUM_IO_QUEUES))
      47 
      48 #define ENA_MAX_HANDLERS 256
      49 
      50 #define ENA_MAX_PHYS_ADDR_SIZE_BITS 48
      51 
      52 /* Unit in usec */
      53 #define ENA_REG_READ_TIMEOUT 200000
      54 
      55 #define ADMIN_SQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aq_entry))
      56 #define ADMIN_CQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_acq_entry))
      57 #define ADMIN_AENQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aenq_entry))
      58 
      59 /*****************************************************************************/
      60 /*****************************************************************************/
      61 /* ENA adaptive interrupt moderation settings */
      62 
      63 #define ENA_INTR_LOWEST_USECS           (0)
      64 #define ENA_INTR_LOWEST_PKTS            (3)
      65 #define ENA_INTR_LOWEST_BYTES           (2 * 1524)
      66 
      67 #define ENA_INTR_LOW_USECS              (32)
      68 #define ENA_INTR_LOW_PKTS               (12)
      69 #define ENA_INTR_LOW_BYTES              (16 * 1024)
      70 
      71 #define ENA_INTR_MID_USECS              (80)
      72 #define ENA_INTR_MID_PKTS               (48)
      73 #define ENA_INTR_MID_BYTES              (64 * 1024)
      74 
      75 #define ENA_INTR_HIGH_USECS             (128)
      76 #define ENA_INTR_HIGH_PKTS              (96)
      77 #define ENA_INTR_HIGH_BYTES             (128 * 1024)
      78 
      79 #define ENA_INTR_HIGHEST_USECS          (192)
      80 #define ENA_INTR_HIGHEST_PKTS           (128)
      81 #define ENA_INTR_HIGHEST_BYTES          (192 * 1024)
      82 
      83 #define ENA_INTR_INITIAL_TX_INTERVAL_USECS		196
      84 #define ENA_INTR_INITIAL_RX_INTERVAL_USECS		4
      85 #define ENA_INTR_DELAY_OLD_VALUE_WEIGHT			6
      86 #define ENA_INTR_DELAY_NEW_VALUE_WEIGHT			4
      87 #define ENA_INTR_MODER_LEVEL_STRIDE			1
      88 #define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED		0xFFFFFF
      89 
      90 #define ENA_HW_HINTS_NO_TIMEOUT				0xFFFF
      91 
      92 enum ena_intr_moder_level {
      93 	ENA_INTR_MODER_LOWEST = 0,
      94 	ENA_INTR_MODER_LOW,
      95 	ENA_INTR_MODER_MID,
      96 	ENA_INTR_MODER_HIGH,
      97 	ENA_INTR_MODER_HIGHEST,
      98 	ENA_INTR_MAX_NUM_OF_LEVELS,
      99 };
    100 
     101 struct ena_intr_moder_entry {
     102 	unsigned int intr_moder_interval; /* delay between interrupts (presumably usec, per *_USECS levels — TODO confirm) */
     103 	unsigned int pkts_per_interval; /* packet-count threshold for this moderation level */
     104 	unsigned int bytes_per_interval; /* byte-count threshold for this moderation level */
     105 };
     106 
     107 enum queue_direction {
     108 	ENA_COM_IO_QUEUE_DIRECTION_TX,
     109 	ENA_COM_IO_QUEUE_DIRECTION_RX
     110 };
     111 
     112 struct ena_com_buf {
     113 	dma_addr_t paddr; /**< Buffer physical address */
     114 	u16 len; /**< Buffer length in bytes */
     115 };
     116 
     117 struct ena_com_rx_buf_info {
     118 	u16 len; /* received buffer length in bytes */
     119 	u16 req_id; /* id linking the buffer to its original request — TODO confirm */
     120 };
     121 
     122 struct ena_com_io_desc_addr {
     123 	u8 __iomem *pbuf_dev_addr; /* LLQ address */
     124 	u8 *virt_addr;
     125 	dma_addr_t phys_addr;
     126 	ena_mem_handle_t mem_handle;
     127 };
     128 
     129 struct ena_com_tx_meta {
     130 	u16 mss;
     131 	u16 l3_hdr_len;
     132 	u16 l3_hdr_offset;
     133 	u16 l4_hdr_len; /* In words */
     134 };
     135 
     136 struct ena_com_llq_info {
     137 	bool inline_header;
     138 	u16 desc_stride_ctrl;
     139 
     140 	u16 desc_list_entry_size;
     141 	u16 descs_num_before_header;
     142 	u16 descs_per_entry;
     143 };
    144 
     145 struct ena_com_io_cq {
     146 	struct ena_com_io_desc_addr cdesc_addr;
     147 	void *bus;
     148 
     149 	/* Interrupt unmask register */
     150 	u32 __iomem *unmask_reg;
     151 
     152 	/* The completion queue head doorbell register */
     153 	u32 __iomem *cq_head_db_reg;
     154 
     155 	/* numa configuration register (for TPH) */
     156 	u32 __iomem *numa_node_cfg_reg;
     157 
     158 	/* The value to write to the above register to unmask
     159 	 * the interrupt of this queue
     160 	 */
     161 	u32 msix_vector;
     162 
     163 	enum queue_direction direction;
     164 
     165 	/* holds the number of cdesc of the current packet */
     166 	u16 cur_rx_pkt_cdesc_count;
     167 	/* save the first cdesc idx of the current packet */
     168 	u16 cur_rx_pkt_cdesc_start_idx;
     169 
     170 	u16 q_depth;
     171 	/* Caller qid */
     172 	u16 qid;
     173 
     174 	/* Device queue index */
     175 	u16 idx;
     176 	u16 head;
     177 	u16 last_head_update;
     178 	u8 phase;
     179 	u8 cdesc_entry_size_in_bytes;
     180 
     181 } ____cacheline_aligned;
    182 
     183 struct ena_com_io_bounce_buffer_control {
     184 	u8 *base_buffer;
     185 	u16 next_to_use;
     186 	u16 buffer_size;
     187 	u16 buffers_num;  /* Must be a power of 2 */
     188 };
     189 
     190 /* This struct keeps track of the current location of the next LLQ entry */
     191 struct ena_com_llq_pkt_ctrl {
     192 	u8 *curr_bounce_buf;
     193 	u16 idx;
     194 	u16 descs_left_in_line;
     195 };
     196 
     197 struct ena_com_io_sq {
     198 	struct ena_com_io_desc_addr desc_addr;
     199 	void *bus;
     200 
     201 	u32 __iomem *db_addr; /* submission queue doorbell register */
     202 	u8 __iomem *header_addr; /* device address for packet headers (LLQ) — TODO confirm */
     203 
     204 	enum queue_direction direction;
     205 	enum ena_admin_placement_policy_type mem_queue_type;
     206 
     207 	u32 msix_vector;
     208 	struct ena_com_tx_meta cached_tx_meta;
     209 	struct ena_com_llq_info llq_info;
     210 	struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
     211 	struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
     212 
     213 	u16 q_depth;
     214 	u16 qid;
     215 
     216 	u16 idx;
     217 	u16 tail;
     218 	u16 next_to_comp;
     219 	u16 llq_last_copy_tail;
     220 	u32 tx_max_header_size;
     221 	u8 phase;
     222 	u8 desc_entry_size;
     223 	u8 dma_addr_bits;
     224 } ____cacheline_aligned;
    225 
     226 struct ena_com_admin_cq {
     227 	struct ena_admin_acq_entry *entries;
     228 	ena_mem_handle_t mem_handle;
     229 	dma_addr_t dma_addr;
     230 
     231 	u16 head;
     232 	u8 phase;
     233 };
     234 
     235 struct ena_com_admin_sq {
     236 	struct ena_admin_aq_entry *entries;
     237 	ena_mem_handle_t mem_handle;
     238 	dma_addr_t dma_addr;
     239 
     240 	u32 __iomem *db_addr;
     241 
     242 	u16 head;
     243 	u16 tail;
     244 	u8 phase;
     245 
     246 };
     247 
     248 struct ena_com_stats_admin {
     249 	u32 aborted_cmd;
     250 	u32 submitted_cmd;
     251 	u32 completed_cmd;
     252 	u32 out_of_space;
     253 	u32 no_completion;
     254 };
     255 
     256 struct ena_com_admin_queue {
     257 	void *q_dmadev;
     258 	void *bus;
     259 	ena_spinlock_t q_lock; /* spinlock for the admin queue */
     260 
     261 	struct ena_comp_ctx *comp_ctx;
     262 	u32 completion_timeout;
     263 	u16 q_depth;
     264 	struct ena_com_admin_cq cq;
     265 	struct ena_com_admin_sq sq;
     266 
     267 	/* Indicates whether the admin queue should poll for completions */
     268 	bool polling;
     269 
     270 	u16 curr_cmd_id;
     271 
     272 	/* Indicates that the ENA device was initialized and can
     273 	 * process new admin commands
     274 	 */
     275 	bool running_state;
     276 
     277 	/* Count the number of outstanding admin commands */
     278 	ena_atomic32_t outstanding_cmds;
     279 
     280 	struct ena_com_stats_admin stats;
     281 };
    282 
     283 struct ena_aenq_handlers;
     284 
     285 struct ena_com_aenq {
     286 	u16 head;
     287 	u8 phase;
     288 	struct ena_admin_aenq_entry *entries;
     289 	dma_addr_t dma_addr;
     290 	ena_mem_handle_t mem_handle;
     291 	u16 q_depth;
     292 	struct ena_aenq_handlers *aenq_handlers;
     293 };
     294 
     295 struct ena_com_mmio_read {
     296 	struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
     297 	dma_addr_t read_resp_dma_addr;
     298 	ena_mem_handle_t read_resp_mem_handle;
     299 	u32 reg_read_to; /* register read timeout, in usec */
     300 	u16 seq_num;
     301 	bool readless_supported;
     302 	/* spin lock to ensure a single outstanding read */
     303 	ena_spinlock_t lock;
     304 };
    305 
     306 struct ena_rss {
     307 	/* Indirect table */
     308 	u16 *host_rss_ind_tbl;
     309 	struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
     310 	dma_addr_t rss_ind_tbl_dma_addr;
     311 	ena_mem_handle_t rss_ind_tbl_mem_handle;
     312 	u16 tbl_log_size;
     313 
     314 	/* Hash key */
     315 	enum ena_admin_hash_functions hash_func;
     316 	struct ena_admin_feature_rss_flow_hash_control *hash_key;
     317 	dma_addr_t hash_key_dma_addr;
     318 	ena_mem_handle_t hash_key_mem_handle;
     319 	u32 hash_init_val;
     320 
     321 	/* Flow hash control (the fields that take part in the hash) */
     322 	struct ena_admin_feature_rss_hash_control *hash_ctrl;
     323 	dma_addr_t hash_ctrl_dma_addr;
     324 	ena_mem_handle_t hash_ctrl_mem_handle;
     325 
     326 };
     327 
     328 struct ena_host_attribute {
     329 	/* Debug area */
     330 	u8 *debug_area_virt_addr;
     331 	dma_addr_t debug_area_dma_addr;
     332 	ena_mem_handle_t debug_area_dma_handle;
     333 	u32 debug_area_size;
     334 
     335 	/* Host information */
     336 	struct ena_admin_host_info *host_info;
     337 	dma_addr_t host_info_dma_addr;
     338 	ena_mem_handle_t host_info_dma_handle;
     339 };
    340 
     341 /* Each ena_dev is a PCI function. */
     342 struct ena_com_dev {
     343 	struct ena_com_admin_queue admin_queue;
     344 	struct ena_com_aenq aenq;
     345 	struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
     346 	struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
     347 	u8 __iomem *reg_bar;
     348 	void __iomem *mem_bar;
     349 	void *dmadev;
     350 	void *bus;
     351 
     352 	enum ena_admin_placement_policy_type tx_mem_queue_type;
     353 	u32 tx_max_header_size;
     354 	u16 stats_func; /* Selected function for extended statistic dump */
     355 	u16 stats_queue; /* Selected queue for extended statistic dump */
     356 
     357 	struct ena_com_mmio_read mmio_read;
     358 
     359 	struct ena_rss rss;
     360 	u32 supported_features;
     361 	u32 dma_addr_bits;
     362 
     363 	struct ena_host_attribute host_attr;
     364 	bool adaptive_coalescing; /* adaptive interrupt moderation enabled — TODO confirm */
     365 	u16 intr_delay_resolution;
     366 	u32 intr_moder_tx_interval;
     367 	struct ena_intr_moder_entry *intr_moder_tbl;
     368 
     369 	struct ena_com_llq_info llq_info;
     370 };
     371 
     372 struct ena_com_dev_get_features_ctx {
     373 	struct ena_admin_queue_feature_desc max_queues;
     374 	struct ena_admin_device_attr_feature_desc dev_attr;
     375 	struct ena_admin_feature_aenq_desc aenq;
     376 	struct ena_admin_feature_offload_desc offload;
     377 	struct ena_admin_ena_hw_hints hw_hints;
     378 	struct ena_admin_feature_llq_desc llq;
     379 };
     380 
     381 struct ena_com_create_io_ctx {
     382 	enum ena_admin_placement_policy_type mem_queue_type;
     383 	enum queue_direction direction;
     384 	int numa_node;
     385 	u32 msix_vector;
     386 	u16 queue_size;
     387 	u16 qid;
     388 };
     389 
     390 typedef void (*ena_aenq_handler)(void *data,
     391 	struct ena_admin_aenq_entry *aenq_e);
     392 
     393 /* Holds aenq handlers. Indexed by AENQ event group */
     394 struct ena_aenq_handlers {
     395 	ena_aenq_handler handlers[ENA_MAX_HANDLERS];
     396 	ena_aenq_handler unimplemented_handler;
     397 };
    398 
    399 /*****************************************************************************/
    400 /*****************************************************************************/
    401 #if defined(__cplusplus)
    402 extern "C" {
    403 #endif
    404 
     405 /* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
     406  * @ena_dev: ENA communication layer struct
     407  *
     408  * Initialize the register read mechanism.
     409  *
     410  * @note: This method must be the first stage in the initialization sequence.
     411  *
     412  * @return - 0 on success, negative value on failure.
     413  */
     414 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
     415 
     416 /* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
     417  * @ena_dev: ENA communication layer struct
     418  * @readless_supported: readless mode (enable/disable)
     419  */
     420 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
     421 				bool readless_supported);
     422 
     423 /* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return
     424  * value physical address.
     425  * @ena_dev: ENA communication layer struct
     426  */
     427 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);
     428 
     429 /* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
     430  * @ena_dev: ENA communication layer struct
     431  */
     432 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
     433 
     434 /* ena_com_admin_init - Init the admin and the async queues
     435  * @ena_dev: ENA communication layer struct
     436  * @aenq_handlers: Those handlers to be called upon event.
     437  * @init_spinlock: Indicate if this method should init the admin spinlock or
     438  * the spinlock was initialized beforehand (for example, in a case of FLR).
     439  *
     440  * Initialize the admin submission and completion queues.
     441  * Initialize the asynchronous events notification queues.
     442  *
     443  * @return - 0 on success, negative value on failure.
     444  */
     445 int ena_com_admin_init(struct ena_com_dev *ena_dev,
     446 		       struct ena_aenq_handlers *aenq_handlers,
     447 		       bool init_spinlock);
     448 
     449 /* ena_com_admin_destroy - Destroy the admin and the async events queues.
     450  * @ena_dev: ENA communication layer struct
     451  *
     452  * @note: Before calling this method, the caller must validate that the device
     453  * won't send any additional admin completions/aenq.
     454  * To achieve that, a FLR is recommended.
     455  */
     456 void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
     457 
     458 /* ena_com_dev_reset - Perform device FLR to the device.
     459  * @ena_dev: ENA communication layer struct
     460  * @reset_reason: Specify what is the trigger for the reset in case of an error.
     461  *
     462  * @return - 0 on success, negative value on failure.
     463  */
     464 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
     465 		      enum ena_regs_reset_reason_types reset_reason);
     466 
     467 /* ena_com_create_io_queue - Create io queue.
     468  * @ena_dev: ENA communication layer struct
     469  * @ctx - create context structure
     470  *
     471  * Create the submission and the completion queues.
     472  *
     473  * @return - 0 on success, negative value on failure.
     474  */
     475 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
     476 			    struct ena_com_create_io_ctx *ctx);
     477 
     478 /* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
     479  * @ena_dev: ENA communication layer struct
     480  * @qid - the caller virtual queue id.
     481  */
     482 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
     483 
     484 /* ena_com_get_io_handlers - Return the io queue handlers
     485  * @ena_dev: ENA communication layer struct
     486  * @qid - the caller virtual queue id.
     487  * @io_sq - IO submission queue handler
     488  * @io_cq - IO completion queue handler.
     489  *
     490  * @return - 0 on success, negative value on failure.
     491  */
     492 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
     493 			    struct ena_com_io_sq **io_sq,
     494 			    struct ena_com_io_cq **io_cq);
     495 
     496 /* ena_com_admin_aenq_enable - Enable asynchronous event notifications
     497  * @ena_dev: ENA communication layer struct
     498  *
     499  * After this method, aenq event can be received via AENQ.
     500  */
     501 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);
     502 
     503 /* ena_com_set_admin_running_state - Set the state of the admin queue
     504  * @ena_dev: ENA communication layer struct
     505  *
     506  * Change the state of the admin queue (enable/disable)
     507  */
     508 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);
     509 
     510 /* ena_com_get_admin_running_state - Get the admin queue state
     511  * @ena_dev: ENA communication layer struct
     512  *
     513  * Retrieve the state of the admin queue (enable/disable)
     514  *
     515  * @return - current admin queue running state (enable/disable)
     516  */
     517 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
    518 
     519 /* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
     520  * @ena_dev: ENA communication layer struct
     521  * @polling: Enable/disable polling mode
     522  *
     523  * Set the admin completion mode.
     524  */
     525 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
     526 
     527 /* ena_com_get_ena_admin_polling_mode - Get the admin completion queue polling mode
     528  * @ena_dev: ENA communication layer struct
     529  *
     530  * Get the admin completion mode.
     531  * If polling mode is on, ena_com_execute_admin_command will perform a
     532  * polling on the admin completion queue for the commands completion,
     533  * otherwise it will wait on wait event.
     534  *
     535  * @return state
     536  */
     537 bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
     538 
     539 /* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
     540  * @ena_dev: ENA communication layer struct
     541  *
     542  * This method goes over the admin completion queue and wakes up all the pending
     543  * threads that wait on the commands wait event.
     544  *
     545  * @note: Should be called after MSI-X interrupt.
     546  */
     547 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
     548 
     549 /* ena_com_aenq_intr_handler - AENQ interrupt handler
     550  * @ena_dev: ENA communication layer struct
     551  *
     552  * This method goes over the async event notification queue and calls the proper
     553  * aenq handler.
     554  */
     555 void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
     556 
     557 /* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
     558  * @ena_dev: ENA communication layer struct
     559  *
     560  * This method aborts all the outstanding admin commands.
     561  * The caller should then call ena_com_wait_for_abort_completion to make sure
     562  * all the commands were completed.
     563  */
     564 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
     565 
     566 /* ena_com_wait_for_abort_completion - Wait for admin commands abort.
     567  * @ena_dev: ENA communication layer struct
     568  *
     569  * This method waits until all the outstanding admin commands are completed.
     570  */
     571 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
     572 
     573 /* ena_com_validate_version - Validate the device parameters
     574  * @ena_dev: ENA communication layer struct
     575  *
     576  * This method validates that the device parameters are the same as the saved
     577  * parameters in ena_dev.
     578  * This method is useful after device reset, to validate the device mac address
     579  * and the device offloads are the same as before the reset.
     580  *
     581  * @return - 0 on success negative value otherwise.
     582  */
     583 int ena_com_validate_version(struct ena_com_dev *ena_dev);
     584 
     585 /* ena_com_get_link_params - Retrieve physical link parameters.
     586  * @ena_dev: ENA communication layer struct
     587  * @resp: Link parameters
     588  *
     589  * Retrieve the physical link parameters,
     590  * like speed, auto-negotiation and full duplex support.
     591  *
     592  * @return - 0 on Success negative value otherwise.
     593  */
     594 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
     595 			    struct ena_admin_get_feat_resp *resp);
     596 
     597 /* ena_com_get_dma_width - Retrieve physical dma address width the device
     598  * supports.
     599  * @ena_dev: ENA communication layer struct
     600  *
     601  * Retrieve the maximum physical address bits the device can handle.
     602  *
     603  * @return: > 0 on Success and negative value otherwise.
     604  */
     605 int ena_com_get_dma_width(struct ena_com_dev *ena_dev);
     606 
     607 /* ena_com_set_aenq_config - Set aenq groups configurations
     608  * @ena_dev: ENA communication layer struct
     609  * @groups_flag: bit fields flags of enum ena_admin_aenq_group.
     610  *
     611  * Configure which aenq event group the driver would like to receive.
     612  *
     613  * @return: 0 on Success and negative value otherwise.
     614  */
     615 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
    616 
     617 /* ena_com_get_dev_attr_feat - Get device features
     618  * @ena_dev: ENA communication layer struct
     619  * @get_feat_ctx: returned context that contain the get features.
     620  *
     621  * @return: 0 on Success and negative value otherwise.
     622  */
     623 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
     624 			      struct ena_com_dev_get_features_ctx *get_feat_ctx);
     625 
     626 /* ena_com_get_dev_basic_stats - Get device basic statistics
     627  * @ena_dev: ENA communication layer struct
     628  * @stats: stats return value
     629  *
     630  * @return: 0 on Success and negative value otherwise.
     631  */
     632 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
     633 				struct ena_admin_basic_stats *stats);
     634 
     635 /* ena_com_set_dev_mtu - Configure the device mtu.
     636  * @ena_dev: ENA communication layer struct
     637  * @mtu: mtu value
     638  *
     639  * @return: 0 on Success and negative value otherwise.
     640  */
     641 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
     642 
     643 /* ena_com_get_offload_settings - Retrieve the device offloads capabilities
     644  * @ena_dev: ENA communication layer struct
     645  * @offload: offload return value
     646  *
     647  * @return: 0 on Success and negative value otherwise.
     648  */
     649 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
     650 				 struct ena_admin_feature_offload_desc *offload);
     651 
     652 /* ena_com_rss_init - Init RSS
     653  * @ena_dev: ENA communication layer struct
     654  * @log_size: indirection log size
     655  *
     656  * Allocate RSS/RFS resources.
     657  * The caller then can configure rss using ena_com_set_hash_function,
     658  * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
     659  *
     660  * @return: 0 on Success and negative value otherwise.
     661  */
     662 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
     663 
     664 /* ena_com_rss_destroy - Destroy rss
     665  * @ena_dev: ENA communication layer struct
     666  *
     667  * Free all the RSS/RFS resources.
     668  */
     669 void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
     670 
     671 /* ena_com_fill_hash_function - Fill RSS hash function
     672  * @ena_dev: ENA communication layer struct
     673  * @func: The hash function (Toeplitz or crc)
     674  * @key: Hash key (for toeplitz hash)
     675  * @key_len: key length (max length 10 DW)
     676  * @init_val: initial value for the hash function
     677  *
     678  * Fill the ena_dev resources with the desired hash function, hash key, key_len
     679  * and key initial value (if needed by the hash function).
     680  * To flush the key into the device the caller should call
     681  * ena_com_set_hash_function.
     682  *
     683  * @return: 0 on Success and negative value otherwise.
     684  */
     685 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
     686 			       enum ena_admin_hash_functions func,
     687 			       const u8 *key, u16 key_len, u32 init_val);
     688 
     689 /* ena_com_set_hash_function - Flush the hash function and its dependencies to
     690  * the device.
     691  * @ena_dev: ENA communication layer struct
     692  *
     693  * Flush the hash function and its dependencies (key, key length and
     694  * initial value) if needed.
     695  *
     696  * @note: Prior to this method the caller should call ena_com_fill_hash_function
     697  *
     698  * @return: 0 on Success and negative value otherwise.
     699  */
     700 int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
    701 
     702 /* ena_com_get_hash_function - Retrieve the hash function and the hash key
     703  * from the device.
     704  * @ena_dev: ENA communication layer struct
     705  * @func: hash function
     706  * @key: hash key
     707  *
     708  * Retrieve the hash function and the hash key from the device.
     709  *
     710  * @note: If the caller called ena_com_fill_hash_function but didn't flush
     711  * it to the device, the new configuration will be lost.
     712  *
     713  * @return: 0 on Success and negative value otherwise.
     714  */
     715 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
     716 			      enum ena_admin_hash_functions *func,
     717 			      u8 *key);
     718 
     719 /* ena_com_fill_hash_ctrl - Fill RSS hash control
     720  * @ena_dev: ENA communication layer struct.
     721  * @proto: The protocol to configure.
     722  * @hash_fields: bit mask of ena_admin_flow_hash_fields
     723  *
     724  * Fill the ena_dev resources with the desired hash control (the ethernet
     725  * fields that take part of the hash) for a specific protocol.
     726  * To flush the hash control to the device, the caller should call
     727  * ena_com_set_hash_ctrl.
     728  *
     729  * @return: 0 on Success and negative value otherwise.
     730  */
     731 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
     732 			   enum ena_admin_flow_hash_proto proto,
     733 			   u16 hash_fields);
     734 
     735 /* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
     736  * @ena_dev: ENA communication layer struct
     737  *
     738  * Flush the hash control (the ethernet fields that take part of the hash)
     739  *
     740  * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
     741  *
     742  * @return: 0 on Success and negative value otherwise.
     743  */
     744 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
     745 
     746 /* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
     747  * @ena_dev: ENA communication layer struct
     748  * @proto: The protocol to retrieve.
     749  * @fields: bit mask of ena_admin_flow_hash_fields.
     750  *
     751  * Retrieve the hash control from the device.
     752  *
     753  * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
     754  * it to the device, the new configuration will be lost.
     755  *
     756  * @return: 0 on Success and negative value otherwise.
     757  */
     758 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
     759 			  enum ena_admin_flow_hash_proto proto,
     760 			  u16 *fields);
     761 
     762 /* ena_com_set_default_hash_ctrl - Set the hash control to a default
     763  * configuration.
     764  * @ena_dev: ENA communication layer struct
     765  *
     766  * Fill the ena_dev resources with the default hash control configuration.
     767  * To flush the hash control to the device, the caller should call
     768  * ena_com_set_hash_ctrl.
     769  *
     770  * @return: 0 on Success and negative value otherwise.
     771  */
     772 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);
    773 
     774 /* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
     775  * indirection table
     776  * @ena_dev: ENA communication layer struct.
     777  * @entry_idx - indirection table entry.
     778  * @entry_value - redirection value
     779  *
     780  * Fill a single entry of the RSS indirection table in the ena_dev resources.
     781  * To flush the indirection table to the device, the caller should call
     782  * ena_com_indirect_table_set.
     783  *
     784  * @return: 0 on Success and negative value otherwise.
     785  */
     786 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
     787 				      u16 entry_idx, u16 entry_value);
     788 
     789 /* ena_com_indirect_table_set - Flush the indirection table to the device.
     790  * @ena_dev: ENA communication layer struct
     791  *
     792  * Flush the indirection hash control to the device.
     793  * Prior to this method the caller should call ena_com_indirect_table_fill_entry
     794  *
     795  * @return: 0 on Success and negative value otherwise.
     796  */
     797 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
     798 
     799 /* ena_com_indirect_table_get - Retrieve the indirection table from the device.
     800  * @ena_dev: ENA communication layer struct
     801  * @ind_tbl: indirection table
     802  *
     803  * Retrieve the RSS indirection table from the device.
     804  *
     805  * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
     806  * it to the device, the new configuration will be lost.
     807  *
     808  * @return: 0 on Success and negative value otherwise.
     809  */
     810 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);
     811 
     812 /* ena_com_allocate_host_info - Allocate host info resources.
     813  * @ena_dev: ENA communication layer struct
     814  *
     815  * @return: 0 on Success and negative value otherwise.
     816  */
     817 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
     818 
     819 /* ena_com_allocate_debug_area - Allocate debug area.
     820  * @ena_dev: ENA communication layer struct
     821  * @debug_area_size - debug area size.
     822  *
     823  * @return: 0 on Success and negative value otherwise.
     824  */
     825 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
     826 				u32 debug_area_size);
     827 
     828 /* ena_com_delete_debug_area - Free the debug area resources.
     829  * @ena_dev: ENA communication layer struct
     830  *
     831  * Free the allocated debug area.
     832  */
     833 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
     834 
     835 /* ena_com_delete_host_info - Free the host info resources.
     836  * @ena_dev: ENA communication layer struct
     837  *
     838  * Free the allocated host info.
     839  */
     840 void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
     841 
     842 /* ena_com_set_host_attributes - Update the device with the host
     843  * attributes (debug area and host info) base address.
     844  * @ena_dev: ENA communication layer struct
     845  *
     846  * @return: 0 on Success and negative value otherwise.
     847  */
     848 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);
    849 
/* ena_com_create_io_cq - Create io completion queue.
 * @ena_dev: ENA communication layer struct
 * @io_cq - io completion queue handler
 *
 * Create IO completion queue.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq);

/* ena_com_destroy_io_cq - Destroy io completion queue.
 * @ena_dev: ENA communication layer struct
 * @io_cq - io completion queue handler
 *
 * Destroy IO completion queue.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq);

/* ena_com_execute_admin_command - Execute admin command
 * @admin_queue: admin queue.
 * @cmd: the admin command to execute.
 * @cmd_size: the command size.
 * @cmd_completion: command completion return value.
 * @cmd_comp_size: command completion size.
 *
 * Submit an admin command and then wait until the device will return a
 * completion.
 * The completion will be copied into cmd_comp.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *cmd_comp,
				  size_t cmd_comp_size);

/* ena_com_init_interrupt_moderation - Init interrupt moderation
 * @ena_dev: ENA communication layer struct
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);

/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources
 * @ena_dev: ENA communication layer struct
 */
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev);
    902 
/* ena_com_interrupt_moderation_supported - Return if interrupt moderation
 * capability is supported by the device.
 * @ena_dev: ENA communication layer struct
 *
 * @return - supported or not.
 */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);

/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt
 * moderation table back to the default parameters.
 * @ena_dev: ENA communication layer struct
 */
void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);

/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
 * non-adaptive interval in Tx direction.
 * @ena_dev: ENA communication layer struct
 * @tx_coalesce_usecs: Interval in usec.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs);

/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
 * non-adaptive interval in Rx direction.
 * @ena_dev: ENA communication layer struct
 * @rx_coalesce_usecs: Interval in usec.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs);

/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
 * non-adaptive interval in Tx direction.
 * @ena_dev: ENA communication layer struct
 *
 * @return - interval in usec
 */
unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);

/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
 * non-adaptive interval in Rx direction.
 * @ena_dev: ENA communication layer struct
 *
 * @return - interval in usec
 */
unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);

/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt
 * moderation table.
 * @ena_dev: ENA communication layer struct
 * @level: Interrupt moderation table level
 * @entry: Entry value
 *
 * Update a single entry in the interrupt moderation table.
 */
void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry);

/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry.
 * @ena_dev: ENA communication layer struct
 * @level: Interrupt moderation table level
 * @entry: Entry to fill.
 *
 * Initialize the entry according to the adaptive interrupt moderation table.
 */
void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry);


/* ena_com_config_dev_mode - Configure the placement policy of the device.
 * @ena_dev: ENA communication layer struct
 * @llq: LLQ feature descriptor, retrieve via ena_com_get_dev_attr_feat.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq);
    983 
/* ena_com_get_adaptive_moderation_enabled - Query whether adaptive interrupt
 * moderation (adaptive coalescing) is currently enabled.
 * @ena_dev: ENA communication layer struct
 *
 * @return - true if adaptive moderation is enabled, false otherwise.
 */
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
	return ena_dev->adaptive_coalescing;
}
    988 
/* ena_com_enable_adaptive_moderation - Enable adaptive interrupt moderation.
 * @ena_dev: ENA communication layer struct
 *
 * Only sets the flag in ena_dev; no device command is issued here.
 */
static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
	ena_dev->adaptive_coalescing = true;
}
    993 
/* ena_com_disable_adaptive_moderation - Disable adaptive interrupt moderation.
 * @ena_dev: ENA communication layer struct
 *
 * Only clears the flag in ena_dev; no device command is issued here.
 */
static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
	ena_dev->adaptive_coalescing = false;
}
    998 
/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay
 * @ena_dev: ENA communication layer struct
 * @pkts: Number of packets since the last update
 * @bytes: Number of bytes received since the last update.
 * @smoothed_interval: In/out - previous smoothed interval on entry, updated
 * smoothed interval on return.
 * @moder_tbl_idx: Current table level as input update new level as return
 * value.
 *
 * Select a new interrupt moderation table level based on the observed
 * packet/byte counts, then blend that level's interval with the previous
 * smoothed interval.
 */
static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
						     unsigned int pkts,
						     unsigned int bytes,
						     unsigned int *smoothed_interval,
						     unsigned int *moder_tbl_idx)
{
	enum ena_intr_moder_level curr_moder_idx, new_moder_idx;
	struct ena_intr_moder_entry *curr_moder_entry;
	struct ena_intr_moder_entry *pred_moder_entry;
	struct ena_intr_moder_entry *new_moder_entry;
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int interval;

	/* We apply adaptive moderation on Rx path only.
	 * Tx uses static interrupt moderation.
	 */
	if (!pkts || !bytes)
		/* Tx interrupt, or spurious interrupt,
		 * in both cases we just use same delay values
		 */
		return;

	/* Guard against a corrupted caller-supplied level index before it is
	 * used to index the moderation table.
	 */
	curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx);
	if (unlikely(curr_moder_idx >=  ENA_INTR_MAX_NUM_OF_LEVELS)) {
		ena_trc_err("Wrong moderation index %u\n", curr_moder_idx);
		return;
	}

	curr_moder_entry = &intr_moder_tbl[curr_moder_idx];
	new_moder_idx = curr_moder_idx;

	if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
		/* At the lowest level the only possible move is up. */
		if ((pkts > curr_moder_entry->pkts_per_interval) ||
		    (bytes > curr_moder_entry->bytes_per_interval))
			new_moder_idx =
				(enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
	} else {
		pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE];

		/* Traffic at or below the previous level's thresholds: step
		 * down. Traffic above the current level's thresholds: step up,
		 * unless already at the highest level.
		 */
		if ((pkts <= pred_moder_entry->pkts_per_interval) ||
		    (bytes <= pred_moder_entry->bytes_per_interval))
			new_moder_idx =
				(enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE);
		else if ((pkts > curr_moder_entry->pkts_per_interval) ||
			 (bytes > curr_moder_entry->bytes_per_interval)) {
			if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
				new_moder_idx =
					(enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
		}
	}
	new_moder_entry = &intr_moder_tbl[new_moder_idx];

	interval = new_moder_entry->intr_moder_interval;
	/* Weighted average of the new table interval and the previous smoothed
	 * value; the "+ 5" rounds to nearest. NOTE(review): assumes
	 * ENA_INTR_DELAY_NEW_VALUE_WEIGHT + ENA_INTR_DELAY_OLD_VALUE_WEIGHT
	 * == 10 - confirm against their definitions.
	 */
	*smoothed_interval = (
		(interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT +
		ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) /
		10;

	*moder_tbl_idx = new_moder_idx;
}
   1067 
   1068 /* ena_com_update_intr_reg - Prepare interrupt register
   1069  * @intr_reg: interrupt register to update.
   1070  * @rx_delay_interval: Rx interval in usecs
   1071  * @tx_delay_interval: Tx interval in usecs
   1072  * @unmask: unask enable/disable
   1073  *
   1074  * Prepare interrupt update register with the supplied parameters.
   1075  */
   1076 static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
   1077 					   u32 rx_delay_interval,
   1078 					   u32 tx_delay_interval,
   1079 					   bool unmask)
   1080 {
   1081 	intr_reg->intr_control = 0;
   1082 	intr_reg->intr_control |= rx_delay_interval &
   1083 		ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
   1084 
   1085 	intr_reg->intr_control |=
   1086 		(tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
   1087 		& ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
   1088 
   1089 	if (unmask)
   1090 		intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
   1091 }
   1092 
/* ena_com_get_next_bounce_buffer - Claim the next bounce buffer from the
 * ring and advance the next-to-use index.
 * @bounce_buf_ctrl: bounce buffer control block (ring of equal-sized
 * buffers carved out of base_buffer).
 *
 * The index wraps via "& (buffers_num - 1)", which is only correct when
 * buffers_num is a power of two.
 *
 * @return - pointer to the claimed bounce buffer.
 */
static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
{
	u16 size, buffers_num;
	u8 *buf;

	size = bounce_buf_ctrl->buffer_size;
	buffers_num = bounce_buf_ctrl->buffers_num;

	/* Claim the current buffer; next_to_use is post-incremented. */
	buf = bounce_buf_ctrl->base_buffer +
		(bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;

	/* Warm the cache for the buffer that will be handed out next. */
	prefetch(bounce_buf_ctrl->base_buffer +
		(bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);

	return buf;
}
   1109 
#ifdef ENA_EXTENDED_STATS
/* ena_com_get_dev_extended_stats - Retrieve extended statistics from the
 * device into a caller-supplied buffer.
 * @ena_dev: ENA communication layer struct
 * @buff: destination buffer
 * @len: buffer length in bytes
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
				   u32 len);

/* ena_com_extended_stats_set_func_queue - Select the function/queue that
 * subsequent extended statistics are reported for.
 * @ena_dev: ENA communication layer struct
 * @funct_queue: function/queue selector (encoding defined by the
 * implementation - verify against the .c file)
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
					  u32 funct_queue);
#endif
   1117 #if defined(__cplusplus)
   1118 }
   1119 #endif /* __cplusplus */
   1120 #endif /* !(ENA_COM) */
   1121