Lines Matching refs:io_sq
67 static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
72 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
74 offset = tail_masked * io_sq->desc_entry_size;
76 return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
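Note: get_sq_desc_regular_queue picks the next free descriptor purely from the tail counter: because q_depth is a power of two, tail & (q_depth - 1) is a cheap modulo, and multiplying by desc_entry_size gives the byte offset into the host-memory descriptor ring. A minimal standalone sketch of that addressing (toy_* names and the stripped-down struct are illustrative, not part of the driver):

/* Standalone sketch of the regular-queue descriptor addressing shown above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_io_sq {
	void    *virt_addr;       /* base of the descriptor ring in host memory */
	uint16_t tail;            /* free-running producer counter              */
	uint16_t q_depth;         /* must be a power of two                     */
	uint8_t  desc_entry_size; /* bytes per descriptor                       */
};

static void *toy_get_sq_desc(struct toy_io_sq *sq)
{
	/* tail is never masked when stored, only when used as an index,
	 * so (tail & (q_depth - 1)) wraps around the ring for free. */
	uint16_t tail_masked = sq->tail & (sq->q_depth - 1);
	uint32_t offset = (uint32_t)tail_masked * sq->desc_entry_size;

	return (uint8_t *)sq->virt_addr + offset;
}

int main(void)
{
	struct toy_io_sq sq = {
		.virt_addr = calloc(128, 16),
		.tail = 130,            /* already wrapped past q_depth once */
		.q_depth = 128,
		.desc_entry_size = 16,
	};

	/* 130 & 127 == 2, so the descriptor lives at offset 2 * 16 = 32. */
	printf("offset = %td\n",
	       (uint8_t *)toy_get_sq_desc(&sq) - (uint8_t *)sq.virt_addr);
	free(sq.virt_addr);
	return 0;
}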
79 static inline void ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
82 struct ena_com_llq_info *llq_info = &io_sq->llq_info;
87 dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
96 ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
100 io_sq->tail++;
103 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
104 io_sq->phase ^= 1;
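Note: in LLQ (device placement) mode, descriptors are first staged in a host-side bounce buffer and then copied into the device's push memory in one burst; the listing shows the destination derived from the masked tail, followed by a tail increment and a phase flip on wrap. A hedged sketch of that flush step, with ENA_MEMCPY_TO_DEVICE_64 modelled as a plain memcpy (the dst_offset math is not visible in the listing and is assumed here from the visible fields):

#include <stdint.h>
#include <string.h>

struct toy_llq_sq {
	uint8_t *pbuf_dev_addr;         /* device push memory (simulated here) */
	uint16_t tail, q_depth;         /* q_depth is a power of two           */
	uint8_t  phase;
	uint16_t desc_list_entry_size;  /* bytes flushed per bounce buffer     */
};

static void toy_write_bounce_buffer_to_dev(struct toy_llq_sq *sq,
					   const uint8_t *bounce_buffer)
{
	uint16_t dst_tail_mask = sq->tail & (sq->q_depth - 1);
	uint32_t dst_offset = (uint32_t)dst_tail_mask * sq->desc_list_entry_size;

	/* One burst copy per packet's worth of descriptors (plus any pushed
	 * header); the real macro issues 64-byte write-combined stores. */
	memcpy(sq->pbuf_dev_addr + dst_offset, bounce_buffer,
	       sq->desc_list_entry_size);

	sq->tail++;
	if ((sq->tail & (sq->q_depth - 1)) == 0)
		sq->phase ^= 1;         /* flip expected phase after a wrap */
}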
107 static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
111 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
112 struct ena_com_llq_info *llq_info = &io_sq->llq_info;
116 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
120 llq_info->descs_num_before_header * io_sq->desc_entry_size;
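Note: ena_com_write_header_to_bounce copies the packet header into the same pushed entry as the descriptors, at an offset just past the first descs_num_before_header descriptors; host-placement queues return early and never push a header. A simplified sketch of that layout (toy_* names are illustrative, error handling reduced to a bounds check):

#include <stdint.h>
#include <string.h>

struct toy_llq_info {
	uint16_t descs_num_before_header; /* descriptors preceding the header  */
	uint16_t desc_list_entry_size;    /* total bytes in one bounce buffer  */
};

static int toy_write_header_to_bounce(uint8_t *bounce_buffer,
				      const struct toy_llq_info *llq_info,
				      uint16_t desc_entry_size,
				      const uint8_t *header_src,
				      uint16_t header_len)
{
	/* Layout of one pushed entry: [N descriptors][header][more descs]. */
	uint16_t header_offset =
		llq_info->descs_num_before_header * desc_entry_size;

	/* The header must fit in the same pushed entry as the descriptors. */
	if (header_offset + header_len > llq_info->desc_list_entry_size)
		return -1;

	memcpy(bounce_buffer + header_offset, header_src, header_len);
	return 0;
}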
137 static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
139 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
150 sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
157 static inline void ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
159 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
160 struct ena_com_llq_info *llq_info = &io_sq->llq_info;
162 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
167 ena_com_write_bounce_buffer_to_dev(io_sq,
170 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
171 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
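Note: get_sq_desc_llq and ena_com_close_bounce_buffer together form the LLQ bounce-buffer lifecycle: descriptors are handed out at idx * desc_entry_size inside the current bounce buffer, and closing the packet flushes a partially used buffer, rotates to the next one from a small pool, and zeroes it. A hedged sketch of that lifecycle (pool size, entry size, and all toy_* names are assumptions for illustration):

#include <stdint.h>
#include <string.h>

#define TOY_ENTRY_SIZE   128   /* bytes per bounce buffer / pushed entry   */
#define TOY_NUM_BUFFERS  4     /* buffers cycled while earlier ones drain  */

struct toy_bounce_ctrl {
	uint8_t  pool[TOY_NUM_BUFFERS][TOY_ENTRY_SIZE];
	uint16_t next;                 /* round-robin cursor into the pool   */
	uint8_t *curr_bounce_buf;      /* buffer the next packet is built in */
	uint16_t idx;                  /* descriptors staged in curr buffer  */
	uint16_t desc_entry_size;
};

static uint8_t *toy_next_bounce_buffer(struct toy_bounce_ctrl *c)
{
	uint8_t *buf = c->pool[c->next];
	c->next = (c->next + 1) % TOY_NUM_BUFFERS;
	return buf;
}

/* Hand out the next descriptor slot inside the current bounce buffer. */
static void *toy_get_sq_desc_llq(struct toy_bounce_ctrl *c)
{
	return c->curr_bounce_buf + c->idx++ * c->desc_entry_size;
}

/* End of packet: flush whatever was staged and start with a clean buffer. */
static void toy_close_bounce_buffer(struct toy_bounce_ctrl *c,
				    void (*flush)(const uint8_t *buf))
{
	if (c->idx) {
		flush(c->curr_bounce_buf);
		c->curr_bounce_buf = toy_next_bounce_buffer(c);
		memset(c->curr_bounce_buf, 0, TOY_ENTRY_SIZE);
	}
	c->idx = 0;
}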
179 static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
181 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
182 return get_sq_desc_llq(io_sq);
184 return get_sq_desc_regular_queue(io_sq);
187 static inline void ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
189 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
190 struct ena_com_llq_info *llq_info = &io_sq->llq_info;
193 ena_com_write_bounce_buffer_to_dev(io_sq,
197 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
198 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
206 llq_info->desc_list_entry_size / io_sq->desc_entry_size;
210 static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
213 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
214 ena_com_sq_update_llq_tail(io_sq);
218 io_sq->tail++;
221 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
222 io_sq->phase ^= 1;
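Note: ena_com_sq_update_tail dispatches on the placement policy (the LLQ path flushes via the bounce buffer), while host-placement queues simply bump the tail and flip the phase bit whenever the tail wraps. The phase bit is what lets descriptors from the current lap be told apart from stale ones written on the previous lap. A short runnable demo of that bookkeeping (q_depth of 4 and the toy_* names are purely illustrative):

#include <stdint.h>
#include <stdio.h>

struct toy_tail {
	uint16_t tail;
	uint16_t q_depth;  /* power of two */
	uint8_t  phase;
};

static void toy_sq_update_tail(struct toy_tail *sq)
{
	sq->tail++;
	if ((sq->tail & (sq->q_depth - 1)) == 0)
		sq->phase ^= 1;  /* wrap: flip the expected-phase bit */
}

int main(void)
{
	struct toy_tail sq = { .tail = 0, .q_depth = 4, .phase = 1 };

	for (int i = 0; i < 10; i++) {
		printf("slot %u stamped with phase %u\n",
		       sq.tail & (sq.q_depth - 1), sq.phase);
		toy_sq_update_tail(&sq);
	}
	return 0;
}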
271 static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
277 rc = memcmp(&io_sq->cached_tx_meta,
288 static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
294 meta_desc = get_sq_desc(io_sq);
313 meta_desc->len_ctrl |= (io_sq->phase <<
331 memcpy(&io_sq->cached_tx_meta, ena_meta,
334 ena_com_sq_update_tail(io_sq);
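Note: the cached_tx_meta lines implement a small optimisation: a TX meta descriptor (offload metadata such as MSS and header lengths) is only emitted when it differs from the last one written, since the device keeps using the previously programmed values; after writing, the new metadata is cached for the next comparison. A simplified sketch of that pattern (struct layout and toy_* names are stand-ins, not the real ena_com_tx_meta):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct toy_tx_meta {
	uint16_t mss;
	uint8_t  l3_hdr_len, l3_hdr_offset, l4_hdr_len;
};

struct toy_tx_sq {
	struct toy_tx_meta cached_tx_meta;
	unsigned meta_descs_written;
};

static bool toy_meta_desc_changed(const struct toy_tx_sq *sq,
				  const struct toy_tx_meta *meta)
{
	return memcmp(&sq->cached_tx_meta, meta, sizeof(*meta)) != 0;
}

static void toy_maybe_write_meta_desc(struct toy_tx_sq *sq,
				      const struct toy_tx_meta *meta)
{
	if (!toy_meta_desc_changed(sq, meta))
		return;                   /* reuse the device's cached meta */

	/* The real code builds an ena_eth_io_tx_meta_desc here (phase bit,
	 * MSS, header lengths) and advances the tail; we only count it. */
	sq->meta_descs_written++;
	memcpy(&sq->cached_tx_meta, meta, sizeof(*meta));
}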
370 int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
379 u16 start_tail = io_sq->tail;
384 ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
388 if (!ena_com_sq_have_enough_space(io_sq, num_bufs + 1)) {
393 if (unlikely(header_len > io_sq->tx_max_header_size)) {
395 header_len, io_sq->tx_max_header_size);
399 if (unlikely((io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) && !buffer_to_push))
402 rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
406 have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
409 ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
413 ena_com_close_bounce_buffer(io_sq);
414 *nb_hw_desc = io_sq->tail - start_tail;
418 desc = get_sq_desc(io_sq);
430 desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
472 ena_com_sq_update_tail(io_sq);
474 desc = get_sq_desc(io_sq);
480 desc->len_ctrl |= (io_sq->phase <<
489 GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
500 ena_com_sq_update_tail(io_sq);
502 ena_com_close_bounce_buffer(io_sq);
504 *nb_hw_desc = io_sq->tail - start_tail;
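Note: the ena_com_prepare_tx lines sketch the whole TX submission flow: check that the ring has room for num_bufs + 1 descriptors, push the header for LLQ queues, emit an optional meta descriptor, then write one descriptor per fragment carrying the phase bit and the 64-bit DMA address split into low/high halves, and finally report nb_hw_desc = tail - start_tail. A condensed, hedged sketch of that flow (descriptor layout, bit positions, and all toy_* names are simplified assumptions):

#include <stdbool.h>
#include <stdint.h>

struct toy_buf { uint64_t paddr; uint16_t len; };

struct toy_desc {
	uint32_t len_ctrl;     /* length, phase, first/last flags (simplified) */
	uint32_t buff_addr_lo;
	uint16_t buff_addr_hi;
};

struct toy_tx_sq {
	struct toy_desc ring[256];
	uint16_t tail, next_to_comp, q_depth;  /* q_depth is a power of two */
	uint8_t  phase;
};

static bool toy_have_space(const struct toy_tx_sq *sq, uint16_t needed)
{
	uint16_t in_flight = (uint16_t)(sq->tail - sq->next_to_comp);
	return (uint16_t)(sq->q_depth - in_flight) > needed;
}

static void toy_advance_tail(struct toy_tx_sq *sq)
{
	sq->tail++;
	if ((sq->tail & (sq->q_depth - 1)) == 0)
		sq->phase ^= 1;
}

static int toy_prepare_tx(struct toy_tx_sq *sq, const struct toy_buf *bufs,
			  uint16_t num_bufs, bool need_meta, uint16_t *nb_hw_desc)
{
	uint16_t start_tail = sq->tail;

	/* num_bufs data descriptors plus one for the meta/first descriptor. */
	if (!toy_have_space(sq, num_bufs + 1))
		return -1;                 /* caller retries once space frees */

	if (need_meta)                     /* offload metadata changed        */
		toy_advance_tail(sq);      /* meta descriptor takes one slot  */

	for (uint16_t i = 0; i < num_bufs; i++) {
		struct toy_desc *d = &sq->ring[sq->tail & (sq->q_depth - 1)];

		/* The real code masks the high half to the device's
		 * dma_addr_bits (GENMASK_ULL); this sketch just shifts. */
		d->len_ctrl     = bufs[i].len | ((uint32_t)sq->phase << 16);
		d->buff_addr_lo = (uint32_t)bufs[i].paddr;
		d->buff_addr_hi = (uint16_t)(bufs[i].paddr >> 32);

		toy_advance_tail(sq);
	}

	*nb_hw_desc = sq->tail - start_tail;   /* descriptors this packet used */
	return 0;
}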
509 struct ena_com_io_sq *io_sq,
545 io_sq->next_to_comp += nb_hw_desc;
548 io_sq->qid, io_sq->next_to_comp);
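Note: next_to_comp is the consumer side of the ring accounting: tail counts descriptors handed to the device, next_to_comp counts descriptors the device has finished with, and their wrapping difference is how full the ring is; advancing next_to_comp by nb_hw_desc is what reopens space for ena_com_sq_have_enough_space. A small runnable demo of that arithmetic (struct and numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

struct toy_ring_acct {
	uint16_t tail;          /* descriptors submitted so far */
	uint16_t next_to_comp;  /* descriptors completed so far */
	uint16_t q_depth;
};

static uint16_t toy_free_entries(const struct toy_ring_acct *r)
{
	/* unsigned wrap-around keeps this correct even after tail overflows */
	return r->q_depth - (uint16_t)(r->tail - r->next_to_comp);
}

int main(void)
{
	struct toy_ring_acct r = { .tail = 300, .next_to_comp = 280, .q_depth = 256 };
	uint16_t nb_hw_desc = 3;   /* descriptors just reported as completed */

	printf("free before: %u\n", toy_free_entries(&r));  /* 236 */
	r.next_to_comp += nb_hw_desc;
	printf("free after:  %u\n", toy_free_entries(&r));  /* 239 */
	return 0;
}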
557 int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
563 ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
566 if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
569 desc = get_sq_desc(io_sq);
579 desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
586 ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
588 ena_com_sq_update_tail(io_sq);
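Note: ena_com_add_single_rx_desc posts one receive buffer: a single descriptor gets the buffer length, a req_id that the completion will echo back, the current phase bit, and the DMA address split into a 32-bit low half and a high half masked to the device's addressable width (GENMASK_ULL(dma_addr_bits - 1, 32) in the listing), then the tail is advanced. A hedged sketch of that step (descriptor layout and toy_* names are simplified; dma_addr_bits is assumed to be below 64):

#include <stdint.h>
#include <string.h>

struct toy_rx_desc {
	uint16_t length;
	uint8_t  ctrl;          /* first | last | comp_req | phase (bit 0 here) */
	uint16_t req_id;
	uint32_t buff_addr_lo;
	uint16_t buff_addr_hi;
};

struct toy_rx_sq {
	struct toy_rx_desc ring[256];
	uint16_t tail, q_depth;           /* q_depth is a power of two     */
	uint8_t  phase, dma_addr_bits;    /* dma_addr_bits assumed < 64    */
};

static int toy_add_single_rx_desc(struct toy_rx_sq *sq, uint64_t paddr,
				  uint16_t len, uint16_t req_id)
{
	struct toy_rx_desc *desc = &sq->ring[sq->tail & (sq->q_depth - 1)];
	/* bits [dma_addr_bits-1 : 32] of the physical address */
	uint64_t hi_mask = (((uint64_t)1 << sq->dma_addr_bits) - 1) &
			   ~(uint64_t)UINT32_MAX;

	memset(desc, 0, sizeof(*desc));
	desc->length       = len;
	desc->req_id       = req_id;            /* echoed back on completion */
	desc->ctrl         = (uint8_t)(sq->phase & 0x1);
	desc->buff_addr_lo = (uint32_t)paddr;
	desc->buff_addr_hi = (uint16_t)((paddr & hi_mask) >> 32);

	sq->tail++;
	if ((sq->tail & (sq->q_depth - 1)) == 0)
		sq->phase ^= 1;
	return 0;
}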