Lines matching refs:io_sq (I/O submission-queue paths in the ENA driver's ena_eth_com.c)
36 static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq) in get_sq_desc_regular_queue() argument
41 tail_masked = io_sq->tail & (io_sq->q_depth - 1); in get_sq_desc_regular_queue()
43 offset = tail_masked * io_sq->desc_entry_size; in get_sq_desc_regular_queue()
45 return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset); in get_sq_desc_regular_queue()
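The regular (host-memory) path above resolves a descriptor slot by masking the free-running tail with q_depth - 1, which is only valid because q_depth is a power of two. A minimal standalone sketch of that arithmetic; the values below are hypothetical, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t q_depth = 1024;        /* assumed power-of-two depth */
        uint16_t desc_entry_size = 16;  /* hypothetical entry size */
        uint16_t tail = 1030;           /* free-running, past one wrap */

        /* Masking equals tail % q_depth when q_depth is a power of two. */
        uint16_t tail_masked = tail & (q_depth - 1);
        uint32_t offset = (uint32_t)tail_masked * desc_entry_size;

        printf("slot %u at byte offset %u\n", tail_masked, offset); /* 6, 96 */
        return 0;
    }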
48 static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq, in ena_com_write_bounce_buffer_to_dev() argument
51 struct ena_com_llq_info *llq_info = &io_sq->llq_info; in ena_com_write_bounce_buffer_to_dev()
56 dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1); in ena_com_write_bounce_buffer_to_dev()
59 if (is_llq_max_tx_burst_exists(io_sq)) { in ena_com_write_bounce_buffer_to_dev()
60 if (unlikely(!io_sq->entries_in_tx_burst_left)) { in ena_com_write_bounce_buffer_to_dev()
61 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_write_bounce_buffer_to_dev()
66 io_sq->entries_in_tx_burst_left--; in ena_com_write_bounce_buffer_to_dev()
67 netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_write_bounce_buffer_to_dev()
69 io_sq->qid, io_sq->entries_in_tx_burst_left); in ena_com_write_bounce_buffer_to_dev()
78 __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, in ena_com_write_bounce_buffer_to_dev()
81 io_sq->tail++; in ena_com_write_bounce_buffer_to_dev()
84 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) in ena_com_write_bounce_buffer_to_dev()
85 io_sq->phase ^= 1; in ena_com_write_bounce_buffer_to_dev()
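ena_com_write_bounce_buffer_to_dev() shows two pieces of bookkeeping: an LLQ tx-burst budget (entries_in_tx_burst_left) that fails the write when exhausted, and the tail/phase update after the copy (the wrap-and-flip arithmetic is sketched after the ena_com_sq_update_tail() lines further down). A hedged sketch of the budget check alone, with invented names:

    #include <stdint.h>

    /* Hypothetical per-queue burst budget; how the real field is
     * refilled (at doorbell time) is outside this listing. */
    struct burst_budget {
        uint32_t entries_in_tx_burst_left;
    };

    /* Consume one credit; -1 mirrors the early-error path above. */
    static int consume_burst_credit(struct burst_budget *b)
    {
        if (b->entries_in_tx_burst_left == 0)
            return -1;
        b->entries_in_tx_burst_left--;
        return 0;
    }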
90 static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq, in ena_com_write_header_to_bounce() argument
94 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; in ena_com_write_header_to_bounce()
95 struct ena_com_llq_info *llq_info = &io_sq->llq_info; in ena_com_write_header_to_bounce()
99 if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) in ena_com_write_header_to_bounce()
103 llq_info->descs_num_before_header * io_sq->desc_entry_size; in ena_com_write_header_to_bounce()
107 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_write_header_to_bounce()
113 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_write_header_to_bounce()
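ena_com_write_header_to_bounce() only applies to LLQ queues (the HOST placement policy returns early) and places the pushed header into the bounce buffer right after the first descs_num_before_header descriptors. A sketch of that offset computation under the same assumptions; the single bounds check stands in for the two error paths in the listing:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical bounce-buffer layout: N descriptors, then the header. */
    static int write_header(uint8_t *bounce_buf, size_t bounce_len,
                            const void *header, uint16_t header_len,
                            uint16_t descs_before_header,
                            uint16_t desc_entry_size)
    {
        size_t header_offset =
            (size_t)descs_before_header * desc_entry_size;

        if (header_offset + header_len > bounce_len)
            return -1;  /* header would overrun the bounce buffer */

        memcpy(bounce_buf + header_offset, header, header_len);
        return 0;
    }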
123 static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq) in get_sq_desc_llq() argument
125 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; in get_sq_desc_llq()
132 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in get_sq_desc_llq()
137 sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size; in get_sq_desc_llq()
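On the LLQ path, descriptors are not written to the queue directly; get_sq_desc_llq() hands out slots inside the current bounce buffer, indexed by the packet control's running idx. A small sketch, with the struct trimmed to invented essentials:

    #include <stdint.h>
    #include <stddef.h>

    /* Trimmed stand-in for the real ena_com_llq_pkt_ctrl. */
    struct pkt_ctrl {
        uint8_t *curr_bounce_buf;
        uint16_t idx;   /* next free descriptor slot in the buffer */
    };

    /* Hand out the next slot; the caller fills it in host memory and
     * the whole buffer is pushed to the device later in one burst. */
    static void *next_llq_desc(struct pkt_ctrl *ctrl,
                               uint16_t desc_entry_size)
    {
        return ctrl->curr_bounce_buf +
               (size_t)ctrl->idx++ * desc_entry_size;
    }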
144 static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq) in ena_com_close_bounce_buffer() argument
146 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; in ena_com_close_bounce_buffer()
147 struct ena_com_llq_info *llq_info = &io_sq->llq_info; in ena_com_close_bounce_buffer()
150 if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) in ena_com_close_bounce_buffer()
155 rc = ena_com_write_bounce_buffer_to_dev(io_sq, in ena_com_close_bounce_buffer()
158 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_close_bounce_buffer()
164 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); in ena_com_close_bounce_buffer()
165 memset(io_sq->llq_buf_ctrl.curr_bounce_buf, in ena_com_close_bounce_buffer()
174 static void *get_sq_desc(struct ena_com_io_sq *io_sq) in get_sq_desc() argument
176 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) in get_sq_desc()
177 return get_sq_desc_llq(io_sq); in get_sq_desc()
179 return get_sq_desc_regular_queue(io_sq); in get_sq_desc()
182 static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq) in ena_com_sq_update_llq_tail() argument
184 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; in ena_com_sq_update_llq_tail()
185 struct ena_com_llq_info *llq_info = &io_sq->llq_info; in ena_com_sq_update_llq_tail()
189 rc = ena_com_write_bounce_buffer_to_dev(io_sq, in ena_com_sq_update_llq_tail()
192 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_sq_update_llq_tail()
198 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); in ena_com_sq_update_llq_tail()
199 memset(io_sq->llq_buf_ctrl.curr_bounce_buf, in ena_com_sq_update_llq_tail()
207 llq_info->desc_list_entry_size / io_sq->desc_entry_size; in ena_com_sq_update_llq_tail()
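ena_com_close_bounce_buffer() and ena_com_sq_update_llq_tail() share the same flush cycle: push the filled bounce buffer to the device, rotate to the next buffer, zero it, and recompute how many descriptors it can hold (desc_list_entry_size / desc_entry_size). A compilable toy version with a two-buffer ring and a stubbed device copy, all names invented:

    #include <stdint.h>
    #include <string.h>

    #define BOUNCE_BUF_SIZE 256   /* hypothetical desc_list_entry_size */

    static uint8_t bufs[2][BOUNCE_BUF_SIZE];   /* toy two-buffer ring */

    struct bounce_ctrl {
        uint8_t *curr_bounce_buf;
        int next;
    };

    /* Stand-in for the real __iowrite64_copy() push to device memory. */
    static int flush_to_dev(const uint8_t *buf) { (void)buf; return 0; }

    /* The cycle shared by ena_com_close_bounce_buffer() and
     * ena_com_sq_update_llq_tail(): push the filled buffer, rotate to
     * a fresh one, zero it, and recompute its descriptor capacity. */
    static int flush_cycle(struct bounce_ctrl *ctrl,
                           uint16_t desc_entry_size, uint16_t *descs_left)
    {
        if (flush_to_dev(ctrl->curr_bounce_buf))
            return -1;

        ctrl->next ^= 1;
        ctrl->curr_bounce_buf = bufs[ctrl->next];
        memset(ctrl->curr_bounce_buf, 0, BOUNCE_BUF_SIZE);
        *descs_left = BOUNCE_BUF_SIZE / desc_entry_size;
        return 0;
    }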
213 static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) in ena_com_sq_update_tail() argument
215 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) in ena_com_sq_update_tail()
216 return ena_com_sq_update_llq_tail(io_sq); in ena_com_sq_update_tail()
218 io_sq->tail++; in ena_com_sq_update_tail()
221 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) in ena_com_sq_update_tail()
222 io_sq->phase ^= 1; in ena_com_sq_update_tail()
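For host-memory queues the tail update is just an increment, with the phase bit flipping each time the masked tail wraps to slot zero so the current lap's descriptors can be told apart from stale ones. A sketch of exactly that arithmetic:

    #include <stdint.h>

    struct sq_state {
        uint16_t tail;     /* free-running producer counter */
        uint16_t q_depth;  /* power of two, as assumed earlier */
        uint8_t  phase;    /* written into every descriptor */
    };

    /* Advance one entry; flip the phase on wrap-around, mirroring the
     * tail++ / phase ^= 1 pair in the listing. */
    static void sq_advance(struct sq_state *sq)
    {
        sq->tail++;
        if ((sq->tail & (sq->q_depth - 1)) == 0)
            sq->phase ^= 1;
    }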
275 static int ena_com_create_meta(struct ena_com_io_sq *io_sq, in ena_com_create_meta() argument
280 meta_desc = get_sq_desc(io_sq); in ena_com_create_meta()
301 meta_desc->len_ctrl |= ((u32)io_sq->phase << in ena_com_create_meta()
318 return ena_com_sq_update_tail(io_sq); in ena_com_create_meta()
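ena_com_create_meta() stamps the current phase into the descriptor's len_ctrl word with a shift-and-mask. A sketch of that bit packing; the shift value below is invented, while the driver's real values come from defines like the ENA_ETH_IO_TX_DESC_PHASE_SHIFT seen later in this listing:

    #include <stdint.h>

    /* Hypothetical bit position, chosen only for illustration. */
    #define DESC_PHASE_SHIFT 15
    #define DESC_PHASE_MASK  (1u << DESC_PHASE_SHIFT)

    static uint32_t set_phase(uint32_t len_ctrl, uint8_t phase)
    {
        len_ctrl &= ~DESC_PHASE_MASK;   /* clear the previous lap's bit */
        len_ctrl |= ((uint32_t)phase << DESC_PHASE_SHIFT)
                    & DESC_PHASE_MASK;
        return len_ctrl;
    }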
321 static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, in ena_com_create_and_store_tx_meta_desc() argument
330 if (io_sq->disable_meta_caching) { in ena_com_create_and_store_tx_meta_desc()
335 return ena_com_create_meta(io_sq, ena_meta); in ena_com_create_and_store_tx_meta_desc()
338 if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) { in ena_com_create_and_store_tx_meta_desc()
341 memcpy(&io_sq->cached_tx_meta, ena_meta, in ena_com_create_and_store_tx_meta_desc()
343 return ena_com_create_meta(io_sq, ena_meta); in ena_com_create_and_store_tx_meta_desc()
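The meta-descriptor path has two branches: with disable_meta_caching set, a meta descriptor is emitted for every packet; otherwise one is emitted only when the metadata differs from the cached copy, which is refreshed at the same time. A hedged sketch with an invented metadata struct (the real one is ena_com_tx_meta, and the real comparison is ena_com_meta_desc_changed(), not a raw memcmp):

    #include <stdbool.h>
    #include <string.h>

    /* Invented stand-in for the cached TX metadata blob. */
    struct tx_meta {
        unsigned l3_hdr_len, l4_hdr_len, mss;
    };

    /* Decide whether a meta descriptor is needed, mirroring the two
     * branches in ena_com_create_and_store_tx_meta_desc(). */
    static bool need_meta_desc(bool disable_meta_caching,
                               struct tx_meta *cached,
                               const struct tx_meta *next)
    {
        if (disable_meta_caching)
            return true;
        if (memcmp(cached, next, sizeof(*cached)) != 0) {
            *cached = *next;    /* refresh the cache */
            return true;
        }
        return false;
    }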
384 int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, in ena_com_prepare_tx() argument
393 u16 start_tail = io_sq->tail; in ena_com_prepare_tx()
398 WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type"); in ena_com_prepare_tx()
401 if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) { in ena_com_prepare_tx()
402 netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
407 if (unlikely(header_len > io_sq->tx_max_header_size)) { in ena_com_prepare_tx()
408 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
410 header_len, io_sq->tx_max_header_size); in ena_com_prepare_tx()
414 if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && in ena_com_prepare_tx()
416 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
421 rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len); in ena_com_prepare_tx()
425 rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta); in ena_com_prepare_tx()
427 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
434 rc = ena_com_close_bounce_buffer(io_sq); in ena_com_prepare_tx()
436 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
438 *nb_hw_desc = io_sq->tail - start_tail; in ena_com_prepare_tx()
442 desc = get_sq_desc(io_sq); in ena_com_prepare_tx()
454 desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & in ena_com_prepare_tx()
496 rc = ena_com_sq_update_tail(io_sq); in ena_com_prepare_tx()
498 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
503 desc = get_sq_desc(io_sq); in ena_com_prepare_tx()
509 desc->len_ctrl |= ((u32)io_sq->phase << in ena_com_prepare_tx()
518 GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); in ena_com_prepare_tx()
529 rc = ena_com_sq_update_tail(io_sq); in ena_com_prepare_tx()
531 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
536 rc = ena_com_close_bounce_buffer(io_sq); in ena_com_prepare_tx()
538 *nb_hw_desc = io_sq->tail - start_tail; in ena_com_prepare_tx()
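ena_com_prepare_tx() reports how many hardware descriptors the packet consumed as tail - start_tail. Because both are free-running unsigned counters, the subtraction stays correct even when tail wraps between the two reads; a two-line proof by example:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint16_t start_tail = 0xfffe;   /* just before wrap */
        uint16_t tail = 0x0003;         /* five entries later */

        /* Unsigned wrap-around makes the difference come out right. */
        assert((uint16_t)(tail - start_tail) == 5);
        return 0;
    }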
543 struct ena_com_io_sq *io_sq, in ena_com_rx_pkt() argument
589 io_sq->next_to_comp += nb_hw_desc; in ena_com_rx_pkt()
593 io_sq->qid, io_sq->next_to_comp); in ena_com_rx_pkt()
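On the completion side, next_to_comp advances by the number of hardware descriptors the packet occupied. Together with the free-running tail, this pair is what a space check like the ena_com_sq_have_enough_space() calls above needs; a sketch of that accounting, under the same power-of-two assumption (the real helper lives elsewhere in the driver):

    #include <stdint.h>
    #include <stdbool.h>

    struct sq_counters {
        uint16_t tail;          /* producer, free-running */
        uint16_t next_to_comp;  /* consumer, free-running */
        uint16_t q_depth;
    };

    /* Occupancy is a plain subtraction of free-running counters, so
     * free space follows without any masking. */
    static bool have_space(const struct sq_counters *sq, uint16_t required)
    {
        uint16_t used = sq->tail - sq->next_to_comp;
        return (uint16_t)(sq->q_depth - used) >= required;
    }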
603 int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, in ena_com_add_single_rx_desc() argument
609 WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); in ena_com_add_single_rx_desc()
611 if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1))) in ena_com_add_single_rx_desc()
614 desc = get_sq_desc(io_sq); in ena_com_add_single_rx_desc()
625 (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK); in ena_com_add_single_rx_desc()
629 netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_add_single_rx_desc()
631 __func__, io_sq->qid, req_id); in ena_com_add_single_rx_desc()
635 ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); in ena_com_add_single_rx_desc()
637 return ena_com_sq_update_tail(io_sq); in ena_com_add_single_rx_desc()
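Both the TX and RX paths split a DMA address across two descriptor fields: the low 32 bits go in directly, and the bits from 32 up to the device's addressing limit (dma_addr_bits) are masked out with GENMASK_ULL and shifted down. A standalone sketch with an open-coded mask and hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    /* Open-coded equivalent of the kernel's GENMASK_ULL(h, l). */
    #define GENMASK_U64(h, l) \
        ((~0ULL >> (63 - (h))) & (~0ULL << (l)))

    int main(void)
    {
        uint64_t paddr = 0x0000001234abcd00ULL; /* hypothetical address */
        unsigned int dma_addr_bits = 48;        /* assumed device limit */

        uint32_t lo = (uint32_t)paddr;
        uint16_t hi = (uint16_t)((paddr &
                GENMASK_U64(dma_addr_bits - 1, 32)) >> 32);

        printf("lo=0x%08x hi=0x%04x\n", lo, hi); /* lo=0x34abcd00 hi=0x0012 */
        return 0;
    }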