
Searched refs: sq (Results 1 – 25 of 241) sorted by relevance


/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/
en_tx.c
46 static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) in mlx5e_dma_unmap_wqe_err() argument
52 mlx5e_dma_get(sq, --sq->dma_fifo_pc); in mlx5e_dma_unmap_wqe_err()
54 mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma); in mlx5e_dma_unmap_wqe_err()
119 mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, in mlx5e_txwqe_build_eseg_csum() argument
123 if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg))) in mlx5e_txwqe_build_eseg_csum()
131 sq->stats->csum_partial_inner++; in mlx5e_txwqe_build_eseg_csum()
134 sq->stats->csum_partial++; in mlx5e_txwqe_build_eseg_csum()
139 sq->stats->csum_partial++; in mlx5e_txwqe_build_eseg_csum()
142 sq->stats->csum_none++; in mlx5e_txwqe_build_eseg_csum()
149 mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop) in mlx5e_tx_get_gso_ihs() argument
[all …]
en_txrx.c
49 static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq) in mlx5e_handle_tx_dim() argument
51 struct mlx5e_sq_stats *stats = sq->stats; in mlx5e_handle_tx_dim()
54 if (unlikely(!test_bit(MLX5E_SQ_STATE_DIM, &sq->state))) in mlx5e_handle_tx_dim()
57 dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample); in mlx5e_handle_tx_dim()
58 net_dim(&sq->dim, dim_sample); in mlx5e_handle_tx_dim()
73 void mlx5e_trigger_irq(struct mlx5e_icosq *sq) in mlx5e_trigger_irq() argument
75 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_trigger_irq()
77 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); in mlx5e_trigger_irq()
79 sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { in mlx5e_trigger_irq()
84 nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); in mlx5e_trigger_irq()
[all …]
/linux-6.6.21/drivers/nvme/target/
fabrics-cmd-auth.c
17 struct nvmet_sq *sq = container_of(to_delayed_work(work), in nvmet_auth_expired_work() local
21 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid); in nvmet_auth_expired_work()
22 sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; in nvmet_auth_expired_work()
23 sq->dhchap_tid = -1; in nvmet_auth_expired_work()
26 void nvmet_auth_sq_init(struct nvmet_sq *sq) in nvmet_auth_sq_init() argument
29 INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work); in nvmet_auth_sq_init()
30 sq->authenticated = false; in nvmet_auth_sq_init()
31 sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; in nvmet_auth_sq_init()
36 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_auth_negotiate()
41 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_negotiate()
[all …]
auth.c
222 void nvmet_auth_sq_free(struct nvmet_sq *sq) in nvmet_auth_sq_free() argument
224 cancel_delayed_work(&sq->auth_expired_work); in nvmet_auth_sq_free()
225 kfree(sq->dhchap_c1); in nvmet_auth_sq_free()
226 sq->dhchap_c1 = NULL; in nvmet_auth_sq_free()
227 kfree(sq->dhchap_c2); in nvmet_auth_sq_free()
228 sq->dhchap_c2 = NULL; in nvmet_auth_sq_free()
229 kfree(sq->dhchap_skey); in nvmet_auth_sq_free()
230 sq->dhchap_skey = NULL; in nvmet_auth_sq_free()
257 if (req->sq->ctrl->host_key && in nvmet_check_auth_status()
258 !req->sq->authenticated) in nvmet_check_auth_status()
[all …]
/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c
61 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq, in mlx5e_xmit_xdp_buff() argument
96 dma_addr = dma_map_single(sq->pdev, xdptxd->data, xdptxd->len, in mlx5e_xmit_xdp_buff()
98 if (dma_mapping_error(sq->pdev, dma_addr)) { in mlx5e_xmit_xdp_buff()
105 if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, in mlx5e_xmit_xdp_buff()
106 mlx5e_xmit_xdp_frame, sq, xdptxd, 0))) in mlx5e_xmit_xdp_buff()
110 mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, in mlx5e_xmit_xdp_buff()
112 mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, in mlx5e_xmit_xdp_buff()
114 mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, in mlx5e_xmit_xdp_buff()
126 dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd->len, DMA_BIDIRECTIONAL); in mlx5e_xmit_xdp_buff()
140 dma_sync_single_for_device(sq->pdev, addr, len, in mlx5e_xmit_xdp_buff()
[all …]
reporter_tx.c
22 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) in mlx5e_wait_for_sq_flush() argument
24 struct mlx5_core_dev *dev = sq->mdev; in mlx5e_wait_for_sq_flush()
30 if (sq->cc == sq->pc) in mlx5e_wait_for_sq_flush()
36 netdev_err(sq->netdev, in mlx5e_wait_for_sq_flush()
38 sq->sqn, sq->cc, sq->pc); in mlx5e_wait_for_sq_flush()
43 static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) in mlx5e_reset_txqsq_cc_pc() argument
45 WARN_ONCE(sq->cc != sq->pc, in mlx5e_reset_txqsq_cc_pc()
47 sq->sqn, sq->cc, sq->pc); in mlx5e_reset_txqsq_cc_pc()
48 sq->cc = 0; in mlx5e_reset_txqsq_cc_pc()
49 sq->dma_fifo_cc = 0; in mlx5e_reset_txqsq_cc_pc()
[all …]
xdp.h
106 void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
108 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
109 void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
116 INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
119 INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
122 INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
123 INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
153 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) in mlx5e_xmit_xdp_doorbell() argument
155 if (sq->doorbell_cseg) { in mlx5e_xmit_xdp_doorbell()
156 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg); in mlx5e_xmit_xdp_doorbell()
[all …]
txrx.h
61 void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
95 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
119 #define MLX5E_TX_FETCH_WQE(sq, pi) \ argument
120 ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))
168 static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size) in mlx5e_txqsq_get_next_pi() argument
170 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_txqsq_get_next_pi()
173 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); in mlx5e_txqsq_get_next_pi()
178 wi = &sq->db.wqe_info[pi]; in mlx5e_txqsq_get_next_pi()
186 mlx5e_post_nop(wq, sq->sqn, &sq->pc); in mlx5e_txqsq_get_next_pi()
188 sq->stats->nop += contig_wqebbs; in mlx5e_txqsq_get_next_pi()
[all …]
qos.c
79 struct mlx5e_txqsq *sq; in mlx5e_open_qos_sq() local
115 sq = kzalloc(sizeof(*sq), GFP_KERNEL); in mlx5e_open_qos_sq()
117 if (!sq) in mlx5e_open_qos_sq()
126 err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq); in mlx5e_open_qos_sq()
130 &param_sq, sq, 0, hw_id, in mlx5e_open_qos_sq()
135 rcu_assign_pointer(qos_sqs[qid], sq); in mlx5e_open_qos_sq()
140 mlx5e_close_cq(&sq->cq); in mlx5e_open_qos_sq()
142 kfree(sq); in mlx5e_open_qos_sq()
156 struct mlx5e_txqsq *sq; in mlx5e_activate_qos_sq() local
159 sq = mlx5e_get_qos_sq(priv, node_qid); in mlx5e_activate_qos_sq()
[all …]
/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
tx.c
46 static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq, in mlx5e_xsk_tx_post_err() argument
49 u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); in mlx5e_xsk_tx_post_err()
50 struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi]; in mlx5e_xsk_tx_post_err()
56 nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); in mlx5e_xsk_tx_post_err()
57 mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, *xdpi); in mlx5e_xsk_tx_post_err()
58 sq->doorbell_cseg = &nopwqe->ctrl; in mlx5e_xsk_tx_post_err()
61 bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) in mlx5e_xsk_tx() argument
63 struct xsk_buff_pool *pool = sq->xsk_pool; in mlx5e_xsk_tx()
71 int check_result = INDIRECT_CALL_2(sq->xmit_xdp_frame_check, in mlx5e_xsk_tx()
74 sq); in mlx5e_xsk_tx()
[all …]
/linux-6.6.21/sound/oss/dmasound/
dmasound_core.c
411 static int sq_allocate_buffers(struct sound_queue *sq, int num, int size) in sq_allocate_buffers() argument
415 if (sq->buffers) in sq_allocate_buffers()
417 sq->numBufs = num; in sq_allocate_buffers()
418 sq->bufSize = size; in sq_allocate_buffers()
419 sq->buffers = kmalloc_array (num, sizeof(char *), GFP_KERNEL); in sq_allocate_buffers()
420 if (!sq->buffers) in sq_allocate_buffers()
423 sq->buffers[i] = dmasound.mach.dma_alloc(size, GFP_KERNEL); in sq_allocate_buffers()
424 if (!sq->buffers[i]) { in sq_allocate_buffers()
426 dmasound.mach.dma_free(sq->buffers[i], size); in sq_allocate_buffers()
427 kfree(sq->buffers); in sq_allocate_buffers()
[all …]
/linux-6.6.21/drivers/net/ethernet/marvell/octeontx2/nic/
qos_sq.c
38 struct otx2_snd_queue *sq; in otx2_qos_sq_aura_pool_init() local
76 sq = &qset->sq[qidx]; in otx2_qos_sq_aura_pool_init()
77 sq->sqb_count = 0; in otx2_qos_sq_aura_pool_init()
78 sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL); in otx2_qos_sq_aura_pool_init()
79 if (!sq->sqb_ptrs) { in otx2_qos_sq_aura_pool_init()
89 sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; in otx2_qos_sq_aura_pool_init()
96 if (!sq->sqb_ptrs[ptr]) in otx2_qos_sq_aura_pool_init()
98 iova = sq->sqb_ptrs[ptr]; in otx2_qos_sq_aura_pool_init()
106 sq->sqb_count = 0; in otx2_qos_sq_aura_pool_init()
107 kfree(sq->sqb_ptrs); in otx2_qos_sq_aura_pool_init()
[all …]
otx2_txrx.c
116 struct otx2_snd_queue *sq, in otx2_xdp_snd_pkt_handler() argument
124 sg = &sq->sg[snd_comp->sqe_id]; in otx2_xdp_snd_pkt_handler()
135 struct otx2_snd_queue *sq, in otx2_snd_pkt_handler() argument
151 sg = &sq->sg[snd_comp->sqe_id]; in otx2_snd_pkt_handler()
157 timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id]; in otx2_snd_pkt_handler()
451 struct otx2_snd_queue *sq; in otx2_tx_napi_handler() local
463 sq = &pfvf->qset.sq[qidx]; in otx2_tx_napi_handler()
476 otx2_xdp_snd_pkt_handler(pfvf, sq, cqe); in otx2_tx_napi_handler()
478 otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx], in otx2_tx_napi_handler()
485 sq->cons_head++; in otx2_tx_napi_handler()
[all …]
/linux-6.6.21/drivers/net/ethernet/cavium/thunder/
nicvf_queues.c
20 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
505 struct snd_queue *sq, int q_len, int qidx) in nicvf_init_snd_queue() argument
509 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, in nicvf_init_snd_queue()
514 sq->desc = sq->dmem.base; in nicvf_init_snd_queue()
515 sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL); in nicvf_init_snd_queue()
516 if (!sq->skbuff) in nicvf_init_snd_queue()
519 sq->head = 0; in nicvf_init_snd_queue()
520 sq->tail = 0; in nicvf_init_snd_queue()
521 sq->thresh = SND_QUEUE_THRESH; in nicvf_init_snd_queue()
528 sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL); in nicvf_init_snd_queue()
[all …]
/linux-6.6.21/drivers/net/ethernet/intel/ice/
ice_controlq.c
8 (qinfo)->sq.head = prefix##_ATQH; \
9 (qinfo)->sq.tail = prefix##_ATQT; \
10 (qinfo)->sq.len = prefix##_ATQLEN; \
11 (qinfo)->sq.bah = prefix##_ATQBAH; \
12 (qinfo)->sq.bal = prefix##_ATQBAL; \
13 (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
14 (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
15 (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \
16 (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
77 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) in ice_check_sq_alive()
[all …]
/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ktls_rx.c
129 static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi, in icosq_fill_wi() argument
132 sq->db.wqe_info[pi] = *wi; in icosq_fill_wi()
136 post_static_params(struct mlx5e_icosq *sq, in post_static_params() argument
144 if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs))) in post_static_params()
147 pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); in post_static_params()
148 wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi); in post_static_params()
149 mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info, in post_static_params()
159 icosq_fill_wi(sq, pi, &wi); in post_static_params()
160 sq->pc += num_wqebbs; in post_static_params()
166 post_progress_params(struct mlx5e_icosq *sq, in post_progress_params() argument
[all …]
ktls_tx.c
525 static void tx_fill_wi(struct mlx5e_txqsq *sq, in tx_fill_wi() argument
529 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; in tx_fill_wi()
549 post_static_params(struct mlx5e_txqsq *sq, in post_static_params() argument
557 pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs); in post_static_params()
558 wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi); in post_static_params()
559 mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info, in post_static_params()
563 tx_fill_wi(sq, pi, num_wqebbs, 0, NULL); in post_static_params()
564 sq->pc += num_wqebbs; in post_static_params()
568 post_progress_params(struct mlx5e_txqsq *sq, in post_progress_params() argument
576 pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs); in post_progress_params()
[all …]
/linux-6.6.21/drivers/infiniband/hw/erdma/
erdma_cmdq.c
26 u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi); in kick_cmdq_db()
28 *cmdq->sq.db_record = db_data; in kick_cmdq_db()
91 struct erdma_cmdq_sq *sq = &cmdq->sq; in erdma_cmdq_sq_init() local
94 sq->wqebb_cnt = SQEBB_COUNT(ERDMA_CMDQ_SQE_SIZE); in erdma_cmdq_sq_init()
95 sq->depth = cmdq->max_outstandings * sq->wqebb_cnt; in erdma_cmdq_sq_init()
97 buf_size = sq->depth << SQEBB_SHIFT; in erdma_cmdq_sq_init()
99 sq->qbuf = in erdma_cmdq_sq_init()
101 &sq->qbuf_dma_addr, GFP_KERNEL); in erdma_cmdq_sq_init()
102 if (!sq->qbuf) in erdma_cmdq_sq_init()
105 sq->db_record = (u64 *)(sq->qbuf + buf_size); in erdma_cmdq_sq_init()
[all …]
/linux-6.6.21/drivers/soc/qcom/
qmi_interface.c
19 struct sockaddr_qrtr *sq);
168 struct sockaddr_qrtr sq; in qmi_send_new_lookup() local
178 sq.sq_family = qmi->sq.sq_family; in qmi_send_new_lookup()
179 sq.sq_node = qmi->sq.sq_node; in qmi_send_new_lookup()
180 sq.sq_port = QRTR_PORT_CTRL; in qmi_send_new_lookup()
182 msg.msg_name = &sq; in qmi_send_new_lookup()
183 msg.msg_namelen = sizeof(sq); in qmi_send_new_lookup()
231 struct sockaddr_qrtr sq; in qmi_send_new_server() local
240 pkt.server.node = cpu_to_le32(qmi->sq.sq_node); in qmi_send_new_server()
241 pkt.server.port = cpu_to_le32(qmi->sq.sq_port); in qmi_send_new_server()
[all …]
/linux-6.6.21/tools/testing/selftests/net/
io_uring_zerocopy_tx.c
108 struct io_uring_sq sq; member
169 struct io_uring_sq *sq, struct io_uring_cq *cq) in io_uring_mmap() argument
175 sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned); in io_uring_mmap()
176 ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE, in io_uring_mmap()
180 sq->khead = ptr + p->sq_off.head; in io_uring_mmap()
181 sq->ktail = ptr + p->sq_off.tail; in io_uring_mmap()
182 sq->kring_mask = ptr + p->sq_off.ring_mask; in io_uring_mmap()
183 sq->kring_entries = ptr + p->sq_off.ring_entries; in io_uring_mmap()
184 sq->kflags = ptr + p->sq_off.flags; in io_uring_mmap()
185 sq->kdropped = ptr + p->sq_off.dropped; in io_uring_mmap()
[all …]
/linux-6.6.21/net/qrtr/
ns.c
53 struct sockaddr_qrtr sq; member
195 static int announce_servers(struct sockaddr_qrtr *sq) in announce_servers() argument
208 ret = service_announce_new(sq, srv); in announce_servers()
288 lookup_notify(&lookup->sq, srv, false); in server_del()
320 static int ctrl_cmd_hello(struct sockaddr_qrtr *sq) in ctrl_cmd_hello() argument
324 ret = say_hello(sq); in ctrl_cmd_hello()
328 return announce_servers(sq); in ctrl_cmd_hello()
336 struct sockaddr_qrtr sq; in ctrl_cmd_bye() local
364 sq.sq_family = AF_QIPCRTR; in ctrl_cmd_bye()
365 sq.sq_node = srv->node; in ctrl_cmd_bye()
[all …]
/linux-6.6.21/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c
59 #define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi)) argument
61 #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask) argument
93 struct hinic_sq *sq, u16 global_qid) in hinic_sq_prepare_ctxt() argument
100 wq = sq->wq; in hinic_sq_prepare_ctxt()
219 static int alloc_sq_skb_arr(struct hinic_sq *sq) in alloc_sq_skb_arr() argument
221 struct hinic_wq *wq = sq->wq; in alloc_sq_skb_arr()
224 skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb); in alloc_sq_skb_arr()
225 sq->saved_skb = vzalloc(skb_arr_size); in alloc_sq_skb_arr()
226 if (!sq->saved_skb) in alloc_sq_skb_arr()
236 static void free_sq_skb_arr(struct hinic_sq *sq) in free_sq_skb_arr() argument
[all …]
hinic_tx.c
47 #define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr)) argument
503 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_lb_xmit_frame()
512 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_lb_xmit_frame()
516 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_lb_xmit_frame()
533 hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges); in hinic_lb_xmit_frame()
534 hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); in hinic_lb_xmit_frame()
539 hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); in hinic_lb_xmit_frame()
564 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_xmit_frame()
593 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_xmit_frame()
600 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_xmit_frame()
[all …]
hinic_hw_qp.h
57 #define HINIC_MIN_TX_NUM_WQEBBS(sq) \ argument
58 (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
122 struct hinic_sq sq; member
133 struct hinic_sq *sq, u16 global_qid);
138 int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
142 void hinic_clean_sq(struct hinic_sq *sq);
149 int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
178 void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *wqe,
181 void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
184 struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
[all …]
/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/lib/
aso.c
159 void *sqc_data, struct mlx5_aso *sq) in mlx5_aso_alloc_sq() argument
162 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5_aso_alloc_sq()
166 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; in mlx5_aso_alloc_sq()
170 err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl); in mlx5_aso_alloc_sq()
179 void *sqc_data, struct mlx5_aso *sq) in create_aso_sq() argument
186 sizeof(u64) * sq->wq_ctrl.buf.npages; in create_aso_sq()
195 MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); in create_aso_sq()
207 MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - in create_aso_sq()
209 MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); in create_aso_sq()
211 mlx5_fill_page_frag_array(&sq->wq_ctrl.buf, in create_aso_sq()
[all …]
