
Searched refs:sqe (Results 1 – 25 of 44) sorted by relevance


/linux-5.19.10/tools/io_uring/
liburing.h:98 static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data) in io_uring_sqe_set_data() argument
100 sqe->user_data = (unsigned long) data; in io_uring_sqe_set_data()
108 static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd, in io_uring_prep_rw() argument
112 memset(sqe, 0, sizeof(*sqe)); in io_uring_prep_rw()
113 sqe->opcode = op; in io_uring_prep_rw()
114 sqe->fd = fd; in io_uring_prep_rw()
115 sqe->off = offset; in io_uring_prep_rw()
116 sqe->addr = (unsigned long) addr; in io_uring_prep_rw()
117 sqe->len = len; in io_uring_prep_rw()
120 static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd, in io_uring_prep_readv() argument
[all …]
io_uring-cp.c:71 struct io_uring_sqe *sqe; in queue_prepped() local
73 sqe = io_uring_get_sqe(ring); in queue_prepped()
74 assert(sqe); in queue_prepped()
77 io_uring_prep_readv(sqe, infd, &data->iov, 1, data->offset); in queue_prepped()
79 io_uring_prep_writev(sqe, outfd, &data->iov, 1, data->offset); in queue_prepped()
81 io_uring_sqe_set_data(sqe, data); in queue_prepped()
86 struct io_uring_sqe *sqe; in queue_read() local
93 sqe = io_uring_get_sqe(ring); in queue_read()
94 if (!sqe) { in queue_read()
106 io_uring_prep_readv(sqe, infd, &data->iov, 1, offset); in queue_read()
[all …]
io_uring-bench.c:145 struct io_uring_sqe *sqe = &s->sqes[index]; in init_io() local
151 sqe->opcode = IORING_OP_NOP; in init_io()
172 sqe->flags = IOSQE_FIXED_FILE; in init_io()
173 sqe->fd = f->fixed_fd; in init_io()
175 sqe->flags = 0; in init_io()
176 sqe->fd = f->real_fd; in init_io()
179 sqe->opcode = IORING_OP_READ_FIXED; in init_io()
180 sqe->addr = (unsigned long) s->iovecs[index].iov_base; in init_io()
181 sqe->len = BS; in init_io()
182 sqe->buf_index = index; in init_io()
[all …]
queue.c:145 struct io_uring_sqe *sqe; in io_uring_get_sqe() local
153 sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask]; in io_uring_get_sqe()
155 return sqe; in io_uring_get_sqe()
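
The tools/io_uring matches above cover the whole submission side: io_uring_get_sqe() hands out a free slot from the SQ ring, io_uring_prep_readv() fills opcode, fd, address, length and offset, and io_uring_sqe_set_data() stashes a pointer in user_data so the completion can be matched back to the request. A minimal standalone sketch of that flow, written against the userspace liburing API (io_uring_queue_init(), io_uring_submit(), io_uring_wait_cqe() and friends are assumed from that library, not taken from these results):

/*
 * Hedged sketch, not code from the kernel tree: submit one readv through an
 * io_uring and reap its completion.  Error handling is kept minimal.
 */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd, ret;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);             /* free SQE slot, NULL if SQ is full */
	if (!sqe)
		return 1;
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);  /* opcode, fd, addr, len, offset     */
	io_uring_sqe_set_data(sqe, buf);           /* user_data travels to the CQE      */

	io_uring_submit(&ring);                    /* hand the SQE to the kernel        */
	ret = io_uring_wait_cqe(&ring, &cqe);      /* block for the completion          */
	if (ret == 0) {
		printf("read %d bytes, user_data=%p\n", cqe->res,
		       io_uring_cqe_get_data(cqe));
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}

The in-tree io_uring-cp.c above follows the same get/prep/set_data sequence, just against the bundled copy of these helpers rather than the installed liburing package.
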
/linux-5.19.10/drivers/infiniband/sw/siw/
siw_qp.c:275 wqe->sqe.flags = 0; in siw_qp_mpa_rts()
276 wqe->sqe.num_sge = 1; in siw_qp_mpa_rts()
277 wqe->sqe.sge[0].length = 0; in siw_qp_mpa_rts()
278 wqe->sqe.sge[0].laddr = 0; in siw_qp_mpa_rts()
279 wqe->sqe.sge[0].lkey = 0; in siw_qp_mpa_rts()
284 wqe->sqe.rkey = 1; in siw_qp_mpa_rts()
285 wqe->sqe.raddr = 0; in siw_qp_mpa_rts()
289 wqe->sqe.opcode = SIW_OP_WRITE; in siw_qp_mpa_rts()
293 wqe->sqe.opcode = SIW_OP_READ; in siw_qp_mpa_rts()
300 siw_read_to_orq(rreq, &wqe->sqe); in siw_qp_mpa_rts()
[all …]
siw_qp_tx.c:43 struct siw_sge *sge = &wqe->sqe.sge[0]; in siw_try_1seg()
46 if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1) in siw_try_1seg()
53 memcpy(paddr, &wqe->sqe.sge[1], bytes); in siw_try_1seg()
137 c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey); in siw_qp_prepare_tx()
139 cpu_to_be64(wqe->sqe.sge[0].laddr); in siw_qp_prepare_tx()
140 c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey); in siw_qp_prepare_tx()
141 c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr); in siw_qp_prepare_tx()
142 c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length); in siw_qp_prepare_tx()
185 c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey); in siw_qp_prepare_tx()
197 c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey); in siw_qp_prepare_tx()
[all …]
siw_verbs.c:641 struct siw_sqe *sqe) in siw_copy_inline_sgl() argument
644 void *kbuf = &sqe->sge[1]; in siw_copy_inline_sgl()
647 sqe->sge[0].laddr = (uintptr_t)kbuf; in siw_copy_inline_sgl()
648 sqe->sge[0].lkey = 0; in siw_copy_inline_sgl()
666 sqe->sge[0].length = max(bytes, 0); in siw_copy_inline_sgl()
667 sqe->num_sge = bytes > 0 ? 1 : 0; in siw_copy_inline_sgl()
676 struct siw_sqe sqe = {}; in siw_sq_flush_wr() local
680 sqe.id = wr->wr_id; in siw_sq_flush_wr()
681 sqe.opcode = wr->opcode; in siw_sq_flush_wr()
682 rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR); in siw_sq_flush_wr()
[all …]
siw.h:192 struct siw_sqe sqe; member
476 #define tx_type(wqe) ((wqe)->sqe.opcode)
478 #define tx_flags(wqe) ((wqe)->sqe.flags)
523 void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
524 int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
627 struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size]; in siw_sq_empty() local
629 return READ_ONCE(sqe->flags) == 0; in siw_sq_empty()
634 struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size]; in sq_get_next() local
636 if (READ_ONCE(sqe->flags) & SIW_WQE_VALID) in sq_get_next()
637 return sqe; in sq_get_next()
siw_qp_rx.c:176 srx->ddp_stag = wqe->sqe.sge[0].lkey; in siw_rresp_check_ntoh()
177 srx->ddp_to = wqe->sqe.sge[0].laddr; in siw_rresp_check_ntoh()
695 resp = &tx_work->sqe; in siw_init_rresp()
758 wqe->sqe.id = orqe->id; in siw_orqe_start_rx()
759 wqe->sqe.opcode = orqe->opcode; in siw_orqe_start_rx()
760 wqe->sqe.sge[0].laddr = orqe->sge[0].laddr; in siw_orqe_start_rx()
761 wqe->sqe.sge[0].lkey = orqe->sge[0].lkey; in siw_orqe_start_rx()
762 wqe->sqe.sge[0].length = orqe->sge[0].length; in siw_orqe_start_rx()
763 wqe->sqe.flags = orqe->flags; in siw_orqe_start_rx()
764 wqe->sqe.num_sge = 1; in siw_orqe_start_rx()
[all …]
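
In the siw soft-iWARP driver the SQE is not a ring entry of its own but a struct siw_sqe embedded in the work-queue element (siw.h:192), and the send queue is drained by polling a flags word: siw_sq_empty() and sq_get_next() above read sqe->flags with READ_ONCE() and only treat the slot as posted once SIW_WQE_VALID is set. A self-contained sketch of that publish-by-flag pattern follows; all names (my_sqe, MY_VALID, my_sq_get_next) and the simplified READ_ONCE stand-in are invented for illustration, not taken from the driver.

/*
 * Illustrative only, not siw code.  The producer fills an entry and then
 * publishes it by setting a VALID bit; the consumer does a single volatile
 * load of the flag before trusting the rest of the entry.
 */
#include <stdint.h>
#include <stddef.h>

#define MY_VALID        (1u << 0)
/* crude stand-in for the kernel's READ_ONCE(); the real macro is stronger */
#define MY_READ_ONCE(x) (*(volatile const __typeof__(x) *)&(x))

struct my_sqe {
	uint32_t flags;     /* MY_VALID set by the producer last */
	uint32_t opcode;
	uint64_t addr;
};

struct my_sq {
	struct my_sqe *sendq;   /* ring of send-queue entries */
	uint32_t size;          /* number of entries          */
	uint32_t get;           /* consumer index             */
};

/* NULL means "empty from the consumer's point of view" */
static struct my_sqe *my_sq_get_next(struct my_sq *sq)
{
	struct my_sqe *sqe = &sq->sendq[sq->get % sq->size];

	if (MY_READ_ONCE(sqe->flags) & MY_VALID)
		return sqe;     /* entry has been published */

	return NULL;
}
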
/linux-5.19.10/drivers/crypto/hisilicon/zip/
zip_crypto.c:97 void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
98 void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
99 void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
100 void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
101 void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
102 void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
103 u32 (*get_tag)(struct hisi_zip_sqe *sqe);
104 u32 (*get_status)(struct hisi_zip_sqe *sqe);
105 u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
260 static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) in hisi_zip_fill_addr() argument
[all …]
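
zip_crypto.c funnels every SQE access through a table of fill_*/get_* function pointers so the request path never depends on a particular hardware generation's SQE layout. A hedged sketch of that ops-table pattern is below; the struct names, fields and values (ex_zip_sqe, dw9, ex_v2_ops) are invented stand-ins, not the driver's real v1/v2 definitions.

/*
 * Sketch only: version-specific SQE fill routines behind a function-pointer
 * table, in the style of the hisi_zip_sqe_ops matches above.
 */
#include <stdint.h>
#include <stdio.h>

struct ex_zip_sqe {
	uint64_t source_addr;
	uint64_t dest_addr;
	uint32_t dw9;          /* this invented layout keeps the request type here */
};

struct ex_zip_req {
	uint64_t src_dma;
	uint64_t dst_dma;
	uint8_t  req_type;
};

struct ex_zip_sqe_ops {
	void (*fill_addr)(struct ex_zip_sqe *sqe, struct ex_zip_req *req);
	void (*fill_req_type)(struct ex_zip_sqe *sqe, uint8_t req_type);
};

static void ex_v2_fill_addr(struct ex_zip_sqe *sqe, struct ex_zip_req *req)
{
	sqe->source_addr = req->src_dma;
	sqe->dest_addr   = req->dst_dma;
}

static void ex_v2_fill_req_type(struct ex_zip_sqe *sqe, uint8_t req_type)
{
	sqe->dw9 = req_type;
}

static const struct ex_zip_sqe_ops ex_v2_ops = {
	.fill_addr     = ex_v2_fill_addr,
	.fill_req_type = ex_v2_fill_req_type,
};

int main(void)
{
	struct ex_zip_sqe sqe = { 0 };
	struct ex_zip_req req = { .src_dma = 0x1000, .dst_dma = 0x2000, .req_type = 3 };
	const struct ex_zip_sqe_ops *ops = &ex_v2_ops;   /* picked per HW revision */

	ops->fill_addr(&sqe, &req);             /* generic code, version-specific fill */
	ops->fill_req_type(&sqe, req.req_type);
	printf("dw9=%u src=%#llx\n", sqe.dw9, (unsigned long long)sqe.source_addr);
	return 0;
}
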
/linux-5.19.10/drivers/net/ethernet/qlogic/qed/
qed_nvmetcp_fw_funcs.c:68 if (!task_params->sqe) in init_sqe()
71 memset(task_params->sqe, 0, sizeof(*task_params->sqe)); in init_sqe()
72 task_params->sqe->task_id = cpu_to_le16(task_params->itid); in init_sqe()
79 SET_FIELD(task_params->sqe->contlen_cdbsize, in init_sqe()
81 SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, in init_sqe()
94 SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges); in init_sqe()
95 SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size); in init_sqe()
99 SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, in init_sqe()
101 SET_FIELD(task_params->sqe->contlen_cdbsize, in init_sqe()
106 SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, in init_sqe()
[all …]
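
The qed NVMe/TCP helpers (and the qedf/qedi siblings further down) assemble their SQEs with SET_FIELD(), a mask-and-shift macro keyed by FOO_MASK/FOO_SHIFT constant pairs, so WQE type, SGE count and CDB/continuation length pack into a few 32-bit words. A self-contained sketch of that packing convention follows; the EX_WQE_* field names and widths are invented, not the real NVMETCP_WQE_* layout.

/*
 * Sketch of the MASK/SHIFT packing style used by SET_FIELD() above; the
 * EX_WQE_* field definitions are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_WQE_TYPE_MASK       0xFu
#define EX_WQE_TYPE_SHIFT      0
#define EX_WQE_NUM_SGES_MASK   0xFFu
#define EX_WQE_NUM_SGES_SHIFT  4

/* clear the field, then OR in the new value at its shift position */
#define EX_SET_FIELD(word, name, val)                                       \
	do {                                                                \
		(word) &= ~((uint32_t)name##_MASK << name##_SHIFT);        \
		(word) |= ((uint32_t)(val) & name##_MASK) << name##_SHIFT; \
	} while (0)

int main(void)
{
	uint32_t flags = 0;

	EX_SET_FIELD(flags, EX_WQE_TYPE, 0x2);     /* e.g. "write request"  */
	EX_SET_FIELD(flags, EX_WQE_NUM_SGES, 3);   /* three scatter entries */

	printf("flags = 0x%08x\n", flags);         /* prints 0x00000032     */
	return 0;
}
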
/linux-5.19.10/include/trace/events/
io_uring.h:529 TP_PROTO(const struct io_uring_sqe *sqe, void *ctx, void *req, int error),
531 TP_ARGS(sqe, ctx, req, error),
551 __string( op_str, io_uring_get_opcode(sqe->opcode) )
557 __entry->user_data = sqe->user_data;
558 __entry->opcode = sqe->opcode;
559 __entry->flags = sqe->flags;
560 __entry->ioprio = sqe->ioprio;
561 __entry->off = sqe->off;
562 __entry->addr = sqe->addr;
563 __entry->len = sqe->len;
[all …]
/linux-5.19.10/io_uring/
io_uring.c:3407 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_rw() argument
3413 kiocb->ki_pos = READ_ONCE(sqe->off); in io_prep_rw()
3415 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
3429 ioprio = READ_ONCE(sqe->ioprio); in io_prep_rw()
3440 req->rw.addr = READ_ONCE(sqe->addr); in io_prep_rw()
3441 req->rw.len = READ_ONCE(sqe->len); in io_prep_rw()
3442 req->rw.flags = READ_ONCE(sqe->rw_flags); in io_prep_rw()
4349 const struct io_uring_sqe *sqe) in io_renameat_prep() argument
4354 if (sqe->buf_index || sqe->splice_fd_in) in io_renameat_prep()
4359 ren->old_dfd = READ_ONCE(sqe->fd); in io_renameat_prep()
[all …]
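
On the kernel side of io_uring, io_prep_rw() and io_renameat_prep() above pull every field they need out of the SQE with READ_ONCE(): the submission ring is mapped into userspace, so each field is loaded once into the private request and validated from that copy rather than re-read from shared memory. A small self-contained analog of that copy-then-validate pattern is sketched below with invented ex_* names; it is not the kernel implementation.

/*
 * Self-contained illustration, not the kernel code.  Fields of a submission
 * entry shared with another context are loaded once into locals and
 * validated from those locals, so a concurrent writer cannot change a value
 * between the check and the use.
 */
#include <errno.h>
#include <stdint.h>

#define EX_READ_ONCE(x) (*(volatile const __typeof__(x) *)&(x))
#define EX_MAX_LEN      (1u << 20)      /* invented limit for the example */

struct ex_sqe {         /* stand-in for a submission entry in shared memory */
	uint64_t addr;
	uint32_t len;
};

struct ex_req {         /* private, trusted copy */
	uint64_t addr;
	uint32_t len;
};

static int ex_prep(struct ex_req *req, const struct ex_sqe *sqe)
{
	uint32_t len = EX_READ_ONCE(sqe->len);  /* single load from shared memory */

	if (len > EX_MAX_LEN)                   /* validate the private copy...   */
		return -EINVAL;

	req->addr = EX_READ_ONCE(sqe->addr);
	req->len  = len;                        /* ...and only ever use that copy */
	return 0;
}
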
/linux-5.19.10/drivers/scsi/qedf/
drv_fcoe_fw_funcs.c:13 memset(task_params->sqe, 0, sizeof(*(task_params->sqe))); in init_common_sqe()
14 SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE, in init_common_sqe()
16 task_params->sqe->task_id = task_params->itid; in init_common_sqe()
167 task_params->sqe->additional_info_union.burst_length = in init_initiator_midpath_unsolicited_fcoe_task()
169 SET_FIELD(task_params->sqe->flags, in init_initiator_midpath_unsolicited_fcoe_task()
171 SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE, in init_initiator_midpath_unsolicited_fcoe_task()
193 task_params->sqe->additional_info_union.seq_rec_updated_offset = in init_initiator_sequence_recovery_fcoe_task()
qedf_io.c:588 struct fcoe_wqe *sqe) in qedf_init_task() argument
624 io_req->task_params->sqe = sqe; in qedf_init_task()
677 struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) in qedf_init_mp_task() argument
703 io_req->task_params->sqe = sqe; in qedf_init_mp_task()
854 struct fcoe_wqe *sqe; in qedf_post_io_req() local
901 sqe = &fcport->sq[sqe_idx]; in qedf_post_io_req()
902 memset(sqe, 0, sizeof(struct fcoe_wqe)); in qedf_post_io_req()
915 qedf_init_task(fcport, lport, io_req, task_ctx, sqe); in qedf_post_io_req()
1860 struct fcoe_wqe *sqe; in qedf_initiate_abts() local
1938 sqe = &fcport->sq[sqe_idx]; in qedf_initiate_abts()
[all …]
qedf_els.c:23 struct fcoe_wqe *sqe; in qedf_initiate_els() local
120 sqe = &fcport->sq[sqe_idx]; in qedf_initiate_els()
121 memset(sqe, 0, sizeof(struct fcoe_wqe)); in qedf_initiate_els()
125 qedf_init_mp_task(els_req, task, sqe); in qedf_initiate_els()
702 struct fcoe_wqe *sqe; in qedf_initiate_seq_cleanup() local
732 sqe = &fcport->sq[sqe_idx]; in qedf_initiate_seq_cleanup()
733 memset(sqe, 0, sizeof(struct fcoe_wqe)); in qedf_initiate_seq_cleanup()
734 orig_io_req->task_params->sqe = sqe; in qedf_initiate_seq_cleanup()
/linux-5.19.10/drivers/scsi/qedi/
qedi_fw_api.c:98 if (!task_params->sqe) in init_sqe()
101 memset(task_params->sqe, 0, sizeof(*task_params->sqe)); in init_sqe()
102 task_params->sqe->task_id = cpu_to_le16(task_params->itid); in init_sqe()
104 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, in init_sqe()
115 init_dif_context_flags(&task_params->sqe->prot_flags, in init_sqe()
118 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, in init_sqe()
134 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, in init_sqe()
136 SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN, in init_sqe()
141 SET_FIELD(task_params->sqe->contlen_cdbsize, in init_sqe()
147 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, in init_sqe()
[all …]
/linux-5.19.10/drivers/infiniband/hw/bnxt_re/
qplib_fp.c:1774 struct sq_send_raweth_qp1_hdr *sqe = base_hdr; in bnxt_qplib_post_send() local
1778 sqe->wqe_type = wqe->type; in bnxt_qplib_post_send()
1779 sqe->flags = wqe->flags; in bnxt_qplib_post_send()
1780 sqe->wqe_size = wqe_sz; in bnxt_qplib_post_send()
1781 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action); in bnxt_qplib_post_send()
1782 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags); in bnxt_qplib_post_send()
1783 sqe->length = cpu_to_le32(data_len); in bnxt_qplib_post_send()
1795 struct sq_send_hdr *sqe = base_hdr; in bnxt_qplib_post_send() local
1797 sqe->wqe_type = wqe->type; in bnxt_qplib_post_send()
1798 sqe->flags = wqe->flags; in bnxt_qplib_post_send()
[all …]
/linux-5.19.10/drivers/dma/
hisi_dma.c:85 struct hisi_dma_sqe sqe; member
247 desc->sqe.length = cpu_to_le32(len); in hisi_dma_prep_dma_memcpy()
248 desc->sqe.src_addr = cpu_to_le64(src); in hisi_dma_prep_dma_memcpy()
249 desc->sqe.dst_addr = cpu_to_le64(dst); in hisi_dma_prep_dma_memcpy()
263 struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail; in hisi_dma_start_transfer() local
278 memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe)); in hisi_dma_start_transfer()
281 sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M)); in hisi_dma_start_transfer()
282 sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN); in hisi_dma_start_transfer()
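
hisi_dma.c keeps a private struct hisi_dma_sqe inside each descriptor, fills it at prep time with cpu_to_le32()/cpu_to_le64() so the device always sees little-endian fields, and only in hisi_dma_start_transfer() copies it into the hardware-visible SQ slot and writes dw0 with the opcode and interrupt-enable bit. A rough userspace-style sketch of that build-then-publish step follows, with htole32()/htole64() standing in for the kernel helpers and an invented ex_dma_sqe layout and flag values.

/*
 * Sketch only: layout and flag bits are invented.  The point is the explicit
 * little-endian conversion and the "fill the payload first, write dw0 last"
 * ordering visible in hisi_dma_start_transfer() above.
 */
#include <endian.h>
#include <stdint.h>
#include <string.h>

#define EX_OPCODE_M2M   0x4u            /* invented opcode value         */
#define EX_LOCAL_IRQ_EN (1u << 30)      /* invented interrupt-enable bit */

struct ex_dma_sqe {
	uint32_t dw0;                   /* opcode + control bits, written last */
	uint32_t length;
	uint64_t src_addr;
	uint64_t dst_addr;
};

static void ex_fill_sqe(struct ex_dma_sqe *sqe, uint64_t src, uint64_t dst,
			uint32_t len)
{
	sqe->length   = htole32(len);   /* device expects little-endian */
	sqe->src_addr = htole64(src);
	sqe->dst_addr = htole64(dst);
}

static void ex_publish_sqe(struct ex_dma_sqe *hw_slot,
			   const struct ex_dma_sqe *desc)
{
	memcpy(hw_slot, desc, sizeof(*hw_slot));                  /* copy payload... */
	hw_slot->dw0 = htole32(EX_OPCODE_M2M | EX_LOCAL_IRQ_EN);  /* ...then arm dw0 */
}

int main(void)
{
	struct ex_dma_sqe desc = { 0 }, hw_slot = { 0 };

	ex_fill_sqe(&desc, 0x1000, 0x2000, 4096);
	ex_publish_sqe(&hw_slot, &desc);  /* in the driver, hw_slot is the SQ slot */
	return 0;
}
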
/linux-5.19.10/drivers/infiniband/hw/cxgb4/
restrack.c:96 struct t4_swsqe *sqe) in fill_swsqe() argument
100 if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode)) in fill_swsqe()
102 if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete)) in fill_swsqe()
104 if (sqe->complete && in fill_swsqe()
105 rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe))) in fill_swsqe()
107 if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled)) in fill_swsqe()
109 if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed)) in fill_swsqe()
/linux-5.19.10/drivers/scsi/bnx2i/
bnx2i.h:498 struct sqe { struct
634 struct sqe *sq_virt;
638 struct sqe *sq_prod_qe;
639 struct sqe *sq_cons_qe;
640 struct sqe *sq_first_qe;
641 struct sqe *sq_last_qe;
/linux-5.19.10/drivers/nvme/host/
rdma.c:66 struct nvme_rdma_qe sqe; member
294 kfree(req->sqe.data); in nvme_rdma_exit_request()
307 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL); in nvme_rdma_init_request()
308 if (!req->sqe.data) in nvme_rdma_init_request()
318 nvme_req(rq)->cmd = req->sqe.data; in nvme_rdma_init_request()
1656 container_of(qe, struct nvme_rdma_request, sqe); in nvme_rdma_send_done()
1741 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe; in nvme_rdma_submit_async_event() local
1742 struct nvme_command *cmd = sqe->data; in nvme_rdma_submit_async_event()
1746 ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE); in nvme_rdma_submit_async_event()
1754 sqe->cqe.done = nvme_rdma_async_done; in nvme_rdma_submit_async_event()
[all …]
fc.c:1929 struct nvme_command *sqe = &op->cmd_iu.sqe; in nvme_fc_fcpio_done() local
2033 sqe->common.command_id != cqe->command_id)) { in nvme_fc_fcpio_done()
2043 sqe->common.command_id, in nvme_fc_fcpio_done()
2150 nvme_req(rq)->cmd = &op->op.cmd_iu.sqe; in nvme_fc_init_request()
2159 struct nvme_command *sqe; in nvme_fc_init_aen_ops() local
2173 sqe = &cmdiu->sqe; in nvme_fc_init_aen_ops()
2185 memset(sqe, 0, sizeof(*sqe)); in nvme_fc_init_aen_ops()
2186 sqe->common.opcode = nvme_admin_async_event; in nvme_fc_init_aen_ops()
2188 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i; in nvme_fc_init_aen_ops()
2574 struct nvme_command *sqe = &cmdiu->sqe; in nvme_fc_timeout() local
[all …]
/linux-5.19.10/drivers/crypto/hisilicon/hpre/
hpre_crypto.c:54 typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
302 struct hpre_sqe *sqe = &req->req; in hpre_hw_data_clr_all() local
305 tmp = le64_to_cpu(sqe->in); in hpre_hw_data_clr_all()
316 tmp = le64_to_cpu(sqe->out); in hpre_hw_data_clr_all()
330 static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, in hpre_alg_res_post_hf() argument
342 id = (int)le16_to_cpu(sqe->tag); in hpre_alg_res_post_hf()
347 err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) & in hpre_alg_res_post_hf()
350 done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) & in hpre_alg_res_post_hf()
356 alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK; in hpre_alg_res_post_hf()
462 struct hpre_sqe *sqe = resp; in hpre_alg_cb() local
[all …]
/linux-5.19.10/drivers/scsi/lpfc/
lpfc_nvme.c:1014 cid = cp->sqe.common.command_id; in lpfc_nvme_io_cmd_cmpl()
1083 cp->sqe.common.opcode, in lpfc_nvme_io_cmd_cmpl()
1084 cp->sqe.common.command_id, in lpfc_nvme_io_cmd_cmpl()
1096 cp->sqe.common.opcode, in lpfc_nvme_io_cmd_cmpl()
1097 cp->sqe.common.command_id, in lpfc_nvme_io_cmd_cmpl()
1210 struct nvme_common_command *sqe; in lpfc_nvme_prep_io_cmd() local
1270 sqe = &((struct nvme_fc_cmd_iu *) in lpfc_nvme_prep_io_cmd()
1271 nCmd->cmdaddr)->sqe.common; in lpfc_nvme_prep_io_cmd()
1272 if (sqe->opcode == nvme_admin_async_event) in lpfc_nvme_prep_io_cmd()
1537 struct nvme_common_command *sqe; in lpfc_nvme_fcp_io_submit() local
[all …]
