/linux-6.1.9/drivers/scsi/qedf/ |
D | qedf_io.c |
    11    void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,  in qedf_cmd_timer_set() argument
    14    queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,  in qedf_cmd_timer_set()
    21    struct qedf_ioreq *io_req =  in qedf_cmd_timeout() local
    26    fcport = io_req->fcport;  in qedf_cmd_timeout()
    27    if (io_req->fcport == NULL) {  in qedf_cmd_timeout()
    34    switch (io_req->cmd_type) {  in qedf_cmd_timeout()
    39    io_req->xid);  in qedf_cmd_timeout()
    44    io_req->xid);  in qedf_cmd_timeout()
    46    qedf_initiate_cleanup(io_req, true);  in qedf_cmd_timeout()
    47    complete(&io_req->abts_done);  in qedf_cmd_timeout()
    [all …]
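The qedf_io.c hits show the driver's per-command timeout machinery: qedf_cmd_timer_set() arms a delayed work item embedded in the request, and qedf_cmd_timeout() later recovers that request and wakes any waiter. A minimal sketch of the same pattern follows, assuming a simplified request structure; my_ioreq, my_cmd_timer_set and my_cmd_timeout are illustrative names, not qedf's.

/*
 * Hedged sketch of the delayed-work timeout pattern visible above.
 * The work item is assumed to have been initialised once with
 * INIT_DELAYED_WORK(&io_req->timeout_work, my_cmd_timeout) when the
 * request was allocated.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/completion.h>

struct my_ioreq {
	u16 xid;				/* exchange id, for logging */
	struct delayed_work timeout_work;
	struct completion abts_done;
};

static void my_cmd_timeout(struct work_struct *work)
{
	struct my_ioreq *io_req =
		container_of(work, struct my_ioreq, timeout_work.work);

	pr_warn("command 0x%x timed out\n", io_req->xid);
	complete(&io_req->abts_done);		/* wake up any waiter */
}

static void my_cmd_timer_set(struct workqueue_struct *wq,
			     struct my_ioreq *io_req, unsigned int timeout_ms)
{
	/* Queuing an already pending delayed work does not re-arm it. */
	queue_delayed_work(wq, &io_req->timeout_work,
			   msecs_to_jiffies(timeout_ms));
}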
|
D | qedf.h |
    72    struct qedf_ioreq *io_req;  member
    192   struct qedf_ioreq *io_req;  member
    434   struct qedf_ioreq *io_req;  member
    495   struct qedf_ioreq *io_req);
    497   struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
    499   struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
    502   extern int qedf_initiate_abts(struct qedf_ioreq *io_req,
    505   struct qedf_ioreq *io_req);
    510   extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
    512   extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
    [all …]
|
D | qedf_main.c |
    720   struct qedf_ioreq *io_req;  in qedf_eh_abort() local
    743   io_req = qedf_priv(sc_cmd)->io_req;  in qedf_eh_abort()
    744   if (!io_req) {  in qedf_eh_abort()
    753   rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */  in qedf_eh_abort()
    758   if (!rval || io_req->sc_cmd != sc_cmd) {  in qedf_eh_abort()
    761   io_req->sc_cmd, sc_cmd, rdata->ids.port_id);  in qedf_eh_abort()
    767   refcount = kref_read(&io_req->refcount);  in qedf_eh_abort()
    770   io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],  in qedf_eh_abort()
    783   io_req->xid, rdata->ids.port_id);  in qedf_eh_abort()
    784   while (io_req->sc_cmd && (wait_count != 0)) {  in qedf_eh_abort()
    [all …]
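qedf_eh_abort() first takes a reference with kref_get_unless_zero() and only then inspects the request, backing off if the reference could not be taken or the request no longer belongs to the command being aborted. A sketch of that guard, with simplified stand-in types; my_ioreq, my_ioreq_release and my_eh_abort are illustrative names.

/*
 * Sketch of the reference-counting guard in the qedf_eh_abort() hits:
 * only touch a request whose refcount could still be raised, and
 * verify it still belongs to the command being aborted.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/errno.h>

struct my_ioreq {
	struct kref refcount;
	void *sc_cmd;			/* owning SCSI command, if any */
};

static void my_ioreq_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_ioreq, refcount));
}

static int my_eh_abort(struct my_ioreq *io_req, void *sc_cmd)
{
	/* Fails (returns 0) once the refcount has already hit zero. */
	if (!kref_get_unless_zero(&io_req->refcount))
		return -ENODEV;

	if (io_req->sc_cmd != sc_cmd) {
		/* The request has been recycled for another command. */
		kref_put(&io_req->refcount, my_ioreq_release);
		return -ENODEV;
	}

	pr_info("aborting, refcount now %u\n", kref_read(&io_req->refcount));
	/* ... issue the abort against the firmware here ... */
	kref_put(&io_req->refcount, my_ioreq_release);
	return 0;
}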
|
D | qedf_els.c |
    72    cb_arg->io_req = els_req;  in qedf_initiate_els()
    198   rrq_req = cb_arg->io_req;  in qedf_rrq_compl()
    430   els_req = cb_arg->io_req;  in qedf_l2_els_compl()
    552   srr_req = cb_arg->io_req;  in qedf_srr_compl()
    744   struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)  in qedf_process_seq_cleanup_compl() argument
    749   cb_arg = io_req->cb_arg;  in qedf_process_seq_cleanup_compl()
    752   if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {  in qedf_process_seq_cleanup_compl()
    754   "cqe is NULL or timeout event (0x%x)", io_req->event);  in qedf_process_seq_cleanup_compl()
    759   cancel_delayed_work_sync(&io_req->timeout_work);  in qedf_process_seq_cleanup_compl()
    761   rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);  in qedf_process_seq_cleanup_compl()
    [all …]
|
/linux-6.1.9/drivers/scsi/bnx2fc/ |
D | bnx2fc_io.c |
    19    static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
    21    static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
    22    static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
    23    static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
    24    static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
    25    static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
    29    void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,  in bnx2fc_cmd_timer_set() argument
    32    struct bnx2fc_interface *interface = io_req->port->priv;  in bnx2fc_cmd_timer_set()
    35    &io_req->timeout_work,  in bnx2fc_cmd_timer_set()
    37    kref_get(&io_req->refcount);  in bnx2fc_cmd_timer_set()
    [all …]
|
D | bnx2fc_tgt.c |
    168   struct bnx2fc_cmd *io_req;  in bnx2fc_flush_active_ios() local
    178   list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) {  in bnx2fc_flush_active_ios()
    180   list_del_init(&io_req->link);  in bnx2fc_flush_active_ios()
    181   io_req->on_active_queue = 0;  in bnx2fc_flush_active_ios()
    182   BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");  in bnx2fc_flush_active_ios()
    184   if (cancel_delayed_work(&io_req->timeout_work)) {  in bnx2fc_flush_active_ios()
    186   &io_req->req_flags)) {  in bnx2fc_flush_active_ios()
    188   BNX2FC_IO_DBG(io_req, "eh_abort for IO "  in bnx2fc_flush_active_ios()
    190   complete(&io_req->abts_done);  in bnx2fc_flush_active_ios()
    192   kref_put(&io_req->refcount,  in bnx2fc_flush_active_ios()
    [all …]
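bnx2fc_flush_active_ios() walks the active command list with the _safe iterator because entries are unlinked while iterating, and it drops the extra reference that bnx2fc_cmd_timer_set() took only when cancel_delayed_work() actually cancelled a pending timeout. A sketch of that flush loop with simplified stand-in types; the second kref_put for a reference held by the list itself is an assumption of this sketch, not a copy of the driver's refcount scheme.

/*
 * Sketch of the flush pattern in the bnx2fc_tgt.c hits: unlink every
 * active command and balance the references that the timer and (in
 * this sketch) the list were holding.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_cmd {
	struct list_head link;
	struct delayed_work timeout_work;
	struct kref refcount;
};

static void my_cmd_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_cmd, refcount));
}

static void my_flush_active_cmds(struct list_head *active_cmd_queue)
{
	struct my_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, active_cmd_queue, link) {
		list_del_init(&cmd->link);

		/*
		 * True only if the timeout was still pending: drop the
		 * reference the timer-set helper took for it.
		 */
		if (cancel_delayed_work(&cmd->timeout_work))
			kref_put(&cmd->refcount, my_cmd_release);

		/* Drop the reference the active list itself held. */
		kref_put(&cmd->refcount, my_cmd_release);
	}
}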
|
D | bnx2fc_debug.c |
    17    void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...)  in BNX2FC_IO_DBG() argument
    30    if (io_req && io_req->port && io_req->port->lport &&  in BNX2FC_IO_DBG()
    31    io_req->port->lport->host)  in BNX2FC_IO_DBG()
    32    shost_printk(KERN_INFO, io_req->port->lport->host,  in BNX2FC_IO_DBG()
    34    io_req->xid, &vaf);  in BNX2FC_IO_DBG()
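BNX2FC_IO_DBG() is a classic struct va_format / %pV helper: the caller's format string and va_list are wrapped so a single printk call can prepend per-command fields (the exchange id) to the message. A standalone sketch of the technique, using pr_info() instead of shost_printk() so it does not depend on a SCSI host; the type and function names are illustrative.

/* Sketch of the %pV forwarding technique used by BNX2FC_IO_DBG(). */
#include <linux/kernel.h>
#include <linux/printk.h>

struct my_cmd {
	u16 xid;			/* exchange id to prefix messages with */
};

__printf(2, 3)
static void my_io_dbg(const struct my_cmd *cmd, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	/* %pV expands the wrapped format/va_list inside this one printk. */
	pr_info("xid:0x%x %pV", cmd->xid, &vaf);

	va_end(args);
}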
|
D | bnx2fc_hwi.c |
    635   struct bnx2fc_cmd *io_req = NULL;  in bnx2fc_process_unsol_compl() local
    710   io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];  in bnx2fc_process_unsol_compl()
    711   if (!io_req)  in bnx2fc_process_unsol_compl()
    714   if (io_req->cmd_type != BNX2FC_SCSI_CMD) {  in bnx2fc_process_unsol_compl()
    720   &io_req->req_flags)) {  in bnx2fc_process_unsol_compl()
    721   BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "  in bnx2fc_process_unsol_compl()
    743   if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {  in bnx2fc_process_unsol_compl()
    760   memcpy(&io_req->err_entry, err_entry,  in bnx2fc_process_unsol_compl()
    763   &io_req->req_flags)) {  in bnx2fc_process_unsol_compl()
    765   rc = bnx2fc_send_rec(io_req);  in bnx2fc_process_unsol_compl()
    [all …]
|
D | bnx2fc.h |
    406   struct bnx2fc_cmd *io_req;  member
    473   struct bnx2fc_cmd *io_req;  member
    495   struct bnx2fc_cmd *io_req;  member
    534   int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req);
    535   int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req);
    536   void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
    538   int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
    539   void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
    546   void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
    548   void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
    [all …]
|
D | bnx2fc_debug.h |
    41    void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...);
|
/linux-6.1.9/drivers/scsi/fnic/ |
D | fnic_scsi.c |
    114   struct fnic_io_req *io_req,  in fnic_release_ioreq_buf() argument
    117   if (io_req->sgl_list_pa)  in fnic_release_ioreq_buf()
    118   dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,  in fnic_release_ioreq_buf()
    119   sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,  in fnic_release_ioreq_buf()
    123   if (io_req->sgl_cnt)  in fnic_release_ioreq_buf()
    124   mempool_free(io_req->sgl_list_alloc,  in fnic_release_ioreq_buf()
    125   fnic->io_sgl_pool[io_req->sgl_type]);  in fnic_release_ioreq_buf()
    126   if (io_req->sense_buf_pa)  in fnic_release_ioreq_buf()
    127   dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,  in fnic_release_ioreq_buf()
    307   struct fnic_io_req *io_req,  in fnic_queue_wq_copy_desc() argument
    [all …]
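fnic_release_ioreq_buf() shows the usual teardown order for a per-command buffer: undo the streaming DMA mapping with dma_unmap_single(), then hand the allocation back to the mempool it came from. A sketch under the assumption of a single SGL buffer mapped in the to-device direction; the real driver tracks several buffers and selects the pool by SGL type.

/*
 * Sketch of the buffer teardown order in the fnic_scsi.c hits.
 * struct my_io_req is a simplified stand-in for fnic's fnic_io_req.
 */
#include <linux/dma-mapping.h>
#include <linux/mempool.h>

struct my_io_req {
	void *sgl_list;			/* CPU address of the SGL buffer */
	dma_addr_t sgl_list_pa;		/* its streaming DMA mapping, if any */
	size_t sgl_len;			/* mapped length in bytes */
	mempool_t *sgl_pool;		/* pool the buffer was taken from */
};

static void my_release_ioreq_buf(struct device *dev, struct my_io_req *io_req)
{
	/* Unmap before the memory is reused; the direction is assumed. */
	if (io_req->sgl_list_pa)
		dma_unmap_single(dev, io_req->sgl_list_pa,
				 io_req->sgl_len, DMA_TO_DEVICE);

	if (io_req->sgl_list)
		mempool_free(io_req->sgl_list, io_req->sgl_pool);
}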
|
/linux-6.1.9/arch/um/drivers/ |
D | ubd_kern.c |
    481   struct io_thread_req *io_req = (*irq_req_buffer)[count];  in ubd_handler() local
    483   if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {  in ubd_handler()
    484   blk_queue_max_discard_sectors(io_req->req->q, 0);  in ubd_handler()
    485   blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);  in ubd_handler()
    487   blk_mq_end_request(io_req->req, io_req->error);  in ubd_handler()
    488   kfree(io_req);  in ubd_handler()
    1258  static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,  in ubd_map_req() argument
    1264  unsigned long byte_offset = io_req->offset;  in ubd_map_req()
    1268  io_req->io_desc[0].buffer = NULL;  in ubd_map_req()
    1269  io_req->io_desc[0].length = blk_rq_bytes(req);  in ubd_map_req()
    [all …]
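ubd_handler() completes block requests that the UML I/O thread has finished: each request is ended with blk_mq_end_request() using the status carried in the bookkeeping structure, which is then freed, and a BLK_STS_NOTSUPP result for a discard turns discard support off for the queue. A sketch of just that completion step, with struct my_thread_req standing in for UML's struct io_thread_req:

/* Sketch of the completion step from the ubd_handler() hits. */
#include <linux/blk-mq.h>
#include <linux/slab.h>

struct my_thread_req {
	struct request *req;	/* the blk-mq request being serviced */
	blk_status_t error;	/* BLK_STS_OK or an error status */
};

static void my_complete_one(struct my_thread_req *io_req)
{
	/* A failed discard means the backing store cannot punch holes. */
	if (io_req->error == BLK_STS_NOTSUPP &&
	    req_op(io_req->req) == REQ_OP_DISCARD)
		blk_queue_max_discard_sectors(io_req->req->q, 0);

	blk_mq_end_request(io_req->req, io_req->error);
	kfree(io_req);
}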
|
/linux-6.1.9/drivers/md/ |
D | dm-io.c |
    474   static int dp_init(struct dm_io_request *io_req, struct dpages *dp,  in dp_init() argument
    482   switch (io_req->mem.type) {  in dp_init()
    484   list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);  in dp_init()
    488   bio_dp_init(dp, io_req->mem.ptr.bio);  in dp_init()
    492   flush_kernel_vmap_range(io_req->mem.ptr.vma, size);  in dp_init()
    493   if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {  in dp_init()
    494   dp->vma_invalidate_address = io_req->mem.ptr.vma;  in dp_init()
    497   vm_dp_init(dp, io_req->mem.ptr.vma);  in dp_init()
    501   km_dp_init(dp, io_req->mem.ptr.addr);  in dp_init()
    511   int dm_io(struct dm_io_request *io_req, unsigned num_regions,  in dm_io() argument
    [all …]
|
D | dm-log.c |
    240   struct dm_io_request io_req;  member
    296   lc->io_req.bi_opf = op;  in rw_header()
    298   return dm_io(&lc->io_req, 1, &lc->header_location, NULL);  in rw_header()
    309   lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;  in flush_header()
    311   return dm_io(&lc->io_req, 1, &null_location, NULL);  in flush_header()
    456   lc->io_req.mem.type = DM_IO_VMA;  in create_log_context()
    457   lc->io_req.notify.fn = NULL;  in create_log_context()
    458   lc->io_req.client = dm_io_client_create();  in create_log_context()
    459   if (IS_ERR(lc->io_req.client)) {  in create_log_context()
    460   r = PTR_ERR(lc->io_req.client);  in create_log_context()
    [all …]
|
D | dm-integrity.c |
    557   struct dm_io_request io_req;  in sync_rw_sb() local
    562   io_req.bi_opf = opf;  in sync_rw_sb()
    563   io_req.mem.type = DM_IO_KMEM;  in sync_rw_sb()
    564   io_req.mem.ptr.addr = ic->sb;  in sync_rw_sb()
    565   io_req.notify.fn = NULL;  in sync_rw_sb()
    566   io_req.client = ic->io;  in sync_rw_sb()
    580   r = dm_io(&io_req, 1, &io_loc, NULL);  in sync_rw_sb()
    1058  struct dm_io_request io_req;  in rw_journal_sectors() local
    1072  io_req.bi_opf = opf;  in rw_journal_sectors()
    1073  io_req.mem.type = DM_IO_PAGE_LIST;  in rw_journal_sectors()
    [all …]
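The dm-log, dm-integrity, dm-snap-persistent, dm-raid1 and dm-bufio hits all follow the same dm-io pattern: fill a struct dm_io_request (operation flags, memory descriptor, optional completion callback, dm_io_client) and a struct dm_io_region, then call dm_io(); a NULL notify.fn makes the call synchronous. A sketch of a synchronous read into kernel memory, where the client, block device and buffer are assumed to be supplied by the caller; nothing here is specific to one target.

/* Sketch of a synchronous dm_io() read, per the hits above. */
#include <linux/blkdev.h>
#include <linux/dm-io.h>

static int my_sync_read(struct dm_io_client *client,
			struct block_device *bdev,
			sector_t sector, sector_t nr_sectors, void *buf)
{
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_READ,		/* operation + flags */
		.mem.type = DM_IO_KMEM,		/* buf is plain kernel memory */
		.mem.ptr.addr = buf,
		.notify.fn = NULL,		/* NULL => dm_io() is synchronous */
		.client = client,		/* from dm_io_client_create() */
	};
	struct dm_io_region io_loc = {
		.bdev = bdev,
		.sector = sector,
		.count = nr_sectors,
	};

	return dm_io(&io_req, 1, &io_loc, NULL);
}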
|
D | dm-snap-persistent.c |
    214   struct dm_io_request *io_req;  member
    223   req->result = dm_io(req->io_req, 1, req->where, NULL);  in do_metadata()
    237   struct dm_io_request io_req = {  in chunk_io() local
    247   return dm_io(&io_req, 1, &where, NULL);  in chunk_io()
    250   req.io_req = &io_req;  in chunk_io()
|
D | dm-raid1.c |
    262   struct dm_io_request io_req = {  in mirror_flush() local
    276   dm_io(&io_req, ms->nr_mirrors, io, &error_bits);  in mirror_flush()
    536   struct dm_io_request io_req = {  in read_async_bio() local
    547   BUG_ON(dm_io(&io_req, 1, &io, NULL));  in read_async_bio()
    650   struct dm_io_request io_req = {  in do_write() local
    660   io_req.bi_opf = REQ_OP_DISCARD | op_flags;  in do_write()
    661   io_req.mem.type = DM_IO_KMEM;  in do_write()
    662   io_req.mem.ptr.addr = NULL;  in do_write()
    674   BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));  in do_write()
|
D | dm-bufio.c |
    597   struct dm_io_request io_req = {  in use_dmio() local
    610   io_req.mem.type = DM_IO_KMEM;  in use_dmio()
    611   io_req.mem.ptr.addr = (char *)b->data + offset;  in use_dmio()
    613   io_req.mem.type = DM_IO_VMA;  in use_dmio()
    614   io_req.mem.ptr.vma = (char *)b->data + offset;  in use_dmio()
    617   r = dm_io(&io_req, 1, &region, NULL);  in use_dmio()
    1364  struct dm_io_request io_req = {  in dm_bufio_issue_flush() local
    1378  return dm_io(&io_req, 1, &io_reg, NULL);  in dm_bufio_issue_flush()
    1387  struct dm_io_request io_req = {  in dm_bufio_issue_discard() local
    1401  return dm_io(&io_req, 1, &io_reg, NULL);  in dm_bufio_issue_discard()
|
/linux-6.1.9/samples/acrn/ |
D | vm-sample.c |
    53    struct acrn_io_request *io_req;  in main() local
    113   io_req = &io_req_buf[vcpu_id];  in main()
    114   if ((__sync_add_and_fetch(&io_req->processed, 0) == ACRN_IOREQ_STATE_PROCESSING)  in main()
    115   && (!io_req->kernel_handled))  in main()
    116   if (io_req->type == ACRN_IOREQ_TYPE_PORTIO) {  in main()
    119   port = io_req->reqs.pio_request.address;  in main()
    120   bytes = io_req->reqs.pio_request.size;  in main()
    121   in = (io_req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ);  in main()
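vm-sample.c is userspace: an ACRN device-model sample that polls the shared I/O request buffer exported by the hypervisor service module and services port-I/O requests the kernel did not already handle. A sketch of that check against the UAPI definitions in <linux/acrn.h>; the printf and the zero value written back for reads are placeholders, not the sample's actual emulation, and the real sample then notifies the kernel that the request is complete.

/* Userspace sketch of the polling step in samples/acrn/vm-sample.c. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <linux/acrn.h>

static bool needs_service(struct acrn_io_request *io_req)
{
	/* Atomic load of the request state, as the sample does. */
	return __sync_add_and_fetch(&io_req->processed, 0) ==
			ACRN_IOREQ_STATE_PROCESSING &&
	       !io_req->kernel_handled;
}

static void handle_one_ioreq(struct acrn_io_request *io_req)
{
	if (!needs_service(io_req))
		return;

	if (io_req->type == ACRN_IOREQ_TYPE_PORTIO) {
		struct acrn_pio_request *pio = &io_req->reqs.pio_request;
		bool in = pio->direction == ACRN_IOREQ_DIR_READ;

		printf("port i/o %s: port 0x%llx, %llu bytes\n",
		       in ? "read" : "write",
		       (unsigned long long)pio->address,
		       (unsigned long long)pio->size);

		if (in)
			pio->value = 0;	/* placeholder data for the guest's IN */
	}
}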
|
/linux-6.1.9/drivers/staging/rtl8723bs/include/ |
D | rtw_io.h |
    109   struct io_req {  struct
    117   void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt);  argument
    158   extern void sync_ioreq_enqueue(struct io_req *preq, struct io_queue *ioqueue);
    162   extern uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
    163   extern struct io_req *alloc_ioreq(struct io_queue *pio_q);
    193   void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
    195   void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
    197   void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
    203   void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
    205   void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
    [all …]
|
/linux-6.1.9/drivers/staging/r8188eu/include/ |
D | rtw_io.h |
    87    struct io_req {  struct
    97    struct io_req *pio_req, u8 *cnxt);  argument
    212   void sync_ioreq_enqueue(struct io_req *preq,struct io_queue *ioqueue);
    214   uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
    215   struct io_req *alloc_ioreq(struct io_queue *pio_q);
    251   struct io_req *pio_req,
    255   struct io_req *pio_req,
    259   struct io_req *pio_req,
    267   struct io_req *pio_req,
    271   struct io_req *pio_req,
    [all …]
|
/linux-6.1.9/drivers/scsi/csiostor/ |
D | csio_lnode.c |
    1435  struct csio_ioreq *io_req = NULL;  in csio_ln_mgmt_wr_handler() local
    1448  io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);  in csio_ln_mgmt_wr_handler()
    1449  io_req->wr_status = csio_wr_status(wr_cmd);  in csio_ln_mgmt_wr_handler()
    1453  if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {  in csio_ln_mgmt_wr_handler()
    1456  io_req);  in csio_ln_mgmt_wr_handler()
    1465  list_del_init(&io_req->sm.sm_list);  in csio_ln_mgmt_wr_handler()
    1470  if (io_req->io_cbfn)  in csio_ln_mgmt_wr_handler()
    1471  io_req->io_cbfn(hw, io_req);  in csio_ln_mgmt_wr_handler()
    1680  csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,  in csio_ln_prep_ecwr() argument
    1697  wr->cookie = io_req->fw_handle;  in csio_ln_prep_ecwr()
    [all …]
|
D | csio_hw.c |
    4087  csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)  in csio_mgmt_req_lookup() argument
    4093  if (io_req == (struct csio_ioreq *)tmp)  in csio_mgmt_req_lookup()
    4112  struct csio_ioreq *io_req;  in csio_mgmt_tmo_handler() local
    4119  io_req = (struct csio_ioreq *) tmp;  in csio_mgmt_tmo_handler()
    4120  io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);  in csio_mgmt_tmo_handler()
    4122  if (!io_req->tmo) {  in csio_mgmt_tmo_handler()
    4125  list_del_init(&io_req->sm.sm_list);  in csio_mgmt_tmo_handler()
    4126  if (io_req->io_cbfn) {  in csio_mgmt_tmo_handler()
    4128  io_req->wr_status = -ETIMEDOUT;  in csio_mgmt_tmo_handler()
    4129  io_req->io_cbfn(mgmtm->hw, io_req);  in csio_mgmt_tmo_handler()
    [all …]
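csio_mgmt_tmo_handler() is a periodic sweep over outstanding management requests: each pass subtracts the poll interval from a request's remaining timeout, and a request that reaches zero is unlinked and completed through its callback with wr_status set to -ETIMEDOUT. A sketch with simplified stand-in types and an assumed poll interval; the structure and callback shapes are illustrative, not csiostor's.

/* Sketch of the periodic timeout sweep in the csio_hw.c hits. */
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/errno.h>

#define MY_POLL_INTERVAL_MS 100		/* assumed sweep period */

struct my_ioreq {
	struct list_head list;
	u32 tmo;				/* remaining timeout in ms */
	int wr_status;
	void (*io_cbfn)(struct my_ioreq *io_req);
};

static void my_mgmt_tmo_handler(struct list_head *active_q)
{
	struct my_ioreq *io_req, *tmp;

	list_for_each_entry_safe(io_req, tmp, active_q, list) {
		/* Never underflow: subtract at most the time remaining. */
		io_req->tmo -= min_t(u32, io_req->tmo, MY_POLL_INTERVAL_MS);

		if (!io_req->tmo) {
			list_del_init(&io_req->list);
			if (io_req->io_cbfn) {
				io_req->wr_status = -ETIMEDOUT;
				io_req->io_cbfn(io_req);
			}
		}
	}
}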
|
/linux-6.1.9/drivers/staging/rtl8712/ |
D | rtl871x_io.c |
    101   struct io_req *pio_req;  in r8712_alloc_io_queue()
    111   (sizeof(struct io_req)) + 4,  in r8712_alloc_io_queue()
    118   pio_req = (struct io_req *)(pio_queue->free_ioreqs_buf);  in r8712_alloc_io_queue()
|
/linux-6.1.9/include/linux/ |
D | dm-io.h |
    81    int dm_io(struct dm_io_request *io_req, unsigned num_regions,
|