Lines matching refs: tgtport. All references to struct nvmet_fc_tgtport in the Linux NVMe-over-FC target transport (drivers/nvme/target/fc.c). Each entry gives the source line number, the matching line, and the enclosing function; a trailing 'member', 'local', or 'argument' notes how the identifier is used at that site.
34 struct nvmet_fc_tgtport *tgtport; member
51 struct nvmet_fc_tgtport *tgtport; member
90 struct nvmet_fc_tgtport *tgtport; member
117 struct nvmet_fc_tgtport *tgtport; member
153 struct nvmet_fc_tgtport *tgtport; member
164 struct nvmet_fc_tgtport *tgtport; member
178 return (iodptr - iodptr->tgtport->iod); in nvmet_fc_iodnum()
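Several of the structures above (LS iods, FCP iods, queues, associations) carry a tgtport back-pointer, and nvmet_fc_iodnum() recovers an iod's index purely by pointer arithmetic against the array base stored in the port. A minimal user-space sketch of that pattern, using simplified stand-in types rather than the kernel definitions:

    #include <stdio.h>

    #define NUM_LS_IODS 4

    struct tgtport;

    struct ls_iod {
        struct tgtport *tgtport;        /* back-pointer to the owning port */
    };

    struct tgtport {
        struct ls_iod *iod;             /* base of a contiguous iod array */
    };

    static int iodnum(struct ls_iod *iodptr)
    {
        /* index = pointer difference from the array base, as in
         * nvmet_fc_iodnum() above */
        return (int)(iodptr - iodptr->tgtport->iod);
    }

    int main(void)
    {
        struct ls_iod iods[NUM_LS_IODS];
        struct tgtport port = { .iod = iods };

        for (int i = 0; i < NUM_LS_IODS; i++)
            iods[i].tgtport = &port;

        printf("iod #%d\n", iodnum(&iods[2]));  /* prints: iod #2 */
        return 0;
    }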
250 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
251 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
252 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
255 static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
354 struct nvmet_fc_tgtport *tgtport = lsop->tgtport; in __nvmet_fc_finish_ls_req() local
358 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
361 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
369 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
371 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, in __nvmet_fc_finish_ls_req()
375 nvmet_fc_tgtport_put(tgtport); in __nvmet_fc_finish_ls_req()
379 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, in __nvmet_fc_send_ls_req() argument
387 if (!tgtport->ops->ls_req) in __nvmet_fc_send_ls_req()
390 if (!nvmet_fc_tgtport_get(tgtport)) in __nvmet_fc_send_ls_req()
397 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, in __nvmet_fc_send_ls_req()
400 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { in __nvmet_fc_send_ls_req()
406 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
408 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); in __nvmet_fc_send_ls_req()
412 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
414 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, in __nvmet_fc_send_ls_req()
423 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
426 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
427 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, in __nvmet_fc_send_ls_req()
431 nvmet_fc_tgtport_put(tgtport); in __nvmet_fc_send_ls_req()
437 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_send_ls_req_async() argument
443 return __nvmet_fc_send_ls_req(tgtport, lsop, done); in nvmet_fc_send_ls_req_async()
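__nvmet_fc_send_ls_req() establishes the ordering every async LS send follows: pin the port, DMA-map the request, link the op onto tgtport->ls_req_list under the lock, then hand it to the LLDD's ls_req(); on failure everything is undone in reverse. A compilable user-space model of that unwind, with a pthread mutex standing in for the spinlock and a plain counter for the kref:

    #include <pthread.h>

    struct lsop { struct lsop *next; };

    struct port {
        pthread_mutex_t lock;           /* stand-in for tgtport->lock */
        int refs;                       /* stand-in for the kref */
        struct lsop *ls_req_list;       /* singly linked for brevity */
        int (*submit)(struct lsop *);   /* stand-in for ops->ls_req() */
    };

    static int send_ls_req(struct port *p, struct lsop *op)
    {
        int ret;

        p->refs++;                      /* pin the port while op is live */

        pthread_mutex_lock(&p->lock);
        op->next = p->ls_req_list;      /* link so completion can find it */
        p->ls_req_list = op;
        pthread_mutex_unlock(&p->lock);

        ret = p->submit(op);
        if (ret) {
            /* undo in reverse: unlink, then drop the reference */
            pthread_mutex_lock(&p->lock);
            p->ls_req_list = op->next;  /* single-submitter sketch: op is
                                         * still the list head here */
            pthread_mutex_unlock(&p->lock);
            p->refs--;
        }
        return ret;
    }

    static int fake_submit(struct lsop *op) { (void)op; return 0; }

    int main(void)
    {
        struct port p = { .lock = PTHREAD_MUTEX_INITIALIZER,
                          .refs = 1, .submit = fake_submit };
        struct lsop op = { 0 };

        return send_ls_req(&p, &op);
    }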
479 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_xmt_disconnect_assoc() local
491 if (!tgtport->ops->ls_req || !assoc->hostport || in nvmet_fc_xmt_disconnect_assoc()
497 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvmet_fc_xmt_disconnect_assoc()
499 dev_info(tgtport->dev, in nvmet_fc_xmt_disconnect_assoc()
501 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_xmt_disconnect_assoc()
508 if (tgtport->ops->lsrqst_priv_sz) in nvmet_fc_xmt_disconnect_assoc()
513 lsop->tgtport = tgtport; in nvmet_fc_xmt_disconnect_assoc()
519 ret = nvmet_fc_send_ls_req_async(tgtport, lsop, in nvmet_fc_xmt_disconnect_assoc()
522 dev_info(tgtport->dev, in nvmet_fc_xmt_disconnect_assoc()
524 tgtport->fc_target_port.port_num, assoc->a_id, ret); in nvmet_fc_xmt_disconnect_assoc()
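nvmet_fc_xmt_disconnect_assoc() sizes a single allocation to hold the op, the LS request and response buffers, and the LLDD's private area (ops->lsrqst_priv_sz, checked at line 508 above), then carves pointers out of it sequentially. A sketch of that carve-up with illustrative stand-in sizes:

    #include <stdlib.h>

    struct disconn_rqst { char bytes[32]; };   /* stand-in LS request */
    struct disconn_acc  { char bytes[24]; };   /* stand-in LS response */

    struct lsop {
        struct disconn_rqst *rqst;
        struct disconn_acc  *rsp;
        void *lldd_priv;                       /* LLDD's private area */
    };

    static struct lsop *alloc_disconnect_lsop(size_t lsrqst_priv_sz)
    {
        struct lsop *op = calloc(1, sizeof(*op) + sizeof(*op->rqst) +
                                 sizeof(*op->rsp) + lsrqst_priv_sz);
        if (!op)
            return NULL;

        op->rqst = (struct disconn_rqst *)&op[1];       /* after the op */
        op->rsp  = (struct disconn_acc *)&op->rqst[1];  /* after the rqst */
        op->lldd_priv = lsrqst_priv_sz ? &op->rsp[1] : NULL;
        return op;
    }

    int main(void)
    {
        free(alloc_disconnect_lsop(16));   /* one free releases it all */
        return 0;
    }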
534 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_alloc_ls_iodlist() argument
544 tgtport->iod = iod; in nvmet_fc_alloc_ls_iodlist()
548 iod->tgtport = tgtport; in nvmet_fc_alloc_ls_iodlist()
549 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
559 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, in nvmet_fc_alloc_ls_iodlist()
562 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) in nvmet_fc_alloc_ls_iodlist()
572 fc_dma_unmap_single(tgtport->dev, iod->rspdma, in nvmet_fc_alloc_ls_iodlist()
584 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_free_ls_iodlist() argument
586 struct nvmet_fc_ls_iod *iod = tgtport->iod; in nvmet_fc_free_ls_iodlist()
590 fc_dma_unmap_single(tgtport->dev, in nvmet_fc_free_ls_iodlist()
596 kfree(tgtport->iod); in nvmet_fc_free_ls_iodlist()
600 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_alloc_ls_iod() argument
605 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_ls_iod()
606 iod = list_first_entry_or_null(&tgtport->ls_rcv_list, in nvmet_fc_alloc_ls_iod()
609 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); in nvmet_fc_alloc_ls_iod()
610 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_ls_iod()
616 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_free_ls_iod() argument
621 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_free_ls_iod()
622 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); in nvmet_fc_free_ls_iod()
623 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_free_ls_iod()
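The LS iod allocator keeps free iods on tgtport->ls_rcv_list and in-flight ones on ls_busylist; allocation and release are just locked moves between the two lists (list_move_tail/list_move in the kernel). A simplified singly-linked model:

    #include <pthread.h>
    #include <stddef.h>

    struct iod { struct iod *next; };

    struct port {
        pthread_mutex_t lock;
        struct iod *rcv_list;    /* free iods, ready for a new LS */
        struct iod *busylist;    /* iods currently handling an LS */
    };

    static struct iod *alloc_iod(struct port *p)
    {
        struct iod *iod;

        pthread_mutex_lock(&p->lock);
        iod = p->rcv_list;               /* first free entry, if any */
        if (iod) {
            p->rcv_list = iod->next;     /* unlink from the free list */
            iod->next = p->busylist;     /* move onto the busy list */
            p->busylist = iod;
        }
        pthread_mutex_unlock(&p->lock);
        return iod;
    }

    static void free_iod(struct port *p, struct iod *iod)
    {
        pthread_mutex_lock(&p->lock);
        p->busylist = iod->next;         /* sketch: iod is the busy head */
        iod->next = p->rcv_list;         /* back onto the free list */
        p->rcv_list = iod;
        pthread_mutex_unlock(&p->lock);
    }

    int main(void)
    {
        struct iod iods[2] = { { &iods[1] }, { NULL } };
        struct port p = { .lock = PTHREAD_MUTEX_INITIALIZER,
                          .rcv_list = &iods[0] };
        struct iod *got = alloc_iod(&p);

        if (got)
            free_iod(&p, got);
        return 0;
    }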
627 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_prep_fcp_iodlist() argument
635 fod->tgtport = tgtport; in nvmet_fc_prep_fcp_iodlist()
644 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, in nvmet_fc_prep_fcp_iodlist()
646 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { in nvmet_fc_prep_fcp_iodlist()
649 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_iodlist()
662 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_destroy_fcp_iodlist() argument
670 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_destroy_fcp_iodlist()
698 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_queue_fcp_req() argument
709 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; in nvmet_fc_queue_fcp_req()
711 nvmet_fc_handle_fcp_rqst(tgtport, fod); in nvmet_fc_queue_fcp_req()
721 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); in nvmet_fc_fcp_rqst_op_defer_work()
730 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_free_fcp_iod() local
734 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, in nvmet_fc_free_fcp_iod()
745 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); in nvmet_fc_free_fcp_iod()
780 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); in nvmet_fc_free_fcp_iod()
808 assoc->tgtport->fc_target_port.port_num, in nvmet_fc_alloc_target_queue()
826 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
838 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
856 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); in nvmet_fc_tgt_queue_free()
881 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; in nvmet_fc_delete_target_queue() local
908 tgtport->ops->fcp_abort( in nvmet_fc_delete_target_queue()
909 &tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_delete_target_queue()
931 tgtport->ops->defer_rcv(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
934 tgtport->ops->fcp_abort(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
937 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
957 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_find_target_queue() argument
969 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_queue()
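nvmet_fc_find_target_queue() walks tgtport->assoc_list under RCU and resolves a 64-bit connection id into an association plus one of its queues. A sketch of the id convention this implies, with an 8-bit qid split chosen for illustration rather than taken from the kernel's actual layout:

    #include <stdint.h>
    #include <stdio.h>

    #define QID_BITS 8   /* illustrative split, not the kernel layout */

    static uint64_t make_connid(uint64_t assoc_id, unsigned int qid)
    {
        return (assoc_id << QID_BITS) | qid;
    }

    int main(void)
    {
        uint64_t connid = make_connid(5, 3);

        /* the lookup reverses the packing: find the association by
         * id, then index its queue array with the low bits */
        printf("assoc %llu qid %u\n",
               (unsigned long long)(connid >> QID_BITS),
               (unsigned int)(connid & ((1u << QID_BITS) - 1)));
        return 0;
    }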
989 struct nvmet_fc_tgtport *tgtport = hostport->tgtport; in nvmet_fc_hostport_free() local
992 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_hostport_free()
994 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_hostport_free()
995 if (tgtport->ops->host_release && hostport->invalid) in nvmet_fc_hostport_free()
996 tgtport->ops->host_release(hostport->hosthandle); in nvmet_fc_hostport_free()
998 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_hostport_free()
1024 nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) in nvmet_fc_match_hostport() argument
1028 lockdep_assert_held(&tgtport->lock); in nvmet_fc_match_hostport()
1030 list_for_each_entry(host, &tgtport->host_list, host_list) { in nvmet_fc_match_hostport()
1041 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) in nvmet_fc_alloc_hostport() argument
1054 if (!nvmet_fc_tgtport_get(tgtport)) in nvmet_fc_alloc_hostport()
1057 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1058 match = nvmet_fc_match_hostport(tgtport, hosthandle); in nvmet_fc_alloc_hostport()
1059 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1063 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_alloc_hostport()
1070 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_alloc_hostport()
1074 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1075 match = nvmet_fc_match_hostport(tgtport, hosthandle); in nvmet_fc_alloc_hostport()
1081 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_alloc_hostport()
1083 newhost->tgtport = tgtport; in nvmet_fc_alloc_hostport()
1088 list_add_tail(&newhost->host_list, &tgtport->host_list); in nvmet_fc_alloc_hostport()
1090 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
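nvmet_fc_alloc_hostport() uses the classic optimistic-allocation shape: look up under the lock, allocate outside it if nothing matched, then re-check under the lock and discard the new object when another thread won the race (each nvmet_fc_tgtport_put() above pairs with the nvmet_fc_tgtport_get() on a bail-out path). A user-space model:

    #include <pthread.h>
    #include <stdlib.h>

    struct hostport {
        void *hosthandle;
        struct hostport *next;
    };

    struct port {
        pthread_mutex_t lock;
        struct hostport *host_list;
    };

    /* caller must hold p->lock, like nvmet_fc_match_hostport() */
    static struct hostport *match_hostport(struct port *p, void *handle)
    {
        for (struct hostport *h = p->host_list; h; h = h->next)
            if (h->hosthandle == handle)
                return h;
        return NULL;
    }

    static struct hostport *alloc_hostport(struct port *p, void *handle)
    {
        struct hostport *newhost, *match;

        pthread_mutex_lock(&p->lock);
        match = match_hostport(p, handle);
        pthread_mutex_unlock(&p->lock);
        if (match)
            return match;                   /* fast path: already known */

        newhost = calloc(1, sizeof(*newhost));  /* allocate unlocked */
        if (!newhost)
            return NULL;

        pthread_mutex_lock(&p->lock);
        match = match_hostport(p, handle);  /* re-check: did we race? */
        if (match) {
            free(newhost);                  /* loser discards its copy */
            newhost = match;
        } else {
            newhost->hosthandle = handle;
            newhost->next = p->host_list;   /* publish under the lock */
            p->host_list = newhost;
        }
        pthread_mutex_unlock(&p->lock);
        return newhost;
    }

    int main(void)
    {
        struct port p = { .lock = PTHREAD_MUTEX_INITIALIZER };
        int handle;
        struct hostport *h = alloc_hostport(&p, &handle);
        /* the second call finds the same entry instead of allocating */
        int ok = h && h == alloc_hostport(&p, &handle);

        free(h);
        return ok ? 0 : 1;
    }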
1106 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) in nvmet_fc_alloc_target_assoc() argument
1118 idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL); in nvmet_fc_alloc_target_assoc()
1122 if (!nvmet_fc_tgtport_get(tgtport)) in nvmet_fc_alloc_target_assoc()
1125 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); in nvmet_fc_alloc_target_assoc()
1129 assoc->tgtport = tgtport; in nvmet_fc_alloc_target_assoc()
1140 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_target_assoc()
1142 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { in nvmet_fc_alloc_target_assoc()
1150 list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); in nvmet_fc_alloc_target_assoc()
1152 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_target_assoc()
1158 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_alloc_target_assoc()
1160 ida_free(&tgtport->assoc_cnt, idx); in nvmet_fc_alloc_target_assoc()
1171 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_target_assoc_free() local
1179 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_target_assoc_free()
1182 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_target_assoc_free()
1185 nvmet_fc_xmt_ls_rsp(tgtport, oldls); in nvmet_fc_target_assoc_free()
1186 ida_free(&tgtport->assoc_cnt, assoc->a_id); in nvmet_fc_target_assoc_free()
1187 dev_info(tgtport->dev, in nvmet_fc_target_assoc_free()
1189 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_target_assoc_free()
1191 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_target_assoc_free()
1209 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_delete_target_assoc() local
1237 dev_info(tgtport->dev, in nvmet_fc_delete_target_assoc()
1239 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_delete_target_assoc()
1245 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_find_target_assoc() argument
1252 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_assoc()
1266 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_portentry_bind() argument
1272 pe->tgtport = tgtport; in nvmet_fc_portentry_bind()
1273 tgtport->pe = pe; in nvmet_fc_portentry_bind()
1278 pe->node_name = tgtport->fc_target_port.node_name; in nvmet_fc_portentry_bind()
1279 pe->port_name = tgtport->fc_target_port.port_name; in nvmet_fc_portentry_bind()
1291 if (pe->tgtport) in nvmet_fc_portentry_unbind()
1292 pe->tgtport->pe = NULL; in nvmet_fc_portentry_unbind()
1303 nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_portentry_unbind_tgt() argument
1309 pe = tgtport->pe; in nvmet_fc_portentry_unbind_tgt()
1311 pe->tgtport = NULL; in nvmet_fc_portentry_unbind_tgt()
1312 tgtport->pe = NULL; in nvmet_fc_portentry_unbind_tgt()
1325 nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_portentry_rebind_tgt() argument
1332 if (tgtport->fc_target_port.node_name == pe->node_name && in nvmet_fc_portentry_rebind_tgt()
1333 tgtport->fc_target_port.port_name == pe->port_name) { in nvmet_fc_portentry_rebind_tgt()
1334 WARN_ON(pe->tgtport); in nvmet_fc_portentry_rebind_tgt()
1335 tgtport->pe = pe; in nvmet_fc_portentry_rebind_tgt()
1336 pe->tgtport = tgtport; in nvmet_fc_portentry_rebind_tgt()
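The portentry helpers maintain the pe <-> tgtport linkage: bind sets both pointers, unbind clears them, and rebind lets a re-registered targetport reclaim any entry whose node and port WWNs match its own. A sketch of the rebind match, with the list walk reduced to an array scan and hypothetical WWN values:

    #include <stddef.h>
    #include <stdint.h>

    struct tgt;
    struct pe  { uint64_t node_name, port_name; struct tgt *tgtport; };
    struct tgt { uint64_t node_name, port_name; struct pe *pe; };

    static void rebind(struct tgt *t, struct pe *entries, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            if (t->node_name == entries[i].node_name &&
                t->port_name == entries[i].port_name) {
                entries[i].tgtport = t;   /* relink both directions */
                t->pe = &entries[i];
                break;
            }
        }
    }

    int main(void)
    {
        /* hypothetical WWNs, reused so the match succeeds */
        struct pe entries[1] = { { 0x10000090fa0001ULL,
                                   0x20000090fa0001ULL, NULL } };
        struct tgt t = { entries[0].node_name, entries[0].port_name,
                         NULL };

        rebind(&t, entries, 1);
        return t.pe == &entries[0] ? 0 : 1;
    }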
1449 struct nvmet_fc_tgtport *tgtport = in nvmet_fc_free_tgtport() local
1451 struct device *dev = tgtport->dev; in nvmet_fc_free_tgtport()
1455 list_del(&tgtport->tgt_list); in nvmet_fc_free_tgtport()
1458 nvmet_fc_free_ls_iodlist(tgtport); in nvmet_fc_free_tgtport()
1461 tgtport->ops->targetport_delete(&tgtport->fc_target_port); in nvmet_fc_free_tgtport()
1464 tgtport->fc_target_port.port_num); in nvmet_fc_free_tgtport()
1466 ida_destroy(&tgtport->assoc_cnt); in nvmet_fc_free_tgtport()
1468 kfree(tgtport); in nvmet_fc_free_tgtport()
1474 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_tgtport_put() argument
1476 kref_put(&tgtport->ref, nvmet_fc_free_tgtport); in nvmet_fc_tgtport_put()
1480 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport) in nvmet_fc_tgtport_get() argument
1482 return kref_get_unless_zero(&tgtport->ref); in nvmet_fc_tgtport_get()
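nvmet_fc_tgtport_get()/put() wrap a kref: lookups succeed only while the count is nonzero (kref_get_unless_zero), and the final put runs nvmet_fc_free_tgtport(). A user-space model with C11 atomics standing in for the kref:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct port {
        atomic_int ref;
        void (*release)(struct port *);
    };

    /* kref_get_unless_zero(): only increment while nonzero, so a port
     * already heading into nvmet_fc_free_tgtport() can't be revived */
    static bool port_get(struct port *p)
    {
        int old = atomic_load(&p->ref);

        while (old != 0)
            if (atomic_compare_exchange_weak(&p->ref, &old, old + 1))
                return true;
        return false;   /* teardown already started */
    }

    static void port_put(struct port *p)
    {
        if (atomic_fetch_sub(&p->ref, 1) == 1)
            p->release(p);      /* last reference: run the destructor */
    }

    static void release_port(struct port *p)
    {
        printf("freeing port %p\n", (void *)p);
    }

    int main(void)
    {
        struct port p = { 1, release_port };  /* ref from registration */

        if (port_get(&p))   /* e.g. an in-flight LS op pins the port */
            port_put(&p);
        port_put(&p);       /* registration ref dropped: release runs */
        return 0;
    }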
1486 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) in __nvmet_fc_free_assocs() argument
1491 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in __nvmet_fc_free_assocs()
1534 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); in nvmet_fc_invalidate_host() local
1539 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_invalidate_host()
1541 &tgtport->assoc_list, a_list) { in nvmet_fc_invalidate_host()
1553 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_invalidate_host()
1556 if (noassoc && tgtport->ops->host_release) in nvmet_fc_invalidate_host()
1557 tgtport->ops->host_release(hosthandle); in nvmet_fc_invalidate_host()
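nvmet_fc_invalidate_host() sweeps the association list, scheduling teardown for every association bound to the dead hosthandle, and calls host_release() immediately only when nothing matched (otherwise the last association teardown triggers it). A simplified model of that sweep:

    #include <stdbool.h>
    #include <stddef.h>

    struct assoc { void *hosthandle; struct assoc *next; };

    static void schedule_del(struct assoc *a)
    {
        (void)a;   /* real code queues the association's delete work */
    }

    static void invalidate_host(struct assoc *list, void *hosthandle,
                                void (*host_release)(void *))
    {
        bool noassoc = true;

        for (struct assoc *a = list; a; a = a->next) {
            if (a->hosthandle == hosthandle) {
                noassoc = false;
                schedule_del(a);   /* teardown releases the handle */
            }
        }
        if (noassoc && host_release)
            host_release(hosthandle);   /* nothing pins the handle */
    }

    static void fake_release(void *h) { (void)h; }

    int main(void)
    {
        int dead;
        struct assoc a2 = { NULL, NULL };
        struct assoc a1 = { &dead, &a2 };

        invalidate_host(&a1, &dead, fake_release);
        return 0;
    }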
1567 struct nvmet_fc_tgtport *tgtport, *next; in nvmet_fc_delete_ctrl() local
1575 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list, in nvmet_fc_delete_ctrl()
1577 if (!nvmet_fc_tgtport_get(tgtport)) in nvmet_fc_delete_ctrl()
1582 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_delete_ctrl()
1592 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_delete_ctrl()
1620 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); in nvmet_fc_unregister_targetport() local
1622 nvmet_fc_portentry_unbind_tgt(tgtport); in nvmet_fc_unregister_targetport()
1625 __nvmet_fc_free_assocs(tgtport); in nvmet_fc_unregister_targetport()
1634 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_unregister_targetport()
1645 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_ls_create_association() argument
1682 tgtport, iod->hosthandle); in nvmet_fc_ls_create_association()
1694 dev_err(tgtport->dev, in nvmet_fc_ls_create_association()
1708 dev_info(tgtport->dev, in nvmet_fc_ls_create_association()
1710 tgtport->fc_target_port.port_num, iod->assoc->a_id); in nvmet_fc_ls_create_association()
1734 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_ls_create_connection() argument
1770 iod->assoc = nvmet_fc_find_target_assoc(tgtport, in nvmet_fc_ls_create_connection()
1787 dev_err(tgtport->dev, in nvmet_fc_ls_create_connection()
1824 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_ls_disconnect() argument
1841 assoc = nvmet_fc_find_target_assoc(tgtport, in nvmet_fc_ls_disconnect()
1849 dev_err(tgtport->dev, in nvmet_fc_ls_disconnect()
1882 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_ls_disconnect()
1885 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_ls_disconnect()
1890 dev_info(tgtport->dev, in nvmet_fc_ls_disconnect()
1893 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_ls_disconnect()
1901 nvmet_fc_xmt_ls_rsp(tgtport, oldls); in nvmet_fc_ls_disconnect()
1919 struct nvmet_fc_tgtport *tgtport = iod->tgtport; in nvmet_fc_xmt_ls_rsp_done() local
1921 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, in nvmet_fc_xmt_ls_rsp_done()
1923 nvmet_fc_free_ls_iod(tgtport, iod); in nvmet_fc_xmt_ls_rsp_done()
1924 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_xmt_ls_rsp_done()
1928 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_xmt_ls_rsp() argument
1933 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, in nvmet_fc_xmt_ls_rsp()
1936 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); in nvmet_fc_xmt_ls_rsp()
1945 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_handle_ls_rqst() argument
1968 nvmet_fc_ls_create_association(tgtport, iod); in nvmet_fc_handle_ls_rqst()
1972 nvmet_fc_ls_create_connection(tgtport, iod); in nvmet_fc_handle_ls_rqst()
1976 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); in nvmet_fc_handle_ls_rqst()
1985 nvmet_fc_xmt_ls_rsp(tgtport, iod); in nvmet_fc_handle_ls_rqst()
1996 struct nvmet_fc_tgtport *tgtport = iod->tgtport; in nvmet_fc_handle_ls_rqst_work() local
1998 nvmet_fc_handle_ls_rqst(tgtport, iod); in nvmet_fc_handle_ls_rqst_work()
2026 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); in nvmet_fc_rcv_ls_req() local
2031 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2039 if (!nvmet_fc_tgtport_get(tgtport)) { in nvmet_fc_rcv_ls_req()
2040 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2047 iod = nvmet_fc_alloc_ls_iod(tgtport); in nvmet_fc_rcv_ls_req()
2049 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2053 nvmet_fc_tgtport_put(tgtport); in nvmet_fc_rcv_ls_req()
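nvmet_fc_rcv_ls_req() is pure admission control: reject oversized payloads, pin the tgtport, then grab a free LS iod, dropping the port reference again if no iod is available. A sketch of that ordering; the helpers and the buffer-size constant are stand-ins, and the real code finishes by queueing iod->work:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    #define MAX_LS_BUF 256   /* stand-in for the FC-NVME LS buffer limit */

    struct ls_iod { char rqstbuf[MAX_LS_BUF]; size_t rqstdatalen; };

    /* trivial stand-ins for tgtport_get/put and the iod allocator */
    static bool port_get(void) { return true; }
    static void port_put(void) { }
    static struct ls_iod the_iod;
    static struct ls_iod *alloc_ls_iod(void) { return &the_iod; }

    static int rcv_ls_req(const void *lsreqbuf, size_t len)
    {
        struct ls_iod *iod;

        if (len > MAX_LS_BUF)
            return -1;          /* payload can't be a valid FC-NVME LS */
        if (!port_get())
            return -2;          /* port is being torn down */

        iod = alloc_ls_iod();
        if (!iod) {
            port_put();         /* undo the pin before bailing */
            return -3;          /* out of LS contexts */
        }

        memcpy(iod->rqstbuf, lsreqbuf, len);
        iod->rqstdatalen = len;
        /* real code: queue iod->work to run nvmet_fc_handle_ls_rqst() */
        return 0;
    }

    int main(void)
    {
        char ls[64] = { 0 };

        return rcv_ls_req(ls, sizeof(ls));
    }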
2088 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, in nvmet_fc_alloc_tgt_pgs()
2106 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, in nvmet_fc_free_tgt_pgs()
2132 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_prep_fcp_rsp() argument
2190 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_rsp()
2197 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_abort_op() argument
2211 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); in nvmet_fc_abort_op()
2217 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_xmt_fcp_rsp() argument
2225 nvmet_fc_prep_fcp_rsp(tgtport, fod); in nvmet_fc_xmt_fcp_rsp()
2227 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_xmt_fcp_rsp()
2229 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_xmt_fcp_rsp()
2233 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_transfer_fcp_data() argument
2259 fcpreq->sg_cnt < tgtport->max_sg_cnt && in nvmet_fc_transfer_fcp_data()
2286 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { in nvmet_fc_transfer_fcp_data()
2288 nvmet_fc_prep_fcp_rsp(tgtport, fod); in nvmet_fc_transfer_fcp_data()
2291 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_transfer_fcp_data()
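nvmet_fc_transfer_fcp_data() contains the READDATA_RSP optimization: when the final read burst is being queued and the LLDD advertises NVMET_FCTGTFEAT_READDATA_RSP, the response is prepared up front so data and response go out as one fcp_op(). A sketch of the decision, with simplified field names:

    #include <stdbool.h>

    #define FEAT_READDATA_RSP (1u << 0)   /* illustrative feature bit */

    struct fod {
        unsigned long offset, total_length, this_xfer_len;
        bool rsp_piggybacked;
    };

    static void queue_read_burst(struct fod *fod, unsigned int features)
    {
        bool last = (fod->offset + fod->this_xfer_len ==
                     fod->total_length);

        if (last && (features & FEAT_READDATA_RSP)) {
            /* real code calls nvmet_fc_prep_fcp_rsp() here so the
             * LLDD can send data and response in one operation */
            fod->rsp_piggybacked = true;
        }
        /* real code: tgtport->ops->fcp_op(&tgtport->fc_target_port,
         * fod->fcpreq) queues the burst (plus rsp, if piggybacked) */
    }

    int main(void)
    {
        struct fod fod = { .offset = 4096, .total_length = 8192,
                           .this_xfer_len = 4096 };

        queue_read_burst(&fod, FEAT_READDATA_RSP);
        return fod.rsp_piggybacked ? 0 : 1;
    }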
2317 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in __nvmet_fc_fod_op_abort() local
2326 nvmet_fc_abort_op(tgtport, fod); in __nvmet_fc_fod_op_abort()
2340 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fod_op_done() local
2371 nvmet_fc_transfer_fcp_data(tgtport, fod, in nvmet_fc_fod_op_done()
2386 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_fod_op_done()
2402 nvmet_fc_transfer_fcp_data(tgtport, fod, in nvmet_fc_fod_op_done()
2412 nvmet_fc_xmt_fcp_rsp(tgtport, fod); in nvmet_fc_fod_op_done()
2439 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, in __nvmet_fc_fcp_nvme_cmd_done() argument
2456 nvmet_fc_abort_op(tgtport, fod); in __nvmet_fc_fcp_nvme_cmd_done()
2477 nvmet_fc_transfer_fcp_data(tgtport, fod, in __nvmet_fc_fcp_nvme_cmd_done()
2488 nvmet_fc_xmt_fcp_rsp(tgtport, fod); in __nvmet_fc_fcp_nvme_cmd_done()
2496 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fcp_nvme_cmd_done() local
2498 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); in nvmet_fc_fcp_nvme_cmd_done()
2506 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, in nvmet_fc_handle_fcp_rqst() argument
2540 if (tgtport->pe) in nvmet_fc_handle_fcp_rqst()
2541 fod->req.port = tgtport->pe->port; in nvmet_fc_handle_fcp_rqst()
2577 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); in nvmet_fc_handle_fcp_rqst()
2591 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_handle_fcp_rqst()
2646 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); in nvmet_fc_rcv_fcp_req() local
2660 queue = nvmet_fc_find_target_queue(tgtport, in nvmet_fc_rcv_fcp_req()
2683 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); in nvmet_fc_rcv_fcp_req()
2688 if (!tgtport->ops->defer_rcv) { in nvmet_fc_rcv_fcp_req()
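The tail of nvmet_fc_rcv_fcp_req() handles fod exhaustion: an LLDD without a defer_rcv callback simply gets -EBUSY, while one that provides it has the request parked on the queue and replayed from nvmet_fc_free_fcp_iod() when a fod frees up. A reduced sketch:

    #include <errno.h>
    #include <stddef.h>

    struct fcpreq { struct fcpreq *next; };

    struct queue {
        struct fcpreq *pending;   /* deferred requests, replayed later */
    };

    static int rcv_fcp_req_slow(struct queue *q, struct fcpreq *req,
                                int lldd_has_defer_rcv)
    {
        if (!lldd_has_defer_rcv)
            return -EBUSY;        /* caller must retry the frame */

        req->next = q->pending;   /* park it; freeing a fod replays it
                                   * via ops->defer_rcv() */
        q->pending = req;
        return 0;
    }

    int main(void)
    {
        struct queue q = { NULL };
        struct fcpreq r = { NULL };

        return rcv_fcp_req_slow(&q, &r, 1);
    }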
2851 struct nvmet_fc_tgtport *tgtport; in nvmet_fc_add_port() local
2875 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { in nvmet_fc_add_port()
2876 if ((tgtport->fc_target_port.node_name == traddr.nn) && in nvmet_fc_add_port()
2877 (tgtport->fc_target_port.port_name == traddr.pn)) { in nvmet_fc_add_port()
2879 if (!tgtport->pe) { in nvmet_fc_add_port()
2880 nvmet_fc_portentry_bind(tgtport, pe, port); in nvmet_fc_add_port()
2909 struct nvmet_fc_tgtport *tgtport = pe->tgtport; in nvmet_fc_discovery_chg() local
2911 if (tgtport && tgtport->ops->discovery_event) in nvmet_fc_discovery_chg()
2912 tgtport->ops->discovery_event(&tgtport->fc_target_port); in nvmet_fc_discovery_chg()