Lines Matching refs:csk

This is a cross-reference listing of every line that touches the csk (offloaded iSCSI connection socket) pointer, apparently in the Linux cxgbi library (drivers/scsi/cxgbi/libcxgbi.c). The leading number on each hit is the source line number; the trailing "in func()" names the enclosing function, and "local"/"argument" mark the lines where csk is declared.

79 struct cxgbi_sock *csk; in cxgbi_device_portmap_cleanup() local
84 csk = pmap->port_csk[i]; in cxgbi_device_portmap_cleanup()
88 csk, cdev); in cxgbi_device_portmap_cleanup()
89 spin_lock_bh(&csk->lock); in cxgbi_device_portmap_cleanup()
90 cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN); in cxgbi_device_portmap_cleanup()
91 cxgbi_sock_closed(csk); in cxgbi_device_portmap_cleanup()
92 spin_unlock_bh(&csk->lock); in cxgbi_device_portmap_cleanup()
93 cxgbi_sock_put(csk); in cxgbi_device_portmap_cleanup()
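
The cleanup loop above (lines 79-93) tears down every connection still held in the device's source-port map: take the per-socket lock, flag the socket as CTPF_OFFLOAD_DOWN so no new work is started, run the close path, then drop the reference the port map held. A minimal userspace analogue of that flag/close/put pattern, with stand-in types (the real ones live in the cxgbi headers):

    #include <stdio.h>
    #include <stdlib.h>

    enum { CTPF_OFFLOAD_DOWN = 1 << 0 };

    struct fake_csk {
        unsigned long flags;
        int refcnt;
    };

    static void fake_close(struct fake_csk *csk)
    {
        printf("closing csk %p (flags 0x%lx)\n", (void *)csk, csk->flags);
    }

    static void fake_put(struct fake_csk *csk)
    {
        if (--csk->refcnt == 0)
            free(csk);
    }

    int main(void)
    {
        struct fake_csk *port_csk[4] = { 0 };

        port_csk[2] = calloc(1, sizeof(*port_csk[2]));
        port_csk[2]->refcnt = 1;               /* the port map's reference */

        for (int i = 0; i < 4; i++) {
            struct fake_csk *csk = port_csk[i];

            if (!csk)
                continue;
            /* kernel: spin_lock_bh(&csk->lock) */
            csk->flags |= CTPF_OFFLOAD_DOWN;   /* stop new offload work */
            fake_close(csk);
            /* kernel: spin_unlock_bh(&csk->lock) */
            fake_put(csk);                     /* release the map's hold */
            port_csk[i] = NULL;
        }
        return 0;
    }
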
317 static int sock_get_port(struct cxgbi_sock *csk) in sock_get_port() argument
319 struct cxgbi_device *cdev = csk->cdev; in sock_get_port()
326 cdev, csk->port_id, cdev->ports[csk->port_id]->name); in sock_get_port()
330 if (csk->saddr.sin_port) { in sock_get_port()
332 ntohs(csk->saddr.sin_port)); in sock_get_port()
340 cdev, csk->port_id, cdev->ports[csk->port_id]->name); in sock_get_port()
350 csk->saddr.sin_port = in sock_get_port()
353 pmap->port_csk[idx] = csk; in sock_get_port()
355 cxgbi_sock_get(csk); in sock_get_port()
358 cdev, csk->port_id, in sock_get_port()
359 cdev->ports[csk->port_id]->name, in sock_get_port()
368 cdev, csk->port_id, cdev->ports[csk->port_id]->name, in sock_get_port()
373 static void sock_put_port(struct cxgbi_sock *csk) in sock_put_port() argument
375 struct cxgbi_device *cdev = csk->cdev; in sock_put_port()
378 if (csk->saddr.sin_port) { in sock_put_port()
379 int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base; in sock_put_port()
381 csk->saddr.sin_port = 0; in sock_put_port()
384 cdev, csk->port_id, in sock_put_port()
385 cdev->ports[csk->port_id]->name, in sock_put_port()
386 ntohs(csk->saddr.sin_port)); in sock_put_port()
397 cdev, csk->port_id, cdev->ports[csk->port_id]->name, in sock_put_port()
400 cxgbi_sock_put(csk); in sock_put_port()
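
sock_get_port() (lines 317-368) hands out an offload source port from a fixed window starting at pmap->sport_base, records the owning csk in pmap->port_csk[] and takes a reference; sock_put_port() (lines 373-400) inverts the mapping (slot = ntohs(port) - sport_base), clears the slot and drops the reference. A compilable sketch of that slot arithmetic; the round-robin free-slot search is an assumption, and the base/size constants are made up:

    #include <stdio.h>

    #define SPORT_BASE  10000u
    #define MAX_CONNECT 8u

    static void *port_slot[MAX_CONNECT];
    static unsigned int next_slot;

    /* Hand out a source port: find a free slot, remember the owner,
     * and derive the port number from the slot index. */
    static unsigned int get_port(void *csk)
    {
        for (unsigned int n = 0; n < MAX_CONNECT; n++) {
            unsigned int idx = (next_slot + n) % MAX_CONNECT;

            if (!port_slot[idx]) {
                port_slot[idx] = csk;
                next_slot = (idx + 1) % MAX_CONNECT;
                return SPORT_BASE + idx;
            }
        }
        return 0;                       /* window exhausted */
    }

    /* Inverse mapping used on release: slot = port - base. */
    static void put_port(unsigned int port)
    {
        unsigned int idx = port - SPORT_BASE;

        if (idx < MAX_CONNECT)
            port_slot[idx] = NULL;
    }

    int main(void)
    {
        int owner;
        unsigned int port = get_port(&owner);

        printf("allocated source port %u\n", port);
        put_port(port);
        return 0;
    }
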
407 void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk) in cxgbi_sock_free_cpl_skbs() argument
409 if (csk->cpl_close) { in cxgbi_sock_free_cpl_skbs()
410 kfree_skb(csk->cpl_close); in cxgbi_sock_free_cpl_skbs()
411 csk->cpl_close = NULL; in cxgbi_sock_free_cpl_skbs()
413 if (csk->cpl_abort_req) { in cxgbi_sock_free_cpl_skbs()
414 kfree_skb(csk->cpl_abort_req); in cxgbi_sock_free_cpl_skbs()
415 csk->cpl_abort_req = NULL; in cxgbi_sock_free_cpl_skbs()
417 if (csk->cpl_abort_rpl) { in cxgbi_sock_free_cpl_skbs()
418 kfree_skb(csk->cpl_abort_rpl); in cxgbi_sock_free_cpl_skbs()
419 csk->cpl_abort_rpl = NULL; in cxgbi_sock_free_cpl_skbs()
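
cxgbi_sock_free_cpl_skbs() applies the same free-and-NULL step to each preallocated CPL buffer, which makes the teardown safe to run more than once. The same idea in plain C, with free() standing in for kfree_skb():

    #include <stdlib.h>

    /* Hypothetical helper showing the pattern; the driver simply
     * repeats the if/free/NULL sequence inline for each skb. */
    static void free_and_null(void **p)
    {
        if (*p) {
            free(*p);
            *p = NULL;
        }
    }

    int main(void)
    {
        void *cpl = malloc(16);

        free_and_null(&cpl);
        free_and_null(&cpl);    /* safe: pointer already NULL */
        return 0;
    }
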
426 struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO); in cxgbi_sock_create() local
428 if (!csk) { in cxgbi_sock_create()
429 pr_info("alloc csk %zu failed.\n", sizeof(*csk)); in cxgbi_sock_create()
433 if (cdev->csk_alloc_cpls(csk) < 0) { in cxgbi_sock_create()
434 pr_info("csk 0x%p, alloc cpls failed.\n", csk); in cxgbi_sock_create()
435 kfree(csk); in cxgbi_sock_create()
439 spin_lock_init(&csk->lock); in cxgbi_sock_create()
440 kref_init(&csk->refcnt); in cxgbi_sock_create()
441 skb_queue_head_init(&csk->receive_queue); in cxgbi_sock_create()
442 skb_queue_head_init(&csk->write_queue); in cxgbi_sock_create()
443 setup_timer(&csk->retry_timer, NULL, (unsigned long)csk); in cxgbi_sock_create()
444 rwlock_init(&csk->callback_lock); in cxgbi_sock_create()
445 csk->cdev = cdev; in cxgbi_sock_create()
446 csk->flags = 0; in cxgbi_sock_create()
447 cxgbi_sock_set_state(csk, CTP_CLOSED); in cxgbi_sock_create()
449 log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk); in cxgbi_sock_create()
451 return csk; in cxgbi_sock_create()
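
cxgbi_sock_create() (lines 426-451) is a constructor with unwind: zero-allocate the socket, ask the low-level driver to preallocate its CPL skbs, and free the half-built object if that second step fails. On success the kref starts at one (the caller's reference), the locks, queues and retry timer are initialized, and the state machine begins at CTP_CLOSED. A stand-alone sketch of the same shape; the types and alloc_cpls() are stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    struct fake_csk { int refcnt; void *cpls; };

    static int alloc_cpls(struct fake_csk *csk)
    {
        csk->cpls = malloc(64);         /* the LLD's preallocated CPLs */
        return csk->cpls ? 0 : -1;
    }

    static struct fake_csk *create(void)
    {
        struct fake_csk *csk = calloc(1, sizeof(*csk));

        if (!csk)
            return NULL;
        if (alloc_cpls(csk) < 0) {
            free(csk);                  /* unwind the partial object */
            return NULL;
        }
        csk->refcnt = 1;                /* kref_init(): caller owns a ref */
        return csk;
    }

    int main(void)
    {
        struct fake_csk *csk = create();

        printf("new csk %p\n", (void *)csk);
        return 0;
    }
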
477 struct cxgbi_sock *csk = NULL; in cxgbi_check_route() local
531 csk = cxgbi_sock_create(cdev); in cxgbi_check_route()
532 if (!csk) { in cxgbi_check_route()
536 csk->cdev = cdev; in cxgbi_check_route()
537 csk->port_id = port; in cxgbi_check_route()
538 csk->mtu = mtu; in cxgbi_check_route()
539 csk->dst = dst; in cxgbi_check_route()
540 csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; in cxgbi_check_route()
541 csk->daddr.sin_port = daddr->sin_port; in cxgbi_check_route()
542 csk->daddr.sin_family = daddr->sin_family; in cxgbi_check_route()
543 csk->saddr.sin_addr.s_addr = fl4.saddr; in cxgbi_check_route()
545 return csk; in cxgbi_check_route()
549 if (csk) in cxgbi_check_route()
550 cxgbi_sock_closed(csk); in cxgbi_check_route()
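
cxgbi_check_route() resolves the destination, allocates a csk and fills in the MTU, port and address pair; its caller at line 2447 tests the result with IS_ERR(), so failures come back as an errno value encoded in the pointer itself, and a csk already allocated on the error path is released through cxgbi_sock_closed(). The pointer-encoding convention, reduced to userspace:

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    /* Minimal rendition of the kernel's ERR_PTR/IS_ERR/PTR_ERR trio:
     * the top 4095 pointer values double as negative errno codes. */
    #define MAX_ERRNO 4095

    static void *err_ptr(long err)      { return (void *)err; }
    static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
    static long  ptr_err(const void *p) { return (long)(intptr_t)p; }

    int main(void)
    {
        void *csk = err_ptr(-EHOSTUNREACH);   /* e.g. no route found */

        if (is_err(csk))
            printf("connect failed: %ld\n", ptr_err(csk));
        return 0;
    }
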
555 void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, in cxgbi_sock_established() argument
558 csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn; in cxgbi_sock_established()
559 dst_confirm(csk->dst); in cxgbi_sock_established()
561 cxgbi_sock_set_state(csk, CTP_ESTABLISHED); in cxgbi_sock_established()
565 static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) in cxgbi_inform_iscsi_conn_closing() argument
569 csk, csk->state, csk->flags, csk->user_data); in cxgbi_inform_iscsi_conn_closing()
571 if (csk->state != CTP_ESTABLISHED) { in cxgbi_inform_iscsi_conn_closing()
572 read_lock_bh(&csk->callback_lock); in cxgbi_inform_iscsi_conn_closing()
573 if (csk->user_data) in cxgbi_inform_iscsi_conn_closing()
574 iscsi_conn_failure(csk->user_data, in cxgbi_inform_iscsi_conn_closing()
576 read_unlock_bh(&csk->callback_lock); in cxgbi_inform_iscsi_conn_closing()
580 void cxgbi_sock_closed(struct cxgbi_sock *csk) in cxgbi_sock_closed() argument
583 csk, (csk)->state, (csk)->flags, (csk)->tid); in cxgbi_sock_closed()
584 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); in cxgbi_sock_closed()
585 if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED) in cxgbi_sock_closed()
587 if (csk->saddr.sin_port) in cxgbi_sock_closed()
588 sock_put_port(csk); in cxgbi_sock_closed()
589 if (csk->dst) in cxgbi_sock_closed()
590 dst_release(csk->dst); in cxgbi_sock_closed()
591 csk->cdev->csk_release_offload_resources(csk); in cxgbi_sock_closed()
592 cxgbi_sock_set_state(csk, CTP_CLOSED); in cxgbi_sock_closed()
593 cxgbi_inform_iscsi_conn_closing(csk); in cxgbi_sock_closed()
594 cxgbi_sock_put(csk); in cxgbi_sock_closed()
598 static void need_active_close(struct cxgbi_sock *csk) in need_active_close() argument
604 csk, (csk)->state, (csk)->flags, (csk)->tid); in need_active_close()
605 spin_lock_bh(&csk->lock); in need_active_close()
606 dst_confirm(csk->dst); in need_active_close()
607 data_lost = skb_queue_len(&csk->receive_queue); in need_active_close()
608 __skb_queue_purge(&csk->receive_queue); in need_active_close()
610 if (csk->state == CTP_ACTIVE_OPEN) in need_active_close()
611 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); in need_active_close()
612 else if (csk->state == CTP_ESTABLISHED) { in need_active_close()
614 cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE); in need_active_close()
615 } else if (csk->state == CTP_PASSIVE_CLOSE) { in need_active_close()
617 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); in need_active_close()
622 csk->cdev->csk_send_abort_req(csk); in need_active_close()
624 csk->cdev->csk_send_close_req(csk); in need_active_close()
627 spin_unlock_bh(&csk->lock); in need_active_close()
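
need_active_close() (lines 598-627) purges anything still sitting in the receive queue and then branches on state: a connection still in CTP_ACTIVE_OPEN is only flagged for later, an established one moves to CTP_ACTIVE_CLOSE, and one already in CTP_PASSIVE_CLOSE moves to CTP_CLOSE_WAIT_2. When a close request is due, dropped receive data (the data_lost count at line 607) escalates it from an orderly close to an abort. The decision logic reconstructed as a sketch; the state names match the listing, the rest is inferred:

    #include <stdio.h>

    enum ctp_state { CTP_ACTIVE_OPEN, CTP_ESTABLISHED,
                     CTP_PASSIVE_CLOSE, CTP_ACTIVE_CLOSE, CTP_CLOSE_WAIT_2 };

    static const char *active_close(enum ctp_state *state, int data_lost)
    {
        int close_req = 0;

        switch (*state) {
        case CTP_ACTIVE_OPEN:           /* not yet up: just flag it */
            return "flag CTPF_ACTIVE_CLOSE_NEEDED";
        case CTP_ESTABLISHED:
            *state = CTP_ACTIVE_CLOSE;
            close_req = 1;
            break;
        case CTP_PASSIVE_CLOSE:
            *state = CTP_CLOSE_WAIT_2;
            close_req = 1;
            break;
        default:
            break;
        }
        if (!close_req)
            return "nothing to send";
        return data_lost ? "send ABORT_REQ" : "send CLOSE_REQ";
    }

    int main(void)
    {
        enum ctp_state s = CTP_ESTABLISHED;

        printf("%s\n", active_close(&s, 0));    /* -> send CLOSE_REQ */
        return 0;
    }
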
630 void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno) in cxgbi_sock_fail_act_open() argument
633 csk, csk->state, csk->flags, in cxgbi_sock_fail_act_open()
634 &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port, in cxgbi_sock_fail_act_open()
635 &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port, in cxgbi_sock_fail_act_open()
638 cxgbi_sock_set_state(csk, CTP_CONNECTING); in cxgbi_sock_fail_act_open()
639 csk->err = errno; in cxgbi_sock_fail_act_open()
640 cxgbi_sock_closed(csk); in cxgbi_sock_fail_act_open()
646 struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk; in cxgbi_sock_act_open_req_arp_failure() local
649 csk, (csk)->state, (csk)->flags, (csk)->tid); in cxgbi_sock_act_open_req_arp_failure()
650 cxgbi_sock_get(csk); in cxgbi_sock_act_open_req_arp_failure()
651 spin_lock_bh(&csk->lock); in cxgbi_sock_act_open_req_arp_failure()
652 if (csk->state == CTP_ACTIVE_OPEN) in cxgbi_sock_act_open_req_arp_failure()
653 cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH); in cxgbi_sock_act_open_req_arp_failure()
654 spin_unlock_bh(&csk->lock); in cxgbi_sock_act_open_req_arp_failure()
655 cxgbi_sock_put(csk); in cxgbi_sock_act_open_req_arp_failure()
660 void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk) in cxgbi_sock_rcv_abort_rpl() argument
662 cxgbi_sock_get(csk); in cxgbi_sock_rcv_abort_rpl()
663 spin_lock_bh(&csk->lock); in cxgbi_sock_rcv_abort_rpl()
664 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { in cxgbi_sock_rcv_abort_rpl()
665 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD)) in cxgbi_sock_rcv_abort_rpl()
666 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); in cxgbi_sock_rcv_abort_rpl()
668 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD); in cxgbi_sock_rcv_abort_rpl()
669 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); in cxgbi_sock_rcv_abort_rpl()
670 if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) in cxgbi_sock_rcv_abort_rpl()
672 csk, csk->state, csk->flags, csk->tid); in cxgbi_sock_rcv_abort_rpl()
673 cxgbi_sock_closed(csk); in cxgbi_sock_rcv_abort_rpl()
676 spin_unlock_bh(&csk->lock); in cxgbi_sock_rcv_abort_rpl()
677 cxgbi_sock_put(csk); in cxgbi_sock_rcv_abort_rpl()
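
The abort-reply handler (lines 660-677) is a two-event latch: while CTPF_ABORT_RPL_PENDING is set, the first reply merely records itself in CTPF_ABORT_RPL_RCVD, and only the second clears both flags and actually closes the socket. A minimal rendition with plain bit flags; the else-branch structure is inferred from the fragments:

    #include <stdio.h>

    enum { ABORT_RPL_PENDING = 1 << 0, ABORT_RPL_RCVD = 1 << 1 };

    /* Returns 1 when the socket should now be closed. */
    static int rcv_abort_rpl(unsigned long *flags)
    {
        if (!(*flags & ABORT_RPL_PENDING))
            return 0;                       /* unexpected reply */
        if (!(*flags & ABORT_RPL_RCVD)) {
            *flags |= ABORT_RPL_RCVD;       /* first of two */
            return 0;
        }
        *flags &= ~(ABORT_RPL_RCVD | ABORT_RPL_PENDING);
        return 1;                           /* second: close now */
    }

    int main(void)
    {
        unsigned long flags = ABORT_RPL_PENDING;
        int first = rcv_abort_rpl(&flags);
        int second = rcv_abort_rpl(&flags);

        printf("%d %d\n", first, second);   /* prints: 0 1 */
        return 0;
    }
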
681 void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk) in cxgbi_sock_rcv_peer_close() argument
684 csk, (csk)->state, (csk)->flags, (csk)->tid); in cxgbi_sock_rcv_peer_close()
685 cxgbi_sock_get(csk); in cxgbi_sock_rcv_peer_close()
686 spin_lock_bh(&csk->lock); in cxgbi_sock_rcv_peer_close()
688 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) in cxgbi_sock_rcv_peer_close()
691 switch (csk->state) { in cxgbi_sock_rcv_peer_close()
693 cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE); in cxgbi_sock_rcv_peer_close()
696 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); in cxgbi_sock_rcv_peer_close()
699 cxgbi_sock_closed(csk); in cxgbi_sock_rcv_peer_close()
705 csk, csk->state, csk->flags, csk->tid); in cxgbi_sock_rcv_peer_close()
707 cxgbi_inform_iscsi_conn_closing(csk); in cxgbi_sock_rcv_peer_close()
709 spin_unlock_bh(&csk->lock); in cxgbi_sock_rcv_peer_close()
710 cxgbi_sock_put(csk); in cxgbi_sock_rcv_peer_close()
714 void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt) in cxgbi_sock_rcv_close_conn_rpl() argument
717 csk, (csk)->state, (csk)->flags, (csk)->tid); in cxgbi_sock_rcv_close_conn_rpl()
718 cxgbi_sock_get(csk); in cxgbi_sock_rcv_close_conn_rpl()
719 spin_lock_bh(&csk->lock); in cxgbi_sock_rcv_close_conn_rpl()
721 csk->snd_una = snd_nxt - 1; in cxgbi_sock_rcv_close_conn_rpl()
722 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) in cxgbi_sock_rcv_close_conn_rpl()
725 switch (csk->state) { in cxgbi_sock_rcv_close_conn_rpl()
727 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1); in cxgbi_sock_rcv_close_conn_rpl()
731 cxgbi_sock_closed(csk); in cxgbi_sock_rcv_close_conn_rpl()
737 csk, csk->state, csk->flags, csk->tid); in cxgbi_sock_rcv_close_conn_rpl()
740 spin_unlock_bh(&csk->lock); in cxgbi_sock_rcv_close_conn_rpl()
741 cxgbi_sock_put(csk); in cxgbi_sock_rcv_close_conn_rpl()
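
Together, cxgbi_sock_rcv_peer_close() and cxgbi_sock_rcv_close_conn_rpl() walk the TCP-like half-close ladder: the peer's FIN takes ESTABLISHED to PASSIVE_CLOSE (line 693) or ACTIVE_CLOSE to CLOSE_WAIT_2 (line 696), while the reply to our own close takes ACTIVE_CLOSE to CLOSE_WAIT_1 (line 727); whichever event arrives second ends in cxgbi_sock_closed() (lines 699, 731). A transition table as a sketch; the exact case labels are inferred beyond what the fragments show, so treat this as a reconstruction:

    #include <stdio.h>

    enum ctp { ESTABLISHED, ACTIVE_CLOSE, PASSIVE_CLOSE,
               CLOSE_WAIT_1, CLOSE_WAIT_2, CLOSED };

    static enum ctp on_peer_close(enum ctp s)       /* FIN from peer */
    {
        switch (s) {
        case ESTABLISHED:  return PASSIVE_CLOSE;
        case ACTIVE_CLOSE: return CLOSE_WAIT_2;
        case CLOSE_WAIT_1: return CLOSED;
        default:           return s;
        }
    }

    static enum ctp on_close_rpl(enum ctp s)        /* our FIN acked */
    {
        switch (s) {
        case ACTIVE_CLOSE: return CLOSE_WAIT_1;
        case CLOSE_WAIT_1:
        case CLOSE_WAIT_2: return CLOSED;
        default:           return s;
        }
    }

    int main(void)
    {
        /* active close: our FIN is acked first, then the peer's arrives */
        enum ctp s = ACTIVE_CLOSE;

        s = on_close_rpl(s);        /* -> CLOSE_WAIT_1 */
        s = on_peer_close(s);       /* -> CLOSED */
        printf("%s\n", s == CLOSED ? "CLOSED" : "not closed");
        return 0;
    }
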
745 void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits, in cxgbi_sock_rcv_wr_ack() argument
750 csk, csk->state, csk->flags, csk->tid, credits, in cxgbi_sock_rcv_wr_ack()
751 csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk); in cxgbi_sock_rcv_wr_ack()
753 spin_lock_bh(&csk->lock); in cxgbi_sock_rcv_wr_ack()
755 csk->wr_cred += credits; in cxgbi_sock_rcv_wr_ack()
756 if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred) in cxgbi_sock_rcv_wr_ack()
757 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; in cxgbi_sock_rcv_wr_ack()
760 struct sk_buff *p = cxgbi_sock_peek_wr(csk); in cxgbi_sock_rcv_wr_ack()
764 csk, csk->state, csk->flags, csk->tid, credits, in cxgbi_sock_rcv_wr_ack()
765 csk->wr_cred, csk->wr_una_cred); in cxgbi_sock_rcv_wr_ack()
771 csk, csk->state, csk->flags, csk->tid, in cxgbi_sock_rcv_wr_ack()
772 credits, csk->wr_cred, csk->wr_una_cred, in cxgbi_sock_rcv_wr_ack()
777 cxgbi_sock_dequeue_wr(csk); in cxgbi_sock_rcv_wr_ack()
783 cxgbi_sock_check_wr_invariants(csk); in cxgbi_sock_rcv_wr_ack()
786 if (unlikely(before(snd_una, csk->snd_una))) { in cxgbi_sock_rcv_wr_ack()
788 csk, csk->state, csk->flags, csk->tid, snd_una, in cxgbi_sock_rcv_wr_ack()
789 csk->snd_una); in cxgbi_sock_rcv_wr_ack()
793 if (csk->snd_una != snd_una) { in cxgbi_sock_rcv_wr_ack()
794 csk->snd_una = snd_una; in cxgbi_sock_rcv_wr_ack()
795 dst_confirm(csk->dst); in cxgbi_sock_rcv_wr_ack()
799 if (skb_queue_len(&csk->write_queue)) { in cxgbi_sock_rcv_wr_ack()
800 if (csk->cdev->csk_push_tx_frames(csk, 0)) in cxgbi_sock_rcv_wr_ack()
801 cxgbi_conn_tx_open(csk); in cxgbi_sock_rcv_wr_ack()
803 cxgbi_conn_tx_open(csk); in cxgbi_sock_rcv_wr_ack()
805 spin_unlock_bh(&csk->lock); in cxgbi_sock_rcv_wr_ack()
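
cxgbi_sock_rcv_wr_ack() (lines 745-805) is the transmit bookkeeping hub: returned credits go back into wr_cred, the unacked-credit counter is clamped so the two never exceed wr_max_cred, fully-acked work requests are dequeued, and snd_una only ever moves forward; the before() test at line 786 is the usual wraparound-safe 32-bit sequence compare. Both the clamp and the compare, runnable with arbitrary values:

    #include <stdio.h>
    #include <stdint.h>

    /* before(a, b) is true when a precedes b modulo 2^32. */
    static int before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;
    }

    int main(void)
    {
        uint32_t wr_cred = 10, wr_max_cred = 16, wr_una_cred = 9;
        uint32_t credits = 4;

        /* credit return and clamp, as at lines 755-757 */
        wr_cred += credits;
        if (wr_una_cred > wr_max_cred - wr_cred)
            wr_una_cred = wr_max_cred - wr_cred;
        printf("wr_cred=%u wr_una_cred=%u\n", wr_cred, wr_una_cred);

        /* a stale ACK must not move snd_una backwards */
        uint32_t snd_una = 4000000000u;
        uint32_t ack = 1000u;               /* ack wrapped past 2^32 */
        printf("before=%d\n", before(ack, snd_una));  /* 0: ack is newer */
        return 0;
    }
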
809 static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk, in cxgbi_sock_find_best_mtu() argument
814 while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu) in cxgbi_sock_find_best_mtu()
820 unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu) in cxgbi_sock_select_mss() argument
823 struct dst_entry *dst = csk->dst; in cxgbi_sock_select_mss()
825 csk->advmss = dst_metric_advmss(dst); in cxgbi_sock_select_mss()
827 if (csk->advmss > pmtu - 40) in cxgbi_sock_select_mss()
828 csk->advmss = pmtu - 40; in cxgbi_sock_select_mss()
829 if (csk->advmss < csk->cdev->mtus[0] - 40) in cxgbi_sock_select_mss()
830 csk->advmss = csk->cdev->mtus[0] - 40; in cxgbi_sock_select_mss()
831 idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40); in cxgbi_sock_select_mss()
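
MSS selection (lines 809-831) clamps the MSS advertised by the route (dst_metric_advmss(), line 825) between the path MTU and the adapter's smallest programmed MTU, then scans the sorted MTU table for the largest entry that still fits; the recurring "- 40" is the 20-byte IPv4 plus 20-byte TCP header overhead separating an MTU from an MSS. Runnable with a made-up MTU table:

    #include <stdio.h>

    static const unsigned int mtus[] = { 576, 1500, 4096, 9000 };
    #define NMTUS (sizeof(mtus) / sizeof(mtus[0]))

    /* Walk the ascending table, as cxgbi_sock_find_best_mtu() does,
     * and pick the largest entry not exceeding the target MTU. */
    static unsigned int find_best_mtu(unsigned int mtu)
    {
        unsigned int i = 0;

        while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
            ++i;
        return i;
    }

    int main(void)
    {
        unsigned int pmtu = 1500;
        unsigned int advmss = 1460;         /* from the route metrics */

        if (advmss > pmtu - 40)
            advmss = pmtu - 40;             /* cap at path MTU */
        if (advmss < mtus[0] - 40)
            advmss = mtus[0] - 40;          /* floor at smallest MTU */
        printf("advmss=%u idx=%u\n", advmss, find_best_mtu(advmss + 40));
        return 0;   /* advmss=1460 idx=1 (the 1500-byte entry) */
    }
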
837 void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb) in cxgbi_sock_skb_entail() argument
839 cxgbi_skcb_tcp_seq(skb) = csk->write_seq; in cxgbi_sock_skb_entail()
840 __skb_queue_tail(&csk->write_queue, skb); in cxgbi_sock_skb_entail()
844 void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk) in cxgbi_sock_purge_wr_queue() argument
848 while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL) in cxgbi_sock_purge_wr_queue()
853 void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk) in cxgbi_sock_check_wr_invariants() argument
855 int pending = cxgbi_sock_count_pending_wrs(csk); in cxgbi_sock_check_wr_invariants()
857 if (unlikely(csk->wr_cred + pending != csk->wr_max_cred)) in cxgbi_sock_check_wr_invariants()
859 csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred); in cxgbi_sock_check_wr_invariants()
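
The invariant at line 857 says the transmit credit pool is conserved: credits still available plus credits tied up in pending work requests must always total wr_max_cred. As an assertion, with sample numbers:

    #include <assert.h>

    int main(void)
    {
        unsigned int wr_max_cred = 16, wr_cred = 11, pending = 5;

        assert(wr_cred + pending == wr_max_cred);
        return 0;
    }
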
863 static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb) in cxgbi_sock_send_pdus() argument
865 struct cxgbi_device *cdev = csk->cdev; in cxgbi_sock_send_pdus()
869 spin_lock_bh(&csk->lock); in cxgbi_sock_send_pdus()
871 if (csk->state != CTP_ESTABLISHED) { in cxgbi_sock_send_pdus()
874 csk, csk->state, csk->flags, csk->tid); in cxgbi_sock_send_pdus()
879 if (csk->err) { in cxgbi_sock_send_pdus()
882 csk, csk->state, csk->flags, csk->tid, csk->err); in cxgbi_sock_send_pdus()
887 if (csk->write_seq - csk->snd_una >= cdev->snd_win) { in cxgbi_sock_send_pdus()
890 csk, csk->state, csk->flags, csk->tid, csk->write_seq, in cxgbi_sock_send_pdus()
891 csk->snd_una, cdev->snd_win); in cxgbi_sock_send_pdus()
902 csk, skb_headroom(skb), cdev->skb_tx_rsvd); in cxgbi_sock_send_pdus()
909 csk, skb_shinfo(skb)->nr_frags, skb->len, in cxgbi_sock_send_pdus()
918 cxgbi_sock_skb_entail(csk, skb); in cxgbi_sock_send_pdus()
920 csk->write_seq += skb->len + in cxgbi_sock_send_pdus()
925 if (likely(skb_queue_len(&csk->write_queue))) in cxgbi_sock_send_pdus()
926 cdev->csk_push_tx_frames(csk, 1); in cxgbi_sock_send_pdus()
927 spin_unlock_bh(&csk->lock); in cxgbi_sock_send_pdus()
932 copied = csk->err ? csk->err : -EPIPE; in cxgbi_sock_send_pdus()
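
cxgbi_sock_send_pdus() refuses new PDUs once the unacked bytes reach the send window (line 887): write_seq - snd_una is the in-flight count, and unsigned subtraction keeps the test correct across sequence-number wrap; the PDU is then held back until WR acks free window space, while a socket already in error reports csk->err, defaulting to -EPIPE (line 932). The window gate in isolation:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t write_seq = 4294966000u;   /* near the 2^32 wrap */
        uint32_t snd_una   = 4294960000u;
        uint32_t snd_win   = 128 * 1024;

        /* wraparound-safe in-flight count */
        uint32_t in_flight = write_seq - snd_una;

        if (in_flight >= snd_win)
            printf("window full: hold the PDU\n");
        else
            printf("ok: %u bytes in flight\n", in_flight);
        return 0;
    }
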
1243 static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid, in ddp_tag_reserve() argument
1247 struct cxgbi_device *cdev = csk->cdev; in ddp_tag_reserve()
1286 err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl); in ddp_tag_reserve()
1303 int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp, in cxgbi_ddp_reserve() argument
1307 struct cxgbi_device *cdev = csk->cdev; in cxgbi_ddp_reserve()
1329 err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp); in cxgbi_ddp_reserve()
1474 err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag, in task_reserve_itt()
1482 cconn->cep->csk, task, scsi_in(sc)->length, in task_reserve_itt()
1518 void cxgbi_conn_tx_open(struct cxgbi_sock *csk) in cxgbi_conn_tx_open() argument
1520 struct iscsi_conn *conn = csk->user_data; in cxgbi_conn_tx_open()
1524 "csk 0x%p, cid %d.\n", csk, conn->id); in cxgbi_conn_tx_open()
1636 static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) in csk_return_rx_credits() argument
1638 struct cxgbi_device *cdev = csk->cdev; in csk_return_rx_credits()
1644 csk, csk->state, csk->flags, csk->tid, csk->copied_seq, in csk_return_rx_credits()
1645 csk->rcv_wup, cdev->rx_credit_thres, in csk_return_rx_credits()
1648 if (csk->state != CTP_ESTABLISHED) in csk_return_rx_credits()
1651 credits = csk->copied_seq - csk->rcv_wup; in csk_return_rx_credits()
1659 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); in csk_return_rx_credits()
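
csk_return_rx_credits() (lines 1636-1659) gives read credits back to the adapter so it can reopen its receive window: copied_seq - rcv_wup is how much iSCSI has consumed since the chip was last told, and the update is batched behind a threshold (rx_credit_thres, line 1645) rather than sent per byte. A sketch with an arbitrary threshold; send_rx_credits() is a stand-in for the driver callback:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t send_rx_credits(uint32_t credits)
    {
        printf("returning %u bytes of rx credits\n", credits);
        return credits;                 /* chip accepted all of them */
    }

    int main(void)
    {
        uint32_t copied_seq = 50000, rcv_wup = 16000;
        uint32_t rx_credit_thres = 32768;

        uint32_t credits = copied_seq - rcv_wup;

        if (credits && credits >= rx_credit_thres)
            rcv_wup += send_rx_credits(credits);
        printf("rcv_wup now %u\n", rcv_wup);
        return 0;
    }
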
1662 void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) in cxgbi_conn_pdu_ready() argument
1664 struct cxgbi_device *cdev = csk->cdev; in cxgbi_conn_pdu_ready()
1665 struct iscsi_conn *conn = csk->user_data; in cxgbi_conn_pdu_ready()
1671 "csk 0x%p, conn 0x%p.\n", csk, conn); in cxgbi_conn_pdu_ready()
1676 csk, conn, conn ? conn->id : 0xFF, in cxgbi_conn_pdu_ready()
1682 skb = skb_peek(&csk->receive_queue); in cxgbi_conn_pdu_ready()
1691 __skb_unlink(skb, &csk->receive_queue); in cxgbi_conn_pdu_ready()
1696 csk, skb, skb->len, cxgbi_skcb_flags(skb), in cxgbi_conn_pdu_ready()
1704 csk, skb, skb->len, in cxgbi_conn_pdu_ready()
1714 csk, skb, skb->len, in cxgbi_conn_pdu_ready()
1722 csk, skb, skb->len, in cxgbi_conn_pdu_ready()
1731 dskb = skb_peek(&csk->receive_queue); in cxgbi_conn_pdu_ready()
1735 csk, skb, skb->len, in cxgbi_conn_pdu_ready()
1741 __skb_unlink(dskb, &csk->receive_queue); in cxgbi_conn_pdu_ready()
1748 csk, skb, skb->len, in cxgbi_conn_pdu_ready()
1763 log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); in cxgbi_conn_pdu_ready()
1765 csk->copied_seq += read; in cxgbi_conn_pdu_ready()
1766 csk_return_rx_credits(csk, read); in cxgbi_conn_pdu_ready()
1772 csk, conn, err, read); in cxgbi_conn_pdu_ready()
1871 struct cxgbi_sock *csk = cconn->cep->csk; in cxgbi_conn_alloc_pdu() local
1872 struct net_device *ndev = cdev->ports[csk->port_id]; in cxgbi_conn_alloc_pdu()
2028 err = cxgbi_sock_send_pdus(cconn->cep->csk, skb); in cxgbi_conn_xmit_pdu()
2152 struct cxgbi_sock *csk = cconn->cep->csk; in cxgbi_set_conn_param() local
2163 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, in cxgbi_set_conn_param()
2170 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, in cxgbi_set_conn_param()
2197 struct cxgbi_sock *csk; in cxgbi_get_ep_param() local
2209 csk = cep->csk; in cxgbi_get_ep_param()
2210 if (!csk) in cxgbi_get_ep_param()
2214 &csk->daddr, param, buf); in cxgbi_get_ep_param()
2256 struct cxgbi_sock *csk; in cxgbi_bind_conn() local
2265 csk = cep->csk; in cxgbi_bind_conn()
2266 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0); in cxgbi_bind_conn()
2277 write_lock_bh(&csk->callback_lock); in cxgbi_bind_conn()
2278 csk->user_data = conn; in cxgbi_bind_conn()
2282 write_unlock_bh(&csk->callback_lock); in cxgbi_bind_conn()
2289 cls_session, cls_conn, ep, cconn, csk); in cxgbi_bind_conn()
2431 struct cxgbi_sock *csk; in cxgbi_ep_connect() local
2446 csk = cxgbi_check_route(dst_addr); in cxgbi_ep_connect()
2447 if (IS_ERR(csk)) in cxgbi_ep_connect()
2448 return (struct iscsi_endpoint *)csk; in cxgbi_ep_connect()
2449 cxgbi_sock_get(csk); in cxgbi_ep_connect()
2452 hba = csk->cdev->hbas[csk->port_id]; in cxgbi_ep_connect()
2453 else if (hba != csk->cdev->hbas[csk->port_id]) { in cxgbi_ep_connect()
2457 csk->cdev->hbas[csk->port_id], csk->port_id); in cxgbi_ep_connect()
2462 err = sock_get_port(csk); in cxgbi_ep_connect()
2466 cxgbi_sock_set_state(csk, CTP_CONNECTING); in cxgbi_ep_connect()
2467 err = csk->cdev->csk_init_act_open(csk); in cxgbi_ep_connect()
2471 if (cxgbi_sock_is_closing(csk)) { in cxgbi_ep_connect()
2473 pr_info("csk 0x%p is closing.\n", csk); in cxgbi_ep_connect()
2485 cep->csk = csk; in cxgbi_ep_connect()
2490 ep, cep, csk, hba, hba->ndev->name); in cxgbi_ep_connect()
2494 cxgbi_sock_put(csk); in cxgbi_ep_connect()
2495 cxgbi_sock_closed(csk); in cxgbi_ep_connect()
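
cxgbi_ep_connect() (lines 2431-2495) strings the earlier pieces together: resolve the route, pin the csk with a reference, check that the HBA matches the egress port, allocate a source port, kick off the active open, then wait; any failure after the reference is taken funnels through one release path (cxgbi_sock_put() then cxgbi_sock_closed(), lines 2494-2495). The staged goto-unwind shape, with every stage stubbed out:

    #include <stdio.h>

    static int check_route(void)   { return 0; }
    static int get_port(void)      { return 0; }
    static int init_act_open(void) { return 0; }
    static int is_closing(void)    { return 0; }

    int main(void)
    {
        int err;

        if ((err = check_route()))
            goto err_out;
        if ((err = get_port()))
            goto release;
        if ((err = init_act_open()))
            goto release;
        if (is_closing()) {             /* raced with peer teardown */
            err = -1;
            goto release;
        }
        printf("endpoint connected\n");
        return 0;
    release:
        printf("unwind: cxgbi_sock_put + cxgbi_sock_closed\n");
    err_out:
        return err;
    }
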
2504 struct cxgbi_sock *csk = cep->csk; in cxgbi_ep_poll() local
2506 if (!cxgbi_sock_is_established(csk)) in cxgbi_ep_poll()
2516 struct cxgbi_sock *csk = cep->csk; in cxgbi_ep_disconnect() local
2520 ep, cep, cconn, csk, csk->state, csk->flags); in cxgbi_ep_disconnect()
2524 write_lock_bh(&csk->callback_lock); in cxgbi_ep_disconnect()
2525 cep->csk->user_data = NULL; in cxgbi_ep_disconnect()
2527 write_unlock_bh(&csk->callback_lock); in cxgbi_ep_disconnect()
2531 if (likely(csk->state >= CTP_ESTABLISHED)) in cxgbi_ep_disconnect()
2532 need_active_close(csk); in cxgbi_ep_disconnect()
2534 cxgbi_sock_closed(csk); in cxgbi_ep_disconnect()
2536 cxgbi_sock_put(csk); in cxgbi_ep_disconnect()
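
cxgbi_ep_disconnect() orders its teardown deliberately: detach the iSCSI upcall pointer under callback_lock first (lines 2524-2527) so no completion can call into a dying connection, then either start an active close (state >= CTP_ESTABLISHED) or finish with cxgbi_sock_closed(), and finally drop the endpoint's reference. The ordering as a trivially runnable sketch; the locking is only indicated in comments:

    #include <stdio.h>

    int main(void)
    {
        void *user_data = (void *)0x1;  /* stand-in for the iscsi_conn */
        int established = 1;

        /* kernel: write_lock_bh(&csk->callback_lock) */
        user_data = NULL;               /* step 1: stop upcalls */
        /* kernel: write_unlock_bh(&csk->callback_lock) */

        if (established)
            printf("need_active_close()\n");    /* step 2: close */
        else
            printf("cxgbi_sock_closed()\n");
        printf("cxgbi_sock_put()\n");           /* step 3: drop the ref */
        return !!user_data;
    }
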