Lines Matching refs:csk

172 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);
174 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, in send_act_open_req() argument
183 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid)); in send_act_open_req()
184 req->local_port = csk->saddr.sin_port; in send_act_open_req()
185 req->peer_port = csk->daddr.sin_port; in send_act_open_req()
186 req->local_ip = csk->saddr.sin_addr.s_addr; in send_act_open_req()
187 req->peer_ip = csk->daddr.sin_addr.s_addr; in send_act_open_req()
190 V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) | in send_act_open_req()
197 csk, csk->state, csk->flags, csk->atid, in send_act_open_req()
200 csk->mss_idx, e->idx, e->smt_idx); in send_act_open_req()
202 l2t_send(csk->cdev->lldev, skb, csk->l2t); in send_act_open_req()
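
The send_act_open_req() matches above show the active-open request being built: the socket's 4-tuple is already stored in network byte order in csk->saddr/csk->daddr, so ports and addresses are copied into the CPL verbatim, while host-order option fields (window scale, MSS index) are packed and converted explicitly. A standalone sketch of that pattern; the struct layout and shift positions below are illustrative, not the cxgb3 CPL definitions:

    #include <stdint.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>

    struct open_req {                  /* hypothetical wire layout */
        uint16_t local_port;           /* network byte order */
        uint16_t peer_port;
        uint32_t local_ip;
        uint32_t peer_ip;
        uint32_t opt0;                 /* packed connection options */
    };

    #define V_WND_SCALE(x) ((uint32_t)(x) << 8)  /* shifts are illustrative */
    #define V_MSS_IDX(x)   ((uint32_t)(x) << 0)

    static void fill_open_req(struct open_req *req,
                              const struct sockaddr_in *src,
                              const struct sockaddr_in *dst,
                              unsigned int wscale, unsigned int mss_idx)
    {
        req->local_port = src->sin_port;          /* already big-endian */
        req->peer_port  = dst->sin_port;
        req->local_ip   = src->sin_addr.s_addr;
        req->peer_ip    = dst->sin_addr.s_addr;
        req->opt0 = htonl(V_WND_SCALE(wscale) | V_MSS_IDX(mss_idx));
    }
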
216 static void send_close_req(struct cxgbi_sock *csk) in send_close_req() argument
218 struct sk_buff *skb = csk->cpl_close; in send_close_req()
220 unsigned int tid = csk->tid; in send_close_req()
224 csk, csk->state, csk->flags, csk->tid); in send_close_req()
226 csk->cpl_close = NULL; in send_close_req()
230 req->rsvd = htonl(csk->write_seq); in send_close_req()
232 cxgbi_sock_skb_entail(csk, skb); in send_close_req()
233 if (csk->state >= CTP_ESTABLISHED) in send_close_req()
234 push_tx_frames(csk, 1); in send_close_req()
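
send_close_req() consumes the close message preallocated by alloc_cpls() (further down), so queueing a close can never fail under memory pressure; it stamps csk->write_seq into the message and only kicks the transmit engine once the connection has reached CTP_ESTABLISHED. A compact model of that single-shot hand-off, with illustrative names:

    enum { CTP_CONNECTING, CTP_ESTABLISHED };

    struct close_conn {
        void *cpl_close;        /* preallocated at setup; NULL once queued */
        void *pending;          /* stand-in for the tail of the write queue */
        int   state;
    };

    /* Returns 1 if the caller should push the write queue now. */
    static int queue_close(struct close_conn *c)
    {
        if (!c->cpl_close)
            return 0;                       /* close already queued once */
        /* the driver also stamps the final write sequence into the msg */
        c->pending   = c->cpl_close;        /* entail on the write queue */
        c->cpl_close = NULL;                /* single-shot ownership transfer */
        return c->state >= CTP_ESTABLISHED; /* flush only once established */
    }
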
255 static void send_abort_req(struct cxgbi_sock *csk) in send_abort_req() argument
257 struct sk_buff *skb = csk->cpl_abort_req; in send_abort_req()
260 if (unlikely(csk->state == CTP_ABORTING || !skb)) in send_abort_req()
262 cxgbi_sock_set_state(csk, CTP_ABORTING); in send_abort_req()
263 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); in send_abort_req()
265 cxgbi_sock_purge_write_queue(csk); in send_abort_req()
267 csk->cpl_abort_req = NULL; in send_abort_req()
272 req->wr.wr_lo = htonl(V_WR_TID(csk->tid)); in send_abort_req()
273 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); in send_abort_req()
274 req->rsvd0 = htonl(csk->snd_nxt); in send_abort_req()
275 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); in send_abort_req()
280 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, in send_abort_req()
283 l2t_send(csk->cdev->lldev, skb, csk->l2t); in send_abort_req()
291 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) in send_abort_rpl() argument
293 struct sk_buff *skb = csk->cpl_abort_rpl; in send_abort_rpl()
298 csk, csk->state, csk->flags, csk->tid, rst_status); in send_abort_rpl()
300 csk->cpl_abort_rpl = NULL; in send_abort_rpl()
303 rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid)); in send_abort_rpl()
304 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); in send_abort_rpl()
306 cxgb3_ofld_send(csk->cdev->lldev, skb); in send_abort_rpl()
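
The send_abort_req()/send_abort_rpl() matches show the local-abort sequencing: the socket moves to CTP_ABORTING and its unsent write queue is purged before the request goes out, and the request carries snd_nxt plus a flag telling the peer whether any TX data was ever sent. A standalone sketch, assuming a simplified wire layout:

    #include <stdint.h>
    #include <arpa/inet.h>

    #define F_TX_DATA_SENT      (1UL << 0)
    #define F_ABORT_RPL_PENDING (1UL << 1)

    enum abort_state { AB_ESTABLISHED, AB_ABORTING };

    struct abort_conn {
        enum abort_state state;
        unsigned long    flags;
        uint32_t         snd_nxt;      /* host order */
    };

    struct abort_req {                 /* hypothetical wire layout */
        uint32_t snd_nxt;              /* network order */
        uint8_t  no_data_sent;         /* lets the peer pick reset behaviour */
    };

    static int build_abort_req(struct abort_conn *c, struct abort_req *req)
    {
        if (c->state == AB_ABORTING)       /* abort already in flight */
            return -1;
        c->state  = AB_ABORTING;           /* state change first ...        */
        c->flags |= F_ABORT_RPL_PENDING;   /* ... then expect the abort RPL */
        /* the driver also purges the unsent write queue at this point */
        req->snd_nxt      = htonl(c->snd_nxt);
        req->no_data_sent = !(c->flags & F_TX_DATA_SENT);
        return 0;
    }
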
314 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) in send_rx_credits() argument
322 csk, csk->state, csk->flags, csk->tid, credits, dack); in send_rx_credits()
326 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); in send_rx_credits()
331 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid)); in send_rx_credits()
335 cxgb3_ofld_send(csk->cdev->lldev, skb); in send_rx_credits()
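
send_rx_credits() returns receive-window credits to the hardware via CPL_RX_DATA_ACK and reports how many it granted (falling back to 0 and a log message when the skb allocation fails). A self-contained model of the underlying accounting; the threshold and names are illustrative:

    #include <stdint.h>

    struct rx_credit_conn {
        uint32_t copied_seq;   /* consumed by the application */
        uint32_t rcv_wup;      /* last window update sent to the peer */
    };

    /* Returns the credits granted, or 0 if below threshold / send failed. */
    static uint32_t maybe_return_credits(struct rx_credit_conn *c,
                                         uint32_t thresh, int send_ok)
    {
        uint32_t credits = c->copied_seq - c->rcv_wup; /* wraps correctly */

        if (credits < thresh || !send_ok)   /* send_ok models skb alloc */
            return 0;
        c->rcv_wup += credits;              /* peer window reopened */
        return credits;
    }
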
367 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, in make_tx_data_wr() argument
371 struct l2t_entry *l2t = csk->l2t; in make_tx_data_wr()
377 req->wr_lo = htonl(V_WR_TID(csk->tid)); in make_tx_data_wr()
382 V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1))); in make_tx_data_wr()
383 req->sndseq = htonl(csk->snd_nxt); in make_tx_data_wr()
386 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { in make_tx_data_wr()
388 V_TX_CPU_IDX(csk->rss_qid)); in make_tx_data_wr()
391 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in make_tx_data_wr()
411 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) in push_tx_frames() argument
416 if (unlikely(csk->state < CTP_ESTABLISHED || in push_tx_frames()
417 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { in push_tx_frames()
420 csk, csk->state, csk->flags, csk->tid); in push_tx_frames()
424 while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { in push_tx_frames()
434 if (csk->wr_cred < wrs_needed) { in push_tx_frames()
437 csk, skb->len, skb->data_len, frags, in push_tx_frames()
438 wrs_needed, csk->wr_cred); in push_tx_frames()
442 __skb_unlink(skb, &csk->write_queue); in push_tx_frames()
445 csk->wr_cred -= wrs_needed; in push_tx_frames()
446 csk->wr_una_cred += wrs_needed; in push_tx_frames()
447 cxgbi_sock_enqueue_wr(csk, skb); in push_tx_frames()
452 csk, skb->len, skb->data_len, frags, skb->csum, in push_tx_frames()
453 csk->wr_cred, csk->wr_una_cred); in push_tx_frames()
457 csk->wr_una_cred == wrs_needed) || in push_tx_frames()
458 csk->wr_una_cred >= csk->wr_max_cred / 2) { in push_tx_frames()
460 csk->wr_una_cred = 0; in push_tx_frames()
463 make_tx_data_wr(csk, skb, len, req_completion); in push_tx_frames()
464 csk->snd_nxt += len; in push_tx_frames()
470 csk, csk->tid, skb); in push_tx_frames()
472 l2t_send(csk->cdev->lldev, skb, csk->l2t); in push_tx_frames()
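
push_tx_frames() is the credit-gated transmit loop: every buffer on the write queue costs wrs_needed credits, the loop stops as soon as the head needs more than remain, and a completion is requested once roughly half of wr_max_cred is outstanding so the WR_ACK path (see do_wr_ack() below) can replenish credits. A standalone model; the structures are illustrative:

    #include <stddef.h>

    struct txbuf {
        struct txbuf *next;
        unsigned int  wrs_needed;      /* credits this buffer consumes */
        int           request_completion;
    };

    struct tx_conn {
        struct txbuf *queue;           /* head of the write queue */
        unsigned int  wr_cred;         /* credits currently available */
        unsigned int  wr_una_cred;     /* credits not yet acknowledged */
        unsigned int  wr_max_cred;
    };

    static int push_tx(struct tx_conn *c)
    {
        struct txbuf *b;
        int sent = 0;

        while (c->wr_cred && (b = c->queue) != NULL) {
            if (c->wr_cred < b->wrs_needed)
                break;                   /* head needs more than we have */
            c->queue        = b->next;   /* unlink from the write queue */
            c->wr_cred     -= b->wrs_needed;
            c->wr_una_cred += b->wrs_needed;
            /* request a completion before credits run dry */
            b->request_completion =
                (c->wr_una_cred >= c->wr_max_cred / 2);
            if (b->request_completion)
                c->wr_una_cred = 0;
            /* hand b to the hardware send queue here */
            sent++;
        }
        return sent;
    }
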
483 static inline void free_atid(struct cxgbi_sock *csk) in free_atid() argument
485 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { in free_atid()
486 cxgb3_free_atid(csk->cdev->lldev, csk->atid); in free_atid()
487 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); in free_atid()
488 cxgbi_sock_put(csk); in free_atid()
494 struct cxgbi_sock *csk = ctx; in do_act_establish() local
502 atid, atid, csk, csk->state, csk->flags, rcv_isn); in do_act_establish()
504 cxgbi_sock_get(csk); in do_act_establish()
505 cxgbi_sock_set_flag(csk, CTPF_HAS_TID); in do_act_establish()
506 csk->tid = tid; in do_act_establish()
507 cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid); in do_act_establish()
509 free_atid(csk); in do_act_establish()
511 csk->rss_qid = G_QNUM(ntohs(skb->csum)); in do_act_establish()
513 spin_lock_bh(&csk->lock); in do_act_establish()
514 if (csk->retry_timer.function) { in do_act_establish()
515 del_timer(&csk->retry_timer); in do_act_establish()
516 csk->retry_timer.function = NULL; in do_act_establish()
519 if (unlikely(csk->state != CTP_ACTIVE_OPEN)) in do_act_establish()
521 csk, csk->state, csk->flags, csk->tid); in do_act_establish()
523 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; in do_act_establish()
525 csk->rcv_wup -= cxgb3i_rcv_win - (M_RCV_BUFSIZ << 10); in do_act_establish()
527 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); in do_act_establish()
529 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) in do_act_establish()
531 send_abort_req(csk); in do_act_establish()
533 if (skb_queue_len(&csk->write_queue)) in do_act_establish()
534 push_tx_frames(csk, 1); in do_act_establish()
535 cxgbi_conn_tx_open(csk); in do_act_establish()
538 spin_unlock_bh(&csk->lock); in do_act_establish()
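
do_act_establish() completes the handshake: the socket trades its atid for the hardware tid, cancels the pending open-retry timer, and seeds all three receive marks from the peer's initial sequence number; when the configured receive window exceeds the hardware buffer (M_RCV_BUFSIZ << 10 bytes), rcv_wup is pulled back so the first credit return opens the window toward the software limit. A minimal sketch of that sequence initialization:

    #include <stdint.h>

    struct est_conn {
        uint32_t rcv_nxt;      /* next byte expected from the peer */
        uint32_t rcv_wup;      /* last window-update point */
        uint32_t copied_seq;   /* consumed by the application */
    };

    static void on_established(struct est_conn *c, uint32_t rcv_isn,
                               uint32_t sw_rcv_win, uint32_t hw_bufsize)
    {
        c->copied_seq = c->rcv_wup = c->rcv_nxt = rcv_isn;
        if (sw_rcv_win > hw_bufsize)           /* hardware advertised less */
            c->rcv_wup -= sw_rcv_win - hw_bufsize;  /* grant credits early */
    }
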
568 struct cxgbi_sock *csk = (struct cxgbi_sock *)data; in act_open_retry_timer() local
572 csk, csk->state, csk->flags, csk->tid); in act_open_retry_timer()
574 cxgbi_sock_get(csk); in act_open_retry_timer()
575 spin_lock_bh(&csk->lock); in act_open_retry_timer()
578 cxgbi_sock_fail_act_open(csk, -ENOMEM); in act_open_retry_timer()
580 skb->sk = (struct sock *)csk; in act_open_retry_timer()
582 send_act_open_req(csk, skb, csk->l2t); in act_open_retry_timer()
584 spin_unlock_bh(&csk->lock); in act_open_retry_timer()
585 cxgbi_sock_put(csk); in act_open_retry_timer()
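
act_open_retry_timer() shows the timer-callback discipline used throughout the file: pin the socket with cxgbi_sock_get(), take csk->lock, either fail the active open (-ENOMEM when no skb could be allocated) or resend the open request, then unlock and cxgbi_sock_put(). A standalone pthread model of that pin/lock/act/unpin shape; all names are illustrative:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct retry_conn {
        atomic_int      refcnt;
        pthread_mutex_t lock;
        int             open_failed;  /* models cxgbi_sock_fail_act_open() */
    };

    static void conn_get(struct retry_conn *c)
    {
        atomic_fetch_add(&c->refcnt, 1);
    }

    static void conn_put(struct retry_conn *c)
    {
        if (atomic_fetch_sub(&c->refcnt, 1) == 1) {
            pthread_mutex_destroy(&c->lock);
            free(c);                  /* last reference: destroy */
        }
    }

    /* Timer callback: retry the open, or fail it if allocation fails. */
    static void open_retry_cb(struct retry_conn *c)
    {
        void *req;

        conn_get(c);                  /* keep c alive across the callback */
        pthread_mutex_lock(&c->lock);
        req = malloc(64);             /* stand-in for the request skb */
        if (!req)
            c->open_failed = 1;       /* the -ENOMEM path */
        else
            free(req);                /* the driver hands this to the wire */
        pthread_mutex_unlock(&c->lock);
        conn_put(c);                  /* may free c if teardown raced */
    }
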
590 struct cxgbi_sock *csk = ctx; in do_act_open_rpl() local
594 csk, csk->state, csk->flags, csk->atid, rpl->status, in do_act_open_rpl()
595 &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), in do_act_open_rpl()
596 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port)); in do_act_open_rpl()
603 cxgbi_sock_get(csk); in do_act_open_rpl()
604 spin_lock_bh(&csk->lock); in do_act_open_rpl()
606 csk->retry_timer.function != act_open_retry_timer) { in do_act_open_rpl()
607 csk->retry_timer.function = act_open_retry_timer; in do_act_open_rpl()
608 mod_timer(&csk->retry_timer, jiffies + HZ / 2); in do_act_open_rpl()
610 cxgbi_sock_fail_act_open(csk, in do_act_open_rpl()
613 spin_unlock_bh(&csk->lock); in do_act_open_rpl()
614 cxgbi_sock_put(csk); in do_act_open_rpl()
625 struct cxgbi_sock *csk = ctx; in do_peer_close() local
629 csk, csk->state, csk->flags, csk->tid); in do_peer_close()
631 cxgbi_sock_rcv_peer_close(csk); in do_peer_close()
643 struct cxgbi_sock *csk = ctx; in do_close_con_rpl() local
648 csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt)); in do_close_con_rpl()
650 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); in do_close_con_rpl()
661 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, in abort_status_to_errno() argument
667 return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET; in abort_status_to_errno()
681 struct cxgbi_sock *csk = ctx; in do_abort_req() local
686 csk, csk->state, csk->flags, csk->tid); in do_abort_req()
693 cxgbi_sock_get(csk); in do_abort_req()
694 spin_lock_bh(&csk->lock); in do_abort_req()
696 if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) { in do_abort_req()
697 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req()
698 cxgbi_sock_set_state(csk, CTP_ABORTING); in do_abort_req()
702 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req()
703 send_abort_rpl(csk, rst_status); in do_abort_req()
705 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { in do_abort_req()
706 csk->err = abort_status_to_errno(csk, req->status, &rst_status); in do_abort_req()
707 cxgbi_sock_closed(csk); in do_abort_req()
711 spin_unlock_bh(&csk->lock); in do_abort_req()
712 cxgbi_sock_put(csk); in do_abort_req()
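
do_abort_req() tolerates the peer-abort CPL arriving twice: the first delivery only sets CTPF_ABORT_REQ_RCVD and moves the socket to CTP_ABORTING, while the second clears the flag, sends the abort reply, and, unless a locally initiated abort is still awaiting its own reply, records the error and closes. A standalone model of that two-pass handling:

    #define F_REQ_RCVD    (1u << 0)
    #define F_RPL_PENDING (1u << 1)    /* local abort awaiting its reply */

    enum pa_state { PA_ESTABLISHED, PA_ABORTING, PA_CLOSED };

    struct pa_conn {
        enum pa_state state;
        unsigned int  flags;
        int           err;
    };

    static void on_peer_abort(struct pa_conn *c, int err)
    {
        if (!(c->flags & F_REQ_RCVD)) {     /* first delivery */
            c->flags |= F_REQ_RCVD;
            c->state  = PA_ABORTING;
            return;
        }
        c->flags &= ~F_REQ_RCVD;            /* second delivery */
        /* send_abort_rpl() equivalent goes out here */
        if (!(c->flags & F_RPL_PENDING)) {  /* no local abort racing */
            c->err   = err;                 /* e.g. -ECONNRESET */
            c->state = PA_CLOSED;
        }
    }
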
728 struct cxgbi_sock *csk = ctx; in do_abort_rpl() local
732 rpl->status, csk, csk ? csk->state : 0, in do_abort_rpl()
733 csk ? csk->flags : 0UL); in do_abort_rpl()
749 if (csk) in do_abort_rpl()
750 cxgbi_sock_rcv_abort_rpl(csk); in do_abort_rpl()
763 struct cxgbi_sock *csk = ctx; in do_iscsi_hdr() local
773 csk, csk->state, csk->flags, csk->tid, skb, skb->len); in do_iscsi_hdr()
775 spin_lock_bh(&csk->lock); in do_iscsi_hdr()
777 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_iscsi_hdr()
780 csk, csk->state, csk->flags, csk->tid); in do_iscsi_hdr()
781 if (csk->state != CTP_ABORTING) in do_iscsi_hdr()
797 csk->cdev->ports[csk->port_id]->name, csk->tid, in do_iscsi_hdr()
807 csk->cdev->ports[csk->port_id]->name, csk->tid, in do_iscsi_hdr()
819 csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status); in do_iscsi_hdr()
832 csk->cdev->ports[csk->port_id]->name, in do_iscsi_hdr()
833 csk->tid, sizeof(data_cpl), skb->len, err); in do_iscsi_hdr()
844 csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb); in do_iscsi_hdr()
846 __skb_queue_tail(&csk->receive_queue, skb); in do_iscsi_hdr()
847 cxgbi_conn_pdu_ready(csk); in do_iscsi_hdr()
849 spin_unlock_bh(&csk->lock); in do_iscsi_hdr()
853 send_abort_req(csk); in do_iscsi_hdr()
855 spin_unlock_bh(&csk->lock); in do_iscsi_hdr()
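
do_iscsi_hdr() accepts a PDU only while the connection is still open; past CTP_PASSIVE_CLOSE the skb is dropped and, unless an abort is already in progress, send_abort_req() is invoked. On success rcv_nxt advances by the full PDU length taken from the DDP completion and the skb is queued for the iSCSI layer. A self-contained model with illustrative types:

    #include <stdint.h>
    #include <stddef.h>

    enum rx_state { RX_ESTABLISHED, RX_PASSIVE_CLOSE, RX_ABORTING };

    struct pdu {
        struct pdu *next;
        uint32_t    seq;        /* TCP sequence of the PDU start */
        uint32_t    pdulen;     /* header + data + digests */
    };

    struct pdu_conn {
        enum rx_state state;
        uint32_t      rcv_nxt;
        struct pdu   *rxq_head, *rxq_tail;
        int           need_abort;
    };

    static void on_pdu(struct pdu_conn *c, struct pdu *p)
    {
        if (c->state >= RX_PASSIVE_CLOSE) {  /* too late to deliver */
            if (c->state != RX_ABORTING)
                c->need_abort = 1;           /* send_abort_req() */
            return;                          /* caller frees p */
        }
        c->rcv_nxt = p->seq + p->pdulen;     /* advance past the PDU */
        p->next = NULL;                      /* queue for the ULP */
        if (c->rxq_tail)
            c->rxq_tail->next = p;
        else
            c->rxq_head = p;
        c->rxq_tail = p;
        /* cxgbi_conn_pdu_ready() equivalent: wake the iSCSI layer */
    }
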
867 struct cxgbi_sock *csk = ctx; in do_wr_ack() local
872 csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits)); in do_wr_ack()
874 cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1); in do_wr_ack()
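
do_wr_ack() is the other half of the credit scheme modeled in the push_tx() sketch above: the acknowledged credits flow back into wr_cred (the driver also completes queued WRs up to snd_una, elided here), and the write queue is pushed again. Reusing the tx_conn type from that sketch:

    static void on_wr_ack(struct tx_conn *c, unsigned int credits)
    {
        c->wr_cred += credits;
        if (c->wr_cred > c->wr_max_cred)
            c->wr_cred = c->wr_max_cred;   /* defensive clamp */
        /* complete and free pending buffers covered by snd_una here */
        push_tx(c);                        /* credits returned: drain more */
    }
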
883 static int alloc_cpls(struct cxgbi_sock *csk) in alloc_cpls() argument
885 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0, in alloc_cpls()
887 if (!csk->cpl_close) in alloc_cpls()
889 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0, in alloc_cpls()
891 if (!csk->cpl_abort_req) in alloc_cpls()
894 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0, in alloc_cpls()
896 if (!csk->cpl_abort_rpl) in alloc_cpls()
902 cxgbi_sock_free_cpl_skbs(csk); in alloc_cpls()
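
alloc_cpls() preallocates all three teardown messages (close, abort request, abort reply) at connection setup, so none of the shutdown paths above can fail with -ENOMEM; on partial failure everything already allocated is released. A standalone rendering of that all-or-nothing idiom:

    #include <stdlib.h>

    struct conn_cpls {
        void *cpl_close, *cpl_abort_req, *cpl_abort_rpl;
    };

    static void free_cpls(struct conn_cpls *c)
    {
        free(c->cpl_close);     c->cpl_close     = NULL;
        free(c->cpl_abort_req); c->cpl_abort_req = NULL;
        free(c->cpl_abort_rpl); c->cpl_abort_rpl = NULL;
    }

    static int alloc_cpls_model(struct conn_cpls *c, size_t close_sz,
                                size_t abort_sz, size_t rpl_sz)
    {
        c->cpl_close = c->cpl_abort_req = c->cpl_abort_rpl = NULL;
        c->cpl_close = malloc(close_sz);
        if (!c->cpl_close)
            goto fail;
        c->cpl_abort_req = malloc(abort_sz);
        if (!c->cpl_abort_req)
            goto fail;
        c->cpl_abort_rpl = malloc(rpl_sz);
        if (!c->cpl_abort_rpl)
            goto fail;
        return 0;
    fail:
        free_cpls(c);           /* release whatever part succeeded */
        return -1;              /* -ENOMEM in the driver */
    }
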
911 static void l2t_put(struct cxgbi_sock *csk) in l2t_put() argument
913 struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; in l2t_put()
915 if (csk->l2t) { in l2t_put()
916 l2t_release(L2DATA(t3dev), csk->l2t); in l2t_put()
917 csk->l2t = NULL; in l2t_put()
918 cxgbi_sock_put(csk); in l2t_put()
922 static void release_offload_resources(struct cxgbi_sock *csk) in release_offload_resources() argument
924 struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; in release_offload_resources()
928 csk, csk->state, csk->flags, csk->tid); in release_offload_resources()
930 csk->rss_qid = 0; in release_offload_resources()
931 cxgbi_sock_free_cpl_skbs(csk); in release_offload_resources()
933 if (csk->wr_cred != csk->wr_max_cred) { in release_offload_resources()
934 cxgbi_sock_purge_wr_queue(csk); in release_offload_resources()
935 cxgbi_sock_reset_wr_list(csk); in release_offload_resources()
937 l2t_put(csk); in release_offload_resources()
938 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) in release_offload_resources()
939 free_atid(csk); in release_offload_resources()
940 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { in release_offload_resources()
941 cxgb3_remove_tid(t3dev, (void *)csk, csk->tid); in release_offload_resources()
942 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); in release_offload_resources()
943 cxgbi_sock_put(csk); in release_offload_resources()
945 csk->dst = NULL; in release_offload_resources()
946 csk->cdev = NULL; in release_offload_resources()
971 static int init_act_open(struct cxgbi_sock *csk) in init_act_open() argument
973 struct dst_entry *dst = csk->dst; in init_act_open()
974 struct cxgbi_device *cdev = csk->cdev; in init_act_open()
976 struct net_device *ndev = cdev->ports[csk->port_id]; in init_act_open()
977 struct cxgbi_hba *chba = cdev->hbas[csk->port_id]; in init_act_open()
981 "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags); in init_act_open()
985 csk->saddr.sin_addr.s_addr = chba->ipv4addr; in init_act_open()
987 csk->rss_qid = 0; in init_act_open()
988 csk->l2t = t3_l2t_get(t3dev, dst->neighbour, ndev); in init_act_open()
989 if (!csk->l2t) { in init_act_open()
993 cxgbi_sock_get(csk); in init_act_open()
995 csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk); in init_act_open()
996 if (csk->atid < 0) { in init_act_open()
1000 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); in init_act_open()
1001 cxgbi_sock_get(csk); in init_act_open()
1006 skb->sk = (struct sock *)csk; in init_act_open()
1009 csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1; in init_act_open()
1010 csk->wr_una_cred = 0; in init_act_open()
1011 csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst)); in init_act_open()
1012 cxgbi_sock_reset_wr_list(csk); in init_act_open()
1013 csk->err = 0; in init_act_open()
1017 csk, csk->state, csk->flags, in init_act_open()
1018 &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), in init_act_open()
1019 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port)); in init_act_open()
1021 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); in init_act_open()
1022 send_act_open_req(csk, skb, csk->l2t); in init_act_open()
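
init_act_open() establishes a strict setup order: resolve the L2T entry, allocate an atid (each pinning the socket with its own cxgbi_sock_get(), later released by l2t_put()/free_atid()), seed the work-request credits from the adapter limit (max_wrs - 1, apparently reserving one), derive the MSS index from the path MTU, and only then move to CTP_ACTIVE_OPEN and emit the open request. A standalone model of that ordering with illustrative helpers; error unwinding is elided:

    #include <stdint.h>

    struct aconn {
        int          refs;         /* models cxgbi_sock_get/put */
        void        *l2t;          /* L2/neighbour entry */
        int          atid;         /* active-open tid, < 0 if none */
        unsigned int wr_cred, wr_max_cred, wr_una_cred;
        unsigned int mss_idx;
        int          active_open;  /* 1 once CTP_ACTIVE_OPEN */
    };

    static int open_connection(struct aconn *c, void *l2_entry, int atid,
                               unsigned int max_wrs, unsigned int mss_idx)
    {
        c->l2t = l2_entry;
        if (!c->l2t)
            return -1;                 /* no route to host */
        c->refs++;                     /* reference held by the l2t use */

        c->atid = atid;
        if (c->atid < 0)
            return -1;                 /* out of atids; unwind elided */
        c->refs++;                     /* reference held by the atid */

        c->wr_max_cred = c->wr_cred = max_wrs - 1;  /* one WR reserved */
        c->wr_una_cred = 0;
        c->mss_idx     = mss_idx;      /* from dst_mtu() in the driver */

        c->active_open = 1;            /* CTP_ACTIVE_OPEN */
        /* send_act_open_req() equivalent fires here */
        return 0;
    }
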
1098 static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, in ddp_set_map() argument
1102 struct cxgbi_device *cdev = csk->cdev; in ddp_set_map()
1109 csk, idx, npods, gl); in ddp_set_map()
1155 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, in ddp_setup_conn_pgidx() argument
1164 "csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx); in ddp_setup_conn_pgidx()
1179 cxgb3_ofld_send(csk->cdev->lldev, skb); in ddp_setup_conn_pgidx()
1192 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, in ddp_setup_conn_digest() argument
1201 "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc); in ddp_setup_conn_digest()
1216 cxgb3_ofld_send(csk->cdev->lldev, skb); in ddp_setup_conn_digest()
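
ddp_setup_conn_pgidx() and ddp_setup_conn_digest() both program per-connection ULP settings (DDP page-size index, header/data CRC) by sending a set-TCB-field style message over cxgb3_ofld_send(). A minimal sketch of packing those settings into one field; the bit positions are assumptions for illustration, not the cxgb3 TCB layout:

    #include <stdint.h>

    #define ULP_CRC_HDR     (1u << 0)   /* assumed bit positions */
    #define ULP_CRC_DATA    (1u << 1)
    #define ULP_PGIDX_SHIFT 4

    static uint32_t ulp_tcb_word(unsigned int pg_idx, int hcrc, int dcrc)
    {
        uint32_t v = (uint32_t)pg_idx << ULP_PGIDX_SHIFT;

        if (hcrc)
            v |= ULP_CRC_HDR;     /* validate/insert header digest */
        if (dcrc)
            v |= ULP_CRC_DATA;    /* validate/insert data digest */
        return v;
    }
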