Lines matching refs: csk — each entry shows the source line number, the code fragment, and the enclosing function; csk is the struct cxgbi_sock * offload-socket handle threaded through these routines.

177 				const struct cxgbi_sock *csk)  in set_queue()  argument
197 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, in send_act_open_req() argument
201 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); in send_act_open_req()
204 unsigned int qid_atid = ((unsigned int)csk->atid) | in send_act_open_req()
205 (((unsigned int)csk->rss_qid) << 14); in send_act_open_req()
209 MSS_IDX(csk->mss_idx) | in send_act_open_req()
210 L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) | in send_act_open_req()
211 TX_CHAN(csk->tx_chan) | in send_act_open_req()
212 SMAC_SEL(csk->smac_idx) | in send_act_open_req()
218 RSS_QUEUE(csk->rss_qid); in send_act_open_req()
220 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); in send_act_open_req()
226 req->local_port = csk->saddr.sin_port; in send_act_open_req()
227 req->peer_port = csk->daddr.sin_port; in send_act_open_req()
228 req->local_ip = csk->saddr.sin_addr.s_addr; in send_act_open_req()
229 req->peer_ip = csk->daddr.sin_addr.s_addr; in send_act_open_req()
236 csk, &req->local_ip, ntohs(req->local_port), in send_act_open_req()
238 csk->atid, csk->rss_qid); in send_act_open_req()
240 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in send_act_open_req()
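
The fragments above (lines 197-240) are send_act_open_req(): it packs the soft atid and the RSS queue into a single 32-bit tid field, copies the connection 4-tuple straight out of csk (already in network byte order), and hands the skb to the L2T layer so it goes out with the right L2 header. A minimal sketch of the packing, reconstructed from the fragments; the INIT_TP_WR()/OPCODE_TID() glue is the usual cxgb4 CPL idiom and is an assumption here:

    /* atid in the low 14 bits, ingress (RSS) queue above it */
    unsigned int qid_atid = ((unsigned int)csk->atid) |
                            (((unsigned int)csk->rss_qid) << 14);

    req = (struct cpl_act_open_req *)skb->head;
    INIT_TP_WR(req, 0);                 /* no hardware tid yet for an active open */
    OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid));
    req->local_port = csk->saddr.sin_port;      /* already big-endian */
    req->peer_port  = csk->daddr.sin_port;
    req->local_ip   = csk->saddr.sin_addr.s_addr;
    req->peer_ip    = csk->daddr.sin_addr.s_addr;
    cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
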
243 static void send_close_req(struct cxgbi_sock *csk) in send_close_req() argument
245 struct sk_buff *skb = csk->cpl_close; in send_close_req()
247 unsigned int tid = csk->tid; in send_close_req()
251 csk, csk->state, csk->flags, csk->tid); in send_close_req()
252 csk->cpl_close = NULL; in send_close_req()
253 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_close_req()
258 cxgbi_sock_skb_entail(csk, skb); in send_close_req()
259 if (csk->state >= CTP_ESTABLISHED) in send_close_req()
260 push_tx_frames(csk, 1); in send_close_req()
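
send_close_req() (lines 243-260) consumes the csk->cpl_close skb pre-allocated by alloc_cpls() (line 1085 below), so the close can never fail on allocation. The request is entailed behind any unsent payload rather than sent directly, which keeps the close ordered after the data. A sketch of the tail of the function, assuming the elided lines build the cpl_close_con_req in the usual way:

    csk->cpl_close = NULL;                      /* one-shot skb, now owned by the queue */
    set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
    cxgbi_sock_skb_entail(csk, skb);            /* queue behind pending tx data */
    if (csk->state >= CTP_ESTABLISHED)
        push_tx_frames(csk, 1);                 /* flush now, ask for a completion */
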
265 struct cxgbi_sock *csk = (struct cxgbi_sock *)handle; in abort_arp_failure() local
270 csk, csk->state, csk->flags, csk->tid); in abort_arp_failure()
273 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in abort_arp_failure()
276 static void send_abort_req(struct cxgbi_sock *csk) in send_abort_req() argument
279 struct sk_buff *skb = csk->cpl_abort_req; in send_abort_req()
281 if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev) in send_abort_req()
283 cxgbi_sock_set_state(csk, CTP_ABORTING); in send_abort_req()
284 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); in send_abort_req()
285 cxgbi_sock_purge_write_queue(csk); in send_abort_req()
287 csk->cpl_abort_req = NULL; in send_abort_req()
289 set_queue(skb, CPL_PRIORITY_DATA, csk); in send_abort_req()
291 t4_set_arp_err_handler(skb, csk, abort_arp_failure); in send_abort_req()
292 INIT_TP_WR(req, csk->tid); in send_abort_req()
293 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); in send_abort_req()
294 req->rsvd0 = htonl(csk->snd_nxt); in send_abort_req()
295 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); in send_abort_req()
299 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, in send_abort_req()
302 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in send_abort_req()
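
send_abort_req() (lines 276-302) is the teardown path that cannot wait: it moves the socket to CTP_ABORTING, marks CTPF_ABORT_RPL_PENDING so the eventual ABORT_RPL can be matched up, and purges the write queue, since unsent data is meaningless once the connection is reset. The ARP error handler matters here: if L2 resolution fails, abort_arp_failure() (lines 265-273) still pushes the ABORT_REQ to the hardware via cxgb4_ofld_send(). Sketch of the request fields, as implied by the fragments:

    req->rsvd0 = htonl(csk->snd_nxt);           /* where tx stopped */
    req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);  /* was any data ever sent? */
    cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
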
305 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) in send_abort_rpl() argument
307 struct sk_buff *skb = csk->cpl_abort_rpl; in send_abort_rpl()
312 csk, csk->state, csk->flags, csk->tid, rst_status); in send_abort_rpl()
314 csk->cpl_abort_rpl = NULL; in send_abort_rpl()
315 set_queue(skb, CPL_PRIORITY_DATA, csk); in send_abort_rpl()
316 INIT_TP_WR(rpl, csk->tid); in send_abort_rpl()
317 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); in send_abort_rpl()
319 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_abort_rpl()
327 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) in send_rx_credits() argument
334 csk, csk->state, csk->flags, csk->tid, credits); in send_rx_credits()
338 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); in send_rx_credits()
343 set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id); in send_rx_credits()
344 INIT_TP_WR(req, csk->tid); in send_rx_credits()
346 csk->tid)); in send_rx_credits()
348 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_rx_credits()
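
send_rx_credits() (lines 327-348) is the only sender here that allocates its skb on demand (hence the OOM log); it returns `credits` bytes of receive window to the chip with a CPL_RX_DATA_ACK at ACK priority. A sketch, where the credit_dack field name and the RX_CREDITS()/RX_FORCE_ACK() macros are assumptions from the cxgb4 message definitions:

    skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
    if (!skb) {
        pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
        return 0;                               /* window stays closed; caller retries */
    }
    req = (struct cpl_rx_data_ack *)skb->head;
    set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
    INIT_TP_WR(req, csk->tid);
    OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
    req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
    cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
    return credits;
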
385 static inline void send_tx_flowc_wr(struct cxgbi_sock *csk) in send_tx_flowc_wr() argument
398 FW_WR_FLOWID(csk->tid)); in send_tx_flowc_wr()
400 flowc->mnemval[0].val = htonl(csk->cdev->pfvf); in send_tx_flowc_wr()
402 flowc->mnemval[1].val = htonl(csk->tx_chan); in send_tx_flowc_wr()
404 flowc->mnemval[2].val = htonl(csk->tx_chan); in send_tx_flowc_wr()
406 flowc->mnemval[3].val = htonl(csk->rss_qid); in send_tx_flowc_wr()
408 flowc->mnemval[4].val = htonl(csk->snd_nxt); in send_tx_flowc_wr()
410 flowc->mnemval[5].val = htonl(csk->rcv_nxt); in send_tx_flowc_wr()
414 flowc->mnemval[7].val = htonl(csk->advmss); in send_tx_flowc_wr()
422 set_queue(skb, CPL_PRIORITY_DATA, csk); in send_tx_flowc_wr()
426 csk, csk->tid, 0, csk->tx_chan, csk->rss_qid, in send_tx_flowc_wr()
427 csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win, in send_tx_flowc_wr()
428 csk->advmss); in send_tx_flowc_wr()
430 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_tx_flowc_wr()
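
send_tx_flowc_wr() (lines 385-430) tells the firmware the flow parameters before the first data WR. The mnemval[] indices above map directly onto csk fields: [0] PF/VF, [1] channel and [2] port (both csk->tx_chan), [3] ingress queue (csk->rss_qid), [4] SNDNXT, [5] RCVNXT, [7] MSS (csk->advmss); the debug print at line 427 suggests index [6] carries cxgb4i_snd_win as the send buffer size. A sketch of one mnemonic/value pair, with the FW_FLOWC_MNEM_* names taken from the firmware API header as an assumption:

    flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
    flowc->mnemval[4].val = htonl(csk->snd_nxt);    /* tx sequence the fw starts from */
    flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
    flowc->mnemval[5].val = htonl(csk->rcv_nxt);
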
433 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, in make_tx_data_wr() argument
446 req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) | in make_tx_data_wr()
454 cpu_to_be32(FW_WR_FLOWID(csk->tid) | in make_tx_data_wr()
461 FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1); in make_tx_data_wr()
463 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) in make_tx_data_wr()
464 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in make_tx_data_wr()
472 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) in push_tx_frames() argument
477 if (unlikely(csk->state < CTP_ESTABLISHED || in push_tx_frames()
478 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { in push_tx_frames()
482 csk, csk->state, csk->flags, csk->tid); in push_tx_frames()
486 while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { in push_tx_frames()
500 if (csk->wr_cred < credits_needed) { in push_tx_frames()
503 csk, skb->len, skb->data_len, in push_tx_frames()
504 credits_needed, csk->wr_cred); in push_tx_frames()
507 __skb_unlink(skb, &csk->write_queue); in push_tx_frames()
508 set_queue(skb, CPL_PRIORITY_DATA, csk); in push_tx_frames()
510 csk->wr_cred -= credits_needed; in push_tx_frames()
511 csk->wr_una_cred += credits_needed; in push_tx_frames()
512 cxgbi_sock_enqueue_wr(csk, skb); in push_tx_frames()
516 csk, skb->len, skb->data_len, credits_needed, in push_tx_frames()
517 csk->wr_cred, csk->wr_una_cred); in push_tx_frames()
520 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { in push_tx_frames()
521 send_tx_flowc_wr(csk); in push_tx_frames()
523 csk->wr_cred -= 5; in push_tx_frames()
524 csk->wr_una_cred += 5; in push_tx_frames()
527 make_tx_data_wr(csk, skb, dlen, len, credits_needed, in push_tx_frames()
529 csk->snd_nxt += len; in push_tx_frames()
533 t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard); in push_tx_frames()
537 csk, csk->state, csk->flags, csk->tid, skb, len); in push_tx_frames()
539 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in push_tx_frames()
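
push_tx_frames() (lines 472-539) is the credit-gated transmit loop. Each skb on csk->write_queue costs some number of work-request credits; if csk->wr_cred cannot cover it, the loop stops and waits for do_fw4_ack() (line 1041 below) to return credits. The first WR on a connection is preceded by the FLOWC, which itself costs the 5 credits debited at lines 523-524, and make_tx_data_wr() sets the SHOVE bit (line 461) only when the queue is about to drain. Skeleton of the loop, with the credits_needed computation elided as in the listing:

    while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
        if (csk->wr_cred < credits_needed)
            break;                              /* starved; FW4_ACK will refill */
        __skb_unlink(skb, &csk->write_queue);
        set_queue(skb, CPL_PRIORITY_DATA, csk);
        csk->wr_cred -= credits_needed;
        csk->wr_una_cred += credits_needed;     /* outstanding until acked */
        cxgbi_sock_enqueue_wr(csk, skb);

        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
            send_tx_flowc_wr(csk);              /* must precede the first data WR */
            csk->wr_cred -= 5;
            csk->wr_una_cred += 5;
        }
        make_tx_data_wr(csk, skb, dlen, len, credits_needed, req_completion);
        csk->snd_nxt += len;
        t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
    }
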
544 static inline void free_atid(struct cxgbi_sock *csk) in free_atid() argument
546 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in free_atid()
548 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { in free_atid()
549 cxgb4_free_atid(lldi->tids, csk->atid); in free_atid()
550 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); in free_atid()
551 cxgbi_sock_put(csk); in free_atid()
557 struct cxgbi_sock *csk; in do_act_establish() local
566 csk = lookup_atid(t, atid); in do_act_establish()
567 if (unlikely(!csk)) { in do_act_establish()
572 if (csk->atid != atid) { in do_act_establish()
574 atid, csk, csk->state, csk->flags, csk->tid, csk->atid); in do_act_establish()
580 csk, csk->state, csk->flags, tid, atid, rcv_isn); in do_act_establish()
582 cxgbi_sock_get(csk); in do_act_establish()
583 csk->tid = tid; in do_act_establish()
584 cxgb4_insert_tid(lldi->tids, csk, tid); in do_act_establish()
585 cxgbi_sock_set_flag(csk, CTPF_HAS_TID); in do_act_establish()
587 free_atid(csk); in do_act_establish()
589 spin_lock_bh(&csk->lock); in do_act_establish()
590 if (unlikely(csk->state != CTP_ACTIVE_OPEN)) in do_act_establish()
592 csk, csk->state, csk->flags, csk->tid); in do_act_establish()
594 if (csk->retry_timer.function) { in do_act_establish()
595 del_timer(&csk->retry_timer); in do_act_establish()
596 csk->retry_timer.function = NULL; in do_act_establish()
599 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; in do_act_establish()
605 csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10); in do_act_establish()
607 csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40; in do_act_establish()
609 csk->advmss -= 12; in do_act_establish()
610 if (csk->advmss < 128) in do_act_establish()
611 csk->advmss = 128; in do_act_establish()
615 csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss); in do_act_establish()
617 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); in do_act_establish()
619 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) in do_act_establish()
620 send_abort_req(csk); in do_act_establish()
622 if (skb_queue_len(&csk->write_queue)) in do_act_establish()
623 push_tx_frames(csk, 0); in do_act_establish()
624 cxgbi_conn_tx_open(csk); in do_act_establish()
626 spin_unlock_bh(&csk->lock); in do_act_establish()
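
do_act_establish() (lines 557-626) is where the connection stops being identified by the driver-side atid and starts being identified by the hardware tid: the csk is inserted into the tid table under a fresh reference, then the atid (and its reference) is released. It also seeds the receive sequence state from the peer's ISN and derives the advertised MSS, subtracting the 40 bytes of IP+TCP headers and, when timestamps were negotiated, 12 more, with a floor of 128. A sketch of that bookkeeping; the GET_TCPOPT_TSTAMP() macro is an assumption from the T4 message definitions:

    cxgbi_sock_get(csk);
    csk->tid = tid;
    cxgb4_insert_tid(lldi->tids, csk, tid);     /* tid table now holds a ref */
    cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
    free_atid(csk);                             /* drops the atid's ref */

    csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
    csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
    if (GET_TCPOPT_TSTAMP(tcp_opt))
        csk->advmss -= 12;                      /* timestamp option overhead */
    if (csk->advmss < 128)
        csk->advmss = 128;                      /* sane floor */
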
653 struct cxgbi_sock *csk = (struct cxgbi_sock *)data; in csk_act_open_retry_timer() local
657 csk, csk->state, csk->flags, csk->tid); in csk_act_open_retry_timer()
659 cxgbi_sock_get(csk); in csk_act_open_retry_timer()
660 spin_lock_bh(&csk->lock); in csk_act_open_retry_timer()
663 cxgbi_sock_fail_act_open(csk, -ENOMEM); in csk_act_open_retry_timer()
665 skb->sk = (struct sock *)csk; in csk_act_open_retry_timer()
666 t4_set_arp_err_handler(skb, csk, in csk_act_open_retry_timer()
668 send_act_open_req(csk, skb, csk->l2t); in csk_act_open_retry_timer()
670 spin_unlock_bh(&csk->lock); in csk_act_open_retry_timer()
671 cxgbi_sock_put(csk); in csk_act_open_retry_timer()
676 struct cxgbi_sock *csk; in do_act_open_rpl() local
685 csk = lookup_atid(t, atid); in do_act_open_rpl()
686 if (unlikely(!csk)) { in do_act_open_rpl()
692 &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), in do_act_open_rpl()
693 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port), in do_act_open_rpl()
694 atid, tid, status, csk, csk->state, csk->flags); in do_act_open_rpl()
702 cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl)); in do_act_open_rpl()
704 cxgbi_sock_get(csk); in do_act_open_rpl()
705 spin_lock_bh(&csk->lock); in do_act_open_rpl()
708 csk->retry_timer.function != csk_act_open_retry_timer) { in do_act_open_rpl()
709 csk->retry_timer.function = csk_act_open_retry_timer; in do_act_open_rpl()
710 mod_timer(&csk->retry_timer, jiffies + HZ / 2); in do_act_open_rpl()
712 cxgbi_sock_fail_act_open(csk, in do_act_open_rpl()
715 spin_unlock_bh(&csk->lock); in do_act_open_rpl()
716 cxgbi_sock_put(csk); in do_act_open_rpl()
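
do_act_open_rpl() (lines 676-716) handles a rejected active open. For a retryable status the handler arms csk->retry_timer with csk_act_open_retry_timer() (lines 653-671, which re-sends the open request) half a second out; otherwise it fails the open with an errno derived from the CPL status. A sketch of that branch; the CPL_ERR_CONN_EXIST trigger and the status-to-errno helper name are assumptions:

    if (status == CPL_ERR_CONN_EXIST &&
        csk->retry_timer.function != csk_act_open_retry_timer) {
        csk->retry_timer.function = csk_act_open_retry_timer;
        mod_timer(&csk->retry_timer, jiffies + HZ / 2);
    } else
        cxgbi_sock_fail_act_open(csk,
                                 act_open_rpl_status_to_errno(status));
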
723 struct cxgbi_sock *csk; in do_peer_close() local
729 csk = lookup_tid(t, tid); in do_peer_close()
730 if (unlikely(!csk)) { in do_peer_close()
736 csk, csk->state, csk->flags, csk->tid); in do_peer_close()
737 cxgbi_sock_rcv_peer_close(csk); in do_peer_close()
744 struct cxgbi_sock *csk; in do_close_con_rpl() local
750 csk = lookup_tid(t, tid); in do_close_con_rpl()
751 if (unlikely(!csk)) { in do_close_con_rpl()
757 csk, csk->state, csk->flags, csk->tid); in do_close_con_rpl()
758 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); in do_close_con_rpl()
763 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, in abort_status_to_errno() argument
769 return csk->state > CTP_ESTABLISHED ? in abort_status_to_errno()
783 struct cxgbi_sock *csk; in do_abort_req_rss() local
790 csk = lookup_tid(t, tid); in do_abort_req_rss()
791 if (unlikely(!csk)) { in do_abort_req_rss()
798 csk, csk->state, csk->flags, csk->tid, req->status); in do_abort_req_rss()
804 cxgbi_sock_get(csk); in do_abort_req_rss()
805 spin_lock_bh(&csk->lock); in do_abort_req_rss()
807 if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) { in do_abort_req_rss()
808 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req_rss()
809 cxgbi_sock_set_state(csk, CTP_ABORTING); in do_abort_req_rss()
813 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req_rss()
814 send_abort_rpl(csk, rst_status); in do_abort_req_rss()
816 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { in do_abort_req_rss()
817 csk->err = abort_status_to_errno(csk, req->status, &rst_status); in do_abort_req_rss()
818 cxgbi_sock_closed(csk); in do_abort_req_rss()
821 spin_unlock_bh(&csk->lock); in do_abort_req_rss()
822 cxgbi_sock_put(csk); in do_abort_req_rss()
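
do_abort_req_rss() (lines 783-822) implements two-pass handling of a peer abort: the first ABORT_REQ only latches CTPF_ABORT_REQ_RCVD and parks the socket in CTP_ABORTING; the second pass clears the flag, answers with send_abort_rpl(), and, only if no locally initiated abort is still pending (CTPF_ABORT_RPL_PENDING), translates the hardware status into csk->err and closes the socket. Sketch:

    if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
        cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
        cxgbi_sock_set_state(csk, CTP_ABORTING);
        goto done;                              /* first pass: wait for the follow-up */
    }
    cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
    send_abort_rpl(csk, rst_status);
    if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
        csk->err = abort_status_to_errno(csk, req->status, &rst_status);
        cxgbi_sock_closed(csk);
    }
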
829 struct cxgbi_sock *csk; in do_abort_rpl_rss() local
835 csk = lookup_tid(t, tid); in do_abort_rpl_rss()
836 if (!csk) in do_abort_rpl_rss()
841 rpl->status, csk, csk ? csk->state : 0, in do_abort_rpl_rss()
842 csk ? csk->flags : 0UL); in do_abort_rpl_rss()
847 cxgbi_sock_rcv_abort_rpl(csk); in do_abort_rpl_rss()
854 struct cxgbi_sock *csk; in do_rx_iscsi_hdr() local
861 csk = lookup_tid(t, tid); in do_rx_iscsi_hdr()
862 if (unlikely(!csk)) { in do_rx_iscsi_hdr()
869 csk, csk->state, csk->flags, csk->tid, skb, skb->len, in do_rx_iscsi_hdr()
872 spin_lock_bh(&csk->lock); in do_rx_iscsi_hdr()
874 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_iscsi_hdr()
877 csk, csk->state, csk->flags, csk->tid); in do_rx_iscsi_hdr()
878 if (csk->state != CTP_ABORTING) in do_rx_iscsi_hdr()
891 if (!csk->skb_ulp_lhdr) { in do_rx_iscsi_hdr()
897 csk, csk->state, csk->flags, csk->tid, skb); in do_rx_iscsi_hdr()
898 csk->skb_ulp_lhdr = skb; in do_rx_iscsi_hdr()
901 if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) { in do_rx_iscsi_hdr()
903 csk->tid, cxgbi_skcb_tcp_seq(skb), in do_rx_iscsi_hdr()
904 csk->rcv_nxt); in do_rx_iscsi_hdr()
915 csk->tid, ISCSI_PDU_LEN(pdu_len_ddp) - 40, in do_rx_iscsi_hdr()
922 cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len; in do_rx_iscsi_hdr()
923 csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb); in do_rx_iscsi_hdr()
927 csk, skb, *bhs, hlen, dlen, in do_rx_iscsi_hdr()
932 struct sk_buff *lskb = csk->skb_ulp_lhdr; in do_rx_iscsi_hdr()
937 csk, csk->state, csk->flags, skb, lskb); in do_rx_iscsi_hdr()
940 __skb_queue_tail(&csk->receive_queue, skb); in do_rx_iscsi_hdr()
941 spin_unlock_bh(&csk->lock); in do_rx_iscsi_hdr()
945 send_abort_req(csk); in do_rx_iscsi_hdr()
947 spin_unlock_bh(&csk->lock); in do_rx_iscsi_hdr()
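
do_rx_iscsi_hdr() (lines 854-947) accumulates one iSCSI PDU: the first skb carries the header and is remembered in csk->skb_ulp_lhdr; its TCP sequence must equal csk->rcv_nxt (the hardware delivers in order, so a mismatch is fatal and reaches send_abort_req() at line 945). rcv_nxt then advances by the whole PDU length, including the data-digest bytes tracked in csk->dcrc_len, and every skb of the PDU is appended to csk->receive_queue to wait for the RX_DATA_DDP completion. A simplified sketch of the header branch; the abort label is a placeholder:

    if (!csk->skb_ulp_lhdr) {
        csk->skb_ulp_lhdr = skb;                /* PDU header skb */
        if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)
            goto abort_conn;                    /* unexpected seq (sketch label) */
        cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
        csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);
    }
    __skb_queue_tail(&csk->receive_queue, skb);
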
955 struct cxgbi_sock *csk; in do_rx_data_ddp() local
963 csk = lookup_tid(t, tid); in do_rx_data_ddp()
964 if (unlikely(!csk)) { in do_rx_data_ddp()
971 csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr); in do_rx_data_ddp()
973 spin_lock_bh(&csk->lock); in do_rx_data_ddp()
975 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_data_ddp()
978 csk, csk->state, csk->flags, csk->tid); in do_rx_data_ddp()
979 if (csk->state != CTP_ABORTING) in do_rx_data_ddp()
985 if (!csk->skb_ulp_lhdr) { in do_rx_data_ddp()
986 pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid); in do_rx_data_ddp()
990 lskb = csk->skb_ulp_lhdr; in do_rx_data_ddp()
991 csk->skb_ulp_lhdr = NULL; in do_rx_data_ddp()
997 csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb)); in do_rx_data_ddp()
1001 csk, lskb, status, cxgbi_skcb_flags(lskb)); in do_rx_data_ddp()
1006 csk, lskb, status, cxgbi_skcb_flags(lskb)); in do_rx_data_ddp()
1012 csk, lskb, status); in do_rx_data_ddp()
1019 csk, lskb, status); in do_rx_data_ddp()
1024 csk, lskb, cxgbi_skcb_flags(lskb)); in do_rx_data_ddp()
1027 cxgbi_conn_pdu_ready(csk); in do_rx_data_ddp()
1028 spin_unlock_bh(&csk->lock); in do_rx_data_ddp()
1032 send_abort_req(csk); in do_rx_data_ddp()
1034 spin_unlock_bh(&csk->lock); in do_rx_data_ddp()
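
do_rx_data_ddp() (lines 955-1034) completes the PDU: it detaches csk->skb_ulp_lhdr (so the next header starts a fresh PDU), sanity-checks the hardware-reported length against the accumulated rx_pdulen, folds header-CRC, data-CRC, and pad errors from the status word into the lead skb's flags, and finally calls cxgbi_conn_pdu_ready() to hand the finished PDU(s) up to libcxgbi. Sketch of the hand-off, with the log text paraphrased:

    lskb = csk->skb_ulp_lhdr;
    csk->skb_ulp_lhdr = NULL;                   /* PDU closed */
    if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
        pr_info("tid 0x%x, len mismatch %u != %u.\n", csk->tid,
                ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
    /* status bits -> skb flags for hcrc/dcrc/pad errors (elided in the listing) */
    cxgbi_conn_pdu_ready(csk);
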
1041 struct cxgbi_sock *csk; in do_fw4_ack() local
1047 csk = lookup_tid(t, tid); in do_fw4_ack()
1048 if (unlikely(!csk)) in do_fw4_ack()
1053 csk, csk->state, csk->flags, csk->tid); in do_fw4_ack()
1054 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una), in do_fw4_ack()
1066 struct cxgbi_sock *csk; in do_set_tcb_rpl() local
1068 csk = lookup_tid(t, tid); in do_set_tcb_rpl()
1069 if (!csk) in do_set_tcb_rpl()
1074 csk, csk->state, csk->flags, csk->tid, rpl->status); in do_set_tcb_rpl()
1078 csk, tid, rpl->status); in do_set_tcb_rpl()
1083 static int alloc_cpls(struct cxgbi_sock *csk) in alloc_cpls() argument
1085 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), in alloc_cpls()
1087 if (!csk->cpl_close) in alloc_cpls()
1090 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), in alloc_cpls()
1092 if (!csk->cpl_abort_req) in alloc_cpls()
1095 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), in alloc_cpls()
1097 if (!csk->cpl_abort_rpl) in alloc_cpls()
1102 cxgbi_sock_free_cpl_skbs(csk); in alloc_cpls()
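
alloc_cpls() (lines 1083-1102) pre-allocates the close, abort-request, and abort-reply skbs at connection setup, precisely so that send_close_req(), send_abort_req(), and send_abort_rpl() above never have to allocate memory on the teardown path. A sketch of the unwind pattern; the GFP flag and label name are assumptions:

    csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0, GFP_NOIO);
    if (!csk->cpl_close)
        return -ENOMEM;
    csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0, GFP_NOIO);
    if (!csk->cpl_abort_req)
        goto free_cpls;
    csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0, GFP_NOIO);
    if (!csk->cpl_abort_rpl)
        goto free_cpls;
    return 0;

    free_cpls:
        cxgbi_sock_free_cpl_skbs(csk);          /* frees whichever were allocated */
        return -ENOMEM;
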
1106 static inline void l2t_put(struct cxgbi_sock *csk) in l2t_put() argument
1108 if (csk->l2t) { in l2t_put()
1109 cxgb4_l2t_release(csk->l2t); in l2t_put()
1110 csk->l2t = NULL; in l2t_put()
1111 cxgbi_sock_put(csk); in l2t_put()
1115 static void release_offload_resources(struct cxgbi_sock *csk) in release_offload_resources() argument
1121 csk, csk->state, csk->flags, csk->tid); in release_offload_resources()
1123 cxgbi_sock_free_cpl_skbs(csk); in release_offload_resources()
1124 if (csk->wr_cred != csk->wr_max_cred) { in release_offload_resources()
1125 cxgbi_sock_purge_wr_queue(csk); in release_offload_resources()
1126 cxgbi_sock_reset_wr_list(csk); in release_offload_resources()
1129 l2t_put(csk); in release_offload_resources()
1130 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) in release_offload_resources()
1131 free_atid(csk); in release_offload_resources()
1132 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { in release_offload_resources()
1133 lldi = cxgbi_cdev_priv(csk->cdev); in release_offload_resources()
1134 cxgb4_remove_tid(lldi->tids, 0, csk->tid); in release_offload_resources()
1135 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); in release_offload_resources()
1136 cxgbi_sock_put(csk); in release_offload_resources()
1138 csk->dst = NULL; in release_offload_resources()
1139 csk->cdev = NULL; in release_offload_resources()
1142 static int init_act_open(struct cxgbi_sock *csk) in init_act_open() argument
1144 struct cxgbi_device *cdev = csk->cdev; in init_act_open()
1146 struct net_device *ndev = cdev->ports[csk->port_id]; in init_act_open()
1153 csk, csk->state, csk->flags, csk->tid); in init_act_open()
1155 csk->atid = cxgb4_alloc_atid(lldi->tids, csk); in init_act_open()
1156 if (csk->atid < 0) { in init_act_open()
1160 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); in init_act_open()
1161 cxgbi_sock_get(csk); in init_act_open()
1163 csk->l2t = cxgb4_l2t_get(lldi->l2t, csk->dst->neighbour, ndev, 0); in init_act_open()
1164 if (!csk->l2t) { in init_act_open()
1168 cxgbi_sock_get(csk); in init_act_open()
1173 skb->sk = (struct sock *)csk; in init_act_open()
1174 t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure); in init_act_open()
1176 if (!csk->mtu) in init_act_open()
1177 csk->mtu = dst_mtu(csk->dst); in init_act_open()
1178 cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx); in init_act_open()
1179 csk->tx_chan = cxgb4_port_chan(ndev); in init_act_open()
1181 csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1; in init_act_open()
1183 csk->txq_idx = cxgb4_port_idx(ndev) * step; in init_act_open()
1185 csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step]; in init_act_open()
1186 csk->wr_max_cred = csk->wr_cred = lldi->wr_cred; in init_act_open()
1187 csk->wr_una_cred = 0; in init_act_open()
1188 cxgbi_sock_reset_wr_list(csk); in init_act_open()
1189 csk->err = 0; in init_act_open()
1192 csk, pi->port_id, ndev->name, csk->tx_chan, in init_act_open()
1193 csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx, in init_act_open()
1194 csk->smac_idx); in init_act_open()
1196 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); in init_act_open()
1197 send_act_open_req(csk, skb, csk->l2t); in init_act_open()
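
init_act_open() (lines 1142-1197) is the mirror image of release_offload_resources() (lines 1115-1139): every hardware resource that holds the csk also holds a socket reference. The atid takes one reference, the L2T entry another; the per-port parameters (MSS index from the path MTU, tx channel, smac index derived from the VIID, tx/rss queues spread by port index) are then filled in before send_act_open_req() fires. Sketch of the reference discipline; the unwind label is a placeholder:

    csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
    if (csk->atid < 0)
        goto rel_resource;                      /* sketch label for the unwind path */
    cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
    cxgbi_sock_get(csk);                        /* ref owned by the atid table */

    csk->l2t = cxgb4_l2t_get(lldi->l2t, csk->dst->neighbour, ndev, 0);
    if (!csk->l2t)
        goto rel_resource;
    cxgbi_sock_get(csk);                        /* ref owned by the l2t entry */
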
1304 static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, in ddp_set_map() argument
1315 err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr, in ddp_set_map()
1340 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, in ddp_setup_conn_pgidx() argument
1355 INIT_TP_WR(req, csk->tid); in ddp_setup_conn_pgidx()
1356 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); in ddp_setup_conn_pgidx()
1357 req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); in ddp_setup_conn_pgidx()
1361 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); in ddp_setup_conn_pgidx()
1364 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); in ddp_setup_conn_pgidx()
1366 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in ddp_setup_conn_pgidx()
1370 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, in ddp_setup_conn_digest() argument
1383 csk->hcrc_len = (hcrc ? 4 : 0); in ddp_setup_conn_digest()
1384 csk->dcrc_len = (dcrc ? 4 : 0); in ddp_setup_conn_digest()
1389 req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); in ddp_setup_conn_digest()
1394 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); in ddp_setup_conn_digest()
1397 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); in ddp_setup_conn_digest()
1399 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in ddp_setup_conn_digest()
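
ddp_setup_conn_pgidx() (lines 1340-1366) and ddp_setup_conn_digest() (lines 1370-1399) both program per-connection TCB fields through a CPL_SET_TCB_FIELD sent at control priority; do_set_tcb_rpl() (line 1066 above) checks the status that comes back. The digest variant additionally caches the negotiated CRC sizes in csk->hcrc_len/csk->dcrc_len, which is what lets do_rx_iscsi_hdr() account for digest bytes in the PDU length. A sketch of the common shape; the TCB word/mask/value details are elided because the listing does not show them:

    csk->hcrc_len = (hcrc ? 4 : 0);             /* header digest: 4-byte CRC32c */
    csk->dcrc_len = (dcrc ? 4 : 0);             /* data digest */
    INIT_TP_WR(req, tid);
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
    req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
    /* req word/mask/val select the ULP submode bits (not shown in the listing) */
    set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
    cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
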