Lines matching refs: csk
(cross-reference listing for the cxgb4i iSCSI offload driver: each entry shows the source line number, the matching source line, and the enclosing function)

158 				const struct cxgbi_sock *csk)  in set_queue()  argument
178 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, in send_act_open_req() argument
182 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); in send_act_open_req()
185 unsigned int qid_atid = ((unsigned int)csk->atid) | in send_act_open_req()
186 (((unsigned int)csk->rss_qid) << 14); in send_act_open_req()
190 MSS_IDX(csk->mss_idx) | in send_act_open_req()
191 L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) | in send_act_open_req()
192 TX_CHAN(csk->tx_chan) | in send_act_open_req()
193 SMAC_SEL(csk->smac_idx) | in send_act_open_req()
199 RSS_QUEUE(csk->rss_qid); in send_act_open_req()
201 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); in send_act_open_req()
207 req->local_port = csk->saddr.sin_port; in send_act_open_req()
208 req->peer_port = csk->daddr.sin_port; in send_act_open_req()
209 req->local_ip = csk->saddr.sin_addr.s_addr; in send_act_open_req()
210 req->peer_ip = csk->daddr.sin_addr.s_addr; in send_act_open_req()
217 csk, &req->local_ip, ntohs(req->local_port), in send_act_open_req()
219 csk->atid, csk->rss_qid); in send_act_open_req()
221 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in send_act_open_req()
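
Taken together, these fragments outline the active-open request: the provisional atid and the RSS queue id are packed into a single 32-bit field (atid in the low 14 bits), the TCP 4-tuple is copied in network byte order, and the request is sent through the connection's L2T entry. A hedged sketch of how the pieces likely compose; the skb layout and the full opt0/opt2 bit set are assumptions not visible in the listing:

/* Sketch only: reconstructed from the fragments above; opt0/opt2
 * composition and skb sizing are assumptions. */
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                              struct l2t_entry *e)
{
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        struct cpl_act_open_req *req;
        /* atid in bits 13:0, RSS queue id above it: one field gives
         * the firmware both the reply key and the completion queue. */
        unsigned int qid_atid = ((unsigned int)csk->atid) |
                                (((unsigned int)csk->rss_qid) << 14);

        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

        req = (struct cpl_act_open_req *)skb->head;
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                                    qid_atid));
        /* The 4-tuple is already stored in network byte order. */
        req->local_port = csk->saddr.sin_port;
        req->peer_port = csk->daddr.sin_port;
        req->local_ip = csk->saddr.sin_addr.s_addr;
        req->peer_ip = csk->daddr.sin_addr.s_addr;
        req->opt0 = cpu_to_be64(MSS_IDX(csk->mss_idx) |
                                L2T_IDX(e->idx) |
                                TX_CHAN(csk->tx_chan) |
                                SMAC_SEL(csk->smac_idx) |
                                ULP_MODE(ULP_MODE_ISCSI) |      /* assumed */
                                WND_SCALE(wscale));             /* assumed */
        req->opt2 = cpu_to_be32(RSS_QUEUE_VALID |               /* assumed */
                                RSS_QUEUE(csk->rss_qid));

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
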
224 static void send_close_req(struct cxgbi_sock *csk) in send_close_req() argument
226 struct sk_buff *skb = csk->cpl_close; in send_close_req()
228 unsigned int tid = csk->tid; in send_close_req()
232 csk, csk->state, csk->flags, csk->tid); in send_close_req()
233 csk->cpl_close = NULL; in send_close_req()
234 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_close_req()
239 cxgbi_sock_skb_entail(csk, skb); in send_close_req()
240 if (csk->state >= CTP_ESTABLISHED) in send_close_req()
241 push_tx_frames(csk, 1); in send_close_req()
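
The close path never allocates: the CPL rides in the skb set aside by alloc_cpls() further down this listing, so teardown cannot fail under memory pressure. A minimal sketch, assuming the usual cpl_close_con_req layout:

static void send_close_req(struct cxgbi_sock *csk)
{
        struct sk_buff *skb = csk->cpl_close;
        struct cpl_close_con_req *req =
                (struct cpl_close_con_req *)skb->head;
        unsigned int tid = csk->tid;

        csk->cpl_close = NULL;          /* pre-allocated, one-shot */
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));

        cxgbi_sock_skb_entail(csk, skb);
        /* Flush only if the connection is up; otherwise the entailed
         * close goes out when the state machine catches up. */
        if (csk->state >= CTP_ESTABLISHED)
                push_tx_frames(csk, 1);
}
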
246 struct cxgbi_sock *csk = (struct cxgbi_sock *)handle; in abort_arp_failure() local
251 csk, csk->state, csk->flags, csk->tid); in abort_arp_failure()
254 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in abort_arp_failure()
257 static void send_abort_req(struct cxgbi_sock *csk) in send_abort_req() argument
260 struct sk_buff *skb = csk->cpl_abort_req; in send_abort_req()
262 if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev) in send_abort_req()
264 cxgbi_sock_set_state(csk, CTP_ABORTING); in send_abort_req()
265 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); in send_abort_req()
266 cxgbi_sock_purge_write_queue(csk); in send_abort_req()
268 csk->cpl_abort_req = NULL; in send_abort_req()
270 set_queue(skb, CPL_PRIORITY_DATA, csk); in send_abort_req()
272 t4_set_arp_err_handler(skb, csk, abort_arp_failure); in send_abort_req()
273 INIT_TP_WR(req, csk->tid); in send_abort_req()
274 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); in send_abort_req()
275 req->rsvd0 = htonl(csk->snd_nxt); in send_abort_req()
276 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); in send_abort_req()
280 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, in send_abort_req()
283 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in send_abort_req()
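
Ordering is the point here: the socket flips to CTP_ABORTING and the write queue is purged before the abort CPL is built, so no queued payload can chase the RST out. A sketch assembled from the fragments; the req->cmd value is an assumption beyond what the listing shows:

static void send_abort_req(struct cxgbi_sock *csk)
{
        struct cpl_abort_req *req;
        struct sk_buff *skb = csk->cpl_abort_req;

        if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
                return;
        cxgbi_sock_set_state(csk, CTP_ABORTING);
        cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
        cxgbi_sock_purge_write_queue(csk);      /* drop unsent data */

        csk->cpl_abort_req = NULL;              /* pre-allocated, one-shot */
        req = (struct cpl_abort_req *)skb->head;
        set_queue(skb, CPL_PRIORITY_DATA, csk);
        req->cmd = CPL_ABORT_SEND_RST;          /* assumed */
        t4_set_arp_err_handler(skb, csk, abort_arp_failure);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
        req->rsvd0 = htonl(csk->snd_nxt);
        req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
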
286 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) in send_abort_rpl() argument
288 struct sk_buff *skb = csk->cpl_abort_rpl; in send_abort_rpl()
293 csk, csk->state, csk->flags, csk->tid, rst_status); in send_abort_rpl()
295 csk->cpl_abort_rpl = NULL; in send_abort_rpl()
296 set_queue(skb, CPL_PRIORITY_DATA, csk); in send_abort_rpl()
297 INIT_TP_WR(rpl, csk->tid); in send_abort_rpl()
298 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); in send_abort_rpl()
300 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_abort_rpl()
308 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) in send_rx_credits() argument
315 csk, csk->state, csk->flags, csk->tid, credits); in send_rx_credits()
319 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); in send_rx_credits()
324 set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id); in send_rx_credits()
325 INIT_TP_WR(req, csk->tid); in send_rx_credits()
327 csk->tid)); in send_rx_credits()
329 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_rx_credits()
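
Returning receive-window credits is best-effort: on allocation failure the function simply reports zero credits granted and the caller retries later. A sketch reusing the driver's alloc_wr() helper seen in alloc_cpls() below; the credit_dack encoding is an assumption:

static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
        struct sk_buff *skb;
        struct cpl_rx_data_ack *req;

        skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
        if (!skb) {
                pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
                return 0;       /* nothing granted; caller retries */
        }
        req = (struct cpl_rx_data_ack *)skb->head;

        set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
                                                    csk->tid));
        req->credit_dack = cpu_to_be32(RX_CREDITS(credits) |    /* assumed */
                                       RX_FORCE_ACK(1));
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
        return credits;
}
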
366 static inline void send_tx_flowc_wr(struct cxgbi_sock *csk) in send_tx_flowc_wr() argument
379 FW_WR_FLOWID(csk->tid)); in send_tx_flowc_wr()
381 flowc->mnemval[0].val = htonl(csk->cdev->pfvf); in send_tx_flowc_wr()
383 flowc->mnemval[1].val = htonl(csk->tx_chan); in send_tx_flowc_wr()
385 flowc->mnemval[2].val = htonl(csk->tx_chan); in send_tx_flowc_wr()
387 flowc->mnemval[3].val = htonl(csk->rss_qid); in send_tx_flowc_wr()
389 flowc->mnemval[4].val = htonl(csk->snd_nxt); in send_tx_flowc_wr()
391 flowc->mnemval[5].val = htonl(csk->rcv_nxt); in send_tx_flowc_wr()
395 flowc->mnemval[7].val = htonl(csk->advmss); in send_tx_flowc_wr()
403 set_queue(skb, CPL_PRIORITY_DATA, csk); in send_tx_flowc_wr()
407 csk, csk->tid, 0, csk->tx_chan, csk->rss_qid, in send_tx_flowc_wr()
408 csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win, in send_tx_flowc_wr()
409 csk->advmss); in send_tx_flowc_wr()
411 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_tx_flowc_wr()
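
The FLOWC work request seeds the firmware's per-flow TCP state as (mnemonic, value) pairs; the fragments show the eight values in order: PF/VF, channel, port, ingress queue, snd_nxt, rcv_nxt, send window, and MSS. A sketch; the FW_FLOWC_MNEM_* names and the length arithmetic are assumptions:

static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
{
        struct sk_buff *skb;
        struct fw_flowc_wr *flowc;
        int flowclen = offsetof(struct fw_flowc_wr, mnemval[8]);

        skb = alloc_wr(roundup(flowclen, 16), 0, GFP_ATOMIC);
        flowc = (struct fw_flowc_wr *)skb->head;
        flowc->op_to_nparams =
                htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8));
        flowc->flowid_len16 =
                htonl(FW_WR_LEN16(DIV_ROUND_UP(flowclen, 16)) |
                      FW_WR_FLOWID(csk->tid));

        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = htonl(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = htonl(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = htonl(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = htonl(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = htonl(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = htonl(csk->advmss);

        set_queue(skb, CPL_PRIORITY_DATA, csk);
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
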
414 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, in make_tx_data_wr() argument
427 req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) | in make_tx_data_wr()
435 cpu_to_be32(FW_WR_FLOWID(csk->tid) | in make_tx_data_wr()
442 FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1); in make_tx_data_wr()
444 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) in make_tx_data_wr()
445 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in make_tx_data_wr()
453 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) in push_tx_frames() argument
458 if (unlikely(csk->state < CTP_ESTABLISHED || in push_tx_frames()
459 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { in push_tx_frames()
463 csk, csk->state, csk->flags, csk->tid); in push_tx_frames()
467 while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { in push_tx_frames()
481 if (csk->wr_cred < credits_needed) { in push_tx_frames()
484 csk, skb->len, skb->data_len, in push_tx_frames()
485 credits_needed, csk->wr_cred); in push_tx_frames()
488 __skb_unlink(skb, &csk->write_queue); in push_tx_frames()
489 set_queue(skb, CPL_PRIORITY_DATA, csk); in push_tx_frames()
491 csk->wr_cred -= credits_needed; in push_tx_frames()
492 csk->wr_una_cred += credits_needed; in push_tx_frames()
493 cxgbi_sock_enqueue_wr(csk, skb); in push_tx_frames()
497 csk, skb->len, skb->data_len, credits_needed, in push_tx_frames()
498 csk->wr_cred, csk->wr_una_cred); in push_tx_frames()
501 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { in push_tx_frames()
502 send_tx_flowc_wr(csk); in push_tx_frames()
504 csk->wr_cred -= 5; in push_tx_frames()
505 csk->wr_una_cred += 5; in push_tx_frames()
508 make_tx_data_wr(csk, skb, dlen, len, credits_needed, in push_tx_frames()
510 csk->snd_nxt += len; in push_tx_frames()
514 t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard); in push_tx_frames()
518 csk, csk->state, csk->flags, csk->tid, skb, len); in push_tx_frames()
520 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in push_tx_frames()
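
The transmit loop is plain credit accounting: a queued skb leaves only when enough 16-byte work-request credits remain, credits move from wr_cred to wr_una_cred until the firmware acknowledges them (see do_fw4_ack below), and the very first send on a flow is preceded by a FLOWC costing a flat 5 credits. A simplified sketch; the real per-skb credit calculation distinguishes immediate from DMA payloads:

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
        int total_size = 0;
        struct sk_buff *skb;

        if (unlikely(csk->state < CTP_ESTABLISHED ||
                     csk->state == CTP_CLOSE_WAIT_1 ||
                     csk->state >= CTP_ABORTING))
                return 0;

        while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
                int len = skb->len;
                /* Simplified: credits are 16-byte units covering the
                 * WR header plus the payload. */
                unsigned int credits_needed = DIV_ROUND_UP(len +
                                sizeof(struct fw_ofld_tx_data_wr), 16);

                if (csk->wr_cred < credits_needed)
                        break;          /* resume from the next FW4_ACK */

                __skb_unlink(skb, &csk->write_queue);
                set_queue(skb, CPL_PRIORITY_DATA, csk);
                csk->wr_cred -= credits_needed;
                csk->wr_una_cred += credits_needed;     /* un-acked */
                cxgbi_sock_enqueue_wr(csk, skb);        /* for FW4_ACK */

                if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                        send_tx_flowc_wr(csk);          /* once per flow */
                        csk->wr_cred -= 5;
                        csk->wr_una_cred += 5;
                }
                make_tx_data_wr(csk, skb, len, len, credits_needed,
                                req_completion);
                csk->snd_nxt += len;
                t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
                cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb,
                               csk->l2t);
                total_size += len;
        }
        return total_size;
}
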
525 static inline void free_atid(struct cxgbi_sock *csk) in free_atid() argument
527 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in free_atid()
529 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { in free_atid()
530 cxgb4_free_atid(lldi->tids, csk->atid); in free_atid()
531 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); in free_atid()
532 cxgbi_sock_put(csk); in free_atid()
538 struct cxgbi_sock *csk; in do_act_establish() local
547 csk = lookup_atid(t, atid); in do_act_establish()
548 if (unlikely(!csk)) { in do_act_establish()
553 if (csk->atid != atid) { in do_act_establish()
555 atid, csk, csk->state, csk->flags, csk->tid, csk->atid); in do_act_establish()
561 csk, csk->state, csk->flags, tid, atid, rcv_isn); in do_act_establish()
563 cxgbi_sock_get(csk); in do_act_establish()
564 csk->tid = tid; in do_act_establish()
565 cxgb4_insert_tid(lldi->tids, csk, tid); in do_act_establish()
566 cxgbi_sock_set_flag(csk, CTPF_HAS_TID); in do_act_establish()
568 free_atid(csk); in do_act_establish()
570 spin_lock_bh(&csk->lock); in do_act_establish()
571 if (unlikely(csk->state != CTP_ACTIVE_OPEN)) in do_act_establish()
573 csk, csk->state, csk->flags, csk->tid); in do_act_establish()
575 if (csk->retry_timer.function) { in do_act_establish()
576 del_timer(&csk->retry_timer); in do_act_establish()
577 csk->retry_timer.function = NULL; in do_act_establish()
580 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; in do_act_establish()
586 csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10); in do_act_establish()
588 csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40; in do_act_establish()
590 csk->advmss -= 12; in do_act_establish()
591 if (csk->advmss < 128) in do_act_establish()
592 csk->advmss = 128; in do_act_establish()
596 csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss); in do_act_establish()
598 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); in do_act_establish()
600 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) in do_act_establish()
601 send_abort_req(csk); in do_act_establish()
603 if (skb_queue_len(&csk->write_queue)) in do_act_establish()
604 push_tx_frames(csk, 0); in do_act_establish()
605 cxgbi_conn_tx_open(csk); in do_act_establish()
607 spin_unlock_bh(&csk->lock); in do_act_establish()
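
The establish handler performs the atid-to-tid handover: the reply is looked up by the provisional atid, the permanent tid is inserted into the tid table (taking its own socket reference), and the atid plus its reference are released. The visible MSS arithmetic is MTU minus 40 bytes of IP+TCP headers, 12 more if timestamps were negotiated, floored at 128. A condensed sketch with error paths and debug prints dropped; the exact CPL field-extraction macros are assumptions:

static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
        unsigned int tid = GET_TID(req);
        unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));  /* assumed */
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct cxgbi_sock *csk = lookup_atid(lldi->tids, atid);
        u16 tcp_opt = ntohs(req->tcp_opt);

        /* Hand over from provisional atid to permanent tid; the tid
         * table takes its own reference on the socket. */
        cxgbi_sock_get(csk);
        csk->tid = tid;
        cxgb4_insert_tid(lldi->tids, csk, tid);
        cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
        free_atid(csk);                 /* drops atid + its reference */

        spin_lock_bh(&csk->lock);
        /* Seed receive-side sequence state from the peer's ISN. */
        csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
        if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
                csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);

        csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
        if (GET_TCPOPT_TSTAMP(tcp_opt))         /* assumed macro */
                csk->advmss -= 12;
        if (csk->advmss < 128)
                csk->advmss = 128;

        cxgbi_sock_established(csk, ntohl(req->snd_isn), tcp_opt);
        if (skb_queue_len(&csk->write_queue))
                push_tx_frames(csk, 0);
        cxgbi_conn_tx_open(csk);
        spin_unlock_bh(&csk->lock);
}
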
634 struct cxgbi_sock *csk = (struct cxgbi_sock *)data; in csk_act_open_retry_timer() local
638 csk, csk->state, csk->flags, csk->tid); in csk_act_open_retry_timer()
640 cxgbi_sock_get(csk); in csk_act_open_retry_timer()
641 spin_lock_bh(&csk->lock); in csk_act_open_retry_timer()
644 cxgbi_sock_fail_act_open(csk, -ENOMEM); in csk_act_open_retry_timer()
646 skb->sk = (struct sock *)csk; in csk_act_open_retry_timer()
647 t4_set_arp_err_handler(skb, csk, in csk_act_open_retry_timer()
649 send_act_open_req(csk, skb, csk->l2t); in csk_act_open_retry_timer()
651 spin_unlock_bh(&csk->lock); in csk_act_open_retry_timer()
652 cxgbi_sock_put(csk); in csk_act_open_retry_timer()
657 struct cxgbi_sock *csk; in do_act_open_rpl() local
666 csk = lookup_atid(t, atid); in do_act_open_rpl()
667 if (unlikely(!csk)) { in do_act_open_rpl()
673 &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), in do_act_open_rpl()
674 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port), in do_act_open_rpl()
675 atid, tid, status, csk, csk->state, csk->flags); in do_act_open_rpl()
683 cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl)); in do_act_open_rpl()
685 cxgbi_sock_get(csk); in do_act_open_rpl()
686 spin_lock_bh(&csk->lock); in do_act_open_rpl()
689 csk->retry_timer.function != csk_act_open_retry_timer) { in do_act_open_rpl()
690 csk->retry_timer.function = csk_act_open_retry_timer; in do_act_open_rpl()
691 mod_timer(&csk->retry_timer, jiffies + HZ / 2); in do_act_open_rpl()
693 cxgbi_sock_fail_act_open(csk, in do_act_open_rpl()
696 spin_unlock_bh(&csk->lock); in do_act_open_rpl()
697 cxgbi_sock_put(csk); in do_act_open_rpl()
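
The open-reply fragments show two dispositions: a transient failure arms a half-second retry timer (at most once), anything else fails the active open with a status-derived errno. Note also line 683 above: if the hardware already assigned a tid before failing, it is removed with cxgb4_remove_tid() first. A sketch as a hypothetical helper; CPL_ERR_CONN_EXIST as the retried status and act_open_rpl_status_to_errno() are assumed names:

/* Hypothetical helper condensing the disposition in do_act_open_rpl(). */
static void act_open_rpl_disposition(struct cxgbi_sock *csk,
                                     unsigned int status)
{
        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);
        if (status == CPL_ERR_CONN_EXIST &&
            csk->retry_timer.function != csk_act_open_retry_timer) {
                /* Arm the retry timer only once per attempt. */
                csk->retry_timer.function = csk_act_open_retry_timer;
                mod_timer(&csk->retry_timer, jiffies + HZ / 2);
        } else {
                cxgbi_sock_fail_act_open(csk,
                                act_open_rpl_status_to_errno(status));
        }
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
}
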
704 struct cxgbi_sock *csk; in do_peer_close() local
710 csk = lookup_tid(t, tid); in do_peer_close()
711 if (unlikely(!csk)) { in do_peer_close()
717 csk, csk->state, csk->flags, csk->tid); in do_peer_close()
718 cxgbi_sock_rcv_peer_close(csk); in do_peer_close()
725 struct cxgbi_sock *csk; in do_close_con_rpl() local
731 csk = lookup_tid(t, tid); in do_close_con_rpl()
732 if (unlikely(!csk)) { in do_close_con_rpl()
738 csk, csk->state, csk->flags, csk->tid); in do_close_con_rpl()
739 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); in do_close_con_rpl()
744 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, in abort_status_to_errno() argument
750 return csk->state > CTP_ESTABLISHED ? in abort_status_to_errno()
764 struct cxgbi_sock *csk; in do_abort_req_rss() local
771 csk = lookup_tid(t, tid); in do_abort_req_rss()
772 if (unlikely(!csk)) { in do_abort_req_rss()
779 csk, csk->state, csk->flags, csk->tid, req->status); in do_abort_req_rss()
785 cxgbi_sock_get(csk); in do_abort_req_rss()
786 spin_lock_bh(&csk->lock); in do_abort_req_rss()
788 if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) { in do_abort_req_rss()
789 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req_rss()
790 cxgbi_sock_set_state(csk, CTP_ABORTING); in do_abort_req_rss()
794 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req_rss()
795 send_abort_rpl(csk, rst_status); in do_abort_req_rss()
797 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { in do_abort_req_rss()
798 csk->err = abort_status_to_errno(csk, req->status, &rst_status); in do_abort_req_rss()
799 cxgbi_sock_closed(csk); in do_abort_req_rss()
802 spin_unlock_bh(&csk->lock); in do_abort_req_rss()
803 cxgbi_sock_put(csk); in do_abort_req_rss()
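
The peer-abort fragments encode a two-pass protocol: the first delivery only marks the socket as aborting; a repeat delivery sends the ABORT_RPL and, when no locally initiated abort is still pending, records the errno and closes the socket. A sketch as a hypothetical helper wrapping the logic visible above:

/* Hypothetical helper condensing do_abort_req_rss(). */
static void peer_abort_disposition(struct cxgbi_sock *csk,
                                   struct cpl_abort_req_rss *req,
                                   int rst_status)
{
        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
                /* First delivery: just mark and wait. */
                cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
                cxgbi_sock_set_state(csk, CTP_ABORTING);
                goto done;
        }

        cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
        send_abort_rpl(csk, rst_status);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
                /* No local abort in flight: finish the close here. */
                csk->err = abort_status_to_errno(csk, req->status,
                                                 &rst_status);
                cxgbi_sock_closed(csk);
        }
done:
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
}
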
810 struct cxgbi_sock *csk; in do_abort_rpl_rss() local
816 csk = lookup_tid(t, tid); in do_abort_rpl_rss()
817 if (!csk) in do_abort_rpl_rss()
822 rpl->status, csk, csk ? csk->state : 0, in do_abort_rpl_rss()
823 csk ? csk->flags : 0UL); in do_abort_rpl_rss()
828 cxgbi_sock_rcv_abort_rpl(csk); in do_abort_rpl_rss()
835 struct cxgbi_sock *csk; in do_rx_iscsi_hdr() local
842 csk = lookup_tid(t, tid); in do_rx_iscsi_hdr()
843 if (unlikely(!csk)) { in do_rx_iscsi_hdr()
850 csk, csk->state, csk->flags, csk->tid, skb, skb->len, in do_rx_iscsi_hdr()
853 spin_lock_bh(&csk->lock); in do_rx_iscsi_hdr()
855 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_iscsi_hdr()
858 csk, csk->state, csk->flags, csk->tid); in do_rx_iscsi_hdr()
859 if (csk->state != CTP_ABORTING) in do_rx_iscsi_hdr()
872 if (!csk->skb_ulp_lhdr) { in do_rx_iscsi_hdr()
878 csk, csk->state, csk->flags, csk->tid, skb); in do_rx_iscsi_hdr()
879 csk->skb_ulp_lhdr = skb; in do_rx_iscsi_hdr()
882 if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) { in do_rx_iscsi_hdr()
884 csk->tid, cxgbi_skcb_tcp_seq(skb), in do_rx_iscsi_hdr()
885 csk->rcv_nxt); in do_rx_iscsi_hdr()
896 csk->tid, ISCSI_PDU_LEN(pdu_len_ddp) - 40, in do_rx_iscsi_hdr()
903 cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len; in do_rx_iscsi_hdr()
904 csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb); in do_rx_iscsi_hdr()
908 csk, skb, *bhs, hlen, dlen, in do_rx_iscsi_hdr()
913 struct sk_buff *lskb = csk->skb_ulp_lhdr; in do_rx_iscsi_hdr()
918 csk, csk->state, csk->flags, skb, lskb); in do_rx_iscsi_hdr()
921 __skb_queue_tail(&csk->receive_queue, skb); in do_rx_iscsi_hdr()
922 spin_unlock_bh(&csk->lock); in do_rx_iscsi_hdr()
926 send_abort_req(csk); in do_rx_iscsi_hdr()
928 spin_unlock_bh(&csk->lock); in do_rx_iscsi_hdr()
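
iSCSI PDUs arrive split across CPLs: the first skb of a PDU (the header) is parked in csk->skb_ulp_lhdr and must land exactly at rcv_nxt; the data digest length is folded into the PDU length and rcv_nxt advances by the whole PDU at once. A sketch as a hypothetical helper, called under csk->lock; the skcb flag names are assumptions:

/* Hypothetical helper condensing do_rx_iscsi_hdr(); returns 0 or
 * -EIO when the caller should abort the connection. */
static int rx_iscsi_hdr_disposition(struct cxgbi_sock *csk,
                                    struct sk_buff *skb)
{
        if (!csk->skb_ulp_lhdr) {
                /* First CPL of a PDU: this skb carries the BHS. */
                csk->skb_ulp_lhdr = skb;
                cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

                /* The header must land at the expected sequence. */
                if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
                        pr_info("tid %u, bad seq %u, expected %u.\n",
                                csk->tid, cxgbi_skcb_tcp_seq(skb),
                                csk->rcv_nxt);
                        return -EIO;    /* caller sends abort req */
                }

                /* Fold in the data digest, then consume the whole PDU
                 * from sequence space in one step. */
                cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
                csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);
        } else {
                /* Payload CPL for the PDU whose header we hold. */
                cxgbi_skcb_set_flag(csk->skb_ulp_lhdr, SKCBF_RX_DATA);
        }
        __skb_queue_tail(&csk->receive_queue, skb);
        return 0;
}
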
936 struct cxgbi_sock *csk; in do_rx_data_ddp() local
944 csk = lookup_tid(t, tid); in do_rx_data_ddp()
945 if (unlikely(!csk)) { in do_rx_data_ddp()
952 csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr); in do_rx_data_ddp()
954 spin_lock_bh(&csk->lock); in do_rx_data_ddp()
956 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_data_ddp()
959 csk, csk->state, csk->flags, csk->tid); in do_rx_data_ddp()
960 if (csk->state != CTP_ABORTING) in do_rx_data_ddp()
966 if (!csk->skb_ulp_lhdr) { in do_rx_data_ddp()
967 pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid); in do_rx_data_ddp()
971 lskb = csk->skb_ulp_lhdr; in do_rx_data_ddp()
972 csk->skb_ulp_lhdr = NULL; in do_rx_data_ddp()
978 csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb)); in do_rx_data_ddp()
982 csk, lskb, status, cxgbi_skcb_flags(lskb)); in do_rx_data_ddp()
987 csk, lskb, status, cxgbi_skcb_flags(lskb)); in do_rx_data_ddp()
993 csk, lskb, status); in do_rx_data_ddp()
1000 csk, lskb, status); in do_rx_data_ddp()
1005 csk, lskb, cxgbi_skcb_flags(lskb)); in do_rx_data_ddp()
1008 cxgbi_conn_pdu_ready(csk); in do_rx_data_ddp()
1009 spin_unlock_bh(&csk->lock); in do_rx_data_ddp()
1013 send_abort_req(csk); in do_rx_data_ddp()
1015 spin_unlock_bh(&csk->lock); in do_rx_data_ddp()
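
The DDP-completion CPL closes out the PDU opened in do_rx_iscsi_hdr(): the saved header skb is detached, digest and padding error bits from the CPL status are translated into skcb flags, and the iSCSI layer is woken. A sketch as a hypothetical helper; the status-bit and CPL field names are assumptions:

/* Hypothetical helper condensing do_rx_data_ddp(), under csk->lock. */
static void rx_data_ddp_disposition(struct cxgbi_sock *csk,
                                    struct cpl_rx_data_ddp *rpl)
{
        struct sk_buff *lskb = csk->skb_ulp_lhdr;
        unsigned int status = ntohl(rpl->ddpvld);       /* assumed field */

        csk->skb_ulp_lhdr = NULL;       /* this PDU is now complete */
        cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);      /* assumed */

        if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
        if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
        if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);

        cxgbi_conn_pdu_ready(csk);      /* wake the iSCSI layer */
}
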
1022 struct cxgbi_sock *csk; in do_fw4_ack() local
1028 csk = lookup_tid(t, tid); in do_fw4_ack()
1029 if (unlikely(!csk)) in do_fw4_ack()
1034 csk, csk->state, csk->flags, csk->tid); in do_fw4_ack()
1035 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una), in do_fw4_ack()
1047 struct cxgbi_sock *csk; in do_set_tcb_rpl() local
1049 csk = lookup_tid(t, tid); in do_set_tcb_rpl()
1050 if (!csk) in do_set_tcb_rpl()
1055 csk, csk->state, csk->flags, csk->tid, rpl->status); in do_set_tcb_rpl()
1059 csk, tid, rpl->status); in do_set_tcb_rpl()
1064 static int alloc_cpls(struct cxgbi_sock *csk) in alloc_cpls() argument
1066 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), in alloc_cpls()
1068 if (!csk->cpl_close) in alloc_cpls()
1071 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), in alloc_cpls()
1073 if (!csk->cpl_abort_req) in alloc_cpls()
1076 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), in alloc_cpls()
1078 if (!csk->cpl_abort_rpl) in alloc_cpls()
1083 cxgbi_sock_free_cpl_skbs(csk); in alloc_cpls()
1087 static inline void l2t_put(struct cxgbi_sock *csk) in l2t_put() argument
1089 if (csk->l2t) { in l2t_put()
1090 cxgb4_l2t_release(csk->l2t); in l2t_put()
1091 csk->l2t = NULL; in l2t_put()
1092 cxgbi_sock_put(csk); in l2t_put()
1096 static void release_offload_resources(struct cxgbi_sock *csk) in release_offload_resources() argument
1102 csk, csk->state, csk->flags, csk->tid); in release_offload_resources()
1104 cxgbi_sock_free_cpl_skbs(csk); in release_offload_resources()
1105 if (csk->wr_cred != csk->wr_max_cred) { in release_offload_resources()
1106 cxgbi_sock_purge_wr_queue(csk); in release_offload_resources()
1107 cxgbi_sock_reset_wr_list(csk); in release_offload_resources()
1110 l2t_put(csk); in release_offload_resources()
1111 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) in release_offload_resources()
1112 free_atid(csk); in release_offload_resources()
1113 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { in release_offload_resources()
1114 lldi = cxgbi_cdev_priv(csk->cdev); in release_offload_resources()
1115 cxgb4_remove_tid(lldi->tids, 0, csk->tid); in release_offload_resources()
1116 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); in release_offload_resources()
1117 cxgbi_sock_put(csk); in release_offload_resources()
1119 csk->dst = NULL; in release_offload_resources()
1120 csk->cdev = NULL; in release_offload_resources()
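
Teardown mirrors setup resource-for-resource, and each release that pairs with a cxgbi_sock_get() at setup time drops one reference. A sketch assembled from the fragments, in the same order as the listing:

static void release_offload_resources(struct cxgbi_sock *csk)
{
        struct cxgb4_lld_info *lldi;

        cxgbi_sock_free_cpl_skbs(csk);  /* unused close/abort CPLs */
        if (csk->wr_cred != csk->wr_max_cred) {
                /* WRs still un-acked: drop them, reset accounting. */
                cxgbi_sock_purge_wr_queue(csk);
                cxgbi_sock_reset_wr_list(csk);
        }
        l2t_put(csk);                   /* L2T entry + its reference */
        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
                free_atid(csk);         /* open never completed */
        else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
                lldi = cxgbi_cdev_priv(csk->cdev);
                cxgb4_remove_tid(lldi->tids, 0, csk->tid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
                cxgbi_sock_put(csk);
        }
        csk->dst = NULL;
        csk->cdev = NULL;
}
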
1123 static int init_act_open(struct cxgbi_sock *csk) in init_act_open() argument
1125 struct cxgbi_device *cdev = csk->cdev; in init_act_open()
1127 struct net_device *ndev = cdev->ports[csk->port_id]; in init_act_open()
1135 csk, csk->state, csk->flags, csk->tid); in init_act_open()
1137 csk->atid = cxgb4_alloc_atid(lldi->tids, csk); in init_act_open()
1138 if (csk->atid < 0) { in init_act_open()
1142 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); in init_act_open()
1143 cxgbi_sock_get(csk); in init_act_open()
1145 n = dst_get_neighbour_noref(csk->dst); in init_act_open()
1150 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); in init_act_open()
1151 if (!csk->l2t) { in init_act_open()
1155 cxgbi_sock_get(csk); in init_act_open()
1160 skb->sk = (struct sock *)csk; in init_act_open()
1161 t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure); in init_act_open()
1163 if (!csk->mtu) in init_act_open()
1164 csk->mtu = dst_mtu(csk->dst); in init_act_open()
1165 cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx); in init_act_open()
1166 csk->tx_chan = cxgb4_port_chan(ndev); in init_act_open()
1168 csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1; in init_act_open()
1170 csk->txq_idx = cxgb4_port_idx(ndev) * step; in init_act_open()
1172 csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step]; in init_act_open()
1173 csk->wr_max_cred = csk->wr_cred = lldi->wr_cred; in init_act_open()
1174 csk->wr_una_cred = 0; in init_act_open()
1175 cxgbi_sock_reset_wr_list(csk); in init_act_open()
1176 csk->err = 0; in init_act_open()
1179 csk, pi->port_id, ndev->name, csk->tx_chan, in init_act_open()
1180 csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx, in init_act_open()
1181 csk->smac_idx); in init_act_open()
1183 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); in init_act_open()
1184 send_act_open_req(csk, skb, csk->l2t); in init_act_open()
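
The setup order matters: atid first, then the L2T entry for the next hop (each taking a socket reference), then the request skb with its ARP-failure handler, then the path parameters, and only then the open itself. A condensed sketch with error unwinding and debug prints omitted; the txq/rss step derivation and the alloc_wr() sizing are assumptions:

static int init_act_open(struct cxgbi_sock *csk)
{
        struct cxgbi_device *cdev = csk->cdev;
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct net_device *ndev = cdev->ports[csk->port_id];
        struct sk_buff *skb;
        struct neighbour *n;
        unsigned int step;

        /* 1. Provisional tid (atid) + a socket reference for it. */
        csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
        if (csk->atid < 0)
                return -EINVAL;
        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
        cxgbi_sock_get(csk);

        /* 2. L2 entry for the next hop + another reference. */
        n = dst_get_neighbour_noref(csk->dst);
        csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
        if (!csk->l2t)
                return -EINVAL;
        cxgbi_sock_get(csk);

        /* 3. Request skb with an ARP-failure handler attached. */
        skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
        skb->sk = (struct sock *)csk;
        t4_set_arp_err_handler(skb, csk,
                               cxgbi_sock_act_open_req_arp_failure);

        /* 4. Path parameters: MTU -> MSS index, channel, SMAC, queues. */
        if (!csk->mtu)
                csk->mtu = dst_mtu(csk->dst);
        cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
        csk->tx_chan = cxgb4_port_chan(ndev);
        csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
        step = lldi->ntxq / lldi->nchan;        /* assumed derivation */
        csk->txq_idx = cxgb4_port_idx(ndev) * step;
        step = lldi->nrxq / lldi->nchan;
        csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];

        /* 5. Seed work-request credits and fire the open. */
        csk->wr_max_cred = csk->wr_cred = lldi->wr_cred;
        csk->wr_una_cred = 0;
        cxgbi_sock_reset_wr_list(csk);
        csk->err = 0;

        cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
        send_act_open_req(csk, skb, csk->l2t);
        return 0;
}
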
1291 static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, in ddp_set_map() argument
1302 err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr, in ddp_set_map()
1327 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, in ddp_setup_conn_pgidx() argument
1342 INIT_TP_WR(req, csk->tid); in ddp_setup_conn_pgidx()
1343 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); in ddp_setup_conn_pgidx()
1344 req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); in ddp_setup_conn_pgidx()
1348 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); in ddp_setup_conn_pgidx()
1351 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); in ddp_setup_conn_pgidx()
1353 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in ddp_setup_conn_pgidx()
1357 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, in ddp_setup_conn_digest() argument
1370 csk->hcrc_len = (hcrc ? 4 : 0); in ddp_setup_conn_digest()
1371 csk->dcrc_len = (dcrc ? 4 : 0); in ddp_setup_conn_digest()
1376 req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); in ddp_setup_conn_digest()
1381 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); in ddp_setup_conn_digest()
1384 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); in ddp_setup_conn_digest()
1386 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in ddp_setup_conn_digest()
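
Both digest knobs are programmed through a single SET_TCB_FIELD request, with the reply routed back to the connection's RSS queue via NO_REPLY/QUEUENO as the fragments show. A sketch; the mask/value encoding (digest-enable bits within the ULP TCB word) and the alloc_wr() sizing are assumptions:

static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
                                 int hcrc, int dcrc, int reply)
{
        struct sk_buff *skb;
        struct cpl_set_tcb_field *req;

        skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        csk->hcrc_len = (hcrc ? 4 : 0); /* CRC32C adds 4 bytes when on */
        csk->dcrc_len = (dcrc ? 4 : 0);

        req = (struct cpl_set_tcb_field *)skb->head;
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
        req->word_cookie = htons(0);
        req->mask = cpu_to_be64(0x3 << 4);              /* assumed bits */
        req->val = cpu_to_be64(((hcrc ? 1 : 0) | (dcrc ? 2 : 0)) << 4);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
        return 0;
}
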