Lines matching refs: iucv (identifier references; the leading numbers are source line numbers in net/iucv/af_iucv.c)

165 struct iucv_sock *iucv = iucv_sk(sk); in iucv_below_msglim() local
169 if (iucv->transport == AF_IUCV_TRANS_IUCV) in iucv_below_msglim()
170 return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim); in iucv_below_msglim()
172 return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) && in iucv_below_msglim()
173 (atomic_read(&iucv->pendings) <= 0)); in iucv_below_msglim()
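The three matches above are the body of the send-side flow-control test. A sketch of the whole function follows; the lines without an "iucv" reference (the state check, the else) do not appear in the listing and are filled in from context:

    static inline int iucv_below_msglim(struct sock *sk)
    {
    	struct iucv_sock *iucv = iucv_sk(sk);

    	/* not connected yet: nothing can be in flight, allow sending */
    	if (sk->sk_state != IUCV_CONNECTED)
    		return 1;
    	/* classic z/VM IUCV: bounded by the path's message limit */
    	if (iucv->transport == AF_IUCV_TRANS_IUCV)
    		return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
    	/* HiperSockets: peer window not exhausted and no pending TX */
    	return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
    		(atomic_read(&iucv->pendings) <= 0));
    }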
197 struct iucv_sock *iucv = iucv_sk(sock); in afiucv_hs_send() local
209 phs_hdr->window = iucv->msglimit; in afiucv_hs_send()
211 confirm_recv = atomic_read(&iucv->msg_recv); in afiucv_hs_send()
216 memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8); in afiucv_hs_send()
217 memcpy(phs_hdr->destAppName, iucv->dst_name, 8); in afiucv_hs_send()
218 memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8); in afiucv_hs_send()
219 memcpy(phs_hdr->srcAppName, iucv->src_name, 8); in afiucv_hs_send()
227 skb->dev = iucv->hs_dev; in afiucv_hs_send()
250 atomic_inc(&iucv->skbs_in_xmit); in afiucv_hs_send()
253 atomic_dec(&iucv->skbs_in_xmit); in afiucv_hs_send()
255 atomic_sub(confirm_recv, &iucv->msg_recv); in afiucv_hs_send()
256 WARN_ON(atomic_read(&iucv->msg_recv) < 0); in afiucv_hs_send()
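Matches 209-256 belong to afiucv_hs_send(): transport-header fill plus the receive confirmation piggy-backed on outgoing frames. A condensed sketch; the flag tests and the dev_queue_xmit() call between the matches are assumptions from context:

    	if (flags == AF_IUCV_FLAG_SYN)
    		phs_hdr->window = iucv->msglimit;	/* announce own window */
    	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
    		/* piggy-back a receive confirmation on data/WIN frames */
    		confirm_recv = atomic_read(&iucv->msg_recv);
    		phs_hdr->window = confirm_recv;
    		if (confirm_recv)
    			phs_hdr->flags |= AF_IUCV_FLAG_WIN;
    	}
    	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
    	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
    	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
    	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);

    	skb->dev = iucv->hs_dev;
    	/* ... dev checks and header push elided ... */
    	atomic_inc(&iucv->skbs_in_xmit);
    	err = dev_queue_xmit(skb);
    	if (net_xmit_eval(err)) {
    		atomic_dec(&iucv->skbs_in_xmit);	/* xmit failed */
    	} else {
    		/* slots confirmed to the peer are consumed from msg_recv */
    		atomic_sub(confirm_recv, &iucv->msg_recv);
    		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
    	}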
335 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sever_path() local
336 struct iucv_path *path = iucv->path; in iucv_sever_path()
338 if (iucv->path) { in iucv_sever_path()
339 iucv->path = NULL; in iucv_sever_path()
341 low_nmcpy(user_data, iucv->src_name); in iucv_sever_path()
342 high_nmcpy(user_data, iucv->dst_name); in iucv_sever_path()
354 struct iucv_sock *iucv = iucv_sk(sk); in iucv_send_ctrl() local
361 LL_RESERVED_SPACE(iucv->hs_dev); in iucv_send_ctrl()
380 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_close() local
392 if (iucv->transport == AF_IUCV_TRANS_HIPER) { in iucv_sock_close()
403 if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) { in iucv_sock_close()
421 skb_queue_purge(&iucv->send_skb_q); in iucv_sock_close()
422 skb_queue_purge(&iucv->backlog_skb_q); in iucv_sock_close()
429 if (iucv->hs_dev) { in iucv_sock_close()
430 dev_put(iucv->hs_dev); in iucv_sock_close()
431 iucv->hs_dev = NULL; in iucv_sock_close()
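The close path sends FIN on the HiperSockets transport, lingers until in-flight skbs drain, purges the queues, and drops the device reference. A sketch of the pieces the matches 392-431 belong to; the switch labels and the timeout computation are assumed from context:

    	if (iucv->transport == AF_IUCV_TRANS_HIPER)
    		err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);	/* tell the peer */

    	/* wait (bounded by SO_LINGER or a default timeout) until the
    	 * device has completed all in-flight transmits */
    	if (!err && atomic_read(&iucv->skbs_in_xmit) > 0)
    		iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CLOSED, 0),
    			       timeo);

    	skb_queue_purge(&iucv->send_skb_q);
    	skb_queue_purge(&iucv->backlog_skb_q);

    	/* release the bound HiperSockets device */
    	if (iucv->hs_dev) {
    		dev_put(iucv->hs_dev);
    		iucv->hs_dev = NULL;
    		sk->sk_bound_dev_if = 0;
    	}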
452 struct iucv_sock *iucv; in iucv_sock_alloc() local
457 iucv = iucv_sk(sk); in iucv_sock_alloc()
460 INIT_LIST_HEAD(&iucv->accept_q); in iucv_sock_alloc()
461 spin_lock_init(&iucv->accept_q_lock); in iucv_sock_alloc()
462 skb_queue_head_init(&iucv->send_skb_q); in iucv_sock_alloc()
463 INIT_LIST_HEAD(&iucv->message_q.list); in iucv_sock_alloc()
464 spin_lock_init(&iucv->message_q.lock); in iucv_sock_alloc()
465 skb_queue_head_init(&iucv->backlog_skb_q); in iucv_sock_alloc()
466 iucv->send_tag = 0; in iucv_sock_alloc()
467 atomic_set(&iucv->pendings, 0); in iucv_sock_alloc()
468 iucv->flags = 0; in iucv_sock_alloc()
469 iucv->msglimit = 0; in iucv_sock_alloc()
470 atomic_set(&iucv->skbs_in_xmit, 0); in iucv_sock_alloc()
471 atomic_set(&iucv->msg_sent, 0); in iucv_sock_alloc()
472 atomic_set(&iucv->msg_recv, 0); in iucv_sock_alloc()
473 iucv->path = NULL; in iucv_sock_alloc()
474 iucv->sk_txnotify = afiucv_hs_callback_txnotify; in iucv_sock_alloc()
475 memset(&iucv->init, 0, sizeof(iucv->init)); in iucv_sock_alloc()
477 iucv->transport = AF_IUCV_TRANS_IUCV; in iucv_sock_alloc()
479 iucv->transport = AF_IUCV_TRANS_HIPER; in iucv_sock_alloc()
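Matches 477/479 are the two arms of the default-transport choice; the condition between them contains no "iucv" and is therefore missing from the listing. It keys off whether the z/VM IUCV bus driver registered its ops (the pr_iucv pointer), roughly:

    	if (pr_iucv)
    		iucv->transport = AF_IUCV_TRANS_IUCV;	/* classic z/VM IUCV */
    	else
    		iucv->transport = AF_IUCV_TRANS_HIPER;	/* HiperSockets only */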
551 static void __iucv_auto_name(struct iucv_sock *iucv) in __iucv_auto_name() argument
560 memcpy(iucv->src_name, name, 8); in __iucv_auto_name()
570 struct iucv_sock *iucv; in iucv_sock_bind() local
587 iucv = iucv_sk(sk); in iucv_sock_bind()
592 if (iucv->path) in iucv_sock_bind()
606 memcpy(iucv->src_user_id, sa->siucv_user_id, 8); in iucv_sock_bind()
609 __iucv_auto_name(iucv); in iucv_sock_bind()
611 memcpy(iucv->src_name, sa->siucv_name, 8); in iucv_sock_bind()
613 iucv->hs_dev = dev; in iucv_sock_bind()
616 iucv->transport = AF_IUCV_TRANS_HIPER; in iucv_sock_bind()
617 if (!iucv->msglimit) in iucv_sock_bind()
618 iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT; in iucv_sock_bind()
627 memcpy(iucv->src_name, sa->siucv_name, 8); in iucv_sock_bind()
628 memcpy(iucv->src_user_id, iucv_userid, 8); in iucv_sock_bind()
630 iucv->transport = AF_IUCV_TRANS_IUCV; in iucv_sock_bind()
632 if (!iucv->msglimit) in iucv_sock_bind()
633 iucv->msglimit = IUCV_QUEUELEN_DEFAULT; in iucv_sock_bind()
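bind() selects the transport: if the user id part of the bound name matches a HiperSockets net_device, the socket binds to that device, otherwise it falls back to classic VM IUCV. Each branch applies its own default message limit only if the application has not set SO_MSGLIMIT. Sketch with the branch structure assumed from context:

    	if (dev) {	/* user id matched a HiperSockets device */
    		memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
    		if (strncmp(sa->siucv_name, "        ", 8) == 0)
    			__iucv_auto_name(iucv);	/* blank name: generate one */
    		else
    			memcpy(iucv->src_name, sa->siucv_name, 8);
    		iucv->hs_dev = dev;
    		dev_hold(dev);
    		iucv->transport = AF_IUCV_TRANS_HIPER;
    		if (!iucv->msglimit)
    			iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
    	} else {	/* classic z/VM IUCV */
    		memcpy(iucv->src_name, sa->siucv_name, 8);
    		memcpy(iucv->src_user_id, iucv_userid, 8);
    		iucv->transport = AF_IUCV_TRANS_IUCV;
    		if (!iucv->msglimit)
    			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
    	}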
649 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_autobind() local
655 memcpy(iucv->src_user_id, iucv_userid, 8); in iucv_sock_autobind()
656 iucv->transport = AF_IUCV_TRANS_IUCV; in iucv_sock_autobind()
660 __iucv_auto_name(iucv); in iucv_sock_autobind()
663 if (!iucv->msglimit) in iucv_sock_autobind()
664 iucv->msglimit = IUCV_QUEUELEN_DEFAULT; in iucv_sock_autobind()
673 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_path_connect() local
678 low_nmcpy(user_data, iucv->src_name); in afiucv_path_connect()
682 iucv->path = iucv_path_alloc(iucv->msglimit, in afiucv_path_connect()
684 if (!iucv->path) { in afiucv_path_connect()
688 err = pr_iucv->path_connect(iucv->path, &af_iucv_handler, in afiucv_path_connect()
692 iucv_path_free(iucv->path); in afiucv_path_connect()
693 iucv->path = NULL; in afiucv_path_connect()
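The connect helper allocates an IUCV path sized by the socket's message limit and hands it to the IUCV bus; on failure the path is freed and the pointer cleared so a later iucv_sever_path() will not touch it. Sketch, with the return-code translation omitted:

    	iucv->path = iucv_path_alloc(iucv->msglimit,
    				     IUCV_IPRMDATA, GFP_KERNEL);
    	if (!iucv->path) {
    		err = -ENOMEM;
    		goto done;
    	}
    	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
    				    sa->siucv_user_id, NULL, user_data, sk);
    	if (err) {
    		iucv_path_free(iucv->path);
    		iucv->path = NULL;
    		/* map the IUCV return code to an errno here */
    	}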
720 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_connect() local
730 iucv->transport == AF_IUCV_TRANS_HIPER) in iucv_sock_connect()
745 memcpy(iucv->dst_user_id, sa->siucv_user_id, 8); in iucv_sock_connect()
746 memcpy(iucv->dst_name, sa->siucv_name, 8); in iucv_sock_connect()
748 if (iucv->transport == AF_IUCV_TRANS_HIPER) in iucv_sock_connect()
763 if (err && iucv->transport == AF_IUCV_TRANS_IUCV) in iucv_sock_connect()
856 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_getname() local
861 memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8); in iucv_sock_getname()
862 memcpy(siucv->siucv_name, iucv->dst_name, 8); in iucv_sock_getname()
864 memcpy(siucv->siucv_user_id, iucv->src_user_id, 8); in iucv_sock_getname()
865 memcpy(siucv->siucv_name, iucv->src_name, 8); in iucv_sock_getname()
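getname() copies either the peer or the local id pair, which is why the dst_* and src_* matches appear back to back; the selecting branch (on the getname "peer" argument) contains no "iucv" and is absent from the listing:

    	if (peer) {
    		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
    		memcpy(siucv->siucv_name, iucv->dst_name, 8);
    	} else {
    		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
    		memcpy(siucv->siucv_name, iucv->src_name, 8);
    	}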
902 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_sendmsg() local
981 if (iucv->transport == AF_IUCV_TRANS_HIPER) { in iucv_sock_sendmsg()
983 LL_RESERVED_SPACE(iucv->hs_dev); in iucv_sock_sendmsg()
1023 txmsg.tag = iucv->send_tag++; in iucv_sock_sendmsg()
1026 if (iucv->transport == AF_IUCV_TRANS_HIPER) { in iucv_sock_sendmsg()
1027 atomic_inc(&iucv->msg_sent); in iucv_sock_sendmsg()
1030 atomic_dec(&iucv->msg_sent); in iucv_sock_sendmsg()
1034 skb_queue_tail(&iucv->send_skb_q, skb); in iucv_sock_sendmsg()
1035 atomic_inc(&iucv->skbs_in_xmit); in iucv_sock_sendmsg()
1037 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) && in iucv_sock_sendmsg()
1039 err = iucv_send_iprm(iucv->path, &txmsg, skb); in iucv_sock_sendmsg()
1044 atomic_dec(&iucv->skbs_in_xmit); in iucv_sock_sendmsg()
1045 skb_unlink(skb, &iucv->send_skb_q); in iucv_sock_sendmsg()
1052 pr_iucv->path_sever(iucv->path, NULL); in iucv_sock_sendmsg()
1053 atomic_dec(&iucv->skbs_in_xmit); in iucv_sock_sendmsg()
1054 skb_unlink(skb, &iucv->send_skb_q); in iucv_sock_sendmsg()
1072 err = pr_iucv->message_send(iucv->path, &txmsg, in iucv_sock_sendmsg()
1076 err = pr_iucv->message_send(iucv->path, &txmsg, in iucv_sock_sendmsg()
1082 memcpy(user_id, iucv->dst_user_id, 8); in iucv_sock_sendmsg()
1084 memcpy(appl_id, iucv->dst_name, 8); in iucv_sock_sendmsg()
1093 atomic_dec(&iucv->skbs_in_xmit); in iucv_sock_sendmsg()
1094 skb_unlink(skb, &iucv->send_skb_q); in iucv_sock_sendmsg()
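The send path accounts per transport: HiperSockets counts logical messages against the peer window (msg_sent), while classic IUCV queues the skb on send_skb_q and counts it in skbs_in_xmit until the message-complete callback fires. IPRM data (7 bytes or less, when both sides enabled it) completes synchronously and is unaccounted immediately. A condensed sketch; the control flow between the matched lines is assumed from context:

    	txmsg.tag = iucv->send_tag++;

    	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
    		atomic_inc(&iucv->msg_sent);
    		err = afiucv_hs_send(&txmsg, sk, skb, 0);
    		if (err)
    			atomic_dec(&iucv->msg_sent);	/* roll back on failure */
    	} else {
    		skb_queue_tail(&iucv->send_skb_q, skb);
    		atomic_inc(&iucv->skbs_in_xmit);

    		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
    		    skb->len <= 7) {
    			err = iucv_send_iprm(iucv->path, &txmsg, skb);
    			if (err == 0) {
    				/* no completion callback for IPRM data */
    				atomic_dec(&iucv->skbs_in_xmit);
    				skb_unlink(skb, &iucv->send_skb_q);
    				kfree_skb(skb);
    			}
    		} else {
    			err = pr_iucv->message_send(iucv->path, &txmsg,
    						    0, 0, skb->data, skb->len);
    		}
    	}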
1205 struct iucv_sock *iucv = iucv_sk(sk); in iucv_process_message_q() local
1209 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { in iucv_process_message_q()
1216 if (!skb_queue_empty(&iucv->backlog_skb_q)) in iucv_process_message_q()
1225 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_recvmsg() local
1232 skb_queue_empty(&iucv->backlog_skb_q) && in iucv_sock_recvmsg()
1234 list_empty(&iucv->message_q.list)) in iucv_sock_recvmsg()
1295 if (iucv->transport == AF_IUCV_TRANS_HIPER) { in iucv_sock_recvmsg()
1296 atomic_inc(&iucv->msg_recv); in iucv_sock_recvmsg()
1297 if (atomic_read(&iucv->msg_recv) > iucv->msglimit) { in iucv_sock_recvmsg()
1305 spin_lock_bh(&iucv->message_q.lock); in iucv_sock_recvmsg()
1306 rskb = skb_dequeue(&iucv->backlog_skb_q); in iucv_sock_recvmsg()
1311 skb_queue_head(&iucv->backlog_skb_q, in iucv_sock_recvmsg()
1315 rskb = skb_dequeue(&iucv->backlog_skb_q); in iucv_sock_recvmsg()
1317 if (skb_queue_empty(&iucv->backlog_skb_q)) { in iucv_sock_recvmsg()
1318 if (!list_empty(&iucv->message_q.list)) in iucv_sock_recvmsg()
1320 if (atomic_read(&iucv->msg_recv) >= in iucv_sock_recvmsg()
1321 iucv->msglimit / 2) { in iucv_sock_recvmsg()
1329 spin_unlock_bh(&iucv->message_q.lock); in iucv_sock_recvmsg()
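On the receive side each delivered message raises msg_recv; once the backlog has drained and at least half of the announced window has been consumed, a WIN control frame returns credit to the sender. Sketch of that tail of recvmsg(), with the __sock_queue_rcv_skb() loop compressed:

    	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
    		atomic_inc(&iucv->msg_recv);
    		if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
    			WARN_ON(1);		/* peer overran our window */
    			iucv_sock_close(sk);
    			return -EFAULT;
    		}
    	}

    	spin_lock_bh(&iucv->message_q.lock);
    	/* move backlogged skbs into the receive queue until it is full */
    	while ((rskb = skb_dequeue(&iucv->backlog_skb_q))) {
    		if (__sock_queue_rcv_skb(sk, rskb)) {
    			skb_queue_head(&iucv->backlog_skb_q, rskb);
    			break;
    		}
    	}
    	if (skb_queue_empty(&iucv->backlog_skb_q)) {
    		if (!list_empty(&iucv->message_q.list))
    			iucv_process_message_q(sk);
    		if (atomic_read(&iucv->msg_recv) >= iucv->msglimit / 2)
    			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
    	}
    	spin_unlock_bh(&iucv->message_q.lock);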
1397 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_shutdown() local
1420 if (iucv->transport == AF_IUCV_TRANS_IUCV) { in iucv_sock_shutdown()
1423 err = pr_iucv->message_send(iucv->path, &txmsg, in iucv_sock_shutdown()
1444 if ((iucv->transport == AF_IUCV_TRANS_IUCV) && in iucv_sock_shutdown()
1445 iucv->path) { in iucv_sock_shutdown()
1446 err = pr_iucv->path_quiesce(iucv->path, NULL); in iucv_sock_shutdown()
1482 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_setsockopt() local
1501 iucv->flags |= IUCV_IPRMDATA; in iucv_sock_setsockopt()
1503 iucv->flags &= ~IUCV_IPRMDATA; in iucv_sock_setsockopt()
1512 iucv->msglimit = val; in iucv_sock_setsockopt()
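SO_IPRMDATA_MSG toggles a flag; SO_MSGLIMIT may only be changed before the connection exists (states IUCV_OPEN/IUCV_BOUND), because the limit is baked into the path or the SYN window at connect time. Sketch, with the state and range checks assumed from context:

    	case SO_IPRMDATA_MSG:
    		if (val)
    			iucv->flags |= IUCV_IPRMDATA;
    		else
    			iucv->flags &= ~IUCV_IPRMDATA;
    		break;
    	case SO_MSGLIMIT:
    		switch (sk->sk_state) {
    		case IUCV_OPEN:
    		case IUCV_BOUND:
    			if (val < 1 || val > U16_MAX)
    				err = -EINVAL;
    			else
    				iucv->msglimit = val;
    			break;
    		default:
    			err = -EINVAL;
    		}
    		break;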
1532 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_getsockopt() local
1549 val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0; in iucv_sock_getsockopt()
1553 val = (iucv->path != NULL) ? iucv->path->msglim /* connected */ in iucv_sock_getsockopt()
1554 : iucv->msglimit; /* default */ in iucv_sock_getsockopt()
1560 val = (iucv->hs_dev) ? iucv->hs_dev->mtu - in iucv_sock_getsockopt()
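Reading SO_MSGLIMIT returns the negotiated path limit once connected, otherwise the configured default. The SO_MSGSIZE match at 1560 is cut off mid-expression by the matcher; the continuation sketched below (device MTU minus the AF_IUCV transport header and Ethernet header) is completed from context and should be treated as an assumption:

    	case SO_MSGLIMIT:
    		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
    					   : iucv->msglimit;	/* default */
    		break;
    	case SO_MSGSIZE:
    		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
    				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
    				0x7fffffff;	/* classic IUCV: no device cap */
    		break;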
1585 struct iucv_sock *iucv, *niucv; in iucv_callback_connreq() local
1592 iucv = NULL; in iucv_callback_connreq()
1601 iucv = iucv_sk(sk); in iucv_callback_connreq()
1605 if (!iucv) in iucv_callback_connreq()
1612 low_nmcpy(user_data, iucv->src_name); in iucv_callback_connreq()
1613 high_nmcpy(user_data, iucv->dst_name); in iucv_callback_connreq()
1645 memcpy(niucv->src_name, iucv->src_name, 8); in iucv_callback_connreq()
1646 memcpy(niucv->src_user_id, iucv->src_user_id, 8); in iucv_callback_connreq()
1655 niucv->msglimit = iucv->msglimit; in iucv_callback_connreq()
1656 path->msglim = iucv->msglimit; in iucv_callback_connreq()
1686 struct iucv_sock *iucv = iucv_sk(sk); in iucv_callback_rx() local
1696 spin_lock(&iucv->message_q.lock); in iucv_callback_rx()
1698 if (!list_empty(&iucv->message_q.list) || in iucv_callback_rx()
1699 !skb_queue_empty(&iucv->backlog_skb_q)) in iucv_callback_rx()
1721 list_add_tail(&save_msg->list, &iucv->message_q.list); in iucv_callback_rx()
1724 spin_unlock(&iucv->message_q.lock); in iucv_callback_rx()
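iucv_callback_rx() preserves delivery order: if the deferred message list or the backlog is non-empty (or allocation fails), the new message is not processed immediately but parked on message_q for iucv_process_message_q() to pick up later. Sketch, with the rcvbuf check and skb allocation compressed:

    	spin_lock(&iucv->message_q.lock);

    	if (!list_empty(&iucv->message_q.list) ||
    	    !skb_queue_empty(&iucv->backlog_skb_q))
    		goto save_message;	/* keep delivery order */

    	/* ... rcvbuf space check and skb allocation elided ... */
    	iucv_process_message(sk, skb, path, msg);
    	goto out_unlock;

    save_message:
    	save_msg = kzalloc(sizeof(*save_msg), GFP_ATOMIC | GFP_DMA);
    	if (!save_msg)
    		goto out_unlock;
    	save_msg->path = path;
    	save_msg->msg = *msg;
    	list_add_tail(&save_msg->list, &iucv->message_q.list);

    out_unlock:
    	spin_unlock(&iucv->message_q.lock);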
1734 struct iucv_sock *iucv; in iucv_callback_txdone() local
1737 iucv = iucv_sk(sk); in iucv_callback_txdone()
1738 list = &iucv->send_skb_q; in iucv_callback_txdone()
1750 atomic_dec(&iucv->skbs_in_xmit); in iucv_callback_txdone()
1763 if (atomic_read(&iucv->skbs_in_xmit) == 0) { in iucv_callback_txdone()
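The classic-transport completion callback matches the message tag against send_skb_q, unlinks the skb, and decrements skbs_in_xmit; when a closing socket drains to zero it moves to IUCV_CLOSED (the same transition the HiperSockets txnotify callback performs further down). Sketch with the queue walk slightly compressed:

    	list = &iucv->send_skb_q;
    	spin_lock_irqsave(&list->lock, flags);
    	skb_queue_walk(list, list_skb) {
    		if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
    			this = list_skb;
    			break;
    		}
    	}
    	if (this) {
    		atomic_dec(&iucv->skbs_in_xmit);
    		__skb_unlink(this, list);
    	}
    	spin_unlock_irqrestore(&list->lock, flags);
    	kfree_skb(this);

    	if (sk->sk_state == IUCV_CLOSING &&
    	    atomic_read(&iucv->skbs_in_xmit) == 0) {
    		sk->sk_state = IUCV_CLOSED;
    		sk->sk_state_change(sk);
    	}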
1839 struct iucv_sock *iucv, *niucv; in afiucv_hs_callback_syn() local
1842 iucv = iucv_sk(sk); in afiucv_hs_callback_syn()
1843 if (!iucv) { in afiucv_hs_callback_syn()
1868 niucv->msglimit = iucv->msglimit; in afiucv_hs_callback_syn()
1875 memcpy(niucv->src_name, iucv->src_name, 8); in afiucv_hs_callback_syn()
1876 memcpy(niucv->src_user_id, iucv->src_user_id, 8); in afiucv_hs_callback_syn()
1878 niucv->hs_dev = iucv->hs_dev; in afiucv_hs_callback_syn()
1902 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_synack() local
1904 if (!iucv || sk->sk_state != IUCV_BOUND) { in afiucv_hs_callback_synack()
1910 iucv->msglimit_peer = iucv_trans_hdr(skb)->window; in afiucv_hs_callback_synack()
1923 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_synfin() local
1925 if (!iucv || sk->sk_state != IUCV_BOUND) { in afiucv_hs_callback_synfin()
1943 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_fin() local
1946 if (!iucv) { in afiucv_hs_callback_fin()
1966 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_win() local
1968 if (!iucv) in afiucv_hs_callback_win()
1974 atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent); in afiucv_hs_callback_win()
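A received WIN frame carries, in its window field, the number of messages the peer has consumed; subtracting that from msg_sent reopens the send window tested in iucv_below_msglim(). Sketch of the full callback:

    static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
    {
    	struct iucv_sock *iucv = iucv_sk(sk);

    	if (!iucv || sk->sk_state != IUCV_CONNECTED)
    		return NET_RX_SUCCESS;

    	/* peer confirmed this many messages: hand the credit back */
    	atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
    	iucv_sock_wake_msglim(sk);
    	return NET_RX_SUCCESS;
    }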
1984 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_rx() local
1986 if (!iucv) { in afiucv_hs_callback_rx()
2012 spin_lock(&iucv->message_q.lock); in afiucv_hs_callback_rx()
2013 if (skb_queue_empty(&iucv->backlog_skb_q)) { in afiucv_hs_callback_rx()
2016 skb_queue_tail(&iucv->backlog_skb_q, skb); in afiucv_hs_callback_rx()
2019 spin_unlock(&iucv->message_q.lock); in afiucv_hs_callback_rx()
2032 struct iucv_sock *iucv; in afiucv_hs_rcv() local
2048 iucv = NULL; in afiucv_hs_rcv()
2060 iucv = iucv_sk(sk); in afiucv_hs_rcv()
2072 iucv = iucv_sk(sk); in afiucv_hs_rcv()
2078 if (!iucv) in afiucv_hs_rcv()
2135 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_txnotify() local
2142 atomic_dec(&iucv->skbs_in_xmit); in afiucv_hs_callback_txnotify()
2146 atomic_inc(&iucv->pendings); in afiucv_hs_callback_txnotify()
2149 atomic_dec(&iucv->skbs_in_xmit); in afiucv_hs_callback_txnotify()
2150 if (atomic_dec_return(&iucv->pendings) <= 0) in afiucv_hs_callback_txnotify()
2154 atomic_dec(&iucv->skbs_in_xmit); in afiucv_hs_callback_txnotify()
2162 if (atomic_read(&iucv->skbs_in_xmit) == 0) { in afiucv_hs_callback_txnotify()
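The TX-notify callback from the HiperSockets device layer drives both counters consulted in iucv_below_msglim(): OK and DELAYED_OK release an skbs_in_xmit slot (and wake blocked senders), PENDING marks a transmit parked in the device. Sketch over the notification switch, with the closing-state transition at the end:

    	switch (n) {
    	case TX_NOTIFY_OK:
    		atomic_dec(&iucv->skbs_in_xmit);
    		iucv_sock_wake_msglim(sk);
    		break;
    	case TX_NOTIFY_PENDING:
    		atomic_inc(&iucv->pendings);
    		break;
    	case TX_NOTIFY_DELAYED_OK:
    		atomic_dec(&iucv->skbs_in_xmit);
    		if (atomic_dec_return(&iucv->pendings) <= 0)
    			iucv_sock_wake_msglim(sk);
    		break;
    	default:
    		atomic_dec(&iucv->skbs_in_xmit);	/* error notifications */
    		break;
    	}

    	if (sk->sk_state == IUCV_CLOSING &&
    	    atomic_read(&iucv->skbs_in_xmit) == 0) {
    		sk->sk_state = IUCV_CLOSED;
    		sk->sk_state_change(sk);
    	}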
2177 struct iucv_sock *iucv; in afiucv_netdev_event() local
2183 iucv = iucv_sk(sk); in afiucv_netdev_event()
2184 if ((iucv->hs_dev == event_dev) && in afiucv_netdev_event()