/linux-6.1.9/net/atm/
D | atm_misc.c |
  18 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf) in atm_charge()
  33 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { in atm_alloc_charge()
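Both hits are the classic receive-buffer admission check: atm_charge() and atm_alloc_charge() accept an skb only while sk_rmem_alloc stays at or below sk_rcvbuf, and undo the charge otherwise. A minimal userspace sketch of that charge-then-rollback pattern, using a hypothetical rx_account struct in place of the kernel's struct sock (a simplified illustration, not the kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for the sk_rmem_alloc / sk_rcvbuf pair. */
    struct rx_account {
        atomic_int rmem_alloc;   /* bytes currently queued for the receiver */
        int        rcvbuf;       /* admission limit, cf. sk->sk_rcvbuf */
    };

    /* Charge 'truesize' bytes up front; undo the charge if the limit is
     * crossed. Mirrors the shape of atm_charge(): returns true if accepted. */
    static bool rx_charge(struct rx_account *acct, int truesize)
    {
        atomic_fetch_add(&acct->rmem_alloc, truesize);
        if (atomic_load(&acct->rmem_alloc) <= acct->rcvbuf)
            return true;
        atomic_fetch_sub(&acct->rmem_alloc, truesize);  /* roll back, drop */
        return false;
    }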
|
/linux-6.1.9/include/trace/events/ |
D | sock.h |
  80 __field(int, sk_rcvbuf)
  86 __entry->sk_rcvbuf = READ_ONCE(sk->sk_rcvbuf);
  90 __entry->rmem_alloc, __entry->truesize, __entry->sk_rcvbuf)
|
/linux-6.1.9/net/tipc/ |
D | group.h | 61 int *sk_rcvbuf, struct tipc_msg *hdr,
|
D | group.c |
  197 void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf) in tipc_group_join() argument
  209 *sk_rcvbuf = tipc_group_rcvbuf_limit(grp); in tipc_group_join()
  857 int *sk_rcvbuf, in tipc_group_member_evt() argument
  923 *sk_rcvbuf = tipc_group_rcvbuf_limit(grp); in tipc_group_member_evt()
|
/linux-6.1.9/net/x25/ |
D | x25_dev.c | 58 queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)); in x25_receive_data()
|
D | x25_subr.c | 376 if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) && in x25_check_rbuf()
|
D | x25_in.c | 292 (sk->sk_rcvbuf >> 1)) in x25_state3_machine()
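x25_check_rbuf() and the state-3 handler only reopen the receive window once the queued bytes drop below half of sk_rcvbuf, the same ">> 1" threshold the ax25, rose and netrom heartbeat timers below apply. A simplified sketch of that drain-to-half test, with hypothetical names (not the kernel's X.25 state machine):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical receive-side state; 'blocked' models the peer having been
     * told to stop sending (cf. "receiver not ready"). */
    struct rx_flowctl {
        atomic_int rmem_alloc;
        int        rcvbuf;
        bool       blocked;
    };

    /* Re-enable the sender only once the queue has drained below half the
     * configured buffer, mirroring the (sk->sk_rcvbuf >> 1) checks above. */
    static bool rx_may_unblock(struct rx_flowctl *f)
    {
        return f->blocked &&
               atomic_load(&f->rmem_alloc) < (f->rcvbuf >> 1);
    }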
|
/linux-6.1.9/net/ax25/ |
D | ax25_std_timer.c | 63 (sk->sk_rcvbuf >> 1) && in ax25_std_heartbeat_expiry()
|
D | ax25_ds_timer.c | 125 (sk->sk_rcvbuf >> 1) && in ax25_ds_heartbeat_expiry()
|
D | ax25_in.c | 264 sk->sk_rcvbuf) { in ax25_rcv()
|
/linux-6.1.9/include/crypto/ |
D | if_alg.h | 213 return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - in af_alg_rcvbuf()
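af_alg_rcvbuf() derives the remaining receive headroom by rounding sk_rcvbuf down to whole pages (never below one page) and subtracting what is already allocated. A standalone sketch of that arithmetic, assuming a 4096-byte PAGE_SIZE for illustration:

    #include <stdatomic.h>

    #define PAGE_SIZE 4096
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static int max_int(int a, int b) { return a > b ? a : b; }

    /* Headroom left in the receive budget: round the limit down to whole
     * pages, keep at least one page, subtract what is already charged,
     * and clamp at zero. */
    static int rcv_headroom(int rcvbuf, atomic_int *rmem_alloc)
    {
        return max_int(max_int(rcvbuf & PAGE_MASK, PAGE_SIZE) -
                       atomic_load(rmem_alloc), 0);
    }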
|
/linux-6.1.9/net/mptcp/ |
D | sockopt.c |
  101 WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf); in mptcp_sol_socket_sync_intval()
  1268 WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf); in sync_socket_options()
|
D | protocol.c |
  640 sk_rbuf = READ_ONCE(sk->sk_rcvbuf); in __mptcp_move_skbs_from_subflow()
  643 int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); in __mptcp_move_skbs_from_subflow()
  646 WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf); in __mptcp_move_skbs_from_subflow()
  805 ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); in mptcp_data_ready()
  806 sk_rbuf = READ_ONCE(sk->sk_rcvbuf); in mptcp_data_ready()
  1963 if (rcvbuf > sk->sk_rcvbuf) { in mptcp_rcv_space_adjust()
  1967 WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); in mptcp_rcv_space_adjust()
  1980 WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); in mptcp_rcv_space_adjust()
  2738 sk->sk_rcvbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]); in mptcp_init_sock()
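The mptcp hits show the parent socket and its subflows keeping sk_rcvbuf in step: reads go through READ_ONCE, updates through WRITE_ONCE, the parent adopts a larger subflow value when moving skbs (line 646), and mptcp_rcv_space_adjust() pushes a grown value down to every subflow (line 1980). A heavily simplified sketch of the downward propagation, using a hypothetical mp_sock with a fixed subflow array and C11 relaxed atomics standing in for READ_ONCE/WRITE_ONCE:

    #include <stdatomic.h>
    #include <stddef.h>

    #define MAX_SUBFLOWS 8

    /* Hypothetical, much-simplified model of an MPTCP-style parent socket. */
    struct mp_sock {
        atomic_int rcvbuf;                       /* cf. msk's sk->sk_rcvbuf */
        atomic_int subflow_rcvbuf[MAX_SUBFLOWS];
        size_t     nr_subflows;
    };

    /* When autotuning grows the limit, publish it on the parent and mirror
     * it onto every subflow, in the spirit of mptcp_rcv_space_adjust(). */
    static void mp_grow_rcvbuf(struct mp_sock *msk, int new_rcvbuf)
    {
        if (new_rcvbuf <= atomic_load_explicit(&msk->rcvbuf,
                                               memory_order_relaxed))
            return;                              /* only ever grow */

        atomic_store_explicit(&msk->rcvbuf, new_rcvbuf, memory_order_relaxed);
        for (size_t i = 0; i < msk->nr_subflows; i++)
            atomic_store_explicit(&msk->subflow_rcvbuf[i], new_rcvbuf,
                                  memory_order_relaxed);
    }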
|
/linux-6.1.9/net/rose/ |
D | rose_timer.c | 142 if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) && in rose_heartbeat_expiry()
|
D | rose_in.c | 183 (sk->sk_rcvbuf >> 1)) in rose_state3_machine()
|
/linux-6.1.9/net/bluetooth/rfcomm/ |
D | sock.c |
  60 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in rfcomm_sk_data_ready()
  299 sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; in rfcomm_sock_alloc()
  619 if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2)) in rfcomm_sock_recvmsg()
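rfcomm sizes its default buffer from the credit scheme (RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10), stops granting credits in rfcomm_sk_data_ready() once sk_rmem_alloc reaches sk_rcvbuf, and only resumes in rfcomm_sock_recvmsg() after the queue drains to a quarter of the buffer. A toy sketch of that stop/resume hysteresis, with hypothetical names:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct credit_throttle {
        atomic_int rmem_alloc;   /* bytes queued towards the application */
        int        rcvbuf;       /* cf. sk->sk_rcvbuf */
    };

    /* Receive path: stop issuing credits once the buffer is full. */
    static bool throttle_should_stop(struct credit_throttle *t)
    {
        return atomic_load(&t->rmem_alloc) >= t->rcvbuf;
    }

    /* After the application reads data: resume only below 1/4 of the buffer,
     * so the sender is not restarted on every single read. */
    static bool throttle_may_resume(struct credit_throttle *t)
    {
        return atomic_load(&t->rmem_alloc) <= (t->rcvbuf >> 2);
    }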
|
/linux-6.1.9/net/netrom/ |
D | nr_timer.c | 135 if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) && in nr_heartbeat_expiry()
|
/linux-6.1.9/net/vmw_vsock/ |
D | hyperv_transport.c |
  379 rcvbuf = max_t(int, sk->sk_rcvbuf, RINGBUFFER_HVS_RCV_SIZE); in hvs_open_connection()
  454 sk->sk_rcvbuf = RINGBUFFER_HVS_RCV_SIZE; in hvs_sock_init()
|
/linux-6.1.9/net/sctp/ |
D | associola.c |
  162 if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW) in sctp_association_init()
  165 asoc->rwnd = sk->sk_rcvbuf/2; in sctp_association_init()
  1462 (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift), in sctp_peer_needs_update()
  1552 if (rx_count >= asoc->base.sk->sk_rcvbuf) in sctp_assoc_rwnd_decrease()
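sctp_association_init() seeds the advertised receive window from half of sk_rcvbuf, but never below SCTP_DEFAULT_MINWINDOW. A small sketch of that initialisation rule; the 1500-byte floor used here is an assumed stand-in for SCTP_DEFAULT_MINWINDOW:

    /* Assumed stand-in for SCTP_DEFAULT_MINWINDOW, taken as 1500 bytes. */
    #define DEFAULT_MINWINDOW 1500

    /* Initial advertised receive window: half the socket buffer, floored at
     * the protocol minimum, mirroring sctp_association_init(). */
    static unsigned int initial_rwnd(int rcvbuf)
    {
        if ((rcvbuf / 2) < DEFAULT_MINWINDOW)
            return DEFAULT_MINWINDOW;
        return (unsigned int)(rcvbuf / 2);
    }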
|
D | input.c |
  327 if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) in sctp_backlog_rcv()
  342 if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) in sctp_backlog_rcv()
  369 ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)); in sctp_add_backlog()
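All three sctp hits pass sk_rcvbuf to sk_add_backlog() as the limit for packets parked while the socket is owned by a process (the x25_dev.c and net/core/sock.c entries use it the same way): the packet is rejected once queued receive memory plus backlog bytes exceed that limit. A simplified sketch of the admission test, with a hypothetical backlog struct rather than struct sock:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct backlog {
        atomic_int rmem_alloc;   /* memory already charged to the receive queue */
        int        backlog_len;  /* bytes parked for the socket owner to process */
    };

    /* Reject the packet once receive memory plus backlog exceed the limit
     * derived from sk_rcvbuf, in the spirit of sk_rcvqueues_full(). */
    static bool backlog_would_overflow(struct backlog *b, unsigned int limit)
    {
        return (unsigned int)(atomic_load(&b->rmem_alloc) + b->backlog_len) > limit;
    }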
|
D | proc.c | 287 sk->sk_rcvbuf); in sctp_assocs_seq_show()
|
/linux-6.1.9/include/net/ |
D | llc_c_ev.h | 222 (unsigned int)sk->sk_rcvbuf; in llc_conn_space()
|
/linux-6.1.9/net/ipv4/ |
D | tcp_input.c |
  582 if (sk->sk_rcvbuf < rmem2 && in tcp_clamp_window()
  586 WRITE_ONCE(sk->sk_rcvbuf, in tcp_clamp_window()
  589 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) in tcp_clamp_window()
  750 if (rcvbuf > sk->sk_rcvbuf) { in tcp_rcv_space_adjust()
  751 WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); in tcp_rcv_space_adjust()
  4773 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in tcp_try_rmem_schedule()
  5314 if (sum_tiny > sk->sk_rcvbuf >> 3) in tcp_collapse_ofo_queue()
  5349 goal = sk->sk_rcvbuf >> 3; in tcp_prune_ofo_queue()
  5358 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && in tcp_prune_ofo_queue()
  5361 goal = sk->sk_rcvbuf >> 3; in tcp_prune_ofo_queue()
  [all …]
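tcp_rcv_space_adjust() only ever raises sk_rcvbuf (lines 750/751): the candidate value is adopted solely when it exceeds the current limit, after being capped by the administrative maximum, while tcp_clamp_window() and tcp_prune_ofo_queue() use fractions of sk_rcvbuf (>> 3) as pruning goals. A sketch of the grow-only autotuning step, with the maximum passed in explicitly instead of read from sysctl_tcp_rmem[2]:

    static int min_int(int a, int b) { return a < b ? a : b; }

    /* Grow-only receive-buffer autotuning: clamp the measured demand to the
     * administrative maximum and adopt it only if it is an increase over the
     * current limit. */
    static int autotune_rcvbuf(int cur_rcvbuf, int measured_demand, int rmem_max)
    {
        int candidate = min_int(measured_demand, rmem_max);

        return candidate > cur_rcvbuf ? candidate : cur_rcvbuf;
    }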
|
/linux-6.1.9/net/netlink/ |
D | af_netlink.c |
  1235 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in netlink_attachskb()
  1249 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in netlink_attachskb()
  1404 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && in netlink_broadcast_deliver()
  1408 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); in netlink_broadcast_deliver()
  2011 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { in netlink_recvmsg()
  2239 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in netlink_dump()
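netlink_broadcast_deliver() accepts a broadcast skb only while sk_rmem_alloc is within sk_rcvbuf and the socket is not already marked congested, and its return value flags congestion once the queue passes half the buffer. A compact sketch of that decision, with a hypothetical deliver_result enum standing in for the kernel's integer return codes:

    #include <stdatomic.h>

    enum deliver_result {
        DELIVER_DROP      = -1,
        DELIVER_OK        = 0,
        DELIVER_CONGESTED = 1,
    };

    struct nl_rx {
        atomic_int rmem_alloc;
        int        rcvbuf;
        int        congested;   /* sticky flag, cleared when the reader drains */
    };

    /* Accept the message only while under the limit and not already congested;
     * tell the caller to mark congestion once the queue exceeds half the buffer. */
    static enum deliver_result broadcast_deliver(struct nl_rx *rx, int truesize)
    {
        if (atomic_load(&rx->rmem_alloc) > rx->rcvbuf || rx->congested)
            return DELIVER_DROP;

        atomic_fetch_add(&rx->rmem_alloc, truesize);
        return atomic_load(&rx->rmem_alloc) > (rx->rcvbuf >> 1) ?
               DELIVER_CONGESTED : DELIVER_OK;
    }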
|
/linux-6.1.9/net/core/ |
D | sock.c |
  481 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { in __sock_queue_rcv_skb()
  551 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { in __sk_receive_skb()
  568 } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) { in __sk_receive_skb()
  966 WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF)); in __sock_set_rcvbuf()
  1631 v.val = sk->sk_rcvbuf; in sk_getsockopt()
  3368 sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default); in sock_init_data()
  3687 mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf); in sk_get_meminfo()
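__sock_set_rcvbuf() (line 966) stores twice the user-supplied SO_RCVBUF value, floored at SOCK_MIN_RCVBUF, because sk_rmem_alloc is charged with skb truesize (payload plus metadata) rather than payload alone; sock_init_data() seeds the field from sysctl_rmem_default, and getsockopt simply reports the stored, already doubled, value. A sketch of the setter's arithmetic, with the minimum passed in rather than taken from SOCK_MIN_RCVBUF:

    #include <limits.h>

    static int max_int(int a, int b) { return a > b ? a : b; }

    /* SO_RCVBUF semantics: the stored limit is double what the caller asked
     * for (skbs are charged by truesize, not payload), never below the
     * minimum, and the doubling must not overflow int. */
    static int effective_rcvbuf(int requested, int min_rcvbuf)
    {
        if (requested > INT_MAX / 2)
            requested = INT_MAX / 2;
        return max_int(requested * 2, min_rcvbuf);
    }

This doubling is why getsockopt(SO_RCVBUF) on Linux returns twice the value previously passed to setsockopt.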
|