/linux-6.6.21/net/atm/
  atm_misc.c
    18: if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)  in atm_charge()
    33: if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {  in atm_alloc_charge()

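Both hits gate reception on the generic socket accounting check: a packet is accepted only while the memory already charged to the socket (sk_rmem_alloc) has not exceeded sk_rcvbuf. Below is a minimal userspace model of that check with stand-in names (rx_sock, try_charge are illustrative, not kernel API); it simplifies the real atm_charge(), which charges first and backs the charge out when the test fails.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the two fields the kernel check reads:
     * sk_rmem_alloc (bytes already queued) and sk_rcvbuf (the limit). */
    struct rx_sock {
        atomic_int rx_accounted;   /* ~ sk->sk_rmem_alloc */
        int        rx_limit;       /* ~ sk->sk_rcvbuf     */
    };

    /* ~ atm_charge(): accept a buffer only while accounted memory is still
     * within the limit; on success its size is added to the account. */
    static bool try_charge(struct rx_sock *sk, int truesize)
    {
        if (atomic_load(&sk->rx_accounted) <= sk->rx_limit) {
            atomic_fetch_add(&sk->rx_accounted, truesize);
            return true;
        }
        return false;               /* caller drops the packet */
    }

    int main(void)
    {
        struct rx_sock sk = { .rx_accounted = 0, .rx_limit = 4096 };

        printf("first 2 KiB charged: %d\n", try_charge(&sk, 2048));  /* 1 */
        printf("next 4 KiB charged:  %d\n", try_charge(&sk, 4096));  /* 1 */
        printf("over limit, dropped: %d\n", try_charge(&sk, 1024));  /* 0 */
        return 0;
    }
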
/linux-6.6.21/include/trace/events/
  sock.h
    80: __field(int, sk_rcvbuf)
    86: __entry->sk_rcvbuf = READ_ONCE(sk->sk_rcvbuf);
    90: __entry->rmem_alloc, __entry->truesize, __entry->sk_rcvbuf)

/linux-6.6.21/net/tipc/
  group.h
    61: int *sk_rcvbuf, struct tipc_msg *hdr,
  group.c
    197: void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf)  in tipc_group_join() argument
    209: *sk_rcvbuf = tipc_group_rcvbuf_limit(grp);  in tipc_group_join()
    857: int *sk_rcvbuf,  in tipc_group_member_evt() argument
    923: *sk_rcvbuf = tipc_group_rcvbuf_limit(grp);  in tipc_group_member_evt()

/linux-6.6.21/net/x25/
  x25_dev.c
    58: queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));  in x25_receive_data()
  x25_subr.c
    376: if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) &&  in x25_check_rbuf()
  x25_in.c
    292: (sk->sk_rcvbuf >> 1))  in x25_state3_machine()

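The x25_check_rbuf() and x25_state3_machine() hits, like the ax25, rose and netrom hits further down, share one flow-control idiom: the receiver stays throttled until the queued receive memory drops below half of sk_rcvbuf, and only then tells the peer to resume (an RR in the kernel). A hedged sketch of that hysteresis with invented names (half_buffer_ready, throttled), not the kernel's per-protocol state machines:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct rx_sock {
        atomic_int rx_accounted;   /* ~ sk->sk_rmem_alloc */
        int        rx_limit;       /* ~ sk->sk_rcvbuf     */
        bool       throttled;      /* ~ the protocol's "condition" flag */
    };

    /* ~ x25_check_rbuf() / the heartbeat timers: once the application has
     * drained the queue below half the buffer, clear the throttle so the
     * peer may transmit again. */
    static void half_buffer_ready(struct rx_sock *sk)
    {
        if (atomic_load(&sk->rx_accounted) < (sk->rx_limit >> 1) && sk->throttled) {
            sk->throttled = false;
            puts("below sk_rcvbuf/2: un-throttle peer");
        }
    }

    int main(void)
    {
        struct rx_sock sk = { .rx_accounted = 6000, .rx_limit = 8192,
                              .throttled = true };

        half_buffer_ready(&sk);             /* still above half: nothing happens */
        atomic_store(&sk.rx_accounted, 3000);
        half_buffer_ready(&sk);             /* below half: resume */
        return 0;
    }
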
/linux-6.6.21/net/ax25/
  ax25_std_timer.c
    63: (sk->sk_rcvbuf >> 1) &&  in ax25_std_heartbeat_expiry()
  ax25_ds_timer.c
    125: (sk->sk_rcvbuf >> 1) &&  in ax25_ds_heartbeat_expiry()
  ax25_in.c
    264: sk->sk_rcvbuf) {  in ax25_rcv()

/linux-6.6.21/include/crypto/
  if_alg.h
    213: return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -  in af_alg_rcvbuf()

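af_alg_rcvbuf() turns sk_rcvbuf into "bytes still available": the limit is rounded down to a page boundary (but never below one page) and the memory already charged to the socket is subtracted. The rest of the expression is truncated in the hit above, so the sketch below assumes it subtracts sk_rmem_alloc and clamps the result at zero; the fixed 4 KiB page size is also an assumption, the kernel takes PAGE_MASK/PAGE_SIZE from the architecture.

    #include <stdio.h>

    #define PAGE_SIZE 4096                 /* assumption: 4 KiB pages */
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static int max_int(int a, int b) { return a > b ? a : b; }

    /* ~ af_alg_rcvbuf(): space left before the receive limit is hit,
     * never reported as negative (assumed clamp). */
    static int alg_rcvbuf_space(int sk_rcvbuf, int sk_rmem_alloc)
    {
        return max_int(max_int(sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - sk_rmem_alloc, 0);
    }

    int main(void)
    {
        printf("%d\n", alg_rcvbuf_space(212992, 8192));  /* 204800 */
        printf("%d\n", alg_rcvbuf_space(1000, 0));       /* tiny limit rounds up to one page */
        printf("%d\n", alg_rcvbuf_space(8192, 16384));   /* already over the limit: 0 */
        return 0;
    }
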
/linux-6.6.21/net/rose/
  rose_timer.c
    142: if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&  in rose_heartbeat_expiry()
  rose_in.c
    183: (sk->sk_rcvbuf >> 1))  in rose_state3_machine()

/linux-6.6.21/net/mptcp/
  sockopt.c
    103: WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);  in mptcp_sol_socket_sync_intval()
    1427: WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);  in sync_socket_options()
  protocol.c
    647: sk_rbuf = READ_ONCE(sk->sk_rcvbuf);  in __mptcp_move_skbs_from_subflow()
    650: int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);  in __mptcp_move_skbs_from_subflow()
    653: WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf);  in __mptcp_move_skbs_from_subflow()
    853: ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);  in mptcp_data_ready()
    854: sk_rbuf = READ_ONCE(sk->sk_rcvbuf);  in mptcp_data_ready()
    2022: if (rcvbuf > sk->sk_rcvbuf) {  in mptcp_rcv_space_adjust()
    2026: WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);  in mptcp_rcv_space_adjust()
    2039: WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);  in mptcp_rcv_space_adjust()
    2822: sk->sk_rcvbuf = READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]);  in mptcp_init_sock()

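A pattern that recurs in the MPTCP hits: sk_rcvbuf is accessed through READ_ONCE()/WRITE_ONCE() because readers such as the data-ready path can race with the autotuning and sockopt paths, and the value chosen for the MPTCP-level socket is mirrored into each subflow socket. A rough userspace model of that propagation, using C11 relaxed atomics in place of the kernel's READ_ONCE/WRITE_ONCE and invented struct names (subflow_sock, mptcp_sock, sync_rcvbuf):

    #include <stdatomic.h>
    #include <stdio.h>

    struct subflow_sock { atomic_int rcvbuf; };          /* ~ ssk->sk_rcvbuf */

    struct mptcp_sock {                                   /* ~ the parent sk */
        atomic_int           rcvbuf;                      /* ~ sk->sk_rcvbuf */
        struct subflow_sock *subflows[4];
        int                  nr_subflows;
    };

    /* ~ mptcp_rcv_space_adjust(): grow the parent limit when autotuning
     * asks for more, then mirror the new value into every subflow. */
    static void sync_rcvbuf(struct mptcp_sock *msk, int wanted)
    {
        if (wanted > atomic_load_explicit(&msk->rcvbuf, memory_order_relaxed)) {
            atomic_store_explicit(&msk->rcvbuf, wanted, memory_order_relaxed);
            for (int i = 0; i < msk->nr_subflows; i++)
                atomic_store_explicit(&msk->subflows[i]->rcvbuf, wanted,
                                      memory_order_relaxed);
        }
    }

    int main(void)
    {
        struct subflow_sock a = { .rcvbuf = 131072 }, b = { .rcvbuf = 131072 };
        struct mptcp_sock msk = { .rcvbuf = 131072,
                                  .subflows = { &a, &b }, .nr_subflows = 2 };

        sync_rcvbuf(&msk, 262144);
        printf("msk=%d a=%d b=%d\n", atomic_load(&msk.rcvbuf),
               atomic_load(&a.rcvbuf), atomic_load(&b.rcvbuf));
        return 0;
    }
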
/linux-6.6.21/net/bluetooth/rfcomm/
  sock.c
    60: if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)  in rfcomm_sk_data_ready()
    297: sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;  in rfcomm_sock_alloc()
    612: if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))  in rfcomm_sock_recvmsg()

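RFCOMM sizes sk_rcvbuf from its credit scheme at socket allocation, treats the socket as full once sk_rmem_alloc reaches the buffer, and resumes only after recvmsg() has drained the queue to a quarter of sk_rcvbuf. A sketch of those thresholds; the constants below are placeholders, not the kernel's RFCOMM_MAX_CREDITS/RFCOMM_DEFAULT_MTU values, and the credit handling itself is only gestured at in the comments.

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder sizing in the spirit of
     * sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10. */
    #define DEMO_MAX_CREDITS  32
    #define DEMO_MTU          128

    struct demo_sock {
        int rcvbuf;        /* ~ sk->sk_rcvbuf     */
        int rmem_alloc;    /* ~ sk->sk_rmem_alloc */
    };

    /* ~ rfcomm_sk_data_ready(): socket counts as full at the limit. */
    static bool rx_full(const struct demo_sock *sk)
    {
        return sk->rmem_alloc >= sk->rcvbuf;
    }

    /* ~ rfcomm_sock_recvmsg(): only resume the peer below 1/4 of the buffer. */
    static bool can_resume(const struct demo_sock *sk)
    {
        return sk->rmem_alloc <= (sk->rcvbuf >> 2);
    }

    int main(void)
    {
        struct demo_sock sk = { .rcvbuf = DEMO_MAX_CREDITS * DEMO_MTU * 10 };

        sk.rmem_alloc = sk.rcvbuf;          /* queue filled up */
        printf("full=%d resume=%d\n", rx_full(&sk), can_resume(&sk));

        sk.rmem_alloc = sk.rcvbuf / 8;      /* mostly drained by recvmsg() */
        printf("full=%d resume=%d\n", rx_full(&sk), can_resume(&sk));
        return 0;
    }
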
/linux-6.6.21/net/netrom/
  nr_timer.c
    135: if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&  in nr_heartbeat_expiry()

/linux-6.6.21/net/vmw_vsock/
  hyperv_transport.c
    379: rcvbuf = max_t(int, sk->sk_rcvbuf, RINGBUFFER_HVS_RCV_SIZE);  in hvs_open_connection()
    454: sk->sk_rcvbuf = RINGBUFFER_HVS_RCV_SIZE;  in hvs_sock_init()

/linux-6.6.21/net/sctp/
  associola.c
    162: if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)  in sctp_association_init()
    165: asoc->rwnd = sk->sk_rcvbuf/2;  in sctp_association_init()
    1459: (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),  in sctp_peer_needs_update()
    1549: if (rx_count >= asoc->base.sk->sk_rcvbuf)  in sctp_assoc_rwnd_decrease()
  input.c
    312: if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))  in sctp_backlog_rcv()
    327: if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))  in sctp_backlog_rcv()
    354: ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));  in sctp_add_backlog()
  proc.c
    287: sk->sk_rcvbuf);  in sctp_assocs_seq_show()

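The associola.c hits show SCTP deriving the association's initial advertised receive window from the socket buffer: half of sk_rcvbuf, floored at SCTP_DEFAULT_MINWINDOW. A small sketch of that clamp; the 1500-byte minimum used here is only an illustrative stand-in for SCTP_DEFAULT_MINWINDOW.

    #include <stdio.h>

    #define DEMO_MINWINDOW 1500   /* stand-in for SCTP_DEFAULT_MINWINDOW */

    /* ~ sctp_association_init(): rwnd starts at sk_rcvbuf/2, never below
     * the smallest window the stack is willing to advertise. */
    static unsigned int initial_rwnd(int sk_rcvbuf)
    {
        if ((sk_rcvbuf / 2) < DEMO_MINWINDOW)
            return DEMO_MINWINDOW;
        return sk_rcvbuf / 2;
    }

    int main(void)
    {
        printf("rcvbuf=212992 -> rwnd=%u\n", initial_rwnd(212992));  /* 106496 */
        printf("rcvbuf=2048   -> rwnd=%u\n", initial_rwnd(2048));    /* clamped to 1500 */
        return 0;
    }
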
/linux-6.6.21/include/net/
  llc_c_ev.h
    221: (unsigned int)sk->sk_rcvbuf;  in llc_conn_space()

/linux-6.6.21/net/ipv4/
  tcp_input.c
    604: if (sk->sk_rcvbuf < rmem2 &&  in tcp_clamp_window()
    608: WRITE_ONCE(sk->sk_rcvbuf,  in tcp_clamp_window()
    611: if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)  in tcp_clamp_window()
    767: if (rcvbuf > sk->sk_rcvbuf) {  in tcp_rcv_space_adjust()
    768: WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);  in tcp_rcv_space_adjust()
    4807: if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||  in tcp_try_rmem_schedule()
    5354: if (sum_tiny > sk->sk_rcvbuf >> 3)  in tcp_collapse_ofo_queue()
    5391: goal = sk->sk_rcvbuf >> 3;  in tcp_prune_ofo_queue()
    5407: if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&  in tcp_prune_ofo_queue()
    5410: goal = sk->sk_rcvbuf >> 3;  in tcp_prune_ofo_queue()
    [all …]

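Two roles are visible in the tcp_input.c hits: tcp_rcv_space_adjust() only ever grows sk_rcvbuf, while tcp_clamp_window() and the out-of-order queue pruning paths use the current value, or fractions of it (>> 3), as memory-pressure thresholds. Below is a simplified grow-only update in the spirit of the first role; the field names, the locked flag and the explicit sysctl cap parameter are stand-ins, not the kernel's exact logic.

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_sock {
        int  rcvbuf;          /* ~ sk->sk_rcvbuf                            */
        bool rcvbuf_locked;   /* ~ userlocks & SOCK_RCVBUF_LOCK (SO_RCVBUF) */
    };

    /* ~ tcp_rcv_space_adjust(): autotuning proposes a buffer size from the
     * measured receive rate; it is applied only if it grows the buffer,
     * never shrinks it, and is capped by a sysctl-style maximum. */
    static void rcv_space_adjust(struct demo_sock *sk, int wanted, int sysctl_max)
    {
        if (sk->rcvbuf_locked)
            return;                       /* the application fixed the size */
        if (wanted > sysctl_max)
            wanted = sysctl_max;
        if (wanted > sk->rcvbuf)
            sk->rcvbuf = wanted;          /* kernel uses WRITE_ONCE() here */
    }

    int main(void)
    {
        struct demo_sock sk = { .rcvbuf = 131072 };

        rcv_space_adjust(&sk, 262144, 6291456);
        printf("grown to %d\n", sk.rcvbuf);            /* 262144 */

        rcv_space_adjust(&sk, 65536, 6291456);
        printf("still %d (never shrinks)\n", sk.rcvbuf);

        rcv_space_adjust(&sk, 8388608, 6291456);
        printf("capped at %d\n", sk.rcvbuf);           /* 6291456 */
        return 0;
    }
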
/linux-6.6.21/net/netlink/
  af_netlink.c
    1249: if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||  in netlink_attachskb()
    1263: if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||  in netlink_attachskb()
    1416: if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&  in netlink_broadcast_deliver()
    1420: return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);  in netlink_broadcast_deliver()
    1991: atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {  in netlink_recvmsg()
    2219: if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)  in netlink_dump()

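netlink_broadcast_deliver() queues a broadcast skb only while sk_rmem_alloc is within sk_rcvbuf, and its return value additionally reports whether the queue has passed half the buffer, giving the broadcast path an early congestion hint. A hedged sketch of that double threshold with invented names (demo_sock, broadcast_deliver); it omits the per-socket congestion bit the kernel also checks.

    #include <stdatomic.h>
    #include <stdio.h>

    struct demo_sock {
        atomic_int rmem_alloc;   /* ~ sk->sk_rmem_alloc */
        int        rcvbuf;       /* ~ sk->sk_rcvbuf     */
    };

    /* ~ netlink_broadcast_deliver(): -1 if the message cannot be queued,
     * otherwise queue it and report (0/1) whether the socket is now past
     * half of its receive buffer. */
    static int broadcast_deliver(struct demo_sock *sk, int truesize)
    {
        if (atomic_load(&sk->rmem_alloc) <= sk->rcvbuf) {
            atomic_fetch_add(&sk->rmem_alloc, truesize);
            return atomic_load(&sk->rmem_alloc) > (sk->rcvbuf >> 1);
        }
        return -1;
    }

    int main(void)
    {
        struct demo_sock sk = { .rmem_alloc = 0, .rcvbuf = 8192 };

        printf("%d\n", broadcast_deliver(&sk, 2048));  /* 0: queued, room left   */
        printf("%d\n", broadcast_deliver(&sk, 4096));  /* 1: queued, past half   */
        atomic_store(&sk.rmem_alloc, 9000);
        printf("%d\n", broadcast_deliver(&sk, 1024));  /* -1: over the limit     */
        return 0;
    }
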
/linux-6.6.21/net/core/
  sock.c
    488: if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {  in __sock_queue_rcv_skb()
    558: if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {  in __sk_receive_skb()
    575: } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {  in __sk_receive_skb()
    981: WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF));  in __sock_set_rcvbuf()
    1649: v.val = READ_ONCE(sk->sk_rcvbuf);  in sk_getsockopt()
    3426: sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default);  in sock_init_data_uid()
    3752: mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);  in sk_get_meminfo()

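The sock.c hits cover both ends of the sk_rcvbuf lifecycle: the value starts from sysctl_rmem_default in sock_init_data_uid(), and __sock_set_rcvbuf() implements the long-standing SO_RCVBUF behaviour of doubling the user-supplied size (to leave room for per-skb bookkeeping overhead) while enforcing a minimum. A sketch of that setter; the 2048-byte floor is only a stand-in for SOCK_MIN_RCVBUF, and the kernel additionally clamps the requested value before doubling.

    #include <stdio.h>

    #define DEMO_MIN_RCVBUF 2048   /* stand-in for SOCK_MIN_RCVBUF */

    static int max_int(int a, int b) { return a > b ? a : b; }

    /* ~ __sock_set_rcvbuf(): SO_RCVBUF doubles the requested size so that
     * half can be consumed by per-skb overhead, and never lets the result
     * drop below the minimum the stack needs to make progress. */
    static int set_rcvbuf(int requested)
    {
        return max_int(requested * 2, DEMO_MIN_RCVBUF);  /* kernel: WRITE_ONCE() */
    }

    int main(void)
    {
        printf("setsockopt(8192) -> sk_rcvbuf %d\n", set_rcvbuf(8192));  /* 16384 */
        printf("setsockopt(100)  -> sk_rcvbuf %d\n", set_rcvbuf(100));   /* floored */
        return 0;
    }

This doubling is also why getsockopt(SO_RCVBUF), which reads sk_rcvbuf directly in sk_getsockopt(), reports twice the value a program passed to setsockopt().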