/linux-6.6.21/net/rxrpc/ |
D | call_object.c |
    46   void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)   in rxrpc_poke_call() argument
    48   struct rxrpc_local *local = call->local;   in rxrpc_poke_call()
    51   if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) {   in rxrpc_poke_call()
    53   busy = !list_empty(&call->attend_link);   in rxrpc_poke_call()
    54   trace_rxrpc_poke_call(call, busy, what);   in rxrpc_poke_call()
    55   if (!busy && !rxrpc_try_get_call(call, rxrpc_call_get_poke))   in rxrpc_poke_call()
    58   list_add_tail(&call->attend_link, &local->call_attend_q);   in rxrpc_poke_call()
    68   struct rxrpc_call *call = from_timer(call, t, timer);   in rxrpc_call_timer_expired() local
    70   _enter("%d", call->debug_id);   in rxrpc_call_timer_expired()
    72   if (!__rxrpc_call_is_complete(call)) {   in rxrpc_call_timer_expired()
    [all …]
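The rxrpc_poke_call() matches show a recurring pattern: a call is appended to its local endpoint's attend queue only if it is not already queued, and the queue takes a reference before enqueueing. A minimal userspace sketch of that idea follows; the names (call_poke, attend_q) and the flag standing in for the list_empty() check are illustrative, not the kernel's API.

    /* Minimal sketch of the "poke onto an attend queue" pattern, assuming a
     * simplified call object; this is not the rxrpc implementation itself. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct call {
            int refs;          /* simplified reference count              */
            bool queued;       /* stands in for !list_empty(attend_link)  */
            struct call *next; /* singly linked attend queue              */
    };

    static struct call *attend_q;                 /* head of the attend queue */
    static pthread_mutex_t attend_lock = PTHREAD_MUTEX_INITIALIZER;

    static void call_poke(struct call *call)
    {
            pthread_mutex_lock(&attend_lock);
            if (!call->queued) {
                    call->refs++;          /* the queue holds a reference       */
                    call->queued = true;
                    call->next = attend_q; /* push; the kernel appends to tail  */
                    attend_q = call;
            }
            pthread_mutex_unlock(&attend_lock);
    }

    int main(void)
    {
            struct call c = { .refs = 1 };

            call_poke(&c);
            call_poke(&c); /* second poke is a no-op: already queued */
            printf("refs=%d queued=%d\n", c.refs, c.queued);
            return 0;
    }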
|
D | call_event.c |
    23   void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,   in rxrpc_propose_ping() argument
    29   if (time_before(ping_at, call->ping_at)) {   in rxrpc_propose_ping()
    30   WRITE_ONCE(call->ping_at, ping_at);   in rxrpc_propose_ping()
    31   rxrpc_reduce_call_timer(call, ping_at, now,   in rxrpc_propose_ping()
    33   trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);   in rxrpc_propose_ping()
    40   void rxrpc_propose_delay_ACK(struct rxrpc_call *call, rxrpc_serial_t serial,   in rxrpc_propose_delay_ACK() argument
    48   if (call->peer->srtt_us != 0)   in rxrpc_propose_delay_ACK()
    49   ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);   in rxrpc_propose_delay_ACK()
    53   ack_at += READ_ONCE(call->tx_backoff);   in rxrpc_propose_delay_ACK()
    55   if (time_before(ack_at, call->delay_ack_at)) {   in rxrpc_propose_delay_ACK()
    [all …]
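Two details stand out in the call_event.c matches: the peer's smoothed RTT appears to be stored scaled by 8 (hence the >> 3), and a proposed expiry only ever pulls the timer earlier. A small sketch of that deadline computation, assuming abstract tick units and made-up helper names:

    /* Sketch of "compute a delayed-ACK deadline and only move the timer
     * earlier"; simplified, not the rxrpc code. Units are abstract ticks. */
    #include <stdio.h>

    #define DEFAULT_ACK_DELAY 2 /* fallback when no RTT estimate exists */

    struct peer_est {
            unsigned long srtt_us; /* smoothed RTT in usec, scaled by 8 (TCP-style) */
    };

    struct call_timers {
            unsigned long delay_ack_at; /* absolute deadline, in ticks          */
            unsigned long tx_backoff;   /* extra delay after transmit errors    */
    };

    static unsigned long usecs_to_ticks(unsigned long us)
    {
            return us / 1000; /* pretend one tick == 1ms */
    }

    static void propose_delay_ack(struct call_timers *t, const struct peer_est *p,
                                  unsigned long now)
    {
            unsigned long ack_at = DEFAULT_ACK_DELAY;

            if (p->srtt_us != 0)
                    ack_at = usecs_to_ticks(p->srtt_us >> 3); /* unscale srtt */
            ack_at += t->tx_backoff;
            ack_at += now;

            if (ack_at < t->delay_ack_at) /* only ever bring the deadline forward */
                    t->delay_ack_at = ack_at;
    }

    int main(void)
    {
            struct peer_est p = { .srtt_us = 40000UL << 3 };  /* srtt = 40ms */
            struct call_timers t = { .delay_ack_at = ~0UL };

            propose_delay_ack(&t, &p, 100);
            printf("delay_ack_at=%lu\n", t.delay_ack_at);     /* 100 + 40 */
            return 0;
    }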
|
D | input.c |
    12   static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq,   in rxrpc_proto_abort() argument
    15   rxrpc_abort_call(call, seq, RX_PROTOCOL_ERROR, -EBADMSG, why);   in rxrpc_proto_abort()
    21   static void rxrpc_congestion_management(struct rxrpc_call *call,   in rxrpc_congestion_management() argument
    27   unsigned int cumulative_acks = call->cong_cumul_acks;   in rxrpc_congestion_management()
    28   unsigned int cwnd = call->cong_cwnd;   in rxrpc_congestion_management()
    32   (call->tx_top - call->acks_hard_ack) - summary->nr_acks;   in rxrpc_congestion_management()
    34   if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {   in rxrpc_congestion_management()
    36   call->cong_ssthresh = max_t(unsigned int,   in rxrpc_congestion_management()
    39   if (cwnd >= call->cong_ssthresh &&   in rxrpc_congestion_management()
    40   call->cong_mode == RXRPC_CALL_SLOW_START) {   in rxrpc_congestion_management()
    [all …]
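The rxrpc_congestion_management() matches have the usual TCP-like shape: a retransmission timeout collapses the slow-start threshold, the window grows per ACK below ssthresh and roughly once per window above it. A compact sketch of that state machine under those standard rules; the field names are illustrative and this is not the kernel's exact algorithm:

    /* Simplified slow-start / congestion-avoidance update, in the spirit of
     * the rxrpc_congestion_management() matches; not the kernel algorithm. */
    #include <stdio.h>

    struct cong {
            unsigned int cwnd;      /* congestion window, in packets */
            unsigned int ssthresh;  /* slow-start threshold          */
            unsigned int acks_in_window;
    };

    static void on_retransmit_timeout(struct cong *c)
    {
            /* Collapse the threshold to half the window, keep a sane floor. */
            c->ssthresh = c->cwnd / 2 > 2 ? c->cwnd / 2 : 2;
            c->cwnd = 1;
    }

    static void on_ack(struct cong *c, unsigned int newly_acked)
    {
            if (c->cwnd < c->ssthresh) {
                    c->cwnd += newly_acked;             /* slow start: exponential */
            } else {
                    c->acks_in_window += newly_acked;
                    if (c->acks_in_window >= c->cwnd) { /* ~1 packet per RTT */
                            c->acks_in_window -= c->cwnd;
                            c->cwnd++;
                    }
            }
    }

    int main(void)
    {
            struct cong c = { .cwnd = 10, .ssthresh = 8 };

            on_retransmit_timeout(&c);
            for (int i = 0; i < 20; i++)
                    on_ack(&c, 1);
            printf("cwnd=%u ssthresh=%u\n", c.cwnd, c.ssthresh);
            return 0;
    }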
|
D | sendmsg.c |
    23   bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,   in rxrpc_propose_abort() argument
    26   _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);   in rxrpc_propose_abort()
    28   if (!call->send_abort && !rxrpc_call_is_complete(call)) {   in rxrpc_propose_abort()
    29   call->send_abort_why = why;   in rxrpc_propose_abort()
    30   call->send_abort_err = error;   in rxrpc_propose_abort()
    31   call->send_abort_seq = 0;   in rxrpc_propose_abort()
    33   smp_store_release(&call->send_abort, abort_code);   in rxrpc_propose_abort()
    34   rxrpc_poke_call(call, rxrpc_call_poke_abort);   in rxrpc_propose_abort()
    45   static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)   in rxrpc_wait_to_be_connected() argument
    50   _enter("%d", call->debug_id);   in rxrpc_wait_to_be_connected()
    [all …]
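rxrpc_propose_abort() fills in the abort reason and error first and only then publishes the abort code with smp_store_release(), so a reader that observes the code (via an acquire load) also sees the fields written before it. A userspace C11 sketch of that publish pattern, with invented field names:

    /* Publish-with-release sketch mirroring the smp_store_release() usage in
     * rxrpc_propose_abort(); simplified, not the kernel code. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct abort_req {
            int err;                /* plain fields written before publishing */
            int why;
            _Atomic int abort_code; /* 0 means "no abort proposed yet"        */
    };

    static void propose_abort(struct abort_req *a, int code, int err, int why)
    {
            if (atomic_load_explicit(&a->abort_code, memory_order_relaxed))
                    return;                  /* an abort is already pending */
            a->err = err;
            a->why = why;
            /* Release: the stores above become visible before the code does. */
            atomic_store_explicit(&a->abort_code, code, memory_order_release);
    }

    static void consume_abort(struct abort_req *a)
    {
            int code = atomic_load_explicit(&a->abort_code, memory_order_acquire);

            if (code) /* acquire pairs with the release store above */
                    printf("abort %d (err=%d why=%d)\n", code, a->err, a->why);
    }

    int main(void)
    {
            struct abort_req a = { 0 };

            propose_abort(&a, 17, -110, 1);
            consume_abort(&a);
            return 0;
    }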
|
D | rxperf.c |
    65   int (*deliver)(struct rxperf_call *call);
    74   static int rxperf_deliver_param_block(struct rxperf_call *call);
    75   static int rxperf_deliver_request(struct rxperf_call *call);
    76   static int rxperf_process_call(struct rxperf_call *call);
    82   static inline void rxperf_set_call_state(struct rxperf_call *call,   in rxperf_set_call_state() argument
    85   call->state = to;   in rxperf_set_call_state()
    88   static inline void rxperf_set_call_complete(struct rxperf_call *call,   in rxperf_set_call_complete() argument
    91   if (call->state != RXPERF_CALL_COMPLETE) {   in rxperf_set_call_complete()
    92   call->abort_code = remote_abort;   in rxperf_set_call_complete()
    93   call->error = error;   in rxperf_set_call_complete()
    [all …]
|
D | recvmsg.c |
    23   void rxrpc_notify_socket(struct rxrpc_call *call)   in rxrpc_notify_socket() argument
    28   _enter("%d", call->debug_id);   in rxrpc_notify_socket()
    30   if (!list_empty(&call->recvmsg_link))   in rxrpc_notify_socket()
    35   rx = rcu_dereference(call->socket);   in rxrpc_notify_socket()
    38   if (call->notify_rx) {   in rxrpc_notify_socket()
    39   spin_lock(&call->notify_lock);   in rxrpc_notify_socket()
    40   call->notify_rx(sk, call, call->user_call_ID);   in rxrpc_notify_socket()
    41   spin_unlock(&call->notify_lock);   in rxrpc_notify_socket()
    44   if (list_empty(&call->recvmsg_link)) {   in rxrpc_notify_socket()
    45   rxrpc_get_call(call, rxrpc_call_get_notify_socket);   in rxrpc_notify_socket()
    [all …]
|
D | call_state.c |
    13   bool rxrpc_set_call_completion(struct rxrpc_call *call,   in rxrpc_set_call_completion() argument
    18   if (__rxrpc_call_state(call) == RXRPC_CALL_COMPLETE)   in rxrpc_set_call_completion()
    21   call->abort_code = abort_code;   in rxrpc_set_call_completion()
    22   call->error = error;   in rxrpc_set_call_completion()
    23   call->completion = compl;   in rxrpc_set_call_completion()
    25   rxrpc_set_call_state(call, RXRPC_CALL_COMPLETE);   in rxrpc_set_call_completion()
    26   trace_rxrpc_call_complete(call);   in rxrpc_set_call_completion()
    27   wake_up(&call->waitq);   in rxrpc_set_call_completion()
    28   rxrpc_notify_socket(call);   in rxrpc_set_call_completion()
    35   bool rxrpc_call_completed(struct rxrpc_call *call)   in rxrpc_call_completed() argument
    [all …]
|
D | output.c |
    48   static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)   in rxrpc_tx_backoff() argument
    51   u16 tx_backoff = READ_ONCE(call->tx_backoff);   in rxrpc_tx_backoff()
    54   WRITE_ONCE(call->tx_backoff, tx_backoff + 1);   in rxrpc_tx_backoff()
    56   WRITE_ONCE(call->tx_backoff, 0);   in rxrpc_tx_backoff()
    68   static void rxrpc_set_keepalive(struct rxrpc_call *call)   in rxrpc_set_keepalive() argument
    70   unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;   in rxrpc_set_keepalive()
    73   WRITE_ONCE(call->keepalive_at, keepalive_at);   in rxrpc_set_keepalive()
    74   rxrpc_reduce_call_timer(call, keepalive_at, now,   in rxrpc_set_keepalive()
    82   struct rxrpc_call *call,   in rxrpc_fill_out_ack() argument
    93   call->ackr_nr_unacked = 0;   in rxrpc_fill_out_ack()
    [all …]
|
D | call_accept.c |
    25   static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,   in rxrpc_dummy_notify() argument
    41   struct rxrpc_call *call, *xcall;   in rxrpc_service_prealloc_one() local
    98   call = rxrpc_alloc_call(rx, gfp, debug_id);   in rxrpc_service_prealloc_one()
    99   if (!call)   in rxrpc_service_prealloc_one()
    101  call->flags |= (1 << RXRPC_CALL_IS_SERVICE);   in rxrpc_service_prealloc_one()
    102  rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC);   in rxrpc_service_prealloc_one()
    103  __set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events);   in rxrpc_service_prealloc_one()
    105  trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),   in rxrpc_service_prealloc_one()
    124  call->user_call_ID = user_call_ID;   in rxrpc_service_prealloc_one()
    125  call->notify_rx = notify_rx;   in rxrpc_service_prealloc_one()
    [all …]
|
D | conn_client.c |
    73   static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,   in rxrpc_alloc_bundle() argument
    81   bundle->local = call->local;   in rxrpc_alloc_bundle()
    82   bundle->peer = rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);   in rxrpc_alloc_bundle()
    83   bundle->key = key_get(call->key);   in rxrpc_alloc_bundle()
    84   bundle->security = call->security;   in rxrpc_alloc_bundle()
    85   bundle->exclusive = test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);   in rxrpc_alloc_bundle()
    86   bundle->upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);   in rxrpc_alloc_bundle()
    87   bundle->service_id = call->dest_srx.srx_service;   in rxrpc_alloc_bundle()
    88   bundle->security_level = call->security_level;   in rxrpc_alloc_bundle()
    243  int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)   in rxrpc_look_up_bundle() argument
    [all …]
|
D | txbuf.c |
    19   struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,   in rxrpc_alloc_txbuf() argument
    29   txb->call_debug_id = call->debug_id;   in rxrpc_alloc_txbuf()
    36   txb->seq = call->tx_prepared + 1;   in rxrpc_alloc_txbuf()
    37   txb->wire.epoch = htonl(call->conn->proto.epoch);   in rxrpc_alloc_txbuf()
    38   txb->wire.cid = htonl(call->cid);   in rxrpc_alloc_txbuf()
    39   txb->wire.callNumber = htonl(call->call_id);   in rxrpc_alloc_txbuf()
    42   txb->wire.flags = call->conn->out_clientflag;   in rxrpc_alloc_txbuf()
    44   txb->wire.securityIndex = call->security_ix;   in rxrpc_alloc_txbuf()
    46   txb->wire.serviceId = htons(call->dest_srx.srx_service);   in rxrpc_alloc_txbuf()
    105  void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *call)   in rxrpc_shrink_call_tx_buffer() argument
    [all …]
|
/linux-6.6.21/fs/afs/ |
D | cmservice.c |
    108  bool afs_cm_incoming_call(struct afs_call *call)   in afs_cm_incoming_call() argument
    110  _enter("{%u, CB.OP %u}", call->service_id, call->operation_ID);   in afs_cm_incoming_call()
    112  switch (call->operation_ID) {   in afs_cm_incoming_call()
    114  call->type = &afs_SRXCBCallBack;   in afs_cm_incoming_call()
    117  call->type = &afs_SRXCBInitCallBackState;   in afs_cm_incoming_call()
    120  call->type = &afs_SRXCBInitCallBackState3;   in afs_cm_incoming_call()
    123  call->type = &afs_SRXCBProbe;   in afs_cm_incoming_call()
    126  call->type = &afs_SRXCBProbeUuid;   in afs_cm_incoming_call()
    129  call->type = &afs_SRXCBTellMeAboutYourself;   in afs_cm_incoming_call()
    132  if (call->service_id != YFS_CM_SERVICE)   in afs_cm_incoming_call()
    [all …]
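afs_cm_incoming_call() is a straightforward dispatch: the incoming operation ID selects the handler type for the call, and unknown operations are rejected. A minimal sketch of that dispatch; the opcode values and handler names below are invented for illustration:

    /* Sketch of "pick a handler type from the incoming operation ID", in the
     * spirit of afs_cm_incoming_call(); opcodes and names are invented. */
    #include <stdbool.h>
    #include <stdio.h>

    enum { OP_CALLBACK = 1, OP_INIT_STATE = 2, OP_PROBE = 3 }; /* illustrative */

    struct call_type { const char *name; };

    static const struct call_type handle_callback   = { "CB.CallBack" };
    static const struct call_type handle_init_state = { "CB.InitCallBackState" };
    static const struct call_type handle_probe      = { "CB.Probe" };

    struct incoming_call {
            unsigned int operation_id;
            const struct call_type *type;
    };

    static bool cm_incoming_call(struct incoming_call *call)
    {
            switch (call->operation_id) {
            case OP_CALLBACK:   call->type = &handle_callback;   return true;
            case OP_INIT_STATE: call->type = &handle_init_state; return true;
            case OP_PROBE:      call->type = &handle_probe;      return true;
            default:            return false; /* unknown operation: reject */
            }
    }

    int main(void)
    {
            struct incoming_call c = { .operation_id = OP_INIT_STATE };

            if (cm_incoming_call(&c))
                    printf("dispatch to %s\n", c.type->name);
            return 0;
    }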
|
D | rxrpc.c |
    140  struct afs_call *call;   in afs_alloc_call() local
    143  call = kzalloc(sizeof(*call), gfp);   in afs_alloc_call()
    144  if (!call)   in afs_alloc_call()
    147  call->type = type;   in afs_alloc_call()
    148  call->net = net;   in afs_alloc_call()
    149  call->debug_id = atomic_inc_return(&rxrpc_debug_id);   in afs_alloc_call()
    150  refcount_set(&call->ref, 1);   in afs_alloc_call()
    151  INIT_WORK(&call->async_work, afs_process_async_call);   in afs_alloc_call()
    152  init_waitqueue_head(&call->waitq);   in afs_alloc_call()
    153  spin_lock_init(&call->state_lock);   in afs_alloc_call()
    [all …]
|
D | vlclient.c |
    17   static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)   in afs_deliver_vl_get_entry_by_name_u() argument
    27   ret = afs_transfer_reply(call);   in afs_deliver_vl_get_entry_by_name_u()
    32   uvldb = call->buffer;   in afs_deliver_vl_get_entry_by_name_u()
    33   entry = call->ret_vldb;   in afs_deliver_vl_get_entry_by_name_u()
    109  static void afs_destroy_vl_get_entry_by_name_u(struct afs_call *call)   in afs_destroy_vl_get_entry_by_name_u() argument
    111  kfree(call->ret_vldb);   in afs_destroy_vl_get_entry_by_name_u()
    112  afs_flat_call_destructor(call);   in afs_destroy_vl_get_entry_by_name_u()
    134  struct afs_call *call;   in afs_vl_get_entry_by_name_u() local
    148  call = afs_alloc_flat_call(net, &afs_RXVLGetEntryByNameU, reqsz,   in afs_vl_get_entry_by_name_u()
    150  if (!call) {   in afs_vl_get_entry_by_name_u()
    [all …]
|
D | yfsclient.c |
    138  static void yfs_check_req(struct afs_call *call, __be32 *bp)   in yfs_check_req() argument
    140  size_t len = (void *)bp - call->request;   in yfs_check_req()
    142  if (len > call->request_size)   in yfs_check_req()
    144  call->type->name, len, call->request_size);   in yfs_check_req()
    145  else if (len < call->request_size)   in yfs_check_req()
    147  call->type->name, len, call->request_size);   in yfs_check_req()
    174  struct afs_call *call,   in xdr_decode_YFSFetchStatus() argument
    220  afs_protocol_error(call, afs_eproto_bad_status);   in xdr_decode_YFSFetchStatus()
    228  struct afs_call *call,   in xdr_decode_YFSCallBack() argument
    235  cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100);   in xdr_decode_YFSCallBack()
    [all …]
|
D | fsclient.c |
    55   struct afs_call *call,   in xdr_decode_AFSFetchStatus() argument
    60   bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus);   in xdr_decode_AFSFetchStatus()
    128  afs_protocol_error(call, afs_eproto_bad_status);   in xdr_decode_AFSFetchStatus()
    132  static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)   in xdr_decode_expiry() argument
    134  return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry;   in xdr_decode_expiry()
    138  struct afs_call *call,   in xdr_decode_AFSCallBack() argument
    145  cb->expires_at = xdr_decode_expiry(call, ntohl(*bp++));   in xdr_decode_AFSCallBack()
    238  static int afs_deliver_fs_fetch_status(struct afs_call *call)   in afs_deliver_fs_fetch_status() argument
    240  struct afs_operation *op = call->op;   in afs_deliver_fs_fetch_status()
    245  ret = afs_transfer_reply(call);   in afs_deliver_fs_fetch_status()
    [all …]
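xdr_decode_expiry() converts a relative lifetime from the wire into an absolute expiry by dividing the call's issue time down to seconds and adding the lifetime. The same idea in plain C, assuming a nanosecond issue timestamp; the helper name is illustrative:

    /* Sketch of converting a relative expiry from the wire into an absolute
     * time, as xdr_decode_expiry() does; simplified, not the AFS code. */
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000LL

    /* issue_time_ns: when the request was sent; expiry: lifetime in seconds. */
    static int64_t decode_expiry(int64_t issue_time_ns, uint32_t expiry)
    {
            return issue_time_ns / NSEC_PER_SEC + expiry;
    }

    int main(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_REALTIME, &ts);
            int64_t issue_ns = (int64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;

            /* A callback promise valid for one hour from the issue time. */
            printf("expires at unix time %lld\n",
                   (long long)decode_expiry(issue_ns, 3600));
            return 0;
    }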
|
/linux-6.6.21/include/trace/ |
D | trace_events.h |
    115  #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \   argument
    116  struct trace_event_data_offsets_##call { \
    187  #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \   argument
    189  trace_raw_output_##call(struct trace_iterator *iter, int flags, \
    194  struct trace_event_raw_##call *field; \
    207  static struct trace_event_functions trace_event_type_funcs_##call = { \
    208  .trace = trace_raw_output_##call, \
    212  #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \   argument
    214  trace_raw_output_##call(struct trace_iterator *iter, int flags, \
    223  if (entry->type != event_##call.event.type) { \
    [all …]
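DECLARE_EVENT_CLASS() and DEFINE_EVENT_PRINT() are normally reached through the TRACE_EVENT() wrapper in a trace header. The fragment below sketches what such a header typically looks like; the event name and fields are invented, and it only builds inside the kernel's tracing infrastructure, not standalone:

    /* Illustrative trace header fragment; 'sample' and its fields are invented.
     * TRACE_EVENT() expands through DECLARE_EVENT_CLASS()/DEFINE_EVENT(). */
    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM sample

    #if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_SAMPLE_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(sample_call_queued,
            TP_PROTO(unsigned int debug_id, int error),

            TP_ARGS(debug_id, error),

            TP_STRUCT__entry(
                    __field(unsigned int, call)
                    __field(int, error)
            ),

            TP_fast_assign(
                    __entry->call  = debug_id;
                    __entry->error = error;
            ),

            TP_printk("c=%08x e=%d", __entry->call, __entry->error)
    );

    #endif /* _TRACE_SAMPLE_H */

    /* Kept outside the include guard so the header can be re-read. */
    #include <trace/define_trace.h>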
|
D | trace_custom_events.h |
    62   #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \   argument
    63   struct trace_custom_event_data_offsets_##call { \
    77   #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \   argument
    79   trace_custom_raw_output_##call(struct trace_iterator *iter, int flags, \
    84   struct trace_custom_event_raw_##call *field; \
    97   static struct trace_event_functions trace_custom_event_type_funcs_##call = { \
    98   .trace = trace_custom_raw_output_##call, \
    108  #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, func, print) \   argument
    109  static struct trace_event_fields trace_custom_event_fields_##call[] = { \
    120  #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \   argument
    [all …]
|
D | bpf_probe.h |
    45   #define __BPF_DECLARE_TRACE(call, proto, args) \   argument
    47   __bpf_trace_##call(void *__data, proto) \
    54   #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \   argument
    55   __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args))
    62   #define __DEFINE_EVENT(template, call, proto, args, size) \   argument
    63   static inline void bpf_test_probe_##call(void) \
    65   check_trace_callback_type_##call(__bpf_trace_##template); \
    67   typedef void (*btf_trace_##call)(void *__data, proto); \
    70   btf_trace_##call handler; \
    71   } __bpf_trace_tp_map_##call __used \
    [all …]
|
/linux-6.6.21/tools/ |
D | Makefile |
    67   $(call descend,power/$@)
    70   $(call descend,power/$@)
    73   $(call descend,$@)
    76   $(call descend,$@)
    79   $(call descend,lib/api)
    82   $(call descend,include/nolibc)
    85   $(call descend,include/nolibc,$(patsubst nolibc_%,%,$@))
    96   $(call descend,testing/$@)
    99   $(call descend,lib/$@)
    102  $(call descend,power/x86/$@)
    [all …]
|
/linux-6.6.21/include/trace/events/ |
D | rxrpc.h |
    694  __field(unsigned int, call)
    701  __entry->call = call_debug_id;
    708  __entry->call,
    814  TP_PROTO(struct rxrpc_call *call),
    816  TP_ARGS(call),
    819  __field(unsigned int, call)
    826  __entry->call = call->debug_id;
    827  __entry->compl = call->completion;
    828  __entry->error = call->error;
    829  __entry->abort_code = call->abort_code;
    [all …]
|
D | afs.h |
    651  TP_PROTO(struct afs_call *call, struct iov_iter *iter,
    654  TP_ARGS(call, iter, want_more, ret),
    658  __field(unsigned int, call )
    666  __entry->call = call->debug_id;
    667  __entry->state = call->state;
    668  __entry->unmarshall = call->unmarshall;
    675  __entry->call,
    684  TP_PROTO(struct rxrpc_call *rxcall, struct afs_call *call),
    686  TP_ARGS(rxcall, call),
    689  __field(unsigned int, call )
    [all …]
|
/linux-6.6.21/scripts/ |
D | Makefile.extrawarn |
    18   KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
    19   KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
    40   KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
    41   KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
    44   KBUILD_CFLAGS += $(call cc-disable-warning, dangling-pointer)
    55   KBUILD_CFLAGS += $(call cc-option, -Wcast-function-type)
    68   KBUILD_CFLAGS-$(call gcc-min-version, 90100) += -Wno-alloc-size-larger-than
    75   KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
    78   KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
    81   KBUILD_CFLAGS += $(call cc-option,-Wenum-conversion)
    [all …]
|
D | Makefile.kasan |
    14   cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
    33   CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \
    35   $(call cc-option, -fsanitize=kernel-address \
    43   $(call cc-param,asan-globals=1) \
    44   $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
    45   $(call cc-param,asan-instrument-allocas=1)
    48   CFLAGS_KASAN += $(call cc-param,asan-stack=$(stack_enable))
    53   CFLAGS_KASAN += $(call cc-param,asan-kernel-mem-intrinsic-prefix=1)
    60   instrumentation_flags := $(call cc-param,hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET))
    62   instrumentation_flags := $(call cc-param,hwasan-instrument-with-calls=1)
    [all …]
|
/linux-6.6.21/arch/powerpc/ |
D | Makefile |
    13   HAS_BIARCH := $(call cc-option-yn, -m32)
    59   KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect)
    61   KBUILD_CFLAGS += $(call cc-option,-mbig-endian)
    70   cflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mabi=elfv1)
    71   cflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mcall-aixdesc)
    72   aflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mabi=elfv1)
    81   cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
    83   aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
    106  ifeq ($(call cc-option-yn,-mcmodel=medium),y)
    122  CFLAGS-$(CONFIG_PPC64) := $(call cc-option,-mtraceback=no)
    [all …]
|