/* automatically generated by rust-bindgen 0.69.4 */
#[repr(C)] #[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct __BindgenBitfieldUnit<Storage> { storage: Storage, }
impl<Storage> __BindgenBitfieldUnit<Storage> { #[inline] pub const fn new(storage: Storage) -> Self { Self { storage } } }
impl<Storage> __BindgenBitfieldUnit<Storage> where Storage: AsRef<[u8]> + AsMut<[u8]>, {
#[inline] pub fn get_bit(&self, index: usize) -> bool { debug_assert!(index / 8 < self.storage.as_ref().len()); let byte_index = index / 8; let byte = self.storage.as_ref()[byte_index]; let bit_index = if cfg!(target_endian = "big") { 7 - (index % 8) } else { index % 8 }; let mask = 1 << bit_index; byte & mask == mask }
#[inline] pub fn set_bit(&mut self, index: usize, val: bool) { debug_assert!(index / 8 < self.storage.as_ref().len()); let byte_index = index / 8; let byte = &mut self.storage.as_mut()[byte_index]; let bit_index = if cfg!(target_endian = "big") { 7 - (index % 8) } else { index % 8 }; let mask = 1 << bit_index; if val { *byte |= mask; } else { *byte &= !mask; } }
#[inline] pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { debug_assert!(bit_width <= 64); debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); let mut val = 0; for i in 0..(bit_width as usize) { if self.get_bit(i + bit_offset) { let index = if cfg!(target_endian = "big") { bit_width as usize - 1 - i } else { i }; val |= 1 << index; } } val }
#[inline] pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { debug_assert!(bit_width <= 64); debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); for i in 0..(bit_width as usize) { let mask = 1 << i; let val_bit_is_set = val & mask == mask; let index = if cfg!(target_endian = "big") { bit_width as usize - 1 - i } else { i }; self.set_bit(index + bit_offset, val_bit_is_set); } }
}
#[repr(C)] #[derive(Default)] pub struct __IncompleteArrayField<T>(::core::marker::PhantomData<T>, [T; 0]);
impl<T> __IncompleteArrayField<T> { #[inline] pub const fn new() -> Self { __IncompleteArrayField(::core::marker::PhantomData, []) } #[inline] pub fn as_ptr(&self) -> *const T { self as *const _ as *const T } #[inline] pub fn as_mut_ptr(&mut self) -> *mut T { self as *mut _ as *mut T } #[inline] pub unsafe fn as_slice(&self, len: usize) -> &[T] { ::core::slice::from_raw_parts(self.as_ptr(), len) } #[inline] pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { ::core::slice::from_raw_parts_mut(self.as_mut_ptr(), len) } }
impl<T> ::core::fmt::Debug for __IncompleteArrayField<T> { fn fmt(&self, fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { fmt.write_str("__IncompleteArrayField") } }
pub const SO_ATTACH_BPF: u32 = 50; pub const SO_DETACH_BPF: u32 = 27; pub const BPF_LD: u32 = 0; pub const BPF_LDX: u32 = 1; pub const BPF_ST: u32 = 2; pub const BPF_STX: u32 = 3; pub const BPF_ALU: u32 = 4; pub const BPF_JMP: u32 = 5; pub const BPF_W: u32 = 0; pub const BPF_H: u32 = 8; pub const BPF_B: u32 = 16; pub const BPF_K: u32 = 0; pub const BPF_ALU64: u32 = 7; pub const BPF_DW: u32 = 24; pub const BPF_CALL: u32 = 128; pub const BPF_F_ALLOW_OVERRIDE: u32 = 1; pub const BPF_F_ALLOW_MULTI: u32 = 2; pub const BPF_F_REPLACE: u32 = 4; pub const BPF_F_BEFORE: u32 = 8; pub const BPF_F_AFTER: u32 = 16; pub const BPF_F_ID: u32 = 32; pub const BPF_F_STRICT_ALIGNMENT: u32 = 1; pub const BPF_F_ANY_ALIGNMENT: u32 = 2; pub const
BPF_F_TEST_RND_HI32: u32 = 4; pub const BPF_F_TEST_STATE_FREQ: u32 = 8; pub const BPF_F_SLEEPABLE: u32 = 16; pub const BPF_F_XDP_HAS_FRAGS: u32 = 32; pub const BPF_F_XDP_DEV_BOUND_ONLY: u32 = 64; pub const BPF_F_TEST_REG_INVARIANTS: u32 = 128; pub const BPF_F_NETFILTER_IP_DEFRAG: u32 = 1; pub const BPF_PSEUDO_MAP_FD: u32 = 1; pub const BPF_PSEUDO_MAP_IDX: u32 = 5; pub const BPF_PSEUDO_MAP_VALUE: u32 = 2; pub const BPF_PSEUDO_MAP_IDX_VALUE: u32 = 6; pub const BPF_PSEUDO_BTF_ID: u32 = 3; pub const BPF_PSEUDO_FUNC: u32 = 4; pub const BPF_PSEUDO_CALL: u32 = 1; pub const BPF_PSEUDO_KFUNC_CALL: u32 = 2; pub const BPF_F_QUERY_EFFECTIVE: u32 = 1; pub const BPF_F_TEST_RUN_ON_CPU: u32 = 1; pub const BPF_F_TEST_XDP_LIVE_FRAMES: u32 = 2; pub const BTF_INT_SIGNED: u32 = 1; pub const BTF_INT_CHAR: u32 = 2; pub const BTF_INT_BOOL: u32 = 4; pub const NLMSG_ALIGNTO: u32 = 4; pub const XDP_FLAGS_UPDATE_IF_NOEXIST: u32 = 1; pub const XDP_FLAGS_SKB_MODE: u32 = 2; pub const XDP_FLAGS_DRV_MODE: u32 = 4; pub const XDP_FLAGS_HW_MODE: u32 = 8; pub const XDP_FLAGS_REPLACE: u32 = 16; pub const XDP_FLAGS_MODES: u32 = 14; pub const XDP_FLAGS_MASK: u32 = 31; pub const PERF_MAX_STACK_DEPTH: u32 = 127; pub const PERF_MAX_CONTEXTS_PER_STACK: u32 = 8; pub const PERF_FLAG_FD_NO_GROUP: u32 = 1; pub const PERF_FLAG_FD_OUTPUT: u32 = 2; pub const PERF_FLAG_PID_CGROUP: u32 = 4; pub const PERF_FLAG_FD_CLOEXEC: u32 = 8; pub const TC_H_MAJ_MASK: u32 = 4294901760; pub const TC_H_MIN_MASK: u32 = 65535; pub const TC_H_UNSPEC: u32 = 0; pub const TC_H_ROOT: u32 = 4294967295; pub const TC_H_INGRESS: u32 = 4294967281; pub const TC_H_CLSACT: u32 = 4294967281; pub const TC_H_MIN_PRIORITY: u32 = 65504; pub const TC_H_MIN_INGRESS: u32 = 65522; pub const TC_H_MIN_EGRESS: u32 = 65523; pub const TCA_BPF_FLAG_ACT_DIRECT: u32 = 1; pub type __u8 = ::core::ffi::c_uchar; pub type __s16 = ::core::ffi::c_short; pub type __u16 = ::core::ffi::c_ushort; pub type __s32 = ::core::ffi::c_int; pub type __u32 = ::core::ffi::c_uint; pub type __s64 = ::core::ffi::c_longlong; pub type __u64 = ::core::ffi::c_ulonglong; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_insn { pub code: __u8, pub _bitfield_align_1: [u8; 0], pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>, pub off: __s16, pub imm: __s32, } impl bpf_insn { #[inline] pub fn dst_reg(&self) -> __u8 { unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u8) } } #[inline] pub fn set_dst_reg(&mut self, val: __u8) { unsafe { let val: u8 = ::core::mem::transmute(val); self._bitfield_1.set(0usize, 4u8, val as u64) } } #[inline] pub fn src_reg(&self) -> __u8 { unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) } } #[inline] pub fn set_src_reg(&mut self, val: __u8) { unsafe { let val: u8 = ::core::mem::transmute(val); self._bitfield_1.set(4usize, 4u8, val as u64) } } #[inline] pub fn new_bitfield_1(dst_reg: __u8, src_reg: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit.set(0usize, 4u8, { let dst_reg: u8 = unsafe { ::core::mem::transmute(dst_reg) }; dst_reg as u64 }); __bindgen_bitfield_unit.set(4usize, 4u8, { let src_reg: u8 = unsafe { ::core::mem::transmute(src_reg) }; src_reg as u64 }); __bindgen_bitfield_unit } } #[repr(C)] #[derive(Debug)] pub struct bpf_lpm_trie_key { pub prefixlen: __u32, pub data: __IncompleteArrayField<__u8>, } impl bpf_cmd { pub const BPF_PROG_RUN: bpf_cmd = bpf_cmd::BPF_PROG_TEST_RUN; } #[repr(u32)] #[derive(Debug, Copy, 
Clone, Hash, PartialEq, Eq, FromPrimitive)] pub enum bpf_cmd { BPF_MAP_CREATE = 0, BPF_MAP_LOOKUP_ELEM = 1, BPF_MAP_UPDATE_ELEM = 2, BPF_MAP_DELETE_ELEM = 3, BPF_MAP_GET_NEXT_KEY = 4, BPF_PROG_LOAD = 5, BPF_OBJ_PIN = 6, BPF_OBJ_GET = 7, BPF_PROG_ATTACH = 8, BPF_PROG_DETACH = 9, BPF_PROG_TEST_RUN = 10, BPF_PROG_GET_NEXT_ID = 11, BPF_MAP_GET_NEXT_ID = 12, BPF_PROG_GET_FD_BY_ID = 13, BPF_MAP_GET_FD_BY_ID = 14, BPF_OBJ_GET_INFO_BY_FD = 15, BPF_PROG_QUERY = 16, BPF_RAW_TRACEPOINT_OPEN = 17, BPF_BTF_LOAD = 18, BPF_BTF_GET_FD_BY_ID = 19, BPF_TASK_FD_QUERY = 20, BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21, BPF_MAP_FREEZE = 22, BPF_BTF_GET_NEXT_ID = 23, BPF_MAP_LOOKUP_BATCH = 24, BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25, BPF_MAP_UPDATE_BATCH = 26, BPF_MAP_DELETE_BATCH = 27, BPF_LINK_CREATE = 28, BPF_LINK_UPDATE = 29, BPF_LINK_GET_FD_BY_ID = 30, BPF_LINK_GET_NEXT_ID = 31, BPF_ENABLE_STATS = 32, BPF_ITER_CREATE = 33, BPF_LINK_DETACH = 34, BPF_PROG_BIND_MAP = 35, BPF_TOKEN_CREATE = 36, __MAX_BPF_CMD = 37, } impl bpf_map_type { pub const BPF_MAP_TYPE_CGROUP_STORAGE: bpf_map_type = bpf_map_type::BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED; } impl bpf_map_type { pub const BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: bpf_map_type = bpf_map_type::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED; } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, FromPrimitive)] pub enum bpf_map_type { BPF_MAP_TYPE_UNSPEC = 0, BPF_MAP_TYPE_HASH = 1, BPF_MAP_TYPE_ARRAY = 2, BPF_MAP_TYPE_PROG_ARRAY = 3, BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, BPF_MAP_TYPE_PERCPU_HASH = 5, BPF_MAP_TYPE_PERCPU_ARRAY = 6, BPF_MAP_TYPE_STACK_TRACE = 7, BPF_MAP_TYPE_CGROUP_ARRAY = 8, BPF_MAP_TYPE_LRU_HASH = 9, BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, BPF_MAP_TYPE_LPM_TRIE = 11, BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, BPF_MAP_TYPE_HASH_OF_MAPS = 13, BPF_MAP_TYPE_DEVMAP = 14, BPF_MAP_TYPE_SOCKMAP = 15, BPF_MAP_TYPE_CPUMAP = 16, BPF_MAP_TYPE_XSKMAP = 17, BPF_MAP_TYPE_SOCKHASH = 18, BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 19, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED = 21, BPF_MAP_TYPE_QUEUE = 22, BPF_MAP_TYPE_STACK = 23, BPF_MAP_TYPE_SK_STORAGE = 24, BPF_MAP_TYPE_DEVMAP_HASH = 25, BPF_MAP_TYPE_STRUCT_OPS = 26, BPF_MAP_TYPE_RINGBUF = 27, BPF_MAP_TYPE_INODE_STORAGE = 28, BPF_MAP_TYPE_TASK_STORAGE = 29, BPF_MAP_TYPE_BLOOM_FILTER = 30, BPF_MAP_TYPE_USER_RINGBUF = 31, BPF_MAP_TYPE_CGRP_STORAGE = 32, BPF_MAP_TYPE_ARENA = 33, __MAX_BPF_MAP_TYPE = 34, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, FromPrimitive)] pub enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC = 0, BPF_PROG_TYPE_SOCKET_FILTER = 1, BPF_PROG_TYPE_KPROBE = 2, BPF_PROG_TYPE_SCHED_CLS = 3, BPF_PROG_TYPE_SCHED_ACT = 4, BPF_PROG_TYPE_TRACEPOINT = 5, BPF_PROG_TYPE_XDP = 6, BPF_PROG_TYPE_PERF_EVENT = 7, BPF_PROG_TYPE_CGROUP_SKB = 8, BPF_PROG_TYPE_CGROUP_SOCK = 9, BPF_PROG_TYPE_LWT_IN = 10, BPF_PROG_TYPE_LWT_OUT = 11, BPF_PROG_TYPE_LWT_XMIT = 12, BPF_PROG_TYPE_SOCK_OPS = 13, BPF_PROG_TYPE_SK_SKB = 14, BPF_PROG_TYPE_CGROUP_DEVICE = 15, BPF_PROG_TYPE_SK_MSG = 16, BPF_PROG_TYPE_RAW_TRACEPOINT = 17, BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, BPF_PROG_TYPE_LIRC_MODE2 = 20, BPF_PROG_TYPE_SK_REUSEPORT = 21, BPF_PROG_TYPE_FLOW_DISSECTOR = 22, BPF_PROG_TYPE_CGROUP_SYSCTL = 23, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, BPF_PROG_TYPE_TRACING = 26, BPF_PROG_TYPE_STRUCT_OPS = 27, BPF_PROG_TYPE_EXT = 28, BPF_PROG_TYPE_LSM = 29, BPF_PROG_TYPE_SK_LOOKUP = 30, BPF_PROG_TYPE_SYSCALL = 31, BPF_PROG_TYPE_NETFILTER = 32, 
__MAX_BPF_PROG_TYPE = 33, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, FromPrimitive)] pub enum bpf_attach_type { BPF_CGROUP_INET_INGRESS = 0, BPF_CGROUP_INET_EGRESS = 1, BPF_CGROUP_INET_SOCK_CREATE = 2, BPF_CGROUP_SOCK_OPS = 3, BPF_SK_SKB_STREAM_PARSER = 4, BPF_SK_SKB_STREAM_VERDICT = 5, BPF_CGROUP_DEVICE = 6, BPF_SK_MSG_VERDICT = 7, BPF_CGROUP_INET4_BIND = 8, BPF_CGROUP_INET6_BIND = 9, BPF_CGROUP_INET4_CONNECT = 10, BPF_CGROUP_INET6_CONNECT = 11, BPF_CGROUP_INET4_POST_BIND = 12, BPF_CGROUP_INET6_POST_BIND = 13, BPF_CGROUP_UDP4_SENDMSG = 14, BPF_CGROUP_UDP6_SENDMSG = 15, BPF_LIRC_MODE2 = 16, BPF_FLOW_DISSECTOR = 17, BPF_CGROUP_SYSCTL = 18, BPF_CGROUP_UDP4_RECVMSG = 19, BPF_CGROUP_UDP6_RECVMSG = 20, BPF_CGROUP_GETSOCKOPT = 21, BPF_CGROUP_SETSOCKOPT = 22, BPF_TRACE_RAW_TP = 23, BPF_TRACE_FENTRY = 24, BPF_TRACE_FEXIT = 25, BPF_MODIFY_RETURN = 26, BPF_LSM_MAC = 27, BPF_TRACE_ITER = 28, BPF_CGROUP_INET4_GETPEERNAME = 29, BPF_CGROUP_INET6_GETPEERNAME = 30, BPF_CGROUP_INET4_GETSOCKNAME = 31, BPF_CGROUP_INET6_GETSOCKNAME = 32, BPF_XDP_DEVMAP = 33, BPF_CGROUP_INET_SOCK_RELEASE = 34, BPF_XDP_CPUMAP = 35, BPF_SK_LOOKUP = 36, BPF_XDP = 37, BPF_SK_SKB_VERDICT = 38, BPF_SK_REUSEPORT_SELECT = 39, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40, BPF_PERF_EVENT = 41, BPF_TRACE_KPROBE_MULTI = 42, BPF_LSM_CGROUP = 43, BPF_STRUCT_OPS = 44, BPF_NETFILTER = 45, BPF_TCX_INGRESS = 46, BPF_TCX_EGRESS = 47, BPF_TRACE_UPROBE_MULTI = 48, BPF_CGROUP_UNIX_CONNECT = 49, BPF_CGROUP_UNIX_SENDMSG = 50, BPF_CGROUP_UNIX_RECVMSG = 51, BPF_CGROUP_UNIX_GETPEERNAME = 52, BPF_CGROUP_UNIX_GETSOCKNAME = 53, BPF_NETKIT_PRIMARY = 54, BPF_NETKIT_PEER = 55, __MAX_BPF_ATTACH_TYPE = 56, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum bpf_link_type { BPF_LINK_TYPE_UNSPEC = 0, BPF_LINK_TYPE_RAW_TRACEPOINT = 1, BPF_LINK_TYPE_TRACING = 2, BPF_LINK_TYPE_CGROUP = 3, BPF_LINK_TYPE_ITER = 4, BPF_LINK_TYPE_NETNS = 5, BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, BPF_LINK_TYPE_KPROBE_MULTI = 8, BPF_LINK_TYPE_STRUCT_OPS = 9, BPF_LINK_TYPE_NETFILTER = 10, BPF_LINK_TYPE_TCX = 11, BPF_LINK_TYPE_UPROBE_MULTI = 12, BPF_LINK_TYPE_NETKIT = 13, __MAX_BPF_LINK_TYPE = 14, } pub const BPF_F_KPROBE_MULTI_RETURN: _bindgen_ty_2 = 1; pub type _bindgen_ty_2 = ::core::ffi::c_uint; pub const BPF_F_UPROBE_MULTI_RETURN: _bindgen_ty_3 = 1; pub type _bindgen_ty_3 = ::core::ffi::c_uint; pub const BPF_ANY: _bindgen_ty_4 = 0; pub const BPF_NOEXIST: _bindgen_ty_4 = 1; pub const BPF_EXIST: _bindgen_ty_4 = 2; pub const BPF_F_LOCK: _bindgen_ty_4 = 4; pub type _bindgen_ty_4 = ::core::ffi::c_uint; pub const BPF_F_NO_PREALLOC: _bindgen_ty_5 = 1; pub const BPF_F_NO_COMMON_LRU: _bindgen_ty_5 = 2; pub const BPF_F_NUMA_NODE: _bindgen_ty_5 = 4; pub const BPF_F_RDONLY: _bindgen_ty_5 = 8; pub const BPF_F_WRONLY: _bindgen_ty_5 = 16; pub const BPF_F_STACK_BUILD_ID: _bindgen_ty_5 = 32; pub const BPF_F_ZERO_SEED: _bindgen_ty_5 = 64; pub const BPF_F_RDONLY_PROG: _bindgen_ty_5 = 128; pub const BPF_F_WRONLY_PROG: _bindgen_ty_5 = 256; pub const BPF_F_CLONE: _bindgen_ty_5 = 512; pub const BPF_F_MMAPABLE: _bindgen_ty_5 = 1024; pub const BPF_F_PRESERVE_ELEMS: _bindgen_ty_5 = 2048; pub const BPF_F_INNER_MAP: _bindgen_ty_5 = 4096; pub const BPF_F_LINK: _bindgen_ty_5 = 8192; pub const BPF_F_PATH_FD: _bindgen_ty_5 = 16384; pub const BPF_F_VTYPE_BTF_OBJ_FD: _bindgen_ty_5 = 32768; pub const BPF_F_TOKEN_FD: _bindgen_ty_5 = 65536; pub const BPF_F_SEGV_ON_FAULT: _bindgen_ty_5 = 131072; pub const BPF_F_NO_USER_CONV: _bindgen_ty_5 = 262144; pub type 
_bindgen_ty_5 = ::core::ffi::c_uint; #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum bpf_stats_type { BPF_STATS_RUN_TIME = 0, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_attr { pub __bindgen_anon_1: bpf_attr__bindgen_ty_1, pub __bindgen_anon_2: bpf_attr__bindgen_ty_2, pub batch: bpf_attr__bindgen_ty_3, pub __bindgen_anon_3: bpf_attr__bindgen_ty_4, pub __bindgen_anon_4: bpf_attr__bindgen_ty_5, pub __bindgen_anon_5: bpf_attr__bindgen_ty_6, pub test: bpf_attr__bindgen_ty_7, pub __bindgen_anon_6: bpf_attr__bindgen_ty_8, pub info: bpf_attr__bindgen_ty_9, pub query: bpf_attr__bindgen_ty_10, pub raw_tracepoint: bpf_attr__bindgen_ty_11, pub __bindgen_anon_7: bpf_attr__bindgen_ty_12, pub task_fd_query: bpf_attr__bindgen_ty_13, pub link_create: bpf_attr__bindgen_ty_14, pub link_update: bpf_attr__bindgen_ty_15, pub link_detach: bpf_attr__bindgen_ty_16, pub enable_stats: bpf_attr__bindgen_ty_17, pub iter_create: bpf_attr__bindgen_ty_18, pub prog_bind_map: bpf_attr__bindgen_ty_19, pub token_create: bpf_attr__bindgen_ty_20, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_1 { pub map_type: __u32, pub key_size: __u32, pub value_size: __u32, pub max_entries: __u32, pub map_flags: __u32, pub inner_map_fd: __u32, pub numa_node: __u32, pub map_name: [::core::ffi::c_char; 16usize], pub map_ifindex: __u32, pub btf_fd: __u32, pub btf_key_type_id: __u32, pub btf_value_type_id: __u32, pub btf_vmlinux_value_type_id: __u32, pub map_extra: __u64, pub value_type_btf_obj_fd: __s32, pub map_token_fd: __s32, } #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_attr__bindgen_ty_2 { pub map_fd: __u32, pub key: __u64, pub __bindgen_anon_1: bpf_attr__bindgen_ty_2__bindgen_ty_1, pub flags: __u64, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_attr__bindgen_ty_2__bindgen_ty_1 { pub value: __u64, pub next_key: __u64, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_3 { pub in_batch: __u64, pub out_batch: __u64, pub keys: __u64, pub values: __u64, pub count: __u32, pub map_fd: __u32, pub elem_flags: __u64, pub flags: __u64, } #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_attr__bindgen_ty_4 { pub prog_type: __u32, pub insn_cnt: __u32, pub insns: __u64, pub license: __u64, pub log_level: __u32, pub log_size: __u32, pub log_buf: __u64, pub kern_version: __u32, pub prog_flags: __u32, pub prog_name: [::core::ffi::c_char; 16usize], pub prog_ifindex: __u32, pub expected_attach_type: __u32, pub prog_btf_fd: __u32, pub func_info_rec_size: __u32, pub func_info: __u64, pub func_info_cnt: __u32, pub line_info_rec_size: __u32, pub line_info: __u64, pub line_info_cnt: __u32, pub attach_btf_id: __u32, pub __bindgen_anon_1: bpf_attr__bindgen_ty_4__bindgen_ty_1, pub core_relo_cnt: __u32, pub fd_array: __u64, pub core_relos: __u64, pub core_relo_rec_size: __u32, pub log_true_size: __u32, pub prog_token_fd: __s32, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_attr__bindgen_ty_4__bindgen_ty_1 { pub attach_prog_fd: __u32, pub attach_btf_obj_fd: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_5 { pub pathname: __u64, pub bpf_fd: __u32, pub file_flags: __u32, pub path_fd: __s32, } #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_attr__bindgen_ty_6 { pub __bindgen_anon_1: bpf_attr__bindgen_ty_6__bindgen_ty_1, pub attach_bpf_fd: __u32, pub attach_type: __u32, pub attach_flags: __u32, pub replace_bpf_fd: __u32, pub __bindgen_anon_2: bpf_attr__bindgen_ty_6__bindgen_ty_2, pub expected_revision: __u64, } #[repr(C)] #[derive(Copy, 
Clone)] pub union bpf_attr__bindgen_ty_6__bindgen_ty_1 { pub target_fd: __u32, pub target_ifindex: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_attr__bindgen_ty_6__bindgen_ty_2 { pub relative_fd: __u32, pub relative_id: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_7 { pub prog_fd: __u32, pub retval: __u32, pub data_size_in: __u32, pub data_size_out: __u32, pub data_in: __u64, pub data_out: __u64, pub repeat: __u32, pub duration: __u32, pub ctx_size_in: __u32, pub ctx_size_out: __u32, pub ctx_in: __u64, pub ctx_out: __u64, pub flags: __u32, pub cpu: __u32, pub batch_size: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_attr__bindgen_ty_8 { pub __bindgen_anon_1: bpf_attr__bindgen_ty_8__bindgen_ty_1, pub next_id: __u32, pub open_flags: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_attr__bindgen_ty_8__bindgen_ty_1 { pub start_id: __u32, pub prog_id: __u32, pub map_id: __u32, pub btf_id: __u32, pub link_id: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_9 { pub bpf_fd: __u32, pub info_len: __u32, pub info: __u64, } #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_attr__bindgen_ty_10 { pub __bindgen_anon_1: bpf_attr__bindgen_ty_10__bindgen_ty_1, pub attach_type: __u32, pub query_flags: __u32, pub attach_flags: __u32, pub prog_ids: __u64, pub __bindgen_anon_2: bpf_attr__bindgen_ty_10__bindgen_ty_2, pub _bitfield_align_1: [u8; 0], pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, pub prog_attach_flags: __u64, pub link_ids: __u64, pub link_attach_flags: __u64, pub revision: __u64, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_attr__bindgen_ty_10__bindgen_ty_1 { pub target_fd: __u32, pub target_ifindex: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_attr__bindgen_ty_10__bindgen_ty_2 { pub prog_cnt: __u32, pub count: __u32, } impl bpf_attr__bindgen_ty_10 { #[inline] pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit } } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_11 { pub name: __u64, pub prog_fd: __u32, pub _bitfield_align_1: [u8; 0], pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, pub cookie: __u64, } impl bpf_attr__bindgen_ty_11 { #[inline] pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit } } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_12 { pub btf: __u64, pub btf_log_buf: __u64, pub btf_size: __u32, pub btf_log_size: __u32, pub btf_log_level: __u32, pub btf_log_true_size: __u32, pub btf_flags: __u32, pub btf_token_fd: __s32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_13 { pub pid: __u32, pub fd: __u32, pub flags: __u32, pub buf_len: __u32, pub buf: __u64, pub prog_id: __u32, pub fd_type: __u32, pub probe_offset: __u64, pub probe_addr: __u64, } #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_attr__bindgen_ty_14 { pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_1, pub __bindgen_anon_2: bpf_attr__bindgen_ty_14__bindgen_ty_2, pub attach_type: __u32, pub flags: __u32, pub __bindgen_anon_3: bpf_attr__bindgen_ty_14__bindgen_ty_3, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_attr__bindgen_ty_14__bindgen_ty_1 { pub prog_fd: __u32, pub map_fd: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub union 
bpf_attr__bindgen_ty_14__bindgen_ty_2 { pub target_fd: __u32, pub target_ifindex: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_attr__bindgen_ty_14__bindgen_ty_3 { pub target_btf_id: __u32, pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_1, pub perf_event: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_2, pub kprobe_multi: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_3, pub tracing: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_4, pub netfilter: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_5, pub tcx: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_6, pub uprobe_multi: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_7, pub netkit: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_8, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_1 { pub iter_info: __u64, pub iter_info_len: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_2 { pub bpf_cookie: __u64, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_3 { pub flags: __u32, pub cnt: __u32, pub syms: __u64, pub addrs: __u64, pub cookies: __u64, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_4 { pub target_btf_id: __u32, pub cookie: __u64, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_5 { pub pf: __u32, pub hooknum: __u32, pub priority: __s32, pub flags: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_6 { pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_6__bindgen_ty_1, pub expected_revision: __u64, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_6__bindgen_ty_1 { pub relative_fd: __u32, pub relative_id: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_7 { pub path: __u64, pub offsets: __u64, pub ref_ctr_offsets: __u64, pub cookies: __u64, pub cnt: __u32, pub flags: __u32, pub pid: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_8 { pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_8__bindgen_ty_1, pub expected_revision: __u64, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_8__bindgen_ty_1 { pub relative_fd: __u32, pub relative_id: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_attr__bindgen_ty_15 { pub link_fd: __u32, pub __bindgen_anon_1: bpf_attr__bindgen_ty_15__bindgen_ty_1, pub flags: __u32, pub __bindgen_anon_2: bpf_attr__bindgen_ty_15__bindgen_ty_2, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_attr__bindgen_ty_15__bindgen_ty_1 { pub new_prog_fd: __u32, pub new_map_fd: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_attr__bindgen_ty_15__bindgen_ty_2 { pub old_prog_fd: __u32, pub old_map_fd: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_16 { pub link_fd: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_17 { pub type_: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_18 { pub link_fd: __u32, pub flags: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_attr__bindgen_ty_19 { pub prog_fd: __u32, pub map_fd: __u32, pub flags: __u32, } #[repr(C)] #[derive(Debug, 
Copy, Clone)] pub struct bpf_attr__bindgen_ty_20 { pub flags: __u32, pub bpffs_fd: __u32, } pub const BPF_F_RECOMPUTE_CSUM: _bindgen_ty_6 = 1; pub const BPF_F_INVALIDATE_HASH: _bindgen_ty_6 = 2; pub type _bindgen_ty_6 = ::core::ffi::c_uint; pub const BPF_F_HDR_FIELD_MASK: _bindgen_ty_7 = 15; pub type _bindgen_ty_7 = ::core::ffi::c_uint; pub const BPF_F_PSEUDO_HDR: _bindgen_ty_8 = 16; pub const BPF_F_MARK_MANGLED_0: _bindgen_ty_8 = 32; pub const BPF_F_MARK_ENFORCE: _bindgen_ty_8 = 64; pub type _bindgen_ty_8 = ::core::ffi::c_uint; pub const BPF_F_INGRESS: _bindgen_ty_9 = 1; pub type _bindgen_ty_9 = ::core::ffi::c_uint; pub const BPF_F_TUNINFO_IPV6: _bindgen_ty_10 = 1; pub type _bindgen_ty_10 = ::core::ffi::c_uint; pub const BPF_F_SKIP_FIELD_MASK: _bindgen_ty_11 = 255; pub const BPF_F_USER_STACK: _bindgen_ty_11 = 256; pub const BPF_F_FAST_STACK_CMP: _bindgen_ty_11 = 512; pub const BPF_F_REUSE_STACKID: _bindgen_ty_11 = 1024; pub const BPF_F_USER_BUILD_ID: _bindgen_ty_11 = 2048; pub type _bindgen_ty_11 = ::core::ffi::c_uint; pub const BPF_F_ZERO_CSUM_TX: _bindgen_ty_12 = 2; pub const BPF_F_DONT_FRAGMENT: _bindgen_ty_12 = 4; pub const BPF_F_SEQ_NUMBER: _bindgen_ty_12 = 8; pub const BPF_F_NO_TUNNEL_KEY: _bindgen_ty_12 = 16; pub type _bindgen_ty_12 = ::core::ffi::c_uint; pub const BPF_F_TUNINFO_FLAGS: _bindgen_ty_13 = 16; pub type _bindgen_ty_13 = ::core::ffi::c_uint; pub const BPF_F_INDEX_MASK: _bindgen_ty_14 = 4294967295; pub const BPF_F_CURRENT_CPU: _bindgen_ty_14 = 4294967295; pub const BPF_F_CTXLEN_MASK: _bindgen_ty_14 = 4503595332403200; pub type _bindgen_ty_14 = ::core::ffi::c_ulong; pub const BPF_F_CURRENT_NETNS: _bindgen_ty_15 = -1; pub type _bindgen_ty_15 = ::core::ffi::c_int; pub const BPF_F_ADJ_ROOM_FIXED_GSO: _bindgen_ty_17 = 1; pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV4: _bindgen_ty_17 = 2; pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV6: _bindgen_ty_17 = 4; pub const BPF_F_ADJ_ROOM_ENCAP_L4_GRE: _bindgen_ty_17 = 8; pub const BPF_F_ADJ_ROOM_ENCAP_L4_UDP: _bindgen_ty_17 = 16; pub const BPF_F_ADJ_ROOM_NO_CSUM_RESET: _bindgen_ty_17 = 32; pub const BPF_F_ADJ_ROOM_ENCAP_L2_ETH: _bindgen_ty_17 = 64; pub const BPF_F_ADJ_ROOM_DECAP_L3_IPV4: _bindgen_ty_17 = 128; pub const BPF_F_ADJ_ROOM_DECAP_L3_IPV6: _bindgen_ty_17 = 256; pub type _bindgen_ty_17 = ::core::ffi::c_uint; pub const BPF_F_SYSCTL_BASE_NAME: _bindgen_ty_19 = 1; pub type _bindgen_ty_19 = ::core::ffi::c_uint; pub const BPF_F_GET_BRANCH_RECORDS_SIZE: _bindgen_ty_21 = 1; pub type _bindgen_ty_21 = ::core::ffi::c_uint; pub const BPF_RINGBUF_BUSY_BIT: _bindgen_ty_24 = 2147483648; pub const BPF_RINGBUF_DISCARD_BIT: _bindgen_ty_24 = 1073741824; pub const BPF_RINGBUF_HDR_SZ: _bindgen_ty_24 = 8; pub type _bindgen_ty_24 = ::core::ffi::c_uint; pub const BPF_F_BPRM_SECUREEXEC: _bindgen_ty_26 = 1; pub type _bindgen_ty_26 = ::core::ffi::c_uint; pub const BPF_F_BROADCAST: _bindgen_ty_27 = 8; pub const BPF_F_EXCLUDE_INGRESS: _bindgen_ty_27 = 16; pub type _bindgen_ty_27 = ::core::ffi::c_uint; #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_devmap_val { pub ifindex: __u32, pub bpf_prog: bpf_devmap_val__bindgen_ty_1, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_devmap_val__bindgen_ty_1 { pub fd: ::core::ffi::c_int, pub id: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_cpumap_val { pub qsize: __u32, pub bpf_prog: bpf_cpumap_val__bindgen_ty_1, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_cpumap_val__bindgen_ty_1 { pub fd: ::core::ffi::c_int, pub id: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_prog_info { pub type_: __u32, 
pub id: __u32, pub tag: [__u8; 8usize], pub jited_prog_len: __u32, pub xlated_prog_len: __u32, pub jited_prog_insns: __u64, pub xlated_prog_insns: __u64, pub load_time: __u64, pub created_by_uid: __u32, pub nr_map_ids: __u32, pub map_ids: __u64, pub name: [::core::ffi::c_char; 16usize], pub ifindex: __u32, pub _bitfield_align_1: [u8; 0], pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, pub netns_dev: __u64, pub netns_ino: __u64, pub nr_jited_ksyms: __u32, pub nr_jited_func_lens: __u32, pub jited_ksyms: __u64, pub jited_func_lens: __u64, pub btf_id: __u32, pub func_info_rec_size: __u32, pub func_info: __u64, pub nr_func_info: __u32, pub nr_line_info: __u32, pub line_info: __u64, pub jited_line_info: __u64, pub nr_jited_line_info: __u32, pub line_info_rec_size: __u32, pub jited_line_info_rec_size: __u32, pub nr_prog_tags: __u32, pub prog_tags: __u64, pub run_time_ns: __u64, pub run_cnt: __u64, pub recursion_misses: __u64, pub verified_insns: __u32, pub attach_btf_obj_id: __u32, pub attach_btf_id: __u32, } impl bpf_prog_info { #[inline] pub fn gpl_compatible(&self) -> __u32 { unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } } #[inline] pub fn set_gpl_compatible(&mut self, val: __u32) { unsafe { let val: u32 = ::core::mem::transmute(val); self._bitfield_1.set(0usize, 1u8, val as u64) } } #[inline] pub fn new_bitfield_1(gpl_compatible: __u32) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit.set(0usize, 1u8, { let gpl_compatible: u32 = unsafe { ::core::mem::transmute(gpl_compatible) }; gpl_compatible as u64 }); __bindgen_bitfield_unit } } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_map_info { pub type_: __u32, pub id: __u32, pub key_size: __u32, pub value_size: __u32, pub max_entries: __u32, pub map_flags: __u32, pub name: [::core::ffi::c_char; 16usize], pub ifindex: __u32, pub btf_vmlinux_value_type_id: __u32, pub netns_dev: __u64, pub netns_ino: __u64, pub btf_id: __u32, pub btf_key_type_id: __u32, pub btf_value_type_id: __u32, pub btf_vmlinux_id: __u32, pub map_extra: __u64, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_btf_info { pub btf: __u64, pub btf_size: __u32, pub id: __u32, pub name: __u64, pub name_len: __u32, pub kernel_btf: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_link_info { pub type_: __u32, pub id: __u32, pub prog_id: __u32, pub __bindgen_anon_1: bpf_link_info__bindgen_ty_1, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_link_info__bindgen_ty_1 { pub raw_tracepoint: bpf_link_info__bindgen_ty_1__bindgen_ty_1, pub tracing: bpf_link_info__bindgen_ty_1__bindgen_ty_2, pub cgroup: bpf_link_info__bindgen_ty_1__bindgen_ty_3, pub iter: bpf_link_info__bindgen_ty_1__bindgen_ty_4, pub netns: bpf_link_info__bindgen_ty_1__bindgen_ty_5, pub xdp: bpf_link_info__bindgen_ty_1__bindgen_ty_6, pub struct_ops: bpf_link_info__bindgen_ty_1__bindgen_ty_7, pub netfilter: bpf_link_info__bindgen_ty_1__bindgen_ty_8, pub kprobe_multi: bpf_link_info__bindgen_ty_1__bindgen_ty_9, pub uprobe_multi: bpf_link_info__bindgen_ty_1__bindgen_ty_10, pub perf_event: bpf_link_info__bindgen_ty_1__bindgen_ty_11, pub tcx: bpf_link_info__bindgen_ty_1__bindgen_ty_12, pub netkit: bpf_link_info__bindgen_ty_1__bindgen_ty_13, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_1 { pub tp_name: __u64, pub tp_name_len: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct 
bpf_link_info__bindgen_ty_1__bindgen_ty_2 { pub attach_type: __u32, pub target_obj_id: __u32, pub target_btf_id: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_3 { pub cgroup_id: __u64, pub attach_type: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4 { pub target_name: __u64, pub target_name_len: __u32, pub __bindgen_anon_1: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1, pub __bindgen_anon_2: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1 { pub map: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1 { pub map_id: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2 { pub cgroup: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2__bindgen_ty_1, pub task: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2__bindgen_ty_2, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2__bindgen_ty_1 { pub cgroup_id: __u64, pub order: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2__bindgen_ty_2 { pub tid: __u32, pub pid: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_5 { pub netns_ino: __u32, pub attach_type: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_6 { pub ifindex: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_7 { pub map_id: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_8 { pub pf: __u32, pub hooknum: __u32, pub priority: __s32, pub flags: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_9 { pub addrs: __u64, pub count: __u32, pub flags: __u32, pub missed: __u64, pub cookies: __u64, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_10 { pub path: __u64, pub offsets: __u64, pub ref_ctr_offsets: __u64, pub cookies: __u64, pub path_size: __u32, pub count: __u32, pub flags: __u32, pub pid: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11 { pub type_: __u32, pub _bitfield_align_1: [u8; 0], pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, pub __bindgen_anon_1: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1, } #[repr(C)] #[derive(Copy, Clone)] pub union bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1 { pub uprobe: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_1, pub kprobe: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_2, pub tracepoint: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_3, pub event: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_4, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_1 { pub file_name: __u64, pub name_len: __u32, pub offset: __u32, pub cookie: __u64, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_2 { pub 
func_name: __u64, pub name_len: __u32, pub offset: __u32, pub addr: __u64, pub missed: __u64, pub cookie: __u64, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_3 { pub tp_name: __u64, pub name_len: __u32, pub _bitfield_align_1: [u8; 0], pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, pub cookie: __u64, } impl bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_3 { #[inline] pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit } } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_4 { pub config: __u64, pub type_: __u32, pub _bitfield_align_1: [u8; 0], pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, pub cookie: __u64, } impl bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_4 { #[inline] pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit } } impl bpf_link_info__bindgen_ty_1__bindgen_ty_11 { #[inline] pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit } } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_12 { pub ifindex: __u32, pub attach_type: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_13 { pub ifindex: __u32, pub attach_type: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_func_info { pub insn_off: __u32, pub type_id: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct bpf_line_info { pub insn_off: __u32, pub file_name_off: __u32, pub line_off: __u32, pub line_col: __u32, } pub const BPF_F_TIMER_ABS: _bindgen_ty_41 = 1; pub const BPF_F_TIMER_CPU_PIN: _bindgen_ty_41 = 2; pub type _bindgen_ty_41 = ::core::ffi::c_uint; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct btf_header { pub magic: __u16, pub version: __u8, pub flags: __u8, pub hdr_len: __u32, pub type_off: __u32, pub type_len: __u32, pub str_off: __u32, pub str_len: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub struct btf_type { pub name_off: __u32, pub info: __u32, pub __bindgen_anon_1: btf_type__bindgen_ty_1, } #[repr(C)] #[derive(Copy, Clone)] pub union btf_type__bindgen_ty_1 { pub size: __u32, pub type_: __u32, } pub const BTF_KIND_UNKN: _bindgen_ty_42 = 0; pub const BTF_KIND_INT: _bindgen_ty_42 = 1; pub const BTF_KIND_PTR: _bindgen_ty_42 = 2; pub const BTF_KIND_ARRAY: _bindgen_ty_42 = 3; pub const BTF_KIND_STRUCT: _bindgen_ty_42 = 4; pub const BTF_KIND_UNION: _bindgen_ty_42 = 5; pub const BTF_KIND_ENUM: _bindgen_ty_42 = 6; pub const BTF_KIND_FWD: _bindgen_ty_42 = 7; pub const BTF_KIND_TYPEDEF: _bindgen_ty_42 = 8; pub const BTF_KIND_VOLATILE: _bindgen_ty_42 = 9; pub const BTF_KIND_CONST: _bindgen_ty_42 = 10; pub const BTF_KIND_RESTRICT: _bindgen_ty_42 = 11; pub const BTF_KIND_FUNC: _bindgen_ty_42 = 12; pub const BTF_KIND_FUNC_PROTO: _bindgen_ty_42 = 13; pub const BTF_KIND_VAR: _bindgen_ty_42 = 14; pub const BTF_KIND_DATASEC: _bindgen_ty_42 = 15; pub const BTF_KIND_FLOAT: _bindgen_ty_42 = 16; pub const BTF_KIND_DECL_TAG: _bindgen_ty_42 = 17; pub const BTF_KIND_TYPE_TAG: _bindgen_ty_42 = 18; pub const BTF_KIND_ENUM64: _bindgen_ty_42 = 
19; pub const NR_BTF_KINDS: _bindgen_ty_42 = 20; pub const BTF_KIND_MAX: _bindgen_ty_42 = 19; pub type _bindgen_ty_42 = ::core::ffi::c_uint; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct btf_enum { pub name_off: __u32, pub val: __s32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct btf_array { pub type_: __u32, pub index_type: __u32, pub nelems: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct btf_member { pub name_off: __u32, pub type_: __u32, pub offset: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct btf_param { pub name_off: __u32, pub type_: __u32, } pub const BTF_VAR_STATIC: _bindgen_ty_43 = 0; pub const BTF_VAR_GLOBAL_ALLOCATED: _bindgen_ty_43 = 1; pub const BTF_VAR_GLOBAL_EXTERN: _bindgen_ty_43 = 2; pub type _bindgen_ty_43 = ::core::ffi::c_uint; #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum btf_func_linkage { BTF_FUNC_STATIC = 0, BTF_FUNC_GLOBAL = 1, BTF_FUNC_EXTERN = 2, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct btf_var { pub linkage: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct btf_var_secinfo { pub type_: __u32, pub offset: __u32, pub size: __u32, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct btf_decl_tag { pub component_idx: __s32, } pub const IFLA_XDP_UNSPEC: _bindgen_ty_92 = 0; pub const IFLA_XDP_FD: _bindgen_ty_92 = 1; pub const IFLA_XDP_ATTACHED: _bindgen_ty_92 = 2; pub const IFLA_XDP_FLAGS: _bindgen_ty_92 = 3; pub const IFLA_XDP_PROG_ID: _bindgen_ty_92 = 4; pub const IFLA_XDP_DRV_PROG_ID: _bindgen_ty_92 = 5; pub const IFLA_XDP_SKB_PROG_ID: _bindgen_ty_92 = 6; pub const IFLA_XDP_HW_PROG_ID: _bindgen_ty_92 = 7; pub const IFLA_XDP_EXPECTED_FD: _bindgen_ty_92 = 8; pub const __IFLA_XDP_MAX: _bindgen_ty_92 = 9; pub type _bindgen_ty_92 = ::core::ffi::c_uint; #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, FromPrimitive)] pub enum perf_type_id { PERF_TYPE_HARDWARE = 0, PERF_TYPE_SOFTWARE = 1, PERF_TYPE_TRACEPOINT = 2, PERF_TYPE_HW_CACHE = 3, PERF_TYPE_RAW = 4, PERF_TYPE_BREAKPOINT = 5, PERF_TYPE_MAX = 6, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum perf_hw_id { PERF_COUNT_HW_CPU_CYCLES = 0, PERF_COUNT_HW_INSTRUCTIONS = 1, PERF_COUNT_HW_CACHE_REFERENCES = 2, PERF_COUNT_HW_CACHE_MISSES = 3, PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, PERF_COUNT_HW_BRANCH_MISSES = 5, PERF_COUNT_HW_BUS_CYCLES = 6, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, PERF_COUNT_HW_REF_CPU_CYCLES = 9, PERF_COUNT_HW_MAX = 10, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum perf_hw_cache_id { PERF_COUNT_HW_CACHE_L1D = 0, PERF_COUNT_HW_CACHE_L1I = 1, PERF_COUNT_HW_CACHE_LL = 2, PERF_COUNT_HW_CACHE_DTLB = 3, PERF_COUNT_HW_CACHE_ITLB = 4, PERF_COUNT_HW_CACHE_BPU = 5, PERF_COUNT_HW_CACHE_NODE = 6, PERF_COUNT_HW_CACHE_MAX = 7, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum perf_hw_cache_op_id { PERF_COUNT_HW_CACHE_OP_READ = 0, PERF_COUNT_HW_CACHE_OP_WRITE = 1, PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, PERF_COUNT_HW_CACHE_OP_MAX = 3, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum perf_hw_cache_op_result_id { PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, PERF_COUNT_HW_CACHE_RESULT_MISS = 1, PERF_COUNT_HW_CACHE_RESULT_MAX = 2, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, FromPrimitive)] pub enum perf_sw_ids { PERF_COUNT_SW_CPU_CLOCK = 0, PERF_COUNT_SW_TASK_CLOCK = 1, PERF_COUNT_SW_PAGE_FAULTS = 2, PERF_COUNT_SW_CONTEXT_SWITCHES = 3, 
PERF_COUNT_SW_CPU_MIGRATIONS = 4, PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, PERF_COUNT_SW_EMULATION_FAULTS = 8, PERF_COUNT_SW_DUMMY = 9, PERF_COUNT_SW_BPF_OUTPUT = 10, PERF_COUNT_SW_CGROUP_SWITCHES = 11, PERF_COUNT_SW_MAX = 12, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, FromPrimitive)] pub enum perf_event_sample_format { PERF_SAMPLE_IP = 1, PERF_SAMPLE_TID = 2, PERF_SAMPLE_TIME = 4, PERF_SAMPLE_ADDR = 8, PERF_SAMPLE_READ = 16, PERF_SAMPLE_CALLCHAIN = 32, PERF_SAMPLE_ID = 64, PERF_SAMPLE_CPU = 128, PERF_SAMPLE_PERIOD = 256, PERF_SAMPLE_STREAM_ID = 512, PERF_SAMPLE_RAW = 1024, PERF_SAMPLE_BRANCH_STACK = 2048, PERF_SAMPLE_REGS_USER = 4096, PERF_SAMPLE_STACK_USER = 8192, PERF_SAMPLE_WEIGHT = 16384, PERF_SAMPLE_DATA_SRC = 32768, PERF_SAMPLE_IDENTIFIER = 65536, PERF_SAMPLE_TRANSACTION = 131072, PERF_SAMPLE_REGS_INTR = 262144, PERF_SAMPLE_PHYS_ADDR = 524288, PERF_SAMPLE_AUX = 1048576, PERF_SAMPLE_CGROUP = 2097152, PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, PERF_SAMPLE_WEIGHT_STRUCT = 16777216, PERF_SAMPLE_MAX = 33554432, } #[repr(C)] #[derive(Copy, Clone)] pub struct perf_event_attr { pub type_: __u32, pub size: __u32, pub config: __u64, pub __bindgen_anon_1: perf_event_attr__bindgen_ty_1, pub sample_type: __u64, pub read_format: __u64, pub _bitfield_align_1: [u32; 0], pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, pub __bindgen_anon_2: perf_event_attr__bindgen_ty_2, pub bp_type: __u32, pub __bindgen_anon_3: perf_event_attr__bindgen_ty_3, pub __bindgen_anon_4: perf_event_attr__bindgen_ty_4, pub branch_sample_type: __u64, pub sample_regs_user: __u64, pub sample_stack_user: __u32, pub clockid: __s32, pub sample_regs_intr: __u64, pub aux_watermark: __u32, pub sample_max_stack: __u16, pub __reserved_2: __u16, pub aux_sample_size: __u32, pub __reserved_3: __u32, pub sig_data: __u64, pub config3: __u64, } #[repr(C)] #[derive(Copy, Clone)] pub union perf_event_attr__bindgen_ty_1 { pub sample_period: __u64, pub sample_freq: __u64, } #[repr(C)] #[derive(Copy, Clone)] pub union perf_event_attr__bindgen_ty_2 { pub wakeup_events: __u32, pub wakeup_watermark: __u32, } #[repr(C)] #[derive(Copy, Clone)] pub union perf_event_attr__bindgen_ty_3 { pub bp_addr: __u64, pub kprobe_func: __u64, pub uprobe_path: __u64, pub config1: __u64, } #[repr(C)] #[derive(Copy, Clone)] pub union perf_event_attr__bindgen_ty_4 { pub bp_len: __u64, pub kprobe_addr: __u64, pub probe_offset: __u64, pub config2: __u64, } impl perf_event_attr { #[inline] pub fn disabled(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) } } #[inline] pub fn set_disabled(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(0usize, 1u8, val as u64) } } #[inline] pub fn inherit(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) } } #[inline] pub fn set_inherit(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(1usize, 1u8, val as u64) } } #[inline] pub fn pinned(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) } } #[inline] pub fn set_pinned(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(2usize, 1u8, val as u64) } } #[inline] pub fn exclusive(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) } } #[inline] pub fn 
set_exclusive(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(3usize, 1u8, val as u64) } } #[inline] pub fn exclude_user(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) } } #[inline] pub fn set_exclude_user(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(4usize, 1u8, val as u64) } } #[inline] pub fn exclude_kernel(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u64) } } #[inline] pub fn set_exclude_kernel(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(5usize, 1u8, val as u64) } } #[inline] pub fn exclude_hv(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u64) } } #[inline] pub fn set_exclude_hv(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(6usize, 1u8, val as u64) } } #[inline] pub fn exclude_idle(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u64) } } #[inline] pub fn set_exclude_idle(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(7usize, 1u8, val as u64) } } #[inline] pub fn mmap(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u64) } } #[inline] pub fn set_mmap(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(8usize, 1u8, val as u64) } } #[inline] pub fn comm(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u64) } } #[inline] pub fn set_comm(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(9usize, 1u8, val as u64) } } #[inline] pub fn freq(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u64) } } #[inline] pub fn set_freq(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(10usize, 1u8, val as u64) } } #[inline] pub fn inherit_stat(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u64) } } #[inline] pub fn set_inherit_stat(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(11usize, 1u8, val as u64) } } #[inline] pub fn enable_on_exec(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u64) } } #[inline] pub fn set_enable_on_exec(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(12usize, 1u8, val as u64) } } #[inline] pub fn task(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u64) } } #[inline] pub fn set_task(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(13usize, 1u8, val as u64) } } #[inline] pub fn watermark(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u64) } } #[inline] pub fn set_watermark(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(14usize, 1u8, val as u64) } } #[inline] pub fn precise_ip(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(15usize, 2u8) as u64) } } #[inline] pub fn set_precise_ip(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(15usize, 2u8, val as u64) } } #[inline] pub 
fn mmap_data(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(17usize, 1u8) as u64) } } #[inline] pub fn set_mmap_data(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(17usize, 1u8, val as u64) } } #[inline] pub fn sample_id_all(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(18usize, 1u8) as u64) } } #[inline] pub fn set_sample_id_all(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(18usize, 1u8, val as u64) } } #[inline] pub fn exclude_host(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(19usize, 1u8) as u64) } } #[inline] pub fn set_exclude_host(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(19usize, 1u8, val as u64) } } #[inline] pub fn exclude_guest(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(20usize, 1u8) as u64) } } #[inline] pub fn set_exclude_guest(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(20usize, 1u8, val as u64) } } #[inline] pub fn exclude_callchain_kernel(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(21usize, 1u8) as u64) } } #[inline] pub fn set_exclude_callchain_kernel(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(21usize, 1u8, val as u64) } } #[inline] pub fn exclude_callchain_user(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(22usize, 1u8) as u64) } } #[inline] pub fn set_exclude_callchain_user(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(22usize, 1u8, val as u64) } } #[inline] pub fn mmap2(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(23usize, 1u8) as u64) } } #[inline] pub fn set_mmap2(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(23usize, 1u8, val as u64) } } #[inline] pub fn comm_exec(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(24usize, 1u8) as u64) } } #[inline] pub fn set_comm_exec(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(24usize, 1u8, val as u64) } } #[inline] pub fn use_clockid(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(25usize, 1u8) as u64) } } #[inline] pub fn set_use_clockid(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(25usize, 1u8, val as u64) } } #[inline] pub fn context_switch(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(26usize, 1u8) as u64) } } #[inline] pub fn set_context_switch(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(26usize, 1u8, val as u64) } } #[inline] pub fn write_backward(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(27usize, 1u8) as u64) } } #[inline] pub fn set_write_backward(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(27usize, 1u8, val as u64) } } #[inline] pub fn namespaces(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(28usize, 1u8) as u64) } } #[inline] pub fn set_namespaces(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(28usize, 1u8, val as u64) } } #[inline] pub fn ksymbol(&self) -> __u64 { unsafe { 
::core::mem::transmute(self._bitfield_1.get(29usize, 1u8) as u64) } } #[inline] pub fn set_ksymbol(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(29usize, 1u8, val as u64) } } #[inline] pub fn bpf_event(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(30usize, 1u8) as u64) } } #[inline] pub fn set_bpf_event(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(30usize, 1u8, val as u64) } } #[inline] pub fn aux_output(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(31usize, 1u8) as u64) } } #[inline] pub fn set_aux_output(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(31usize, 1u8, val as u64) } } #[inline] pub fn cgroup(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(32usize, 1u8) as u64) } } #[inline] pub fn set_cgroup(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(32usize, 1u8, val as u64) } } #[inline] pub fn text_poke(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(33usize, 1u8) as u64) } } #[inline] pub fn set_text_poke(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(33usize, 1u8, val as u64) } } #[inline] pub fn build_id(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(34usize, 1u8) as u64) } } #[inline] pub fn set_build_id(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(34usize, 1u8, val as u64) } } #[inline] pub fn inherit_thread(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(35usize, 1u8) as u64) } } #[inline] pub fn set_inherit_thread(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(35usize, 1u8, val as u64) } } #[inline] pub fn remove_on_exec(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(36usize, 1u8) as u64) } } #[inline] pub fn set_remove_on_exec(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(36usize, 1u8, val as u64) } } #[inline] pub fn sigtrap(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(37usize, 1u8) as u64) } } #[inline] pub fn set_sigtrap(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(37usize, 1u8, val as u64) } } #[inline] pub fn __reserved_1(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(38usize, 26u8) as u64) } } #[inline] pub fn set___reserved_1(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(38usize, 26u8, val as u64) } } #[inline] pub fn new_bitfield_1( disabled: __u64, inherit: __u64, pinned: __u64, exclusive: __u64, exclude_user: __u64, exclude_kernel: __u64, exclude_hv: __u64, exclude_idle: __u64, mmap: __u64, comm: __u64, freq: __u64, inherit_stat: __u64, enable_on_exec: __u64, task: __u64, watermark: __u64, precise_ip: __u64, mmap_data: __u64, sample_id_all: __u64, exclude_host: __u64, exclude_guest: __u64, exclude_callchain_kernel: __u64, exclude_callchain_user: __u64, mmap2: __u64, comm_exec: __u64, use_clockid: __u64, context_switch: __u64, write_backward: __u64, namespaces: __u64, ksymbol: __u64, bpf_event: __u64, aux_output: __u64, cgroup: __u64, text_poke: __u64, build_id: __u64, inherit_thread: __u64, remove_on_exec: __u64, sigtrap: __u64, 
__reserved_1: __u64, ) -> __BindgenBitfieldUnit<[u8; 8usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); __bindgen_bitfield_unit.set(0usize, 1u8, { let disabled: u64 = unsafe { ::core::mem::transmute(disabled) }; disabled as u64 }); __bindgen_bitfield_unit.set(1usize, 1u8, { let inherit: u64 = unsafe { ::core::mem::transmute(inherit) }; inherit as u64 }); __bindgen_bitfield_unit.set(2usize, 1u8, { let pinned: u64 = unsafe { ::core::mem::transmute(pinned) }; pinned as u64 }); __bindgen_bitfield_unit.set(3usize, 1u8, { let exclusive: u64 = unsafe { ::core::mem::transmute(exclusive) }; exclusive as u64 }); __bindgen_bitfield_unit.set(4usize, 1u8, { let exclude_user: u64 = unsafe { ::core::mem::transmute(exclude_user) }; exclude_user as u64 }); __bindgen_bitfield_unit.set(5usize, 1u8, { let exclude_kernel: u64 = unsafe { ::core::mem::transmute(exclude_kernel) }; exclude_kernel as u64 }); __bindgen_bitfield_unit.set(6usize, 1u8, { let exclude_hv: u64 = unsafe { ::core::mem::transmute(exclude_hv) }; exclude_hv as u64 }); __bindgen_bitfield_unit.set(7usize, 1u8, { let exclude_idle: u64 = unsafe { ::core::mem::transmute(exclude_idle) }; exclude_idle as u64 }); __bindgen_bitfield_unit.set(8usize, 1u8, { let mmap: u64 = unsafe { ::core::mem::transmute(mmap) }; mmap as u64 }); __bindgen_bitfield_unit.set(9usize, 1u8, { let comm: u64 = unsafe { ::core::mem::transmute(comm) }; comm as u64 }); __bindgen_bitfield_unit.set(10usize, 1u8, { let freq: u64 = unsafe { ::core::mem::transmute(freq) }; freq as u64 }); __bindgen_bitfield_unit.set(11usize, 1u8, { let inherit_stat: u64 = unsafe { ::core::mem::transmute(inherit_stat) }; inherit_stat as u64 }); __bindgen_bitfield_unit.set(12usize, 1u8, { let enable_on_exec: u64 = unsafe { ::core::mem::transmute(enable_on_exec) }; enable_on_exec as u64 }); __bindgen_bitfield_unit.set(13usize, 1u8, { let task: u64 = unsafe { ::core::mem::transmute(task) }; task as u64 }); __bindgen_bitfield_unit.set(14usize, 1u8, { let watermark: u64 = unsafe { ::core::mem::transmute(watermark) }; watermark as u64 }); __bindgen_bitfield_unit.set(15usize, 2u8, { let precise_ip: u64 = unsafe { ::core::mem::transmute(precise_ip) }; precise_ip as u64 }); __bindgen_bitfield_unit.set(17usize, 1u8, { let mmap_data: u64 = unsafe { ::core::mem::transmute(mmap_data) }; mmap_data as u64 }); __bindgen_bitfield_unit.set(18usize, 1u8, { let sample_id_all: u64 = unsafe { ::core::mem::transmute(sample_id_all) }; sample_id_all as u64 }); __bindgen_bitfield_unit.set(19usize, 1u8, { let exclude_host: u64 = unsafe { ::core::mem::transmute(exclude_host) }; exclude_host as u64 }); __bindgen_bitfield_unit.set(20usize, 1u8, { let exclude_guest: u64 = unsafe { ::core::mem::transmute(exclude_guest) }; exclude_guest as u64 }); __bindgen_bitfield_unit.set(21usize, 1u8, { let exclude_callchain_kernel: u64 = unsafe { ::core::mem::transmute(exclude_callchain_kernel) }; exclude_callchain_kernel as u64 }); __bindgen_bitfield_unit.set(22usize, 1u8, { let exclude_callchain_user: u64 = unsafe { ::core::mem::transmute(exclude_callchain_user) }; exclude_callchain_user as u64 }); __bindgen_bitfield_unit.set(23usize, 1u8, { let mmap2: u64 = unsafe { ::core::mem::transmute(mmap2) }; mmap2 as u64 }); __bindgen_bitfield_unit.set(24usize, 1u8, { let comm_exec: u64 = unsafe { ::core::mem::transmute(comm_exec) }; comm_exec as u64 }); __bindgen_bitfield_unit.set(25usize, 1u8, { let use_clockid: u64 = unsafe { ::core::mem::transmute(use_clockid) }; use_clockid as u64 }); 
__bindgen_bitfield_unit.set(26usize, 1u8, { let context_switch: u64 = unsafe { ::core::mem::transmute(context_switch) }; context_switch as u64 }); __bindgen_bitfield_unit.set(27usize, 1u8, { let write_backward: u64 = unsafe { ::core::mem::transmute(write_backward) }; write_backward as u64 }); __bindgen_bitfield_unit.set(28usize, 1u8, { let namespaces: u64 = unsafe { ::core::mem::transmute(namespaces) }; namespaces as u64 }); __bindgen_bitfield_unit.set(29usize, 1u8, { let ksymbol: u64 = unsafe { ::core::mem::transmute(ksymbol) }; ksymbol as u64 }); __bindgen_bitfield_unit.set(30usize, 1u8, { let bpf_event: u64 = unsafe { ::core::mem::transmute(bpf_event) }; bpf_event as u64 }); __bindgen_bitfield_unit.set(31usize, 1u8, { let aux_output: u64 = unsafe { ::core::mem::transmute(aux_output) }; aux_output as u64 }); __bindgen_bitfield_unit.set(32usize, 1u8, { let cgroup: u64 = unsafe { ::core::mem::transmute(cgroup) }; cgroup as u64 }); __bindgen_bitfield_unit.set(33usize, 1u8, { let text_poke: u64 = unsafe { ::core::mem::transmute(text_poke) }; text_poke as u64 }); __bindgen_bitfield_unit.set(34usize, 1u8, { let build_id: u64 = unsafe { ::core::mem::transmute(build_id) }; build_id as u64 }); __bindgen_bitfield_unit.set(35usize, 1u8, { let inherit_thread: u64 = unsafe { ::core::mem::transmute(inherit_thread) }; inherit_thread as u64 }); __bindgen_bitfield_unit.set(36usize, 1u8, { let remove_on_exec: u64 = unsafe { ::core::mem::transmute(remove_on_exec) }; remove_on_exec as u64 }); __bindgen_bitfield_unit.set(37usize, 1u8, { let sigtrap: u64 = unsafe { ::core::mem::transmute(sigtrap) }; sigtrap as u64 }); __bindgen_bitfield_unit.set(38usize, 26u8, { let __reserved_1: u64 = unsafe { ::core::mem::transmute(__reserved_1) }; __reserved_1 as u64 }); __bindgen_bitfield_unit } } #[repr(C)] #[derive(Copy, Clone)] pub struct perf_event_mmap_page { pub version: __u32, pub compat_version: __u32, pub lock: __u32, pub index: __u32, pub offset: __s64, pub time_enabled: __u64, pub time_running: __u64, pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1, pub pmc_width: __u16, pub time_shift: __u16, pub time_mult: __u32, pub time_offset: __u64, pub time_zero: __u64, pub size: __u32, pub __reserved_1: __u32, pub time_cycles: __u64, pub time_mask: __u64, pub __reserved: [__u8; 928usize], pub data_head: __u64, pub data_tail: __u64, pub data_offset: __u64, pub data_size: __u64, pub aux_head: __u64, pub aux_tail: __u64, pub aux_offset: __u64, pub aux_size: __u64, } #[repr(C)] #[derive(Copy, Clone)] pub union perf_event_mmap_page__bindgen_ty_1 { pub capabilities: __u64, pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { pub _bitfield_align_1: [u64; 0], pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, } impl perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { #[inline] pub fn cap_bit0(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) } } #[inline] pub fn set_cap_bit0(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(0usize, 1u8, val as u64) } } #[inline] pub fn cap_bit0_is_deprecated(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) } } #[inline] pub fn set_cap_bit0_is_deprecated(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(1usize, 1u8, val as u64) } } #[inline] pub fn cap_user_rdpmc(&self) -> __u64 { 
unsafe { ::core::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) } } #[inline] pub fn set_cap_user_rdpmc(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(2usize, 1u8, val as u64) } } #[inline] pub fn cap_user_time(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) } } #[inline] pub fn set_cap_user_time(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(3usize, 1u8, val as u64) } } #[inline] pub fn cap_user_time_zero(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) } } #[inline] pub fn set_cap_user_time_zero(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(4usize, 1u8, val as u64) } } #[inline] pub fn cap_user_time_short(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u64) } } #[inline] pub fn set_cap_user_time_short(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(5usize, 1u8, val as u64) } } #[inline] pub fn cap_____res(&self) -> __u64 { unsafe { ::core::mem::transmute(self._bitfield_1.get(6usize, 58u8) as u64) } } #[inline] pub fn set_cap_____res(&mut self, val: __u64) { unsafe { let val: u64 = ::core::mem::transmute(val); self._bitfield_1.set(6usize, 58u8, val as u64) } } #[inline] pub fn new_bitfield_1( cap_bit0: __u64, cap_bit0_is_deprecated: __u64, cap_user_rdpmc: __u64, cap_user_time: __u64, cap_user_time_zero: __u64, cap_user_time_short: __u64, cap_____res: __u64, ) -> __BindgenBitfieldUnit<[u8; 8usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); __bindgen_bitfield_unit.set(0usize, 1u8, { let cap_bit0: u64 = unsafe { ::core::mem::transmute(cap_bit0) }; cap_bit0 as u64 }); __bindgen_bitfield_unit.set(1usize, 1u8, { let cap_bit0_is_deprecated: u64 = unsafe { ::core::mem::transmute(cap_bit0_is_deprecated) }; cap_bit0_is_deprecated as u64 }); __bindgen_bitfield_unit.set(2usize, 1u8, { let cap_user_rdpmc: u64 = unsafe { ::core::mem::transmute(cap_user_rdpmc) }; cap_user_rdpmc as u64 }); __bindgen_bitfield_unit.set(3usize, 1u8, { let cap_user_time: u64 = unsafe { ::core::mem::transmute(cap_user_time) }; cap_user_time as u64 }); __bindgen_bitfield_unit.set(4usize, 1u8, { let cap_user_time_zero: u64 = unsafe { ::core::mem::transmute(cap_user_time_zero) }; cap_user_time_zero as u64 }); __bindgen_bitfield_unit.set(5usize, 1u8, { let cap_user_time_short: u64 = unsafe { ::core::mem::transmute(cap_user_time_short) }; cap_user_time_short as u64 }); __bindgen_bitfield_unit.set(6usize, 58u8, { let cap_____res: u64 = unsafe { ::core::mem::transmute(cap_____res) }; cap_____res as u64 }); __bindgen_bitfield_unit } } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct perf_event_header { pub type_: __u32, pub misc: __u16, pub size: __u16, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, ToPrimitive)] pub enum perf_event_type { PERF_RECORD_MMAP = 1, PERF_RECORD_LOST = 2, PERF_RECORD_COMM = 3, PERF_RECORD_EXIT = 4, PERF_RECORD_THROTTLE = 5, PERF_RECORD_UNTHROTTLE = 6, PERF_RECORD_FORK = 7, PERF_RECORD_READ = 8, PERF_RECORD_SAMPLE = 9, PERF_RECORD_MMAP2 = 10, PERF_RECORD_AUX = 11, PERF_RECORD_ITRACE_START = 12, PERF_RECORD_LOST_SAMPLES = 13, PERF_RECORD_SWITCH = 14, PERF_RECORD_SWITCH_CPU_WIDE = 15, PERF_RECORD_NAMESPACES = 16, PERF_RECORD_KSYMBOL = 17, PERF_RECORD_BPF_EVENT = 18, PERF_RECORD_CGROUP = 19, 
PERF_RECORD_TEXT_POKE = 20, PERF_RECORD_AUX_OUTPUT_HW_ID = 21, PERF_RECORD_MAX = 22, } pub const TCA_BPF_UNSPEC: _bindgen_ty_152 = 0; pub const TCA_BPF_ACT: _bindgen_ty_152 = 1; pub const TCA_BPF_POLICE: _bindgen_ty_152 = 2; pub const TCA_BPF_CLASSID: _bindgen_ty_152 = 3; pub const TCA_BPF_OPS_LEN: _bindgen_ty_152 = 4; pub const TCA_BPF_OPS: _bindgen_ty_152 = 5; pub const TCA_BPF_FD: _bindgen_ty_152 = 6; pub const TCA_BPF_NAME: _bindgen_ty_152 = 7; pub const TCA_BPF_FLAGS: _bindgen_ty_152 = 8; pub const TCA_BPF_FLAGS_GEN: _bindgen_ty_152 = 9; pub const TCA_BPF_TAG: _bindgen_ty_152 = 10; pub const TCA_BPF_ID: _bindgen_ty_152 = 11; pub const __TCA_BPF_MAX: _bindgen_ty_152 = 12; pub type _bindgen_ty_152 = ::core::ffi::c_uint; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ifinfomsg { pub ifi_family: ::core::ffi::c_uchar, pub __ifi_pad: ::core::ffi::c_uchar, pub ifi_type: ::core::ffi::c_ushort, pub ifi_index: ::core::ffi::c_int, pub ifi_flags: ::core::ffi::c_uint, pub ifi_change: ::core::ffi::c_uint, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct tcmsg { pub tcm_family: ::core::ffi::c_uchar, pub tcm__pad1: ::core::ffi::c_uchar, pub tcm__pad2: ::core::ffi::c_ushort, pub tcm_ifindex: ::core::ffi::c_int, pub tcm_handle: __u32, pub tcm_parent: __u32, pub tcm_info: __u32, } pub const TCA_UNSPEC: _bindgen_ty_170 = 0; pub const TCA_KIND: _bindgen_ty_170 = 1; pub const TCA_OPTIONS: _bindgen_ty_170 = 2; pub const TCA_STATS: _bindgen_ty_170 = 3; pub const TCA_XSTATS: _bindgen_ty_170 = 4; pub const TCA_RATE: _bindgen_ty_170 = 5; pub const TCA_FCNT: _bindgen_ty_170 = 6; pub const TCA_STATS2: _bindgen_ty_170 = 7; pub const TCA_STAB: _bindgen_ty_170 = 8; pub const TCA_PAD: _bindgen_ty_170 = 9; pub const TCA_DUMP_INVISIBLE: _bindgen_ty_170 = 10; pub const TCA_CHAIN: _bindgen_ty_170 = 11; pub const TCA_HW_OFFLOAD: _bindgen_ty_170 = 12; pub const TCA_INGRESS_BLOCK: _bindgen_ty_170 = 13; pub const TCA_EGRESS_BLOCK: _bindgen_ty_170 = 14; pub const __TCA_MAX: _bindgen_ty_170 = 15; pub type _bindgen_ty_170 = ::core::ffi::c_uint; pub const AYA_PERF_EVENT_IOC_ENABLE: ::core::ffi::c_int = 9216; pub const AYA_PERF_EVENT_IOC_DISABLE: ::core::ffi::c_int = 9217; pub const AYA_PERF_EVENT_IOC_SET_BPF: ::core::ffi::c_int = 1074013192;
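// --- Illustrative usage sketches (hedged additions, not bindgen output) ----
// The sketches below only use items defined in this file plus ::core; where a
// name is assumed rather than shown, the comment says so.

// Sketch 1: the set_ksymbol()/set_bpf_event() accessors earlier in this file
// pack individual flag bits into one 64-bit bitfield unit. `perf_event_attr`
// is assumed to be the struct those accessors were generated for (it is
// defined earlier in this file, outside this excerpt).
#[allow(dead_code)]
fn attr_with_bpf_tracking() -> perf_event_attr {
    // SAFETY: perf_event_attr is a plain C struct; an all-zero value is the
    // conventional starting point before enabling individual flag bits.
    let mut attr: perf_event_attr = unsafe { ::core::mem::zeroed() };
    attr.set_ksymbol(1); // bit 29: emit PERF_RECORD_KSYMBOL records
    attr.set_bpf_event(1); // bit 30: emit PERF_RECORD_BPF_EVENT records
    debug_assert!(attr.bpf_event() == 1);
    attr
}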
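// Sketch 2: perf_event_mmap_page exposes its capabilities word both as a raw
// __u64 and as the bitfield struct above; reading one capability goes through
// the anonymous union, hence the unsafe block.
#[allow(dead_code)]
fn user_rdpmc_allowed(page: &perf_event_mmap_page) -> bool {
    // SAFETY: both union variants view the same 64-bit capabilities word, so
    // reading the bitfield interpretation is always in bounds.
    unsafe { page.__bindgen_anon_1.__bindgen_anon_1.cap_user_rdpmc() == 1 }
}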
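// Sketch 3: records read from a perf ring buffer start with a
// perf_event_header; its type_ field is compared against the perf_event_type
// discriminants above.
#[allow(dead_code)]
fn is_sample_record(header: &perf_event_header) -> bool {
    header.type_ == perf_event_type::PERF_RECORD_SAMPLE as u32
}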
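// Sketch 4: a tcmsg as it might appear in a netlink tc request targeting the
// clsact ingress hook. tc_h_make mirrors the kernel's TC_H_MAKE() macro; the
// AF_UNSPEC (0) family and the caller-supplied ifindex are assumptions of
// this sketch, not something the bindings prescribe.
#[allow(dead_code)]
fn clsact_ingress_tcmsg(ifindex: ::core::ffi::c_int) -> tcmsg {
    // TC_H_MAKE(maj, min): major handle in the upper 16 bits, minor in the lower 16.
    fn tc_h_make(maj: __u32, min: __u32) -> __u32 {
        (maj & TC_H_MAJ_MASK) | (min & TC_H_MIN_MASK)
    }
    tcmsg {
        tcm_family: 0, // AF_UNSPEC
        tcm__pad1: 0,
        tcm__pad2: 0,
        tcm_ifindex: ifindex,
        tcm_handle: 0,
        // Parent ffff:fff2 selects the clsact qdisc's ingress pseudo-class.
        tcm_parent: tc_h_make(TC_H_CLSACT, TC_H_MIN_INGRESS),
        tcm_info: 0,
    }
}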
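// Sketch 5: the AYA_PERF_EVENT_IOC_* values above are precomputed
// perf_event_open(2) ioctl request codes, i.e. _IO('$', 0), _IO('$', 1) and
// _IOW('$', 8, __u32) under the x86_64/aarch64 ioctl bit layout ('$' == 0x24).
// Compile-time checks that the precomputed numbers match that encoding:
const _: () = assert!(AYA_PERF_EVENT_IOC_ENABLE == 0x2400);
const _: () = assert!(AYA_PERF_EVENT_IOC_DISABLE == 0x2401);
// _IOW: direction 01 (write) in bits 30..32, size 4 in bits 16..30,
// type 0x24 in bits 8..16, nr 8 in bits 0..8.
const _: () = assert!(AYA_PERF_EVENT_IOC_SET_BPF == ((1 << 30) | (4 << 16) | (0x24 << 8) | 8));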