/linux-5.19.10/include/scsi/scsi_devinfo.h
      9  #define BLIST_NOLUN ((__force blist_flags_t)(1ULL << 0))
     12  #define BLIST_FORCELUN ((__force blist_flags_t)(1ULL << 1))
     14  #define BLIST_BORKEN ((__force blist_flags_t)(1ULL << 2))
     16  #define BLIST_KEY ((__force blist_flags_t)(1ULL << 3))
     18  #define BLIST_SINGLELUN ((__force blist_flags_t)(1ULL << 4))
     20  #define BLIST_NOTQ ((__force blist_flags_t)(1ULL << 5))
     22  #define BLIST_SPARSELUN ((__force blist_flags_t)(1ULL << 6))
     24  #define BLIST_MAX5LUN ((__force blist_flags_t)(1ULL << 7))
     26  #define BLIST_ISROM ((__force blist_flags_t)(1ULL << 8))
     28  #define BLIST_LARGELUN ((__force blist_flags_t)(1ULL << 9))
  [all …]

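The BLIST_* constants above all follow one pattern: blist_flags_t is declared __bitwise, so sparse flags any mixing with plain integers, and __force silences that check exactly where the typed constants are defined (the same idea recurs below for __poll_t, gfp_t, upf_t and the ALSA types). A minimal standalone sketch of the pattern; demo_flags_t and the DEMO_* names are made up for illustration, and the __force/__bitwise macros mirror include/linux/compiler_types.h:

    /* Sparse-style annotations: empty for a normal compile, real attributes
     * only when sparse (which defines __CHECKER__) is looking. */
    #include <stdio.h>

    #ifdef __CHECKER__
    #define __bitwise __attribute__((bitwise))
    #define __force   __attribute__((force))
    #else
    #define __bitwise
    #define __force
    #endif

    typedef unsigned long long __bitwise demo_flags_t;

    /* Without __force, sparse would warn about casting a plain integer
     * to the restricted bitwise type. */
    #define DEMO_NOLUN     ((__force demo_flags_t)(1ULL << 0))
    #define DEMO_SPARSELUN ((__force demo_flags_t)(1ULL << 6))

    int main(void)
    {
        demo_flags_t flags = DEMO_NOLUN | DEMO_SPARSELUN;

        if (flags & DEMO_SPARSELUN)
            printf("sparse LUN scanning requested\n");
        return 0;
    }

Built with plain gcc the annotations expand to nothing; only a sparse run enforces the typing.
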
/linux-5.19.10/drivers/net/wireless/intel/iwlwifi/fw/file.h
    249  IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
    250  IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
    251  IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
    252  IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
    253  IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28,
    254  IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29,
    255  IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
    256  IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
    258  IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
    259  IWL_UCODE_TLV_API_OCE = (__force iwl_ucode_tlv_api_t)33,
  [all …]

/linux-5.19.10/include/uapi/linux/byteorder/big_endian.h
     16  #define __constant_htonl(x) ((__force __be32)(__u32)(x))
     17  #define __constant_ntohl(x) ((__force __u32)(__be32)(x))
     18  #define __constant_htons(x) ((__force __be16)(__u16)(x))
     19  #define __constant_ntohs(x) ((__force __u16)(__be16)(x))
     20  #define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
     21  #define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
     22  #define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
     23  #define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x))
     24  #define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
     25  #define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x))
  [all …]

/linux-5.19.10/include/uapi/linux/byteorder/little_endian.h
     16  #define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
     17  #define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
     18  #define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
     19  #define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
     20  #define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
     21  #define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
     22  #define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
     23  #define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
     24  #define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
     25  #define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
  [all …]

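The two headers differ only in which direction needs a byte swap: on a big-endian host cpu_to_le* swaps and cpu_to_be* is a plain cast, on a little-endian host it is the reverse, and in both cases the __force cast retags the value as __le*/__be* for sparse. A standalone sketch of the little-endian case; the demo_* names are illustrative and the swap macro mirrors ___constant_swab32() from include/uapi/linux/swab.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Constant-foldable 32-bit byte swap, like ___constant_swab32(). */
    #define demo_swab32(x) ((uint32_t)(                 \
        (((uint32_t)(x) & 0x000000ffUL) << 24) |        \
        (((uint32_t)(x) & 0x0000ff00UL) <<  8) |        \
        (((uint32_t)(x) & 0x00ff0000UL) >>  8) |        \
        (((uint32_t)(x) & 0xff000000UL) >> 24)))

    /* On a little-endian CPU: LE is the identity, BE needs the swap.  In the
     * kernel the casts also change the sparse type (__le32/__be32); here they
     * stay plain uint32_t because sparse is not involved. */
    #define demo_cpu_to_le32(x) ((uint32_t)(x))
    #define demo_cpu_to_be32(x) demo_swab32(x)

    int main(void)
    {
        uint32_t host = 0x12345678;

        printf("le32: %08x\n", demo_cpu_to_le32(host)); /* 12345678 */
        printf("be32: %08x\n", demo_cpu_to_be32(host)); /* 78563412 */
        return 0;
    }
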
/linux-5.19.10/include/uapi/linux/eventpoll.h
     31  #define EPOLLIN (__force __poll_t)0x00000001
     32  #define EPOLLPRI (__force __poll_t)0x00000002
     33  #define EPOLLOUT (__force __poll_t)0x00000004
     34  #define EPOLLERR (__force __poll_t)0x00000008
     35  #define EPOLLHUP (__force __poll_t)0x00000010
     36  #define EPOLLNVAL (__force __poll_t)0x00000020
     37  #define EPOLLRDNORM (__force __poll_t)0x00000040
     38  #define EPOLLRDBAND (__force __poll_t)0x00000080
     39  #define EPOLLWRNORM (__force __poll_t)0x00000100
     40  #define EPOLLWRBAND (__force __poll_t)0x00000200
  [all …]

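These are the same EPOLL* bits user space passes to epoll_ctl(2); the __force cast only matters inside the kernel, where the values carry the sparse type __poll_t. A minimal userspace example that exercises the bits through the regular epoll API (assumes a Linux host; it just watches stdin for readability):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/epoll.h>

    int main(void)
    {
        struct epoll_event ev = { .events = EPOLLIN, .data.fd = STDIN_FILENO };
        struct epoll_event out;
        int epfd = epoll_create1(0);

        if (epfd < 0 || epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0) {
            perror("epoll");
            return 1;
        }

        /* Wait up to five seconds for EPOLLIN on stdin. */
        if (epoll_wait(epfd, &out, 1, 5000) == 1 && (out.events & EPOLLIN))
            printf("stdin is readable\n");

        close(epfd);
        return 0;
    }
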
/linux-5.19.10/tools/include/uapi/sound/asound.h
    189  #define SNDRV_PCM_ACCESS_MMAP_INTERLEAVED ((__force snd_pcm_access_t) 0) /* interleaved mmap */
    190  #define SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED ((__force snd_pcm_access_t) 1) /* noninterleaved mmap …
    191  #define SNDRV_PCM_ACCESS_MMAP_COMPLEX ((__force snd_pcm_access_t) 2) /* complex mmap */
    192  #define SNDRV_PCM_ACCESS_RW_INTERLEAVED ((__force snd_pcm_access_t) 3) /* readi/writei */
    193  #define SNDRV_PCM_ACCESS_RW_NONINTERLEAVED ((__force snd_pcm_access_t) 4) /* readn/writen */
    197  #define SNDRV_PCM_FORMAT_S8 ((__force snd_pcm_format_t) 0)
    198  #define SNDRV_PCM_FORMAT_U8 ((__force snd_pcm_format_t) 1)
    199  #define SNDRV_PCM_FORMAT_S16_LE ((__force snd_pcm_format_t) 2)
    200  #define SNDRV_PCM_FORMAT_S16_BE ((__force snd_pcm_format_t) 3)
    201  #define SNDRV_PCM_FORMAT_U16_LE ((__force snd_pcm_format_t) 4)
  [all …]

/linux-5.19.10/include/uapi/sound/asound.h
    189  #define SNDRV_PCM_ACCESS_MMAP_INTERLEAVED ((__force snd_pcm_access_t) 0) /* interleaved mmap */
    190  #define SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED ((__force snd_pcm_access_t) 1) /* noninterleaved mmap …
    191  #define SNDRV_PCM_ACCESS_MMAP_COMPLEX ((__force snd_pcm_access_t) 2) /* complex mmap */
    192  #define SNDRV_PCM_ACCESS_RW_INTERLEAVED ((__force snd_pcm_access_t) 3) /* readi/writei */
    193  #define SNDRV_PCM_ACCESS_RW_NONINTERLEAVED ((__force snd_pcm_access_t) 4) /* readn/writen */
    197  #define SNDRV_PCM_FORMAT_S8 ((__force snd_pcm_format_t) 0)
    198  #define SNDRV_PCM_FORMAT_U8 ((__force snd_pcm_format_t) 1)
    199  #define SNDRV_PCM_FORMAT_S16_LE ((__force snd_pcm_format_t) 2)
    200  #define SNDRV_PCM_FORMAT_S16_BE ((__force snd_pcm_format_t) 3)
    201  #define SNDRV_PCM_FORMAT_U16_LE ((__force snd_pcm_format_t) 4)
  [all …]

/linux-5.19.10/net/ipv6/ip6_checksum.c
     16  __u32 sum = (__force u32)csum;  in csum_ipv6_magic()
     18  sum += (__force u32)saddr->s6_addr32[0];  in csum_ipv6_magic()
     19  carry = (sum < (__force u32)saddr->s6_addr32[0]);  in csum_ipv6_magic()
     22  sum += (__force u32)saddr->s6_addr32[1];  in csum_ipv6_magic()
     23  carry = (sum < (__force u32)saddr->s6_addr32[1]);  in csum_ipv6_magic()
     26  sum += (__force u32)saddr->s6_addr32[2];  in csum_ipv6_magic()
     27  carry = (sum < (__force u32)saddr->s6_addr32[2]);  in csum_ipv6_magic()
     30  sum += (__force u32)saddr->s6_addr32[3];  in csum_ipv6_magic()
     31  carry = (sum < (__force u32)saddr->s6_addr32[3]);  in csum_ipv6_magic()
     34  sum += (__force u32)daddr->s6_addr32[0];  in csum_ipv6_magic()
  [all …]

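csum_ipv6_magic() folds the address words into a 32-bit ones'-complement sum and propagates the carry by hand: after an add, "sum < addend" is 1 exactly when the addition wrapped, so the carry can be added back in. A standalone sketch of that idiom; demo_add32_carry() is an illustrative name, not a kernel helper:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t demo_add32_carry(uint32_t sum, uint32_t addend)
    {
        sum += addend;
        sum += (sum < addend);   /* end-around carry */
        return sum;
    }

    int main(void)
    {
        uint32_t words[] = { 0xffffffffu, 0x00000001u, 0x80000000u };
        uint32_t sum = 0;

        for (unsigned i = 0; i < sizeof(words) / sizeof(words[0]); i++)
            sum = demo_add32_carry(sum, words[i]);

        printf("ones'-complement sum: %08x\n", sum);
        return 0;
    }
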
/linux-5.19.10/arch/s390/include/asm/checksum.h
     49  u32 csum = (__force u32) sum;  in csum_fold()
     53  return (__force __sum16) ~csum;  in csum_fold()
     73  return csum_fold((__force __wsum)(csum >> 32));  in ip_fast_csum()
     83  __u64 csum = (__force __u64)sum;  in csum_tcpudp_nofold()
     85  csum += (__force __u32)saddr;  in csum_tcpudp_nofold()
     86  csum += (__force __u32)daddr;  in csum_tcpudp_nofold()
     90  return (__force __wsum)(csum >> 32);  in csum_tcpudp_nofold()
    116  __u64 sum = (__force __u64)csum;  in csum_ipv6_magic()
    118  sum += (__force __u32)saddr->s6_addr32[0];  in csum_ipv6_magic()
    119  sum += (__force __u32)saddr->s6_addr32[1];  in csum_ipv6_magic()
  [all …]

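The s390 helpers accumulate in 64-bit registers and only fold at the end; csum_fold() is the final step that collapses a 32-bit ones'-complement accumulator into the 16-bit value stored in headers. A standalone sketch of that fold; demo_csum_fold() is an illustrative stand-in, and the kernel version additionally flips the sparse type from __wsum to __sum16:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t demo_csum_fold(uint32_t csum)
    {
        csum = (csum & 0xffff) + (csum >> 16);  /* fold high half into low half */
        csum = (csum & 0xffff) + (csum >> 16);  /* fold the carry that may remain */
        return (uint16_t)~csum;                 /* final complement */
    }

    int main(void)
    {
        /* Partial ones'-complement sum of a few example 16-bit words. */
        uint32_t sum = 0x4500u + 0x0034u + 0x1c46u + 0x4000u
                     + 0x4006u + 0xac10u + 0x0a63u;

        printf("folded checksum: %04x\n", (unsigned)demo_csum_fold(sum));
        return 0;
    }
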
/linux-5.19.10/include/linux/virtio_byteorder.h
     19  return le16_to_cpu((__force __le16)val);  in __virtio16_to_cpu()
     21  return be16_to_cpu((__force __be16)val);  in __virtio16_to_cpu()
     27  return (__force __virtio16)cpu_to_le16(val);  in __cpu_to_virtio16()
     29  return (__force __virtio16)cpu_to_be16(val);  in __cpu_to_virtio16()
     35  return le32_to_cpu((__force __le32)val);  in __virtio32_to_cpu()
     37  return be32_to_cpu((__force __be32)val);  in __virtio32_to_cpu()
     43  return (__force __virtio32)cpu_to_le32(val);  in __cpu_to_virtio32()
     45  return (__force __virtio32)cpu_to_be32(val);  in __cpu_to_virtio32()
     51  return le64_to_cpu((__force __le64)val);  in __virtio64_to_cpu()
     53  return be64_to_cpu((__force __be64)val);  in __virtio64_to_cpu()
  [all …]

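virtio devices may be either endianness, so each accessor takes a little_endian flag and first casts the opaque __virtio16/32/64 wire value to __le* or __be* before converting; the rpmsg helpers and the gdm724x driver further down use the same scheme. A standalone sketch of the pattern; the demo_* names and the byte-swap helper are illustrative, where the kernel uses le16_to_cpu()/be16_to_cpu() plus the __force cast:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t demo_bswap16(uint16_t v)
    {
        return (uint16_t)((v << 8) | (v >> 8));
    }

    /* Assume a little-endian host, as the comments below do. */
    static uint16_t demo_le16_to_cpu(uint16_t v) { return v; }
    static uint16_t demo_be16_to_cpu(uint16_t v) { return demo_bswap16(v); }

    static uint16_t demo_wire16_to_cpu(bool little_endian, uint16_t wire)
    {
        return little_endian ? demo_le16_to_cpu(wire) : demo_be16_to_cpu(wire);
    }

    int main(void)
    {
        uint16_t wire = 0x3412;  /* raw bytes 0x12 0x34 on a little-endian host */

        printf("LE device: %04x\n", demo_wire16_to_cpu(true, wire));
        printf("BE device: %04x\n", demo_wire16_to_cpu(false, wire));
        return 0;
    }
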
/linux-5.19.10/include/linux/gfp.h
     80  #define __GFP_DMA ((__force gfp_t)___GFP_DMA)
     81  #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
     82  #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
     83  #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
    113  #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
    114  #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
    115  #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
    116  #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
    117  #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
    146  #define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
  [all …]

/linux-5.19.10/include/linux/serial_core.h
    179  #define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ )
    180  #define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ )
    181  #define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ )
    182  #define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ )
    183  #define UPF_SPD_CUST ((__force upf_t) ASYNC_SPD_CUST /* 0x0030 */ )
    184  #define UPF_SPD_WARP ((__force upf_t) ASYNC_SPD_WARP /* 0x1010 */ )
    185  #define UPF_SPD_MASK ((__force upf_t) ASYNC_SPD_MASK /* 0x1030 */ )
    186  #define UPF_SKIP_TEST ((__force upf_t) ASYNC_SKIP_TEST /* 6 */ )
    187  #define UPF_AUTO_IRQ ((__force upf_t) ASYNC_AUTO_IRQ /* 7 */ )
    188  #define UPF_HARDPPS_CD ((__force upf_t) ASYNC_HARDPPS_CD /* 11 */ )
  [all …]

/linux-5.19.10/include/linux/rpmsg/byteorder.h
     22  return le16_to_cpu((__force __le16)val);  in __rpmsg16_to_cpu()
     24  return be16_to_cpu((__force __be16)val);  in __rpmsg16_to_cpu()
     30  return (__force __rpmsg16)cpu_to_le16(val);  in __cpu_to_rpmsg16()
     32  return (__force __rpmsg16)cpu_to_be16(val);  in __cpu_to_rpmsg16()
     38  return le32_to_cpu((__force __le32)val);  in __rpmsg32_to_cpu()
     40  return be32_to_cpu((__force __be32)val);  in __rpmsg32_to_cpu()
     46  return (__force __rpmsg32)cpu_to_le32(val);  in __cpu_to_rpmsg32()
     48  return (__force __rpmsg32)cpu_to_be32(val);  in __cpu_to_rpmsg32()
     54  return le64_to_cpu((__force __le64)val);  in __rpmsg64_to_cpu()
     56  return be64_to_cpu((__force __be64)val);  in __rpmsg64_to_cpu()
  [all …]

/linux-5.19.10/fs/ntfs/endian.h
     21  return le16_to_cpu((__force le16)x);  in sle16_to_cpu()
     26  return le32_to_cpu((__force le32)x);  in sle32_to_cpu()
     31  return le64_to_cpu((__force le64)x);  in sle64_to_cpu()
     36  return le16_to_cpu(*(__force le16*)x);  in sle16_to_cpup()
     41  return le32_to_cpu(*(__force le32*)x);  in sle32_to_cpup()
     46  return le64_to_cpu(*(__force le64*)x);  in sle64_to_cpup()
     51  return (__force sle16)cpu_to_le16(x);  in cpu_to_sle16()
     56  return (__force sle32)cpu_to_le32(x);  in cpu_to_sle32()
     61  return (__force sle64)cpu_to_le64(x);  in cpu_to_sle64()
     66  return (__force sle16)cpu_to_le16(*x);  in cpu_to_sle16p()
  [all …]

/linux-5.19.10/arch/powerpc/include/asm/checksum.h
     41  u32 tmp = (__force u32)sum;  in csum_fold()
     49  return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16);  in csum_fold()
     61  u64 s = (__force u32)sum;  in csum_tcpudp_nofold()
     63  s += (__force u32)saddr;  in csum_tcpudp_nofold()
     64  s += (__force u32)daddr;  in csum_tcpudp_nofold()
     70  return (__force __wsum) from64to32(s);  in csum_tcpudp_nofold()
     98  u64 res = (__force u64)csum;  in csum_add()
    100  res += (__force u64)addend;  in csum_add()
    101  return (__force __wsum)((u32)res + (res >> 32));  in csum_add()
    119  return (__force __wsum)rol32((__force u32)sum, (offset & 1) << 3);  in csum_shift()
  [all …]

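The powerpc csum_fold() (line 49) avoids the usual two-step fold: adding rol32(tmp, 16) leaves "low half + high half (+ carry)" in the upper 16 bits, so one complement and one shift finish the job. A standalone sketch comparing it with the straightforward fold; the demo_* names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t demo_rol32(uint32_t x, unsigned n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* Rotate-based fold, as in the powerpc code above. */
    static uint16_t demo_fold_rot(uint32_t sum)
    {
        return (uint16_t)(~(sum + demo_rol32(sum, 16)) >> 16);
    }

    /* Reference fold done the long way, to compare against. */
    static uint16_t demo_fold_ref(uint32_t sum)
    {
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        uint32_t samples[] = { 0x0001ffff, 0xdeadbeef, 0xffffffff, 0x12345678 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("%08x -> rot %04x, ref %04x\n", samples[i],
                   (unsigned)demo_fold_rot(samples[i]),
                   (unsigned)demo_fold_ref(samples[i]));
        return 0;
    }
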
/linux-5.19.10/include/net/checksum.h
     59  u32 res = (__force u32)csum;  in csum_add()
     60  res += (__force u32)addend;  in csum_add()
     61  return (__force __wsum)(res + (res < (__force u32)addend));  in csum_add()
     72  u16 res = (__force u16)csum;  in csum16_add()
     74  res += (__force u16)addend;  in csum16_add()
     75  return (__force __sum16)(res + (res < (__force u16)addend));  in csum16_add()
     88  return (__force __wsum)ror32((__force u32)sum, 8);  in csum_shift()
    113  return (__force __wsum)n;  in csum_unfold()
    122  #define CSUM_MANGLED_0 ((__force __sum16)0xffff)
    131  __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);  in csum_replace4()
  [all …]

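csum_replace4() (line 131) is why csum_add()/csum_sub() exist: when a single 32-bit field of a checksummed header changes, the ones'-complement checksum can be patched instead of recomputed. A standalone sketch of that incremental update; every demo_* helper is an illustrative stand-in for the corresponding kernel csum_* function, which additionally manages the sparse __wsum/__sum16 types:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t demo_csum_add(uint32_t csum, uint32_t addend)
    {
        uint32_t res = csum + addend;
        return res + (res < addend);        /* end-around carry */
    }

    static uint32_t demo_csum_sub(uint32_t csum, uint32_t addend)
    {
        return demo_csum_add(csum, ~addend);
    }

    static uint16_t demo_csum_fold(uint32_t csum)
    {
        csum = (csum & 0xffff) + (csum >> 16);
        csum = (csum & 0xffff) + (csum >> 16);
        return (uint16_t)~csum;
    }

    /* Full recompute over 32-bit words, for comparison. */
    static uint16_t demo_checksum(const uint32_t *words, unsigned n)
    {
        uint32_t sum = 0;

        for (unsigned i = 0; i < n; i++)
            sum = demo_csum_add(sum, words[i]);
        return demo_csum_fold(sum);
    }

    int main(void)
    {
        uint32_t hdr[4] = { 0x45000034, 0x1c464000, 0xac100a63, 0xac100a0c };
        uint16_t sum = demo_checksum(hdr, 4);
        uint32_t old_word = hdr[3], new_word = 0xc0a80001;

        /* Patch: undo the old word, apply the new one, re-fold. */
        uint32_t partial = demo_csum_sub((uint32_t)(uint16_t)~sum, old_word);
        uint16_t patched = demo_csum_fold(demo_csum_add(partial, new_word));

        hdr[3] = new_word;
        printf("recomputed %04x, patched %04x\n",
               (unsigned)demo_checksum(hdr, 4), (unsigned)patched);
        return 0;
    }
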
/linux-5.19.10/fs/befs/endian.h
     19  return le64_to_cpu((__force __le64)n);  in fs64_to_cpu()
     21  return be64_to_cpu((__force __be64)n);  in fs64_to_cpu()
     28  return (__force fs64)cpu_to_le64(n);  in cpu_to_fs64()
     30  return (__force fs64)cpu_to_be64(n);  in cpu_to_fs64()
     37  return le32_to_cpu((__force __le32)n);  in fs32_to_cpu()
     39  return be32_to_cpu((__force __be32)n);  in fs32_to_cpu()
     46  return (__force fs32)cpu_to_le32(n);  in cpu_to_fs32()
     48  return (__force fs32)cpu_to_be32(n);  in cpu_to_fs32()
     55  return le16_to_cpu((__force __le16)n);  in fs16_to_cpu()
     57  return be16_to_cpu((__force __be16)n);  in fs16_to_cpu()
  [all …]

/linux-5.19.10/include/net/netfilter/nf_queue.h
     52  if ((__force u32)iph->saddr < (__force u32)iph->daddr)  in hash_v4()
     53  return jhash_3words((__force u32)iph->saddr,  in hash_v4()
     54  (__force u32)iph->daddr, iph->protocol, initval);  in hash_v4()
     56  return jhash_3words((__force u32)iph->daddr,  in hash_v4()
     57  (__force u32)iph->saddr, iph->protocol, initval);  in hash_v4()
     64  if ((__force u32)ip6h->saddr.s6_addr32[3] <  in hash_v6()
     65  (__force u32)ip6h->daddr.s6_addr32[3]) {  in hash_v6()
     66  a = (__force u32) ip6h->saddr.s6_addr32[3];  in hash_v6()
     67  b = (__force u32) ip6h->daddr.s6_addr32[3];  in hash_v6()
     69  b = (__force u32) ip6h->saddr.s6_addr32[3];  in hash_v6()
  [all …]

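hash_v4()/hash_v6() feed the numerically smaller address first, so both directions of a flow land in the same hash bucket. A standalone sketch of that ordering trick; demo_mix3() is a toy mixer standing in for jhash_3words() purely to keep the example self-contained:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t demo_mix3(uint32_t a, uint32_t b, uint32_t c)
    {
        /* Toy mixer, not the kernel's Jenkins hash. */
        a ^= b * 0x9e3779b1u;
        a = (a << 13) | (a >> 19);
        a ^= c * 0x85ebca6bu;
        return a * 0xc2b2ae35u;
    }

    static uint32_t demo_flow_hash(uint32_t saddr, uint32_t daddr,
                                   uint8_t protocol, uint32_t initval)
    {
        if (saddr < daddr)
            return demo_mix3(saddr, daddr, protocol ^ initval);
        return demo_mix3(daddr, saddr, protocol ^ initval);
    }

    int main(void)
    {
        uint32_t a = 0xc0a80001, b = 0x0a000002;  /* 192.168.0.1, 10.0.0.2 */

        printf("a->b: %08x\n", demo_flow_hash(a, b, 6, 0x1234));
        printf("b->a: %08x\n", demo_flow_hash(b, a, 6, 0x1234));  /* same value */
        return 0;
    }
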
/linux-5.19.10/arch/ia64/lib/checksum.c
     41  return (__force __sum16)~from64to16(  in csum_tcpudp_magic()
     42  (__force u64)saddr + (__force u64)daddr +  in csum_tcpudp_magic()
     43  (__force u64)sum + ((len + proto) << 8));  in csum_tcpudp_magic()
     54  result = (__force u64)saddr + (__force u64)daddr +  in csum_tcpudp_nofold()
     55  (__force u64)sum + ((len + proto) << 8);  in csum_tcpudp_nofold()
     62  return (__force __wsum)result;  in csum_tcpudp_nofold()
     85  result += (__force u32)sum;  in csum_partial()
     88  return (__force __wsum)result;  in csum_partial()
     99  return (__force __sum16)~do_csum(buff,len);  in ip_compute_csum()

/linux-5.19.10/arch/alpha/include/asm/io_trivial.h
     12  return __kernel_ldbu(*(const volatile u8 __force *)a);  in IO_CONCAT()
     18  return __kernel_ldwu(*(const volatile u16 __force *)a);  in IO_CONCAT()
     24  __kernel_stb(b, *(volatile u8 __force *)a);  in IO_CONCAT()
     30  __kernel_stw(b, *(volatile u16 __force *)a);  in IO_CONCAT()
     38  return *(const volatile u32 __force *)a;  in IO_CONCAT()
     44  *(volatile u32 __force *)a = b;  in IO_CONCAT()
     52  return __kernel_ldbu(*(const volatile u8 __force *)a);  in IO_CONCAT()
     58  return __kernel_ldwu(*(const volatile u16 __force *)a);  in IO_CONCAT()
     64  __kernel_stb(b, *(volatile u8 __force *)a);  in IO_CONCAT()
     70  __kernel_stw(b, *(volatile u16 __force *)a);  in IO_CONCAT()
  [all …]

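On buses that need no special access instructions these accessors are plain volatile loads and stores; the (__force) cast only strips the sparse __iomem address-space marker from the pointer before dereferencing it. A standalone sketch with an ordinary variable standing in for an ioremap()ed register; demo_readl()/demo_writel() are illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_device_register;   /* stand-in for a mapped register */

    static uint32_t demo_readl(const volatile void *addr)
    {
        return *(const volatile uint32_t *)addr;
    }

    static void demo_writel(uint32_t value, volatile void *addr)
    {
        *(volatile uint32_t *)addr = value;
    }

    int main(void)
    {
        demo_writel(0xdeadbeef, &fake_device_register);
        printf("register reads back %08x\n", demo_readl(&fake_device_register));
        return 0;
    }
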
/linux-5.19.10/fs/ufs/swab.h
     30  return le64_to_cpu((__force __le64)n);  in fs64_to_cpu()
     32  return be64_to_cpu((__force __be64)n);  in fs64_to_cpu()
     39  return (__force __fs64)cpu_to_le64(n);  in cpu_to_fs64()
     41  return (__force __fs64)cpu_to_be64(n);  in cpu_to_fs64()
     48  return le32_to_cpu((__force __le32)n);  in fs32_to_cpu()
     50  return be32_to_cpu((__force __be32)n);  in fs32_to_cpu()
     57  return (__force __fs32)cpu_to_le32(n);  in cpu_to_fs32()
     59  return (__force __fs32)cpu_to_be32(n);  in cpu_to_fs32()
     84  return le16_to_cpu((__force __le16)n);  in fs16_to_cpu()
     86  return be16_to_cpu((__force __be16)n);  in fs16_to_cpu()
  [all …]

/linux-5.19.10/arch/alpha/lib/checksum.c
     48  return (__force __sum16)~from64to16(  in csum_tcpudp_magic()
     49  (__force u64)saddr + (__force u64)daddr +  in csum_tcpudp_magic()
     50  (__force u64)sum + ((len + proto) << 8));  in csum_tcpudp_magic()
     59  result = (__force u64)saddr + (__force u64)daddr +  in csum_tcpudp_nofold()
     60  (__force u64)sum + ((len + proto) << 8);  in csum_tcpudp_nofold()
     68  return (__force __wsum)result;  in csum_tcpudp_nofold()
    147  return (__force __sum16)~do_csum(iph,ihl*4);  in ip_fast_csum()
    168  result += (__force u32)sum;  in csum_partial()
    171  return (__force __wsum)result;  in csum_partial()
    182  return (__force __sum16)~from64to16(do_csum(buff,len));  in ip_compute_csum()

/linux-5.19.10/include/asm-generic/uaccess.h
     23  *(u8 *)to = *((u8 __force *)from);  in __get_user_fn()
     26  *(u16 *)to = get_unaligned((u16 __force *)from);  in __get_user_fn()
     29  *(u32 *)to = get_unaligned((u32 __force *)from);  in __get_user_fn()
     32  *(u64 *)to = get_unaligned((u64 __force *)from);  in __get_user_fn()
     49  *(u8 __force *)to = *(u8 *)from;  in __put_user_fn()
     52  put_unaligned(*(u16 *)from, (u16 __force *)to);  in __put_user_fn()
     55  put_unaligned(*(u32 *)from, (u32 __force *)to);  in __put_user_fn()
     58  put_unaligned(*(u64 *)from, (u64 __force *)to);  in __put_user_fn()
     84  memcpy(to, (const void __force *)from, n);  in raw_copy_from_user()
     91  memcpy((void __force *)to, from, n);  in raw_copy_to_user()
  [all …]

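__get_user_fn()/__put_user_fn() switch on the access size and use get_unaligned()/put_unaligned() so the transfer is safe at any alignment; the __force casts strip the __user address space on architectures where user pointers can be dereferenced directly. A standalone sketch of the size-switched copy; demo_get_val() is illustrative, memcpy() stands in for the unaligned helpers, and there is no real user/kernel boundary here:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int demo_get_val(void *dst, const void *src, size_t size)
    {
        switch (size) {
        case 1:
        case 2:
        case 4:
        case 8:
            memcpy(dst, src, size);  /* unaligned-safe fixed-size copy */
            return 0;
        default:
            return -1;               /* unsupported access size */
        }
    }

    int main(void)
    {
        unsigned char buf[16] = { 0x78, 0x56, 0x34, 0x12, 0xaa, 0xbb };
        uint32_t v32 = 0;
        uint16_t v16 = 0;

        demo_get_val(&v32, buf, sizeof(v32));
        demo_get_val(&v16, buf + 4, sizeof(v16));
        printf("v32=%08x v16=%04x\n", v32, (unsigned)v16);  /* host byte order */
        return 0;
    }
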
/linux-5.19.10/drivers/staging/gdm724x/gdm_endian.c
     10  return (__force __dev16)cpu_to_le16(x);  in gdm_cpu_to_dev16()
     12  return (__force __dev16)cpu_to_be16(x);  in gdm_cpu_to_dev16()
     18  return le16_to_cpu((__force __le16)x);  in gdm_dev16_to_cpu()
     20  return be16_to_cpu((__force __be16)x);  in gdm_dev16_to_cpu()
     26  return (__force __dev32)cpu_to_le32(x);  in gdm_cpu_to_dev32()
     28  return (__force __dev32)cpu_to_be32(x);  in gdm_cpu_to_dev32()
     34  return le32_to_cpu((__force __le32)x);  in gdm_dev32_to_cpu()
     36  return be32_to_cpu((__force __be32)x);  in gdm_dev32_to_cpu()

/linux-5.19.10/fs/qnx6/qnx6.h
     81  return le64_to_cpu((__force __le64)n);  in fs64_to_cpu()
     83  return be64_to_cpu((__force __be64)n);  in fs64_to_cpu()
     89  return (__force __fs64)cpu_to_le64(n);  in cpu_to_fs64()
     91  return (__force __fs64)cpu_to_be64(n);  in cpu_to_fs64()
     97  return le32_to_cpu((__force __le32)n);  in fs32_to_cpu()
     99  return be32_to_cpu((__force __be32)n);  in fs32_to_cpu()
    105  return (__force __fs32)cpu_to_le32(n);  in cpu_to_fs32()
    107  return (__force __fs32)cpu_to_be32(n);  in cpu_to_fs32()
    113  return le16_to_cpu((__force __le16)n);  in fs16_to_cpu()
    115  return be16_to_cpu((__force __be16)n);  in fs16_to_cpu()
  [all …]