/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/once.h>

#include <uapi/linux/random.h>

struct notifier_block;

void add_device_randomness(const void *buf, size_t len);
void __init add_bootloader_randomness(const void *buf, size_t len);
void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value) __latent_entropy;
void add_interrupt_randomness(int irq) __latent_entropy;
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);

#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
static inline void add_latent_entropy(void)
{
	add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
}
#else
static inline void add_latent_entropy(void) { }
#endif

#if IS_ENABLED(CONFIG_VMGENID)
void add_vmfork_randomness(const void *unique_vm_id, size_t len);
int register_random_vmfork_notifier(struct notifier_block *nb);
int unregister_random_vmfork_notifier(struct notifier_block *nb);
#else
static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
#endif

void get_random_bytes(void *buf, size_t len);
u8 get_random_u8(void);
u16 get_random_u16(void);
u32 get_random_u32(void);
u64 get_random_u64(void);
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
	return get_random_u64();
#else
	return get_random_u32();
#endif
}

u32 __get_random_u32_below(u32 ceil);

/*
 * Returns a random integer in the interval [0, ceil), with uniform
 * distribution, suitable for all uses. Fastest when ceil is a constant, but
 * still fast for variable ceil as well.
 */
static inline u32 get_random_u32_below(u32 ceil)
{
	if (!__builtin_constant_p(ceil))
		return __get_random_u32_below(ceil);

	/*
	 * For the fast path, below, all operations on ceil are precomputed by
	 * the compiler, so this incurs no overhead for checking pow2, doing
	 * divisions, or branching based on integer size. The resultant
	 * algorithm does traditional reciprocal multiplication (typically
	 * optimized by the compiler into shifts and adds), rejecting samples
	 * whose lower half would indicate a range indivisible by ceil.
	 */
	BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
	if (ceil <= 1)
		return 0;
	for (;;) {
		if (ceil <= 1U << 8) {
			u32 mult = ceil * get_random_u8();
			if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
				return mult >> 8;
		} else if (ceil <= 1U << 16) {
			u32 mult = ceil * get_random_u16();
			if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
				return mult >> 16;
		} else {
			u64 mult = (u64)ceil * get_random_u32();
			if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
				return mult >> 32;
		}
	}
}
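
/*
 * Illustrative usage sketch, not part of this header's API: picking a
 * uniformly distributed index into a caller-owned array. Worked example of
 * the rejection condition: with ceil == 10 the u8 fast path computes
 * mult = 10 * get_random_u8() and rejects any sample whose low byte of mult
 * is below (1U << 8) % 10 == 6, which is what keeps mult >> 8 unbiased
 * across [0, 10). The helper and its arguments below are hypothetical.
 *
 *	static u32 pick_random_entry(const u32 *table, u32 nr_entries)
 *	{
 *		// Assumes nr_entries > 0.
 *		return table[get_random_u32_below(nr_entries)];
 *	}
 */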

/*
 * Returns a random integer in the interval (floor, U32_MAX], with uniform
 * distribution, suitable for all uses. Fastest when floor is a constant, but
 * still fast for variable floor as well.
 */
static inline u32 get_random_u32_above(u32 floor)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX,
			 "get_random_u32_above() must take floor < U32_MAX");
	return floor + 1 + get_random_u32_below(U32_MAX - floor);
}
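
/*
 * Illustrative usage sketch (hypothetical variable, not from this header):
 * drawing a strictly positive value, i.e. one in (0, U32_MAX].
 *
 *	u32 nonzero_jitter = get_random_u32_above(0);
 */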

/*
 * Returns a random integer in the interval [floor, ceil], with uniform
 * distribution, suitable for all uses. Fastest when floor and ceil are
 * constant, but still fast for variable floor and ceil as well.
 */
static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) &&
			 (floor > ceil || ceil - floor == U32_MAX),
			 "get_random_u32_inclusive() must take floor <= ceil");
	return floor + get_random_u32_below(ceil - floor + 1);
}
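
/*
 * Illustrative usage sketch (hypothetical bounds, not from this header):
 * drawing a retry backoff from the closed interval [100, 400] milliseconds.
 *
 *	u32 backoff_ms = get_random_u32_inclusive(100, 400);
 */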

/*
 * On 64-bit architectures, protect against non-terminated C string overflows
 * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
 */
#ifdef CONFIG_64BIT
# ifdef __LITTLE_ENDIAN
#  define CANARY_MASK 0xffffffffffffff00UL
# else /* big endian, 64 bits: */
#  define CANARY_MASK 0x00ffffffffffffffUL
# endif
#else /* 32 bits: */
# define CANARY_MASK 0xffffffffUL
#endif

static inline unsigned long get_random_canary(void)
{
	return get_random_long() & CANARY_MASK;
}
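
/*
 * Worked example (illustrative values only): on a 64-bit little-endian
 * machine, a raw get_random_long() of 0x1122334455667788 is masked with
 * 0xffffffffffffff00 to give 0x1122334455667700. The canary's first byte in
 * memory is then 0x00, so an overflowing, non-terminated C string copy
 * stops at the canary instead of silently matching it.
 */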

void __init random_init_early(const char *command_line);
void __init random_init(void);
bool rng_is_initialized(void);
int wait_for_random_bytes(void);

/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
 * Returns the result of the call to wait_for_random_bytes. */
static inline int get_random_bytes_wait(void *buf, size_t nbytes)
{
	int ret = wait_for_random_bytes();
	get_random_bytes(buf, nbytes);
	return ret;
}
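
/*
 * Illustrative usage sketch (hypothetical key buffer, not from this header):
 * callers that must not proceed with an unseeded RNG should check the return
 * value, which is wait_for_random_bytes()'s result (0 once the RNG is ready,
 * or a negative error if the wait was interrupted).
 *
 *	u8 key[32];
 *	int err = get_random_bytes_wait(key, sizeof(key));
 *
 *	if (err)
 *		return err;
 */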

#define declare_get_random_var_wait(name, ret_type) \
	static inline int get_random_ ## name ## _wait(ret_type *out) { \
		int ret = wait_for_random_bytes(); \
		if (unlikely(ret)) \
			return ret; \
		*out = get_random_ ## name(); \
		return 0; \
	}
declare_get_random_var_wait(u8, u8)
declare_get_random_var_wait(u16, u16)
declare_get_random_var_wait(u32, u32)
declare_get_random_var_wait(u64, u64)
declare_get_random_var_wait(long, unsigned long)
#undef declare_get_random_var_wait
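
/*
 * For illustration: declare_get_random_var_wait(u32, u32) above expands to
 * roughly the following (sketch, with the macro's line continuations and
 * token pasting resolved):
 *
 *	static inline int get_random_u32_wait(u32 *out)
 *	{
 *		int ret = wait_for_random_bytes();
 *
 *		if (unlikely(ret))
 *			return ret;
 *		*out = get_random_u32();
 *		return 0;
 *	}
 */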

/*
 * This is designed to be standalone for just prandom
 * users, but for now we include it from <linux/random.h>
 * for legacy reasons.
 */
#include <linux/prandom.h>

#include <asm/archrandom.h>

/*
 * Called from the boot CPU during startup; not valid to call once
 * secondary CPUs are up and preemption is possible.
 */
#ifndef arch_get_random_seed_longs_early
static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
{
	WARN_ON(system_state != SYSTEM_BOOTING);
	return arch_get_random_seed_longs(v, max_longs);
}
#endif

#ifndef arch_get_random_longs_early
static inline size_t __init arch_get_random_longs_early(unsigned long *v, size_t max_longs)
{
	WARN_ON(system_state != SYSTEM_BOOTING);
	return arch_get_random_longs(v, max_longs);
}
#endif
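
/*
 * Illustrative usage sketch (hypothetical helper, not from this header):
 * early boot code can drain whatever the architecture offers into a seed
 * buffer and mix in only the longs that were actually provided. Both helpers
 * return the number of longs written, which may be zero.
 *
 *	static void __init seed_from_arch(void)
 *	{
 *		unsigned long entropy[8];
 *		size_t longs;
 *
 *		longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy));
 *		if (!longs)
 *			longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy));
 *		if (longs)
 *			add_device_randomness(entropy, longs * sizeof(*entropy));
 *	}
 */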

#ifdef CONFIG_SMP
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
#endif

#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
#endif

#endif /* _LINUX_RANDOM_H */