// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
 *
 * This driver produces cryptographically secure pseudorandom data. It is divided
 * into roughly six sections, each with a section header:
 *
 *   - Initialization and readiness waiting.
 *   - Fast key erasure RNG, the "crng".
 *   - Entropy accumulation and extraction routines.
 *   - Entropy collection routines.
 *   - Userspace reader/writer interfaces.
 *   - Sysctl interface.
 *
 * The high level overview is that there is one input pool, into which
 * various pieces of data are hashed. Prior to initialization, some of that
 * data is then "credited" as having a certain number of bits of entropy.
 * When enough bits of entropy are available, the hash is finalized and
 * handed as a key to a stream cipher that expands it indefinitely for
 * various consumers. This key is periodically refreshed as the various
 * entropy collectors, described below, add data to the input pool.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <linux/siphash.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

/*********************************************************************
 *
 * Initialization and readiness waiting.
 *
 * Much of the RNG infrastructure is devoted to various dependencies
 * being able to wait until the RNG has collected enough entropy and
 * is ready for safe consumption.
 *
 *********************************************************************/

/*
 * crng_init is protected by base_crng->lock, and only increases
 * its value (from empty->early->ready).
 */
static enum {
	CRNG_EMPTY = 0, /* Little to no entropy collected */
	CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
	CRNG_READY = 2  /* Fully initialized with POOL_READY_BITS collected */
} crng_init __read_mostly = CRNG_EMPTY;
static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;

/* Control how we warn userspace. */
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
static int ratelimit_disable __read_mostly =
	IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/*
 * Returns whether or not the input pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
 * u16,u32,u64,long} family of functions.
 *
 * Returns: true if the input pool has been seeded.
 *          false if the input pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

static void __cold crng_set_ready(struct work_struct *work)
{
	static_branch_enable(&crng_is_ready);
}

/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
static void try_to_generate_entropy(void);

/*
 * Wait for the input pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
 * long} family of functions. Using any of these functions without first
 * calling this function forfeits the guarantee of security.
 *
 * Returns: 0 if the input pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
	while (!crng_ready()) {
		int ret;

		try_to_generate_entropy();
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;
	}
	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);

#define warn_unseeded_randomness() \
	if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
				__func__, (void *)_RET_IP_, crng_init)


/*********************************************************************
 *
 * Fast key erasure RNG, the "crng".
 *
 * These functions expand entropy from the entropy extractor into
 * long streams for external consumption using the "fast key erasure"
 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
 *
 * There are a few exported interfaces for use by other drivers:
 *
 *	void get_random_bytes(void *buf, size_t len)
 *	u8 get_random_u8()
 *	u16 get_random_u16()
 *	u32 get_random_u32()
 *	u32 get_random_u32_below(u32 ceil)
 *	u32 get_random_u32_above(u32 floor)
 *	u32 get_random_u32_inclusive(u32 floor, u32 ceil)
 *	u64 get_random_u64()
 *	unsigned long get_random_long()
 *
 * These interfaces will return the requested number of random bytes
 * into the given buffer or as a return value. This is equivalent to
 * a read from /dev/urandom. The u8, u16, u32, u64, long family of
 * functions may be higher performance for one-off random integers,
 * because they do a bit of buffering and do not invoke reseeding
 * until the buffer is emptied.
 *
 *********************************************************************/
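
/*
 * Purely as an illustrative sketch (not part of this file's API surface),
 * a kernel consumer might use these interfaces like so; example_generate_key()
 * and the 16-byte key size below are hypothetical:
 *
 *	#include <linux/random.h>
 *
 *	static void example_generate_key(u8 key[16])
 *	{
 *		get_random_bytes(key, 16);	// like reading /dev/urandom
 *	}
 *
 *	static u32 example_pick_slot(u32 nr_slots)
 *	{
 *		return get_random_u32_below(nr_slots);	// uniform in [0, nr_slots)
 *	}
 *
 * Consumers that need the seeding guarantee should first call
 * wait_for_random_bytes() or check rng_is_initialized().
 */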

enum {
	CRNG_RESEED_START_INTERVAL = HZ,
	CRNG_RESEED_INTERVAL = 60 * HZ
};

static struct {
	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
	unsigned long birth;
	unsigned long generation;
	spinlock_t lock;
} base_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

struct crng {
	u8 key[CHACHA_KEY_SIZE];
	unsigned long generation;
	local_lock_t lock;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
	.generation = ULONG_MAX,
	.lock = INIT_LOCAL_LOCK(crngs.lock),
};

/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
static void extract_entropy(void *buf, size_t len);

/* This extracts a new crng key from the input pool. */
static void crng_reseed(void)
{
	unsigned long flags;
	unsigned long next_gen;
	u8 key[CHACHA_KEY_SIZE];

	extract_entropy(key, sizeof(key));

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter. We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	if (!static_branch_likely(&crng_is_ready))
		crng_init = CRNG_READY;
	spin_unlock_irqrestore(&base_crng.lock, flags);
	memzero_explicit(key, sizeof(key));
}

/*
 * This generates a ChaCha block using the provided key, and then
 * immediately overwrites that key with half the block. It returns
 * the resultant ChaCha state to the user, along with the second
 * half of the block containing 32 bytes of random data that may
 * be used; random_data_len may not be greater than 32.
 *
 * The returned ChaCha state contains within it a copy of the old
 * key value, at index 4, so the state should always be zeroed out
 * immediately after using in order to maintain forward secrecy.
 * If the state cannot be erased in a timely manner, then it is
 * safer to set the random_data parameter to &chacha_state[4] so
 * that this function overwrites it before returning.
 */
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
				  u32 chacha_state[CHACHA_STATE_WORDS],
				  u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA_BLOCK_SIZE];

	BUG_ON(random_data_len > 32);

	chacha_init_consts(chacha_state);
	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
	memset(&chacha_state[12], 0, sizeof(u32) * 4);
	chacha20_block(chacha_state, first_block);

	memcpy(key, first_block, CHACHA_KEY_SIZE);
	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}

/*
 * Return the interval until the next reseeding, which is normally
 * CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
 * proportional to the uptime.
 */
static unsigned int crng_reseed_interval(void)
{
	static bool early_boot = true;

	if (unlikely(READ_ONCE(early_boot))) {
		time64_t uptime = ktime_get_seconds();
		if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
			WRITE_ONCE(early_boot, false);
		else
			return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
				     (unsigned int)uptime / 2 * HZ);
	}
	return CRNG_RESEED_INTERVAL;
}

/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we check whether we're ready, unlocked first, and
	 * then re-check once locked later. In the case where we're really not
	 * ready, we do fast key erasure with the base_crng directly, extracting
	 * when crng_init is CRNG_EMPTY.
	 */
	if (!crng_ready()) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready) {
			if (crng_init == CRNG_EMPTY)
				extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
		if (!ready)
			return;
	}

	/*
	 * If the base_crng is old enough, we reseed, which in turn bumps the
	 * generation counter that we check below.
	 */
	if (unlikely(time_is_before_jiffies(READ_ONCE(base_crng.birth) + crng_reseed_interval())))
		crng_reseed();

	local_lock_irqsave(&crngs.lock, flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng. In that case, we do fast key
	 * erasure on the base_crng, and use its output as the new key
	 * for our per-cpu crng. This brings us up to date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an up
	 * to date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller. All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_unlock_irqrestore(&crngs.lock, flags);
}
static void _get_random_bytes(void *buf, size_t len)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 tmp[CHACHA_BLOCK_SIZE];
	size_t first_block_len;

	if (!len)
		return;

	first_block_len = min_t(size_t, 32, len);
	crng_make_state(chacha_state, buf, first_block_len);
	len -= first_block_len;
	buf += first_block_len;

	while (len) {
		if (len < CHACHA_BLOCK_SIZE) {
			chacha20_block(chacha_state, tmp);
			memcpy(buf, tmp, len);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(chacha_state, buf);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];
		len -= CHACHA_BLOCK_SIZE;
		buf += CHACHA_BLOCK_SIZE;
	}

	memzero_explicit(chacha_state, sizeof(chacha_state));
}

/*
 * This function is the exported kernel interface. It returns some number of
 * good random numbers, suitable for key generation, seeding TCP sequence
 * numbers, etc. In order to ensure that the randomness returned by this
 * function is okay, the function wait_for_random_bytes() should be called and
 * return 0 at least once at any point prior.
 */
void get_random_bytes(void *buf, size_t len)
{
	warn_unseeded_randomness();
	_get_random_bytes(buf, len);
}
EXPORT_SYMBOL(get_random_bytes);

static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 block[CHACHA_BLOCK_SIZE];
	size_t ret = 0, copied;

	if (unlikely(!iov_iter_count(iter)))
		return 0;

	/*
	 * Immediately overwrite the ChaCha key at index 4 with random
	 * bytes, in case userspace causes copy_to_iter() below to sleep
	 * forever, so that we still retain forward secrecy in that case.
	 */
	crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
	/*
	 * However, if we're doing a read of len <= 32, we don't need to
	 * use chacha_state after, so we can simply return those bytes to
	 * the user directly.
	 */
	if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
		ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
		goto out_zero_chacha;
	}

	for (;;) {
		chacha20_block(chacha_state, block);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];

		copied = copy_to_iter(block, sizeof(block), iter);
		ret += copied;
		if (!iov_iter_count(iter) || copied != sizeof(block))
			break;

		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(block, sizeof(block));
out_zero_chacha:
	memzero_explicit(chacha_state, sizeof(chacha_state));
	return ret ? ret : -EFAULT;
}

/*
 * Batched entropy returns random integers. The quality of the random
 * number is as good as /dev/urandom. In order to ensure that the randomness
 * provided by this function is okay, the function wait_for_random_bytes()
 * should be called and return 0 at least once at any point prior.
 */

#define DEFINE_BATCHED_ENTROPY(type)						\
struct batch_ ##type {								\
	/*									\
	 * We make this 1.5x a ChaCha block, so that we get the			\
	 * remaining 32 bytes from fast key erasure, plus one full		\
	 * block from the detached ChaCha state. We can increase		\
	 * the size of this later if needed so long as we keep the		\
	 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.		\
	 */									\
	type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))];		\
	local_lock_t lock;							\
	unsigned long generation;						\
	unsigned int position;							\
};										\
										\
static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = {	\
	.lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock),			\
	.position = UINT_MAX							\
};										\
										\
type get_random_ ##type(void)							\
{										\
	type ret;								\
	unsigned long flags;							\
	struct batch_ ##type *batch;						\
	unsigned long next_gen;							\
										\
	warn_unseeded_randomness();						\
										\
	if (!crng_ready()) {							\
		_get_random_bytes(&ret, sizeof(ret));				\
		return ret;							\
	}									\
										\
	local_lock_irqsave(&batched_entropy_ ##type.lock, flags);		\
	batch = raw_cpu_ptr(&batched_entropy_##type);				\
										\
	next_gen = READ_ONCE(base_crng.generation);				\
	if (batch->position >= ARRAY_SIZE(batch->entropy) ||			\
	    next_gen != batch->generation) {					\
		_get_random_bytes(batch->entropy, sizeof(batch->entropy));	\
		batch->position = 0;						\
		batch->generation = next_gen;					\
	}									\
										\
	ret = batch->entropy[batch->position];					\
	batch->entropy[batch->position] = 0;					\
	++batch->position;							\
	local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags);		\
	return ret;								\
}										\
EXPORT_SYMBOL(get_random_ ##type);

DEFINE_BATCHED_ENTROPY(u8)
DEFINE_BATCHED_ENTROPY(u16)
DEFINE_BATCHED_ENTROPY(u32)
DEFINE_BATCHED_ENTROPY(u64)

u32 __get_random_u32_below(u32 ceil)
{
	/*
	 * This is the slow path for variable ceil. It is still fast, most of
	 * the time, by doing traditional reciprocal multiplication and
	 * opportunistically comparing the lower half to ceil itself, before
	 * falling back to computing a larger bound, and then rejecting samples
	 * whose lower half would indicate a range indivisible by ceil. The use
	 * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable
	 * in 32-bits.
	 */
	u32 rand = get_random_u32();
	u64 mult;

	/*
	 * This function is technically undefined for ceil == 0, and in fact
	 * for the non-underscored constant version in the header, we build bug
	 * on that. But for the non-constant case, it's convenient to have that
	 * evaluate to being a straight call to get_random_u32(), so that
	 * get_random_u32_inclusive() can work over its whole range without
	 * undefined behavior.
	 */
	if (unlikely(!ceil))
		return rand;

	mult = (u64)ceil * rand;
	if (unlikely((u32)mult < ceil)) {
		u32 bound = -ceil % ceil;
		while (unlikely((u32)mult < bound))
			mult = (u64)ceil * get_random_u32();
	}
	return mult >> 32;
}
EXPORT_SYMBOL(__get_random_u32_below);
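
/*
 * A minimal userspace sketch of the same multiply-then-reject technique,
 * assuming only the C standard library; rand32() below is a stand-in
 * source for illustration, not this driver's generator:
 *
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *
 *	static uint32_t rand32(void)
 *	{
 *		return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
 *	}
 *
 *	static uint32_t bounded(uint32_t ceil)
 *	{
 *		uint64_t mult = (uint64_t)ceil * rand32();
 *		if ((uint32_t)mult < ceil) {
 *			uint32_t bound = -ceil % ceil;	// equals 2^32 % ceil
 *			while ((uint32_t)mult < bound)
 *				mult = (uint64_t)ceil * rand32();
 *		}
 *		return mult >> 32;	// uniform in [0, ceil)
 *	}
 *
 * For example, with ceil == 3, 2^32 % 3 == 1, so exactly one low-half
 * value per ceil-sized slice is rejected, leaving 0, 1 and 2 equally
 * likely.
 */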

#ifdef CONFIG_SMP
/*
 * This function is called when the CPU is coming up, with entry
 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
 */
int __cold random_prepare_cpu(unsigned int cpu)
{
	/*
	 * When the cpu comes back online, immediately invalidate both
	 * the per-cpu crng and all batches, so that we serve fresh
	 * randomness.
	 */
	per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
	per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
	per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
	per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
	per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
	return 0;
}
#endif


/**********************************************************************
 *
 * Entropy accumulation and extraction routines.
 *
 * Callers may add entropy via:
 *
 *     static void mix_pool_bytes(const void *buf, size_t len)
 *
 * After which, if added entropy should be credited:
 *
 *     static void credit_init_bits(size_t bits)
 *
 * Finally, extract entropy via:
 *
 *     static void extract_entropy(void *buf, size_t len)
 *
 **********************************************************************/
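
/*
 * As a sketch of the intended call pattern (collect_noise() and the
 * 8-bit credit below are hypothetical), a contributor that both mixes
 * and credits would do:
 *
 *	u8 sample[16];
 *
 *	collect_noise(sample, sizeof(sample));	// hypothetical source
 *	mix_pool_bytes(sample, sizeof(sample));	// always safe, uncredited
 *	credit_init_bits(8);	// only if the data is truly unpredictable
 *
 * while consumers only ever see the pool through extract_entropy().
 */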

enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
	POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
};

static struct {
	struct blake2s_state hash;
	spinlock_t lock;
	unsigned int init_bits;
} input_pool = {
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};

static void _mix_pool_bytes(const void *buf, size_t len)
{
	blake2s_update(&input_pool.hash, buf, len);
}

/*
 * This function adds bytes into the input pool. It does not
 * update the initialization bit counter; the caller should call
 * credit_init_bits if this is appropriate.
 */
static void mix_pool_bytes(const void *buf, size_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, len);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}

/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
 */
static void extract_entropy(void *buf, size_t len)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];
		size_t counter;
	} block;
	size_t i, longs;

	for (i = 0; i < ARRAY_SIZE(block.rdseed);) {
		longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
		if (longs) {
			i += longs;
			continue;
		}
		longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
		if (longs) {
			i += longs;
			continue;
		}
		block.rdseed[i++] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	while (len) {
		i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		len -= i;
		buf += i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}

#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)

static void __cold _credit_init_bits(size_t bits)
{
	static struct execute_work set_ready;
	unsigned int new, orig, add;
	unsigned long flags;

	if (!bits)
		return;

	add = min_t(size_t, bits, POOL_BITS);

	orig = READ_ONCE(input_pool.init_bits);
	do {
		new = min_t(unsigned int, POOL_BITS, orig + add);
	} while (!try_cmpxchg(&input_pool.init_bits, &orig, new));

	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
		crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
		if (static_key_initialized)
			execute_in_process_context(crng_set_ready, &set_ready);
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (urandom_warning.missed)
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
	} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
		spin_lock_irqsave(&base_crng.lock, flags);
		/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
		if (crng_init == CRNG_EMPTY) {
			extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_init = CRNG_EARLY;
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
	}
}


/**********************************************************************
 *
 * Entropy collection routines.
 *
 * The following exported functions are used for pushing entropy into
 * the above entropy accumulation routines:
 *
 *	void add_device_randomness(const void *buf, size_t len);
 *	void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
 *	void add_bootloader_randomness(const void *buf, size_t len);
 *	void add_vmfork_randomness(const void *unique_vm_id, size_t len);
 *	void add_interrupt_randomness(int irq);
 *	void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
 *	void add_disk_randomness(struct gendisk *disk);
 *
 * add_device_randomness() adds data to the input pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC. This does *not* credit any actual entropy to
 * the pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
 * entropy as specified by the caller. If the entropy pool is full it will
 * block until more entropy is needed.
 *
 * add_bootloader_randomness() is called by bootloader drivers, such as EFI
 * and device tree, and credits its input depending on whether or not the
 * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 *
 * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
 * representing the current instance of a VM to the pool, without crediting,
 * and then force-reseeds the crng so that it takes effect immediately.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool. Using the cycle counters and the irq source
 * as inputs, it feeds the input pool roughly once a second or after 64
 * interrupts, crediting 1 bit of entropy for whichever comes first.
 *
 * add_input_randomness() uses the input layer interrupt timing, as well
 * as the event type information from the hardware.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool. Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * The last two routines try to estimate how many bits of entropy
 * to credit. They do this by keeping track of the first and second
 * order deltas of the event timings.
 *
 **********************************************************************/
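
/*
 * As an illustrative sketch of the hardware RNG path, a driver's feed
 * loop might look like this; example_read_device() and its conservative
 * 50% entropy estimate are hypothetical, not taken from any in-tree
 * driver:
 *
 *	static int example_hwrng_kthread(void *unused)
 *	{
 *		u8 buf[32];
 *
 *		while (!kthread_should_stop()) {
 *			example_read_device(buf, sizeof(buf));
 *			add_hwgenerator_randomness(buf, sizeof(buf),
 *						   sizeof(buf) * 8 / 2);
 *		}
 *		return 0;
 *	}
 *
 * No explicit delay is needed in such a loop: once the RNG is
 * initialized, add_hwgenerator_randomness() itself sleeps for the
 * reseed interval to throttle input.
 */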

static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
static int __init parse_trust_bootloader(char *arg)
{
	return kstrtobool(arg, &trust_bootloader);
}
early_param("random.trust_cpu", parse_trust_cpu);
early_param("random.trust_bootloader", parse_trust_bootloader);

static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
	unsigned long flags, entropy = random_get_entropy();

	/*
	 * Encode a representation of how long the system has been suspended,
	 * in a way that is distinct from prior system suspends.
	 */
	ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() };

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&action, sizeof(action));
	_mix_pool_bytes(stamps, sizeof(stamps));
	_mix_pool_bytes(&entropy, sizeof(entropy));
	spin_unlock_irqrestore(&input_pool.lock, flags);

	if (crng_ready() && (action == PM_RESTORE_PREPARE ||
	    (action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
	     !IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
		crng_reseed();
		pr_notice("crng reseeded on system resumption\n");
	}
	return 0;
}

static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };

/*
 * This is called extremely early, before time keeping functionality is
 * available, but arch randomness is. Interrupts are not yet enabled.
 */
void __init random_init_early(const char *command_line)
{
	unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
	size_t i, longs, arch_bits;

#if defined(LATENT_ENTROPY_PLUGIN)
	static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
	_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif

	for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
		longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy) - i);
		if (longs) {
			_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
			i += longs;
			continue;
		}
		longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy) - i);
		if (longs) {
			_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
			i += longs;
			continue;
		}
		arch_bits -= sizeof(*entropy) * 8;
		++i;
	}

	_mix_pool_bytes(init_utsname(), sizeof(*(init_utsname())));
	_mix_pool_bytes(command_line, strlen(command_line));

	/* Reseed if already seeded by earlier phases. */
	if (crng_ready())
		crng_reseed();
	else if (trust_cpu)
		_credit_init_bits(arch_bits);
}

/*
 * This is called a little bit after the prior function, and now there is
 * access to timestamp counters. Interrupts are not yet enabled.
 */
void __init random_init(void)
{
	unsigned long entropy = random_get_entropy();
	ktime_t now = ktime_get_real();

	_mix_pool_bytes(&now, sizeof(now));
	_mix_pool_bytes(&entropy, sizeof(entropy));
	add_latent_entropy();

	/*
	 * If we were initialized by the cpu or bootloader before jump labels
	 * are initialized, then we should enable the static branch here, where
	 * it's guaranteed that jump labels have been initialized.
	 */
	if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
		crng_set_ready(NULL);

	/* Reseed if already seeded by earlier phases. */
	if (crng_ready())
		crng_reseed();

	WARN_ON(register_pm_notifier(&pm_notifier));

	WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
		       "entropy collection will consequently suffer.");
}

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, size_t len)
{
	unsigned long entropy = random_get_entropy();
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&entropy, sizeof(entropy));
	_mix_pool_bytes(buf, len);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

/*
 * Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
{
	mix_pool_bytes(buf, len);
	credit_init_bits(entropy);

	/*
	 * Throttle writing to once every reseed interval, unless we're not yet
	 * initialized or no entropy is credited.
	 */
	if (!kthread_should_stop() && (crng_ready() || !entropy))
		schedule_timeout_interruptible(crng_reseed_interval());
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);

/*
 * Handle random seed passed by bootloader, and credit it if
 * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 */
void __init add_bootloader_randomness(const void *buf, size_t len)
{
	mix_pool_bytes(buf, len);
	if (trust_bootloader)
		credit_init_bits(len * 8);
}

#if IS_ENABLED(CONFIG_VMGENID)
static BLOCKING_NOTIFIER_HEAD(vmfork_chain);

/*
 * Handle a new VM ID, which is unique but not secret, so we don't
 * credit it, but we do immediately force a reseed after so that it's
 * used by the crng posthaste.
 */
void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
{
	add_device_randomness(unique_vm_id, len);
	if (crng_ready()) {
		crng_reseed();
		pr_notice("crng reseeded due to virtual machine fork\n");
	}
	blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
}
#if IS_MODULE(CONFIG_VMGENID)
EXPORT_SYMBOL_GPL(add_vmfork_randomness);
#endif

int __cold register_random_vmfork_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);

int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
#endif

struct fast_pool {
	unsigned long pool[4];
	unsigned long last;
	unsigned int count;
	struct timer_list mix;
};

static void mix_interrupt_randomness(struct timer_list *work);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
#define FASTMIX_PERM SIPHASH_PERMUTATION
	.pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
#else
#define FASTMIX_PERM HSIPHASH_PERMUTATION
	.pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
#endif
	.mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
};

/*
 * This is [Half]SipHash-1-x, starting from an empty key. Because
 * the key is fixed, it assumes that its inputs are non-malicious,
 * and therefore this has no security on its own. s represents the
 * four-word SipHash state, while v represents a two-word input.
 */
static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
{
	s[3] ^= v1;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v1;
	s[3] ^= v2;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v2;
}

#ifdef CONFIG_SMP
/*
 * This function is called when the CPU has just come online, with
 * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
 */
int __cold random_online_cpu(unsigned int cpu)
{
	/*
	 * During CPU shutdown and before CPU onlining, add_interrupt_
	 * randomness() may schedule mix_interrupt_randomness(), and
	 * set the MIX_INFLIGHT flag. However, because the worker can
	 * be scheduled on a different CPU during this period, that
	 * flag will never be cleared. For that reason, we zero out
	 * the flag here, which runs just after workqueues are onlined
	 * for the CPU again. This also has the effect of setting the
	 * irq randomness count to zero so that new accumulated irqs
	 * are fresh.
	 */
	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
	return 0;
}
#endif

static void mix_interrupt_randomness(struct timer_list *work)
{
	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
	/*
	 * The size of the copied stack pool is explicitly 2 longs so that we
	 * only ever ingest half of the siphash output each time, retaining
	 * the other half as the next "key" that carries over. The entropy is
	 * supposed to be sufficiently dispersed between bits so on average
	 * we don't wind up "losing" some.
	 */
	unsigned long pool[2];
	unsigned int count;

	/* Check to see if we're running on the wrong CPU due to hotplug. */
	local_irq_disable();
	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
		local_irq_enable();
		return;
	}

	/*
	 * Copy the pool to the stack so that the mixer always has a
	 * consistent view, before we reenable irqs again.
	 */
	memcpy(pool, fast_pool->pool, sizeof(pool));
	count = fast_pool->count;
	fast_pool->count = 0;
	fast_pool->last = jiffies;
	local_irq_enable();

	mix_pool_bytes(pool, sizeof(pool));
	credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));

	memzero_explicit(pool, sizeof(pool));
}

void add_interrupt_randomness(int irq)
{
	enum { MIX_INFLIGHT = 1U << 31 };
	unsigned long entropy = random_get_entropy();
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned int new_count;

	fast_mix(fast_pool->pool, entropy,
		 (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
	new_count = ++fast_pool->count;

	if (new_count & MIX_INFLIGHT)
		return;

	if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
		return;

	fast_pool->count |= MIX_INFLIGHT;
	if (!timer_pending(&fast_pool->mix)) {
		fast_pool->mix.expires = jiffies;
		add_timer_on(&fast_pool->mix, raw_smp_processor_id());
	}
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

/* There is one of these per entropy source */
struct timer_rand_state {
	unsigned long last_time;
	long last_delta, last_delta2;
};

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays. It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool. The
 * value "num" is also added to the pool; it should somehow describe
 * the type of event that just happened.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
	unsigned long entropy = random_get_entropy(), now = jiffies, flags;
	long delta, delta2, delta3;
	unsigned int bits;

	/*
	 * If we're in a hard IRQ, add_interrupt_randomness() will be called
	 * sometime after, so mix into the fast pool.
	 */
	if (in_hardirq()) {
		fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
	} else {
		spin_lock_irqsave(&input_pool.lock, flags);
		_mix_pool_bytes(&entropy, sizeof(entropy));
		_mix_pool_bytes(&num, sizeof(num));
		spin_unlock_irqrestore(&input_pool.lock, flags);
	}

	if (crng_ready())
		return;

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = now - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, now);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta. Round down by 1 bit
	 * on general principles, and limit entropy estimate to 11 bits.
	 */
	bits = min(fls(delta >> 1), 11);

	/*
	 * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
	 * will run after this, which uses a different crediting scheme of 1 bit
	 * per every 64 interrupts. In order to let that function do accounting
	 * close to the one in this function, we credit a full 64/64 bit per bit,
	 * and then subtract one to account for the extra one added.
	 */
	if (in_hardirq())
		this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
	else
		_credit_init_bits(bits);
}

void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
{
	static unsigned char last_value;
	static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };

	/* Ignore autorepeat and the like. */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* First major is 1, so we get >= 0x200 here. */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);

void __cold rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

struct entropy_timer_state {
	unsigned long entropy;
	struct timer_list timer;
	unsigned int samples, samples_per_bit;
};

/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter. Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that is
 * generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */
static void __cold entropy_timer(struct timer_list *timer)
{
	struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);

	if (++state->samples == state->samples_per_bit) {
		credit_init_bits(1);
		state->samples = 0;
	}
}

/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise.
 */
static void __cold try_to_generate_entropy(void)
{
	enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
	struct entropy_timer_state stack;
	unsigned int i, num_different = 0;
	unsigned long last = random_get_entropy();

	for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
		stack.entropy = random_get_entropy();
		if (stack.entropy != last)
			++num_different;
		last = stack.entropy;
	}
	stack.samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
	if (stack.samples_per_bit > MAX_SAMPLES_PER_BIT)
		return;

	stack.samples = 0;
	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready() && !signal_pending(current)) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies);
		mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
		schedule();
		stack.entropy = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
}


/**********************************************************************
 *
 * Userspace reader/writer interfaces.
 *
 * getrandom(2) is the primary modern interface into the RNG and should
 * be used in preference to anything else.
 *
 * Reading from /dev/random has the same functionality as calling
 * getrandom(2) with flags=0. In earlier versions, however, it had
 * vastly different semantics and should therefore be avoided, to
 * prevent backwards compatibility issues.
 *
 * Reading from /dev/urandom has the same functionality as calling
 * getrandom(2) with flags=GRND_INSECURE. Because it does not block
 * waiting for the RNG to be ready, it should not be used.
 *
 * Writing to either /dev/random or /dev/urandom adds entropy to
 * the input pool but does not credit it.
 *
 * Polling on /dev/random indicates when the RNG is initialized, on
 * the read side, and when it wants new entropy, on the write side.
 *
 * Both /dev/random and /dev/urandom have the same set of ioctls for
 * adding entropy, getting the entropy count, zeroing the count, and
 * reseeding the crng.
 *
 **********************************************************************/
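
/*
 * A minimal userspace sketch of the preferred interface, assuming a
 * libc recent enough to provide the getrandom(2) wrapper:
 *
 *	#include <stdio.h>
 *	#include <sys/random.h>
 *
 *	int main(void)
 *	{
 *		unsigned char key[32];
 *
 *		if (getrandom(key, sizeof(key), 0) != sizeof(key)) {
 *			perror("getrandom");
 *			return 1;
 *		}
 *		for (size_t i = 0; i < sizeof(key); ++i)
 *			printf("%02x", key[i]);
 *		printf("\n");
 *		return 0;
 *	}
 *
 * With flags == 0 this blocks until the RNG is initialized, just like
 * a read from /dev/random described below.
 */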

SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
{
	struct iov_iter iter;
	struct iovec iov;
	int ret;

	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
		return -EINVAL;

	if (!crng_ready() && !(flags & GRND_INSECURE)) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}

	ret = import_single_range(READ, ubuf, len, &iov, &iter);
	if (unlikely(ret))
		return ret;
	return get_random_bytes_user(&iter);
}

static __poll_t random_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &crng_init_wait, wait);
	return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}

static ssize_t write_pool_user(struct iov_iter *iter)
{
	u8 block[BLAKE2S_BLOCK_SIZE];
	ssize_t ret = 0;
	size_t copied;

	if (unlikely(!iov_iter_count(iter)))
		return 0;

	for (;;) {
		copied = copy_from_iter(block, sizeof(block), iter);
		ret += copied;
		mix_pool_bytes(block, copied);
		if (!iov_iter_count(iter) || copied != sizeof(block))
			break;

		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(block, sizeof(block));
	return ret ? ret : -EFAULT;
}

static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	return write_pool_user(iter);
}

static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	static int maxwarn = 10;

	/*
	 * Opportunistically attempt to initialize the RNG on platforms that
	 * have fast cycle counters, but don't (for now) require it to succeed.
	 */
	if (!crng_ready())
		try_to_generate_entropy();

	if (!crng_ready()) {
		if (!ratelimit_disable && maxwarn <= 0)
			++urandom_warning.missed;
		else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
			--maxwarn;
			pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
				  current->comm, iov_iter_count(iter));
		}
	}

	return get_random_bytes_user(iter);
}

static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	int ret;

	if (!crng_ready() &&
	    ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
	     (kiocb->ki_filp->f_flags & O_NONBLOCK)))
		return -EAGAIN;

	ret = wait_for_random_bytes();
	if (ret != 0)
		return ret;
	return get_random_bytes_user(iter);
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;
	int ent_count;

	switch (cmd) {
	case RNDGETENTCNT:
		/* Inherently racy, no point locking. */
		if (put_user(input_pool.init_bits, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		credit_init_bits(ent_count);
		return 0;
	case RNDADDENTROPY: {
		struct iov_iter iter;
		struct iovec iov;
		ssize_t ret;
		int len;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(len, p++))
			return -EFAULT;
		ret = import_single_range(WRITE, p, len, &iov, &iter);
		if (unlikely(ret))
			return ret;
		ret = write_pool_user(&iter);
		if (unlikely(ret < 0))
			return ret;
		/* Since we're crediting, enforce that it was all written into the pool. */
		if (unlikely(ret != len))
			return -EFAULT;
		credit_init_bits(ent_count);
		return 0;
	}
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/* No longer has any effect. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (!crng_ready())
			return -ENODATA;
		crng_reseed();
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read_iter = random_read_iter,
	.write_iter = random_write_iter,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
};

const struct file_operations urandom_fops = {
	.read_iter = urandom_read_iter,
	.write_iter = random_write_iter,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
};


/********************************************************************
 *
 * Sysctl interface.
 *
 * These are partly unused legacy knobs with dummy values to not break
 * userspace and partly still useful things. They are usually accessible
 * in /proc/sys/kernel/random/ and are as follows:
 *
 * - boot_id - a UUID representing the current boot.
 *
 * - uuid - a random UUID, different each time the file is read.
 *
 * - poolsize - the number of bits of entropy that the input pool can
 *   hold, tied to the POOL_BITS constant.
 *
 * - entropy_avail - the number of bits of entropy currently in the
 *   input pool. Always <= poolsize.
 *
 * - write_wakeup_threshold - the amount of entropy in the input pool
 *   below which write polls to /dev/random will unblock, requesting
 *   more entropy, tied to the POOL_READY_BITS constant. It is writable
 *   to avoid breaking old userspaces, but writing to it does not
 *   change any behavior of the RNG.
 *
 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
 *   It is writable to avoid breaking old userspaces, but writing
 *   to it does not change any behavior of the RNG.
 *
 ********************************************************************/
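
/*
 * These knobs are plain files, so a userspace sketch needs nothing
 * beyond standard I/O (paths as documented above):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char uuid[64];
 *		FILE *f = fopen("/proc/sys/kernel/random/uuid", "r");
 *
 *		if (!f)
 *			return 1;
 *		if (fgets(uuid, sizeof(uuid), f))
 *			printf("fresh uuid: %s", uuid);	// newline-terminated
 *		fclose(f);
 *		return 0;
 *	}
 *
 * Reading boot_id the same way returns the same UUID until reboot,
 * whereas uuid differs on every read.
 */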

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
static int sysctl_poolsize = POOL_BITS;
static u8 sysctl_bootid[UUID_SIZE];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID. The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 */
static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
			size_t *lenp, loff_t *ppos)
{
	u8 tmp_uuid[UUID_SIZE], *uuid;
	char uuid_string[UUID_STRING_LEN + 1];
	struct ctl_table fake_table = {
		.data = uuid_string,
		.maxlen = UUID_STRING_LEN
	};

	if (write)
		return -EPERM;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
	return proc_dostring(&fake_table, 0, buf, lenp, ppos);
}

/* The same as proc_dointvec, but writes don't change anything. */
static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
			    size_t *lenp, loff_t *ppos)
{
	return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}

static struct ctl_table random_table[] = {
	{
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "entropy_avail",
		.data		= &input_pool.init_bits,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "write_wakeup_threshold",
		.data		= &sysctl_random_write_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_do_rointvec,
	},
	{
		.procname	= "urandom_min_reseed_secs",
		.data		= &sysctl_random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_do_rointvec,
	},
	{
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{
		.procname	= "uuid",
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{ }
};

/*
 * random_init() is called before sysctl_init(),
 * so we cannot call register_sysctl_init() in random_init().
 */
static int __init random_sysctls_init(void)
{
	register_sysctl_init("kernel/random", random_table);
	return 0;
}
device_initcall(random_sysctls_init);
#endif