1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /*
3 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
4 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
5 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
6 *
7 * This driver produces cryptographically secure pseudorandom data. It is divided
8 * into roughly six sections, each with a section header:
9 *
10 * - Initialization and readiness waiting.
11 * - Fast key erasure RNG, the "crng".
12 * - Entropy accumulation and extraction routines.
13 * - Entropy collection routines.
14 * - Userspace reader/writer interfaces.
15 * - Sysctl interface.
16 *
17 * The high level overview is that there is one input pool, into which
18 * various pieces of data are hashed. Prior to initialization, some of that
19 * data is then "credited" as having a certain number of bits of entropy.
20 * When enough bits of entropy are available, the hash is finalized and
21 * handed as a key to a stream cipher that expands it indefinitely for
22 * various consumers. This key is periodically refreshed as the various
23 * entropy collectors, described below, add data to the input pool.
24 */
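/*
 * A rough sketch of that flow (illustrative only, not a literal call
 * graph):
 *
 *	collectors (interrupts, devices, hwrngs, timers)
 *	    --> mix_pool_bytes() --> input_pool (BLAKE2s)
 *	    --> credit_init_bits()              [until crng_ready()]
 *	    --> extract_entropy() --> crng_reseed() --> ChaCha20 crng
 *	    --> get_random_bytes() / getrandom(2) / /dev/{u,}random
 */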
25
26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28 #include <linux/utsname.h>
29 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/major.h>
32 #include <linux/string.h>
33 #include <linux/fcntl.h>
34 #include <linux/slab.h>
35 #include <linux/random.h>
36 #include <linux/poll.h>
37 #include <linux/init.h>
38 #include <linux/fs.h>
39 #include <linux/blkdev.h>
40 #include <linux/interrupt.h>
41 #include <linux/mm.h>
42 #include <linux/nodemask.h>
43 #include <linux/spinlock.h>
44 #include <linux/kthread.h>
45 #include <linux/percpu.h>
46 #include <linux/ptrace.h>
47 #include <linux/workqueue.h>
48 #include <linux/irq.h>
49 #include <linux/ratelimit.h>
50 #include <linux/syscalls.h>
51 #include <linux/completion.h>
52 #include <linux/uuid.h>
53 #include <linux/uaccess.h>
54 #include <linux/suspend.h>
55 #include <linux/siphash.h>
56 #include <crypto/chacha.h>
57 #include <crypto/blake2s.h>
58 #include <asm/processor.h>
59 #include <asm/irq.h>
60 #include <asm/irq_regs.h>
61 #include <asm/io.h>
62
63 /*********************************************************************
64 *
65 * Initialization and readiness waiting.
66 *
67 * Much of the RNG infrastructure is devoted to various dependencies
68 * being able to wait until the RNG has collected enough entropy and
69 * is ready for safe consumption.
70 *
71 *********************************************************************/
72
73 /*
74 * crng_init is protected by base_crng->lock, and only increases
75 * its value (from empty->early->ready).
76 */
77 static enum {
78 CRNG_EMPTY = 0, /* Little to no entropy collected */
79 CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
80 CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
81 } crng_init __read_mostly = CRNG_EMPTY;
82 static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
83 #define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
84 /* Various types of waiters for crng_init->CRNG_READY transition. */
85 static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
86 static struct fasync_struct *fasync;
87
88 /* Control how we warn userspace. */
89 static struct ratelimit_state urandom_warning =
90 RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
91 static int ratelimit_disable __read_mostly =
92 IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
93 module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
94 MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
95
96 /*
97 * Returns whether or not the input pool has been seeded and thus guaranteed
98 * to supply cryptographically secure random numbers. This applies to: the
99 * /dev/urandom device, the get_random_bytes function, and the
100 * get_random_{u32,u64,int,long} family of functions.
101 *
102 * Returns: true if the input pool has been seeded.
103 * false if the input pool has not been seeded.
104 */
105 bool rng_is_initialized(void)
106 {
107 return crng_ready();
108 }
109 EXPORT_SYMBOL(rng_is_initialized);
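/*
 * Example: a minimal sketch of a hypothetical in-kernel caller that must
 * not block, so it checks readiness and defers its work otherwise:
 *
 *	if (!rng_is_initialized())
 *		return -EAGAIN;
 *	get_random_bytes(key, sizeof(key));
 */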
110
111 static void __cold crng_set_ready(struct work_struct *work)
112 {
113 static_branch_enable(&crng_is_ready);
114 }
115
116 /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
117 static void try_to_generate_entropy(void);
118
119 /*
120 * Wait for the input pool to be seeded and thus guaranteed to supply
121 * cryptographically secure random numbers. This applies to: the /dev/urandom
122 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
123 * family of functions. Using any of these functions without first calling
124 * this function forfeits the guarantee of security.
125 *
126 * Returns: 0 if the input pool has been seeded.
127 * -ERESTARTSYS if the function was interrupted by a signal.
128 */
129 int wait_for_random_bytes(void)
130 {
131 while (!crng_ready()) {
132 int ret;
133
134 try_to_generate_entropy();
135 ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
136 if (ret)
137 return ret > 0 ? 0 : ret;
138 }
139 return 0;
140 }
141 EXPORT_SYMBOL(wait_for_random_bytes);
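/*
 * Example: the blocking counterpart, a sketch of a hypothetical caller in
 * process context that waits for the pool to be seeded before drawing:
 *
 *	int ret = wait_for_random_bytes();
 *	if (ret)
 *		return ret;
 *	get_random_bytes(key, sizeof(key));
 */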
142
143 #define warn_unseeded_randomness() \
144 if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
145 printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
146 __func__, (void *)_RET_IP_, crng_init)
147
148
149 /*********************************************************************
150 *
151 * Fast key erasure RNG, the "crng".
152 *
153 * These functions expand entropy from the entropy extractor into
154 * long streams for external consumption using the "fast key erasure"
155 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
156 *
157 * There are a few exported interfaces for use by other drivers:
158 *
159 * void get_random_bytes(void *buf, size_t len)
160 * u32 get_random_u32()
161 * u64 get_random_u64()
162 * unsigned int get_random_int()
163 * unsigned long get_random_long()
164 *
165 * These interfaces will return the requested number of random bytes
166 * into the given buffer or as a return value. This is equivalent to
167 * a read from /dev/urandom. The u32, u64, int, and long family of
168 * functions may be higher performance for one-off random integers,
169 * because they do a bit of buffering and do not invoke reseeding
170 * until the buffer is emptied.
171 *
172 *********************************************************************/
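/*
 * For instance, a hypothetical consumer might draw a one-off integer via
 * the batched interfaces and a full buffer via get_random_bytes() (sketch
 * only):
 *
 *	u64 cookie = get_random_u64();
 *	u8 nonce[16];
 *	get_random_bytes(nonce, sizeof(nonce));
 */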
173
174 enum {
175 CRNG_RESEED_START_INTERVAL = HZ,
176 CRNG_RESEED_INTERVAL = 60 * HZ
177 };
178
179 static struct {
180 u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
181 unsigned long birth;
182 unsigned long generation;
183 spinlock_t lock;
184 } base_crng = {
185 .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
186 };
187
188 struct crng {
189 u8 key[CHACHA_KEY_SIZE];
190 unsigned long generation;
191 local_lock_t lock;
192 };
193
194 static DEFINE_PER_CPU(struct crng, crngs) = {
195 .generation = ULONG_MAX,
196 .lock = INIT_LOCAL_LOCK(crngs.lock),
197 };
198
199 /* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
200 static void extract_entropy(void *buf, size_t len);
201
202 /* This extracts a new crng key from the input pool. */
203 static void crng_reseed(void)
204 {
205 unsigned long flags;
206 unsigned long next_gen;
207 u8 key[CHACHA_KEY_SIZE];
208
209 extract_entropy(key, sizeof(key));
210
211 /*
212 * We copy the new key into the base_crng, overwriting the old one,
213 * and update the generation counter. We avoid hitting ULONG_MAX,
214 * because the per-cpu crngs are initialized to ULONG_MAX, so this
215 * forces new CPUs that come online to always initialize.
216 */
217 spin_lock_irqsave(&base_crng.lock, flags);
218 memcpy(base_crng.key, key, sizeof(base_crng.key));
219 next_gen = base_crng.generation + 1;
220 if (next_gen == ULONG_MAX)
221 ++next_gen;
222 WRITE_ONCE(base_crng.generation, next_gen);
223 WRITE_ONCE(base_crng.birth, jiffies);
224 if (!static_branch_likely(&crng_is_ready))
225 crng_init = CRNG_READY;
226 spin_unlock_irqrestore(&base_crng.lock, flags);
227 memzero_explicit(key, sizeof(key));
228 }
229
230 /*
231 * This generates a ChaCha block using the provided key, and then
232 * immediately overwrites that key with half the block. It returns
233 * the resultant ChaCha state to the user, along with the second
234 * half of the block containing 32 bytes of random data that may
235 * be used; random_data_len may not be greater than 32.
236 *
237 * The returned ChaCha state contains within it a copy of the old
238 * key value, at index 4, so the state should always be zeroed out
239 * immediately after using in order to maintain forward secrecy.
240 * If the state cannot be erased in a timely manner, then it is
241 * safer to set the random_data parameter to &chacha_state[4] so
242 * that this function overwrites it before returning.
243 */
244 static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
245 u32 chacha_state[CHACHA_STATE_WORDS],
246 u8 *random_data, size_t random_data_len)
247 {
248 u8 first_block[CHACHA_BLOCK_SIZE];
249
250 BUG_ON(random_data_len > 32);
251
252 chacha_init_consts(chacha_state);
253 memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
254 memset(&chacha_state[12], 0, sizeof(u32) * 4);
255 chacha20_block(chacha_state, first_block);
256
257 memcpy(key, first_block, CHACHA_KEY_SIZE);
258 memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
259 memzero_explicit(first_block, sizeof(first_block));
260 }
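/*
 * Illustrative caller pattern for the erasure trick described above
 * (compare get_random_bytes_user() below): pointing random_data at
 * &chacha_state[4] guarantees the key words are overwritten before this
 * returns, even if the state cannot be zeroed promptly afterward:
 *
 *	crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
 */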
261
262 /*
263 * Return whether the crng seed is considered to be sufficiently old
264 * that a reseeding is needed. This happens if the last reseeding
265 * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
266 * proportional to the uptime.
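 * For example, at ten seconds of uptime the interval is five seconds.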
267 */
268 static bool crng_has_old_seed(void)
269 {
270 static bool early_boot = true;
271 unsigned long interval = CRNG_RESEED_INTERVAL;
272
273 if (unlikely(READ_ONCE(early_boot))) {
274 time64_t uptime = ktime_get_seconds();
275 if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
276 WRITE_ONCE(early_boot, false);
277 else
278 interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
279 (unsigned int)uptime / 2 * HZ);
280 }
281 return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
282 }
283
284 /*
285 * This function returns a ChaCha state that you may use for generating
286 * random data. It also returns up to 32 bytes on its own of random data
287 * that may be used; random_data_len may not be greater than 32.
288 */
289 static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
290 u8 *random_data, size_t random_data_len)
291 {
292 unsigned long flags;
293 struct crng *crng;
294
295 BUG_ON(random_data_len > 32);
296
297 /*
298 * For the fast path, we check whether we're ready, unlocked first, and
299 * then re-check once locked later. In the case where we're really not
300 * ready, we do fast key erasure with the base_crng directly, extracting
301 * when crng_init is CRNG_EMPTY.
302 */
303 if (!crng_ready()) {
304 bool ready;
305
306 spin_lock_irqsave(&base_crng.lock, flags);
307 ready = crng_ready();
308 if (!ready) {
309 if (crng_init == CRNG_EMPTY)
310 extract_entropy(base_crng.key, sizeof(base_crng.key));
311 crng_fast_key_erasure(base_crng.key, chacha_state,
312 random_data, random_data_len);
313 }
314 spin_unlock_irqrestore(&base_crng.lock, flags);
315 if (!ready)
316 return;
317 }
318
319 /*
320 * If the base_crng is old enough, we reseed, which in turn bumps the
321 * generation counter that we check below.
322 */
323 if (unlikely(crng_has_old_seed()))
324 crng_reseed();
325
326 local_lock_irqsave(&crngs.lock, flags);
327 crng = raw_cpu_ptr(&crngs);
328
329 /*
330 * If our per-cpu crng is older than the base_crng, then it means
331 * somebody reseeded the base_crng. In that case, we do fast key
332 * erasure on the base_crng, and use its output as the new key
333 * for our per-cpu crng. This brings us up to date with base_crng.
334 */
335 if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
336 spin_lock(&base_crng.lock);
337 crng_fast_key_erasure(base_crng.key, chacha_state,
338 crng->key, sizeof(crng->key));
339 crng->generation = base_crng.generation;
340 spin_unlock(&base_crng.lock);
341 }
342
343 /*
344 * Finally, when we've made it this far, our per-cpu crng has an up
345 * to date key, and we can do fast key erasure with it to produce
346 * some random data and a ChaCha state for the caller. All other
347 * branches of this function are "unlikely", so most of the time we
348 * should wind up here immediately.
349 */
350 crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
351 local_unlock_irqrestore(&crngs.lock, flags);
352 }
353
354 static void _get_random_bytes(void *buf, size_t len)
355 {
356 u32 chacha_state[CHACHA_STATE_WORDS];
357 u8 tmp[CHACHA_BLOCK_SIZE];
358 size_t first_block_len;
359
360 if (!len)
361 return;
362
363 first_block_len = min_t(size_t, 32, len);
364 crng_make_state(chacha_state, buf, first_block_len);
365 len -= first_block_len;
366 buf += first_block_len;
367
368 while (len) {
369 if (len < CHACHA_BLOCK_SIZE) {
370 chacha20_block(chacha_state, tmp);
371 memcpy(buf, tmp, len);
372 memzero_explicit(tmp, sizeof(tmp));
373 break;
374 }
375
376 chacha20_block(chacha_state, buf);
377 if (unlikely(chacha_state[12] == 0))
378 ++chacha_state[13];
379 len -= CHACHA_BLOCK_SIZE;
380 buf += CHACHA_BLOCK_SIZE;
381 }
382
383 memzero_explicit(chacha_state, sizeof(chacha_state));
384 }
385
386 /*
387 * This function is the exported kernel interface. It returns some
388 * number of good random numbers, suitable for key generation, seeding
389 * TCP sequence numbers, etc. In order to ensure that the randomness
390 * provided by this function is okay, the function wait_for_random_bytes()
391 * should be called and return 0 at least once at any point prior.
392 */
393 void get_random_bytes(void *buf, size_t len)
394 {
395 warn_unseeded_randomness();
396 _get_random_bytes(buf, len);
397 }
398 EXPORT_SYMBOL(get_random_bytes);
399
400 static ssize_t get_random_bytes_user(struct iov_iter *iter)
401 {
402 u32 chacha_state[CHACHA_STATE_WORDS];
403 u8 block[CHACHA_BLOCK_SIZE];
404 size_t ret = 0, copied;
405
406 if (unlikely(!iov_iter_count(iter)))
407 return 0;
408
409 /*
410 * Immediately overwrite the ChaCha key at index 4 with random
411 * bytes, in case userspace causes copy_to_iter() below to sleep
412 * forever, so that we still retain forward secrecy in that case.
413 */
414 crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
415 /*
416 * However, if we're doing a read of len <= 32, we don't need to
417 * use chacha_state after, so we can simply return those bytes to
418 * the user directly.
419 */
420 if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
421 ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
422 goto out_zero_chacha;
423 }
424
425 for (;;) {
426 chacha20_block(chacha_state, block);
427 if (unlikely(chacha_state[12] == 0))
428 ++chacha_state[13];
429
430 copied = copy_to_iter(block, sizeof(block), iter);
431 ret += copied;
432 if (!iov_iter_count(iter) || copied != sizeof(block))
433 break;
434
435 BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
436 if (ret % PAGE_SIZE == 0) {
437 if (signal_pending(current))
438 break;
439 cond_resched();
440 }
441 }
442
443 memzero_explicit(block, sizeof(block));
444 out_zero_chacha:
445 memzero_explicit(chacha_state, sizeof(chacha_state));
446 return ret ? ret : -EFAULT;
447 }
448
449 /*
450 * Batched entropy returns random integers. The quality of the random
451 * numbers is as good as /dev/urandom. In order to ensure that the randomness
452 * provided by this function is okay, the function wait_for_random_bytes()
453 * should be called and return 0 at least once at any point prior.
454 */
455
456 #define DEFINE_BATCHED_ENTROPY(type) \
457 struct batch_ ##type { \
458 /* \
459 * We make this 1.5x a ChaCha block, so that we get the \
460 * remaining 32 bytes from fast key erasure, plus one full \
461 * block from the detached ChaCha state. We can increase \
462 * the size of this later if needed so long as we keep the \
463 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
464 */ \
465 type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
466 local_lock_t lock; \
467 unsigned long generation; \
468 unsigned int position; \
469 }; \
470 \
471 static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
472 .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
473 .position = UINT_MAX \
474 }; \
475 \
476 type get_random_ ##type(void) \
477 { \
478 type ret; \
479 unsigned long flags; \
480 struct batch_ ##type *batch; \
481 unsigned long next_gen; \
482 \
483 warn_unseeded_randomness(); \
484 \
485 if (!crng_ready()) { \
486 _get_random_bytes(&ret, sizeof(ret)); \
487 return ret; \
488 } \
489 \
490 local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
491 batch = raw_cpu_ptr(&batched_entropy_##type); \
492 \
493 next_gen = READ_ONCE(base_crng.generation); \
494 if (batch->position >= ARRAY_SIZE(batch->entropy) || \
495 next_gen != batch->generation) { \
496 _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
497 batch->position = 0; \
498 batch->generation = next_gen; \
499 } \
500 \
501 ret = batch->entropy[batch->position]; \
502 batch->entropy[batch->position] = 0; \
503 ++batch->position; \
504 local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
505 return ret; \
506 } \
507 EXPORT_SYMBOL(get_random_ ##type);
508
509 DEFINE_BATCHED_ENTROPY(u64)
510 DEFINE_BATCHED_ENTROPY(u32)
511
512 #ifdef CONFIG_SMP
513 /*
514 * This function is called when the CPU is coming up, with entry
515 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
516 */
517 int __cold random_prepare_cpu(unsigned int cpu)
518 {
519 /*
520 * When the cpu comes back online, immediately invalidate both
521 * the per-cpu crng and all batches, so that we serve fresh
522 * randomness.
523 */
524 per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
525 per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
526 per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
527 return 0;
528 }
529 #endif
530
531
532 /**********************************************************************
533 *
534 * Entropy accumulation and extraction routines.
535 *
536 * Callers may add entropy via:
537 *
538 * static void mix_pool_bytes(const void *buf, size_t len)
539 *
540 * After which, if added entropy should be credited:
541 *
542 * static void credit_init_bits(size_t bits)
543 *
544 * Finally, extract entropy via:
545 *
546 * static void extract_entropy(void *buf, size_t len)
547 *
548 **********************************************************************/
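/*
 * A sketch of how these fit together (illustrative only): a credited
 * source mixes and credits, and the crng later pulls out a fresh key:
 *
 *	mix_pool_bytes(buf, len);
 *	credit_init_bits(entropy_bits);
 *	...
 *	extract_entropy(key, sizeof(key));	(from crng_reseed())
 */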
549
550 enum {
551 POOL_BITS = BLAKE2S_HASH_SIZE * 8,
552 POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
553 POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
554 };
555
556 static struct {
557 struct blake2s_state hash;
558 spinlock_t lock;
559 unsigned int init_bits;
560 } input_pool = {
561 .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
562 BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
563 BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
564 .hash.outlen = BLAKE2S_HASH_SIZE,
565 .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
566 };
567
568 static void _mix_pool_bytes(const void *buf, size_t len)
569 {
570 blake2s_update(&input_pool.hash, buf, len);
571 }
572
573 /*
574 * This function adds bytes into the input pool. It does not
575 * update the initialization bit counter; the caller should call
576 * credit_init_bits if this is appropriate.
577 */
578 static void mix_pool_bytes(const void *buf, size_t len)
579 {
580 unsigned long flags;
581
582 spin_lock_irqsave(&input_pool.lock, flags);
583 _mix_pool_bytes(buf, len);
584 spin_unlock_irqrestore(&input_pool.lock, flags);
585 }
586
587 /*
588 * This is an HKDF-like construction for using the hashed collected entropy
589 * as a PRF key, that's then expanded block-by-block.
590 */
591 static void extract_entropy(void *buf, size_t len)
592 {
593 unsigned long flags;
594 u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
595 struct {
596 unsigned long rdseed[32 / sizeof(long)];
597 size_t counter;
598 } block;
599 size_t i;
600
601 for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
602 if (!arch_get_random_seed_long(&block.rdseed[i]) &&
603 !arch_get_random_long(&block.rdseed[i]))
604 block.rdseed[i] = random_get_entropy();
605 }
606
607 spin_lock_irqsave(&input_pool.lock, flags);
608
609 /* seed = HASHPRF(last_key, entropy_input) */
610 blake2s_final(&input_pool.hash, seed);
611
612 /* next_key = HASHPRF(seed, RDSEED || 0) */
613 block.counter = 0;
614 blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
615 blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
616
617 spin_unlock_irqrestore(&input_pool.lock, flags);
618 memzero_explicit(next_key, sizeof(next_key));
619
620 while (len) {
621 i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
622 /* output = HASHPRF(seed, RDSEED || ++counter) */
623 ++block.counter;
624 blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
625 len -= i;
626 buf += i;
627 }
628
629 memzero_explicit(seed, sizeof(seed));
630 memzero_explicit(&block, sizeof(block));
631 }
632
633 #define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
634
635 static void __cold _credit_init_bits(size_t bits)
636 {
637 static struct execute_work set_ready;
638 unsigned int new, orig, add;
639 unsigned long flags;
640
641 if (!bits)
642 return;
643
644 add = min_t(size_t, bits, POOL_BITS);
645
646 do {
647 orig = READ_ONCE(input_pool.init_bits);
648 new = min_t(unsigned int, POOL_BITS, orig + add);
649 } while (cmpxchg(&input_pool.init_bits, orig, new) != orig);
650
651 if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
652 crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
653 if (static_key_initialized)
654 execute_in_process_context(crng_set_ready, &set_ready);
655 wake_up_interruptible(&crng_init_wait);
656 kill_fasync(&fasync, SIGIO, POLL_IN);
657 pr_notice("crng init done\n");
658 if (urandom_warning.missed)
659 pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
660 urandom_warning.missed);
661 } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
662 spin_lock_irqsave(&base_crng.lock, flags);
663 /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
664 if (crng_init == CRNG_EMPTY) {
665 extract_entropy(base_crng.key, sizeof(base_crng.key));
666 crng_init = CRNG_EARLY;
667 }
668 spin_unlock_irqrestore(&base_crng.lock, flags);
669 }
670 }
671
672
673 /**********************************************************************
674 *
675 * Entropy collection routines.
676 *
677 * The following exported functions are used for pushing entropy into
678 * the above entropy accumulation routines:
679 *
680 * void add_device_randomness(const void *buf, size_t len);
681 * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
682 * void add_bootloader_randomness(const void *buf, size_t len);
683 * void add_vmfork_randomness(const void *unique_vm_id, size_t len);
684 * void add_interrupt_randomness(int irq);
685 * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
686 * void add_disk_randomness(struct gendisk *disk);
687 *
688 * add_device_randomness() adds data to the input pool that
689 * is likely to differ between two devices (or possibly even per boot).
690 * This would be things like MAC addresses or serial numbers, or the
691 * read-out of the RTC. This does *not* credit any actual entropy to
692 * the pool, but it initializes the pool to different values for devices
693 * that might otherwise be identical and have very little entropy
694 * available to them (particularly common in the embedded world).
695 *
696 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
697 * entropy as specified by the caller. If the entropy pool is full it will
698 * block until more entropy is needed.
699 *
700 * add_bootloader_randomness() is called by bootloader drivers, such as EFI
701 * and device tree, and credits its input depending on whether or not the
702 * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
703 *
704 * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
705 * representing the current instance of a VM to the pool, without crediting,
706 * and then force-reseeds the crng so that it takes effect immediately.
707 *
708 * add_interrupt_randomness() uses the interrupt timing as random
709 * inputs to the entropy pool. Using the cycle counters and the irq source
710 * as inputs, it feeds the input pool roughly once a second or after 64
711 * interrupts, crediting 1 bit of entropy for whichever comes first.
712 *
713 * add_input_randomness() uses the input layer interrupt timing, as well
714 * as the event type information from the hardware.
715 *
716 * add_disk_randomness() uses what amounts to the seek time of block
717 * layer request events, on a per-disk_devt basis, as input to the
718 * entropy pool. Note that high-speed solid state drives with very low
719 * seek times do not make for good sources of entropy, as their seek
720 * times are usually fairly consistent.
721 *
722 * The last two routines try to estimate how many bits of entropy
723 * to credit. They do this by keeping track of the first, second, and
724 * third order deltas of the event timings.
725 *
726 **********************************************************************/
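/*
 * For example, a hypothetical NIC driver might mix in its (public but
 * device-unique) MAC address at probe time, without crediting entropy:
 *
 *	add_device_randomness(netdev->dev_addr, ETH_ALEN);
 */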
727
728 static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
729 static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
730 static int __init parse_trust_cpu(char *arg)
731 {
732 return kstrtobool(arg, &trust_cpu);
733 }
734 static int __init parse_trust_bootloader(char *arg)
735 {
736 return kstrtobool(arg, &trust_bootloader);
737 }
738 early_param("random.trust_cpu", parse_trust_cpu);
739 early_param("random.trust_bootloader", parse_trust_bootloader);
740
741 static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
742 {
743 unsigned long flags, entropy = random_get_entropy();
744
745 /*
746 * Encode a representation of how long the system has been suspended,
747 * in a way that is distinct from prior system suspends.
748 */
749 ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() };
750
751 spin_lock_irqsave(&input_pool.lock, flags);
752 _mix_pool_bytes(&action, sizeof(action));
753 _mix_pool_bytes(stamps, sizeof(stamps));
754 _mix_pool_bytes(&entropy, sizeof(entropy));
755 spin_unlock_irqrestore(&input_pool.lock, flags);
756
757 if (crng_ready() && (action == PM_RESTORE_PREPARE ||
758 (action == PM_POST_SUSPEND &&
759 !IS_ENABLED(CONFIG_PM_AUTOSLEEP) && !IS_ENABLED(CONFIG_ANDROID)))) {
760 crng_reseed();
761 pr_notice("crng reseeded on system resumption\n");
762 }
763 return 0;
764 }
765
766 static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };
767
768 /*
769 * The first collection of entropy occurs at system boot while interrupts
770 * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
771 * utsname(), and the command line. Depending on the above configuration knob,
772 * RDSEED may be considered sufficient for initialization. Note that much
773 * earlier setup may already have pushed entropy into the input pool by the
774 * time we get here.
775 */
776 int __init random_init(const char *command_line)
777 {
778 ktime_t now = ktime_get_real();
779 unsigned int i, arch_bits;
780 unsigned long entropy;
781
782 #if defined(LATENT_ENTROPY_PLUGIN)
783 static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
784 _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
785 #endif
786
787 for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8;
788 i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
789 if (!arch_get_random_seed_long_early(&entropy) &&
790 !arch_get_random_long_early(&entropy)) {
791 entropy = random_get_entropy();
792 arch_bits -= sizeof(entropy) * 8;
793 }
794 _mix_pool_bytes(&entropy, sizeof(entropy));
795 }
796 _mix_pool_bytes(&now, sizeof(now));
797 _mix_pool_bytes(utsname(), sizeof(*(utsname())));
798 _mix_pool_bytes(command_line, strlen(command_line));
799 add_latent_entropy();
800
801 /*
802 * If we were initialized by the bootloader before jump labels are
803 * initialized, then we should enable the static branch here, where
804 * it's guaranteed that jump labels have been initialized.
805 */
806 if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
807 crng_set_ready(NULL);
808
809 if (crng_ready())
810 crng_reseed();
811 else if (trust_cpu)
812 _credit_init_bits(arch_bits);
813
814 WARN_ON(register_pm_notifier(&pm_notifier));
815
816 WARN(!random_get_entropy(), "Missing cycle counter and fallback timer; RNG "
817 "entropy collection will consequently suffer.");
818 return 0;
819 }
820
821 /*
822 * Add device- or boot-specific data to the input pool to help
823 * initialize it.
824 *
825 * None of this adds any entropy; it is meant to avoid the problem of
826 * the entropy pool having similar initial state across largely
827 * identical devices.
828 */
829 void add_device_randomness(const void *buf, size_t len)
830 {
831 unsigned long entropy = random_get_entropy();
832 unsigned long flags;
833
834 spin_lock_irqsave(&input_pool.lock, flags);
835 _mix_pool_bytes(&entropy, sizeof(entropy));
836 _mix_pool_bytes(buf, len);
837 spin_unlock_irqrestore(&input_pool.lock, flags);
838 }
839 EXPORT_SYMBOL(add_device_randomness);
840
841 /*
842 * Interface for in-kernel drivers of true hardware RNGs.
843 * Those devices may produce endless random bits and will be throttled
844 * when our pool is full.
845 */
846 void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
847 {
848 mix_pool_bytes(buf, len);
849 credit_init_bits(entropy);
850
851 /*
852 * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
853 * we're not yet initialized.
854 */
855 if (!kthread_should_stop() && crng_ready())
856 schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
857 }
858 EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
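/*
 * A sketch of a hypothetical hardware RNG kthread feeding this interface;
 * read_from_hw() and the half-credit quality estimate are made up for
 * illustration, and the throttling above paces the loop once the crng
 * is ready:
 *
 *	while (!kthread_should_stop()) {
 *		size_t len = read_from_hw(buf, sizeof(buf));
 *		add_hwgenerator_randomness(buf, len, len * 8 / 2);
 *	}
 */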
859
860 /*
861 * Handle random seed passed by bootloader, and credit it if
862 * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
863 */
864 void __init add_bootloader_randomness(const void *buf, size_t len)
865 {
866 mix_pool_bytes(buf, len);
867 if (trust_bootloader)
868 credit_init_bits(len * 8);
869 }
870
871 #if IS_ENABLED(CONFIG_VMGENID)
872 static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
873
874 /*
875 * Handle a new unique VM ID, which is unique but not secret, so we
876 * don't credit it, but we do immediately force a reseed afterward so
877 * that it's used by the crng posthaste.
878 */
879 void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
880 {
881 add_device_randomness(unique_vm_id, len);
882 if (crng_ready()) {
883 crng_reseed();
884 pr_notice("crng reseeded due to virtual machine fork\n");
885 }
886 blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
887 }
888 #if IS_MODULE(CONFIG_VMGENID)
889 EXPORT_SYMBOL_GPL(add_vmfork_randomness);
890 #endif
891
892 int __cold register_random_vmfork_notifier(struct notifier_block *nb)
893 {
894 return blocking_notifier_chain_register(&vmfork_chain, nb);
895 }
896 EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
897
898 int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
899 {
900 return blocking_notifier_chain_unregister(&vmfork_chain, nb);
901 }
902 EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
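/*
 * A sketch of a hypothetical consumer that must react to VM forks, e.g.
 * to rotate its own session secrets (callback and helper names are made
 * up):
 *
 *	static int my_vmfork_cb(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		rotate_my_secrets();
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_vmfork_cb };
 *
 *	register_random_vmfork_notifier(&my_nb);
 */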
903 #endif
904
905 struct fast_pool {
906 struct work_struct mix;
907 unsigned long pool[4];
908 unsigned long last;
909 unsigned int count;
910 };
911
912 static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
913 #ifdef CONFIG_64BIT
914 #define FASTMIX_PERM SIPHASH_PERMUTATION
915 .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
916 #else
917 #define FASTMIX_PERM HSIPHASH_PERMUTATION
918 .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
919 #endif
920 };
921
922 /*
923 * This is [Half]SipHash-1-x, starting from an empty key. Because
924 * the key is fixed, it assumes that its inputs are non-malicious,
925 * and therefore this has no security on its own. s represents the
926 * four-word SipHash state, while v represents a two-word input.
927 */
928 static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
929 {
930 s[3] ^= v1;
931 FASTMIX_PERM(s[0], s[1], s[2], s[3]);
932 s[0] ^= v1;
933 s[3] ^= v2;
934 FASTMIX_PERM(s[0], s[1], s[2], s[3]);
935 s[0] ^= v2;
936 }
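/*
 * Illustrative use (see add_interrupt_randomness() below): two words are
 * ingested per event, typically a cycle counter plus an identifying value:
 *
 *	fast_mix(fast_pool->pool, random_get_entropy(), value);
 */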
937
938 #ifdef CONFIG_SMP
939 /*
940 * This function is called when the CPU has just come online, with
941 * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
942 */
943 int __cold random_online_cpu(unsigned int cpu)
944 {
945 /*
946 * During CPU shutdown and before CPU onlining, add_interrupt_
947 * randomness() may schedule mix_interrupt_randomness(), and
948 * set the MIX_INFLIGHT flag. However, because the worker can
949 * be scheduled on a different CPU during this period, that
950 * flag will never be cleared. For that reason, we zero out
951 * the flag here, which runs just after workqueues are onlined
952 * for the CPU again. This also has the effect of setting the
953 * irq randomness count to zero so that new accumulated irqs
954 * are fresh.
955 */
956 per_cpu_ptr(&irq_randomness, cpu)->count = 0;
957 return 0;
958 }
959 #endif
960
961 static void mix_interrupt_randomness(struct work_struct *work)
962 {
963 struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
964 /*
965 * The size of the copied stack pool is explicitly 2 longs so that we
966 * only ever ingest half of the siphash output each time, retaining
967 * the other half as the next "key" that carries over. The entropy is
968 * supposed to be sufficiently dispersed between bits so on average
969 * we don't wind up "losing" some.
970 */
971 unsigned long pool[2];
972 unsigned int count;
973
974 /* Check to see if we're running on the wrong CPU due to hotplug. */
975 local_irq_disable();
976 if (fast_pool != this_cpu_ptr(&irq_randomness)) {
977 local_irq_enable();
978 return;
979 }
980
981 /*
982 * Copy the pool to the stack so that the mixer always has a
983 * consistent view, before we reenable irqs again.
984 */
985 memcpy(pool, fast_pool->pool, sizeof(pool));
986 count = fast_pool->count;
987 fast_pool->count = 0;
988 fast_pool->last = jiffies;
989 local_irq_enable();
990
991 mix_pool_bytes(pool, sizeof(pool));
992 credit_init_bits(max(1u, (count & U16_MAX) / 64));
993
994 memzero_explicit(pool, sizeof(pool));
995 }
996
997 void add_interrupt_randomness(int irq)
998 {
999 enum { MIX_INFLIGHT = 1U << 31 };
1000 unsigned long entropy = random_get_entropy();
1001 struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
1002 struct pt_regs *regs = get_irq_regs();
1003 unsigned int new_count;
1004
1005 fast_mix(fast_pool->pool, entropy,
1006 (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
1007 new_count = ++fast_pool->count;
1008
1009 if (new_count & MIX_INFLIGHT)
1010 return;
1011
1012 if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
1013 return;
1014
1015 if (unlikely(!fast_pool->mix.func))
1016 INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
1017 fast_pool->count |= MIX_INFLIGHT;
1018 queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
1019 }
1020 EXPORT_SYMBOL_GPL(add_interrupt_randomness);
1021
1022 /* There is one of these per entropy source */
1023 struct timer_rand_state {
1024 unsigned long last_time;
1025 long last_delta, last_delta2;
1026 };
1027
1028 /*
1029 * This function adds entropy to the entropy "pool" by using timing
1030 * delays. It uses the timer_rand_state structure to make an estimate
1031 * of how many bits of entropy this call has added to the pool. The
1032 * value "num" is also added to the pool; it should somehow describe
1033 * the type of event that just happened.
1034 */
1035 static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
1036 {
1037 unsigned long entropy = random_get_entropy(), now = jiffies, flags;
1038 long delta, delta2, delta3;
1039 unsigned int bits;
1040
1041 /*
1042 * If we're in a hard IRQ, add_interrupt_randomness() will be called
1043 * sometime after, so mix into the fast pool.
1044 */
1045 if (in_hardirq()) {
1046 fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
1047 } else {
1048 spin_lock_irqsave(&input_pool.lock, flags);
1049 _mix_pool_bytes(&entropy, sizeof(entropy));
1050 _mix_pool_bytes(&num, sizeof(num));
1051 spin_unlock_irqrestore(&input_pool.lock, flags);
1052 }
1053
1054 if (crng_ready())
1055 return;
1056
1057 /*
1058 * Calculate number of bits of randomness we probably added.
1059 * We take into account the first, second and third-order deltas
1060 * in order to make our estimate.
1061 */
1062 delta = now - READ_ONCE(state->last_time);
1063 WRITE_ONCE(state->last_time, now);
1064
1065 delta2 = delta - READ_ONCE(state->last_delta);
1066 WRITE_ONCE(state->last_delta, delta);
1067
1068 delta3 = delta2 - READ_ONCE(state->last_delta2);
1069 WRITE_ONCE(state->last_delta2, delta2);
1070
1071 if (delta < 0)
1072 delta = -delta;
1073 if (delta2 < 0)
1074 delta2 = -delta2;
1075 if (delta3 < 0)
1076 delta3 = -delta3;
1077 if (delta > delta2)
1078 delta = delta2;
1079 if (delta > delta3)
1080 delta = delta3;
1081
1082 /*
1083 * delta is now minimum absolute delta. Round down by 1 bit
1084 * on general principles, and limit entropy estimate to 11 bits.
1085 */
1086 bits = min(fls(delta >> 1), 11);
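	/*
	 * Worked example (illustrative): a minimum absolute delta of 100
	 * jiffies credits fls(100 >> 1) = fls(50) = 6 bits.
	 */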
1087
1088 /*
1089 * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
1090 * will run after this, which uses a different crediting scheme of 1 bit
1091 * per every 64 interrupts. In order to let that function do accounting
1092 * close to the one in this function, we credit a full 64/64 bit per bit,
1093 * and then subtract one to account for the extra one added.
1094 */
1095 if (in_hardirq())
1096 this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
1097 else
1098 _credit_init_bits(bits);
1099 }
1100
1101 void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
1102 {
1103 static unsigned char last_value;
1104 static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
1105
1106 /* Ignore autorepeat and the like. */
1107 if (value == last_value)
1108 return;
1109
1110 last_value = value;
1111 add_timer_randomness(&input_timer_state,
1112 (type << 4) ^ code ^ (code >> 4) ^ value);
1113 }
1114 EXPORT_SYMBOL_GPL(add_input_randomness);
1115
1116 #ifdef CONFIG_BLOCK
1117 void add_disk_randomness(struct gendisk *disk)
1118 {
1119 if (!disk || !disk->random)
1120 return;
1121 /* First major is 1, so we get >= 0x200 here. */
1122 add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
1123 }
1124 EXPORT_SYMBOL_GPL(add_disk_randomness);
1125
1126 void __cold rand_initialize_disk(struct gendisk *disk)
1127 {
1128 struct timer_rand_state *state;
1129
1130 /*
1131 * If kzalloc returns null, we just won't use that entropy
1132 * source.
1133 */
1134 state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
1135 if (state) {
1136 state->last_time = INITIAL_JIFFIES;
1137 disk->random = state;
1138 }
1139 }
1140 #endif
1141
1142 struct entropy_timer_state {
1143 unsigned long entropy;
1144 struct timer_list timer;
1145 unsigned int samples, samples_per_bit;
1146 };
1147
1148 /*
1149 * Each time the timer fires, we expect that we got an unpredictable
1150 * jump in the cycle counter. Even if the timer is running on another
1151 * CPU, the timer activity will be touching the stack of the CPU that is
1152 * generating entropy.
1153 *
1154 * Note that we don't re-arm the timer in the timer itself - we are
1155 * happy to be scheduled away, since that just makes the load more
1156 * complex, but we do not want the timer to keep ticking unless the
1157 * entropy loop is running.
1158 *
1159 * So the re-arming always happens in the entropy loop itself.
1160 */
1161 static void __cold entropy_timer(struct timer_list *timer)
1162 {
1163 struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
1164
1165 if (++state->samples == state->samples_per_bit) {
1166 credit_init_bits(1);
1167 state->samples = 0;
1168 }
1169 }
1170
1171 /*
1172 * If we have an actual cycle counter, see if we can
1173 * generate enough entropy with timing noise
1174 */
1175 static void __cold try_to_generate_entropy(void)
1176 {
1177 enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 30 };
1178 struct entropy_timer_state stack;
1179 unsigned int i, num_different = 0;
1180 unsigned long last = random_get_entropy();
1181
1182 for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
1183 stack.entropy = random_get_entropy();
1184 if (stack.entropy != last)
1185 ++num_different;
1186 last = stack.entropy;
1187 }
1188 stack.samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
1189 if (stack.samples_per_bit > MAX_SAMPLES_PER_BIT)
1190 return;
1191
1192 stack.samples = 0;
1193 timer_setup_on_stack(&stack.timer, entropy_timer, 0);
1194 while (!crng_ready() && !signal_pending(current)) {
1195 if (!timer_pending(&stack.timer))
1196 mod_timer(&stack.timer, jiffies + 1);
1197 mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
1198 schedule();
1199 stack.entropy = random_get_entropy();
1200 }
1201
1202 del_timer_sync(&stack.timer);
1203 destroy_timer_on_stack(&stack.timer);
1204 mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
1205 }
1206
1207
1208 /**********************************************************************
1209 *
1210 * Userspace reader/writer interfaces.
1211 *
1212 * getrandom(2) is the primary modern interface into the RNG and should
1213 * be used in preference to anything else.
1214 *
1215 * Reading from /dev/random has the same functionality as calling
1216 * getrandom(2) with flags=0. In earlier versions, however, it had
1217 * vastly different semantics and should therefore be avoided, to
1218 * prevent backwards compatibility issues.
1219 *
1220 * Reading from /dev/urandom has the same functionality as calling
1221 * getrandom(2) with flags=GRND_INSECURE. Because it does not block
1222 * waiting for the RNG to be ready, it should not be used.
1223 *
1224 * Writing to either /dev/random or /dev/urandom adds entropy to
1225 * the input pool but does not credit it.
1226 *
1227 * Polling on /dev/random indicates when the RNG is initialized, on
1228 * the read side, and when it wants new entropy, on the write side.
1229 *
1230 * Both /dev/random and /dev/urandom have the same set of ioctls for
1231 * adding entropy, getting the entropy count, zeroing the count, and
1232 * reseeding the crng.
1233 *
1234 **********************************************************************/
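/*
 * A sketch of the preferred userspace usage (hypothetical program, using
 * the glibc wrapper declared in <sys/random.h>):
 *
 *	#include <sys/random.h>
 *
 *	unsigned char key[32];
 *	if (getrandom(key, sizeof(key), 0) != sizeof(key))
 *		abort();
 */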
1235
1236 SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
1237 {
1238 struct iov_iter iter;
1239 struct iovec iov;
1240 int ret;
1241
1242 if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
1243 return -EINVAL;
1244
1245 /*
1246 * Requesting insecure and blocking randomness at the same time makes
1247 * no sense.
1248 */
1249 if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
1250 return -EINVAL;
1251
1252 if (!crng_ready() && !(flags & GRND_INSECURE)) {
1253 if (flags & GRND_NONBLOCK)
1254 return -EAGAIN;
1255 ret = wait_for_random_bytes();
1256 if (unlikely(ret))
1257 return ret;
1258 }
1259
1260 ret = import_single_range(READ, ubuf, len, &iov, &iter);
1261 if (unlikely(ret))
1262 return ret;
1263 return get_random_bytes_user(&iter);
1264 }
1265
1266 static __poll_t random_poll(struct file *file, poll_table *wait)
1267 {
1268 poll_wait(file, &crng_init_wait, wait);
1269 return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
1270 }
1271
1272 static ssize_t write_pool_user(struct iov_iter *iter)
1273 {
1274 u8 block[BLAKE2S_BLOCK_SIZE];
1275 ssize_t ret = 0;
1276 size_t copied;
1277
1278 if (unlikely(!iov_iter_count(iter)))
1279 return 0;
1280
1281 for (;;) {
1282 copied = copy_from_iter(block, sizeof(block), iter);
1283 ret += copied;
1284 mix_pool_bytes(block, copied);
1285 if (!iov_iter_count(iter) || copied != sizeof(block))
1286 break;
1287
1288 BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
1289 if (ret % PAGE_SIZE == 0) {
1290 if (signal_pending(current))
1291 break;
1292 cond_resched();
1293 }
1294 }
1295
1296 memzero_explicit(block, sizeof(block));
1297 return ret ? ret : -EFAULT;
1298 }
1299
1300 static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
1301 {
1302 return write_pool_user(iter);
1303 }
1304
1305 static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
1306 {
1307 static int maxwarn = 10;
1308
1309 /*
1310 * Opportunistically attempt to initialize the RNG on platforms that
1311 * have fast cycle counters, but don't (for now) require it to succeed.
1312 */
1313 if (!crng_ready())
1314 try_to_generate_entropy();
1315
1316 if (!crng_ready()) {
1317 if (!ratelimit_disable && maxwarn <= 0)
1318 ++urandom_warning.missed;
1319 else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
1320 --maxwarn;
1321 pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
1322 current->comm, iov_iter_count(iter));
1323 }
1324 }
1325
1326 return get_random_bytes_user(iter);
1327 }
1328
1329 static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
1330 {
1331 int ret;
1332
1333 ret = wait_for_random_bytes();
1334 if (ret != 0)
1335 return ret;
1336 return get_random_bytes_user(iter);
1337 }
1338
1339 static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1340 {
1341 int __user *p = (int __user *)arg;
1342 int ent_count;
1343
1344 switch (cmd) {
1345 case RNDGETENTCNT:
1346 /* Inherently racy, no point locking. */
1347 if (put_user(input_pool.init_bits, p))
1348 return -EFAULT;
1349 return 0;
1350 case RNDADDTOENTCNT:
1351 if (!capable(CAP_SYS_ADMIN))
1352 return -EPERM;
1353 if (get_user(ent_count, p))
1354 return -EFAULT;
1355 if (ent_count < 0)
1356 return -EINVAL;
1357 credit_init_bits(ent_count);
1358 return 0;
1359 case RNDADDENTROPY: {
1360 struct iov_iter iter;
1361 struct iovec iov;
1362 ssize_t ret;
1363 int len;
1364
1365 if (!capable(CAP_SYS_ADMIN))
1366 return -EPERM;
1367 if (get_user(ent_count, p++))
1368 return -EFAULT;
1369 if (ent_count < 0)
1370 return -EINVAL;
1371 if (get_user(len, p++))
1372 return -EFAULT;
1373 ret = import_single_range(WRITE, p, len, &iov, &iter);
1374 if (unlikely(ret))
1375 return ret;
1376 ret = write_pool_user(&iter);
1377 if (unlikely(ret < 0))
1378 return ret;
1379 /* Since we're crediting, enforce that it was all written into the pool. */
1380 if (unlikely(ret != len))
1381 return -EFAULT;
1382 credit_init_bits(ent_count);
1383 return 0;
1384 }
1385 case RNDZAPENTCNT:
1386 case RNDCLEARPOOL:
1387 /* No longer has any effect. */
1388 if (!capable(CAP_SYS_ADMIN))
1389 return -EPERM;
1390 return 0;
1391 case RNDRESEEDCRNG:
1392 if (!capable(CAP_SYS_ADMIN))
1393 return -EPERM;
1394 if (!crng_ready())
1395 return -ENODATA;
1396 crng_reseed();
1397 return 0;
1398 default:
1399 return -EINVAL;
1400 }
1401 }
1402
1403 static int random_fasync(int fd, struct file *filp, int on)
1404 {
1405 return fasync_helper(fd, filp, on, &fasync);
1406 }
1407
1408 const struct file_operations random_fops = {
1409 .read_iter = random_read_iter,
1410 .write_iter = random_write_iter,
1411 .poll = random_poll,
1412 .unlocked_ioctl = random_ioctl,
1413 .compat_ioctl = compat_ptr_ioctl,
1414 .fasync = random_fasync,
1415 .llseek = noop_llseek,
1416 .splice_read = generic_file_splice_read,
1417 .splice_write = iter_file_splice_write,
1418 };
1419
1420 const struct file_operations urandom_fops = {
1421 .read_iter = urandom_read_iter,
1422 .write_iter = random_write_iter,
1423 .unlocked_ioctl = random_ioctl,
1424 .compat_ioctl = compat_ptr_ioctl,
1425 .fasync = random_fasync,
1426 .llseek = noop_llseek,
1427 .splice_read = generic_file_splice_read,
1428 .splice_write = iter_file_splice_write,
1429 };
1430
1431
1432 /********************************************************************
1433 *
1434 * Sysctl interface.
1435 *
1436 * These are partly unused legacy knobs with dummy values to not break
1437 * userspace and partly still useful things. They are usually accessible
1438 * in /proc/sys/kernel/random/ and are as follows:
1439 *
1440 * - boot_id - a UUID representing the current boot.
1441 *
1442 * - uuid - a random UUID, different each time the file is read.
1443 *
1444 * - poolsize - the number of bits of entropy that the input pool can
1445 * hold, tied to the POOL_BITS constant.
1446 *
1447 * - entropy_avail - the number of bits of entropy currently in the
1448 * input pool. Always <= poolsize.
1449 *
1450 * - write_wakeup_threshold - the amount of entropy in the input pool
1451 * below which write polls to /dev/random will unblock, requesting
1452 * more entropy, tied to the POOL_READY_BITS constant. It is writable
1453 * to avoid breaking old userspaces, but writing to it does not
1454 * change any behavior of the RNG.
1455 *
1456 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
1457 * It is writable to avoid breaking old userspaces, but writing
1458 * to it does not change any behavior of the RNG.
1459 *
1460 ********************************************************************/
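/*
 * E.g., a hypothetical userspace reader of one of these knobs (sketch
 * only):
 *
 *	FILE *f = fopen("/proc/sys/kernel/random/boot_id", "r");
 *	char uuid[37];
 *	if (f && fscanf(f, "%36s", uuid) == 1)
 *		printf("boot_id: %s\n", uuid);
 */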
1461
1462 #ifdef CONFIG_SYSCTL
1463
1464 #include <linux/sysctl.h>
1465
1466 static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
1467 static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
1468 static int sysctl_poolsize = POOL_BITS;
1469 static u8 sysctl_bootid[UUID_SIZE];
1470
1471 /*
1472 * This function is used to return both the bootid UUID, and random
1473 * UUID. The difference is in whether table->data is NULL; if it is,
1474 * then a new UUID is generated and returned to the user.
1475 */
1476 static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
1477 size_t *lenp, loff_t *ppos)
1478 {
1479 u8 tmp_uuid[UUID_SIZE], *uuid;
1480 char uuid_string[UUID_STRING_LEN + 1];
1481 struct ctl_table fake_table = {
1482 .data = uuid_string,
1483 .maxlen = UUID_STRING_LEN
1484 };
1485
1486 if (write)
1487 return -EPERM;
1488
1489 uuid = table->data;
1490 if (!uuid) {
1491 uuid = tmp_uuid;
1492 generate_random_uuid(uuid);
1493 } else {
1494 static DEFINE_SPINLOCK(bootid_spinlock);
1495
1496 spin_lock(&bootid_spinlock);
1497 if (!uuid[8])
1498 generate_random_uuid(uuid);
1499 spin_unlock(&bootid_spinlock);
1500 }
1501
1502 snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
1503 return proc_dostring(&fake_table, 0, buf, lenp, ppos);
1504 }
1505
1506 /* The same as proc_dointvec, but writes don't change anything. */
1507 static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
1508 size_t *lenp, loff_t *ppos)
1509 {
1510 return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
1511 }
1512
1513 static struct ctl_table random_table[] = {
1514 {
1515 .procname = "poolsize",
1516 .data = &sysctl_poolsize,
1517 .maxlen = sizeof(int),
1518 .mode = 0444,
1519 .proc_handler = proc_dointvec,
1520 },
1521 {
1522 .procname = "entropy_avail",
1523 .data = &input_pool.init_bits,
1524 .maxlen = sizeof(int),
1525 .mode = 0444,
1526 .proc_handler = proc_dointvec,
1527 },
1528 {
1529 .procname = "write_wakeup_threshold",
1530 .data = &sysctl_random_write_wakeup_bits,
1531 .maxlen = sizeof(int),
1532 .mode = 0644,
1533 .proc_handler = proc_do_rointvec,
1534 },
1535 {
1536 .procname = "urandom_min_reseed_secs",
1537 .data = &sysctl_random_min_urandom_seed,
1538 .maxlen = sizeof(int),
1539 .mode = 0644,
1540 .proc_handler = proc_do_rointvec,
1541 },
1542 {
1543 .procname = "boot_id",
1544 .data = &sysctl_bootid,
1545 .mode = 0444,
1546 .proc_handler = proc_do_uuid,
1547 },
1548 {
1549 .procname = "uuid",
1550 .mode = 0444,
1551 .proc_handler = proc_do_uuid,
1552 },
1553 { }
1554 };
1555
1556 /*
1557 * random_init() is called before sysctl_init(),
1558 * so we cannot call register_sysctl_init() in random_init()
1559 */
1560 static int __init random_sysctls_init(void)
1561 {
1562 register_sysctl_init("kernel/random", random_table);
1563 return 0;
1564 }
1565 device_initcall(random_sysctls_init);
1566 #endif
1567