1 /*
2  * random.c -- A strong random number generator
3  *
4  * Version 1.89, last modified 19-Sep-99
5  *
6  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
7  * rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, and the entire permission notice in its entirety,
14  *    including the disclaimer of warranties.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. The name of the author may not be used to endorse or promote
19  *    products derived from this software without specific prior
20  *    written permission.
21  *
22  * ALTERNATIVELY, this product may be distributed under the terms of
23  * the GNU General Public License, in which case the provisions of the GPL are
24  * required INSTEAD OF the above restrictions.  (This clause is
25  * necessary due to a potential bad interaction between the GPL and
26  * the restrictions contained in a BSD-style copyright.)
27  *
28  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
29  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
30  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
31  * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
32  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
34  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
35  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
36  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
38  * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
39  * DAMAGE.
40  */
41 
42 /*
43  * (now, with legal B.S. out of the way.....)
44  *
45  * This routine gathers environmental noise from device drivers, etc.,
46  * and returns good random numbers, suitable for cryptographic use.
47  * Besides the obvious cryptographic uses, these numbers are also good
48  * for seeding TCP sequence numbers, and other places where it is
49  * desirable to have numbers which are not only random, but hard to
50  * predict by an attacker.
51  *
52  * Theory of operation
53  * ===================
54  *
55  * Computers are very predictable devices.  Hence it is extremely hard
56  * to produce truly random numbers on a computer --- as opposed to
57  * pseudo-random numbers, which can easily be generated by an
58  * algorithm.  Unfortunately, it is very easy for attackers to guess
59  * the output sequence of a pseudo-random number generator, and for
60  * some applications this is not acceptable.  So instead, we must try to
61  * gather "environmental noise" from the computer's environment, which
62  * must be hard for outside attackers to observe, and use that to
63  * generate random numbers.  In a Unix environment, this is best done
64  * from inside the kernel.
65  *
66  * Sources of randomness from the environment include inter-keyboard
67  * timings, inter-interrupt timings from some interrupts, and other
68  * events which are both (a) non-deterministic and (b) hard for an
69  * outside observer to measure.  Randomness from these sources is
70  * added to an "entropy pool", which is mixed using a CRC-like function.
71  * This is not cryptographically strong, but it is adequate assuming
72  * the randomness is not chosen maliciously, and it is fast enough that
73  * the overhead of doing it on every interrupt is very reasonable.
74  * As random bytes are mixed into the entropy pool, the routines keep
75  * an *estimate* of how many bits of randomness have been stored into
76  * the random number generator's internal state.
77  *
78  * When random bytes are desired, they are obtained by taking the SHA
79  * hash of the contents of the "entropy pool".  The SHA hash avoids
80  * exposing the internal state of the entropy pool.  It is believed to
81  * be computationally infeasible to derive any useful information
82  * about the input of SHA from its output.  Even if it is possible to
83  * analyze SHA in some clever way, as long as the amount of data
84  * returned from the generator is less than the inherent entropy in
85  * the pool, the output data is totally unpredictable.  For this
86  * reason, the routine decreases its internal estimate of how many
87  * bits of "true randomness" are contained in the entropy pool as it
88  * outputs random numbers.
89  *
90  * If this estimate goes to zero, the routine can still generate
91  * random numbers; however, an attacker may (at least in theory) be
92  * able to infer the future output of the generator from prior
93  * outputs.  This requires successful cryptanalysis of SHA, which is
94  * not believed to be feasible, but there is a remote possibility.
95  * Nonetheless, these numbers should be useful for the vast majority
96  * of purposes.
97  *
98  * Exported interfaces ---- output
99  * ===============================
100  *
101  * There are three exported interfaces; the first is designed to be
102  * used from within the kernel:
103  *
104  * 	void get_random_bytes(void *buf, int nbytes);
105  *
106  * This interface will return the requested number of random bytes,
107  * and place them in the requested buffer.
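 *
 * For illustration only, here is a minimal in-kernel usage sketch; the
 * helper name and buffer size are made up for the example, and the only
 * interface assumed is get_random_bytes() itself, declared in
 * <linux/random.h>:
 *
 *	#include <linux/random.h>
 *
 *	static void example_make_cookie(unsigned char cookie[16])
 *	{
 *		get_random_bytes(cookie, 16);
 *	}
 *
 * As the code below shows, get_random_bytes() never blocks; when the
 * entropy estimate is low it behaves like /dev/urandom rather than
 * /dev/random.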
108  *
109  * The two other interfaces are two character devices /dev/random and
110  * /dev/urandom.  /dev/random is suitable for use when very high
111  * quality randomness is desired (for example, for key generation or
112  * one-time pads), as it will return at most as many bits of
113  * randomness as the random number generator estimates are currently
114  * contained in the entropy pool.
115  *
116  * The /dev/urandom device does not have this limit, and will return
117  * as many bytes as are requested.  As more and more random bytes are
118  * requested without giving time for the entropy pool to recharge,
119  * this will result in random numbers that are merely cryptographically
120  * strong.  For many applications, however, this is acceptable.
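 *
 * From user space, /dev/urandom is read like any other character
 * device.  A minimal sketch (error handling trimmed; the function name
 * and buffer handling are only illustrative):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int example_fill_random(unsigned char *buf, size_t len)
 *	{
 *		int fd = open("/dev/urandom", O_RDONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		n = read(fd, buf, len);
 *		close(fd);
 *		return n == (ssize_t) len ? 0 : -1;
 *	}
 *
 * Reading /dev/random works the same way, except that the read may
 * block (or fail with EAGAIN under O_NONBLOCK) until the driver's
 * entropy estimate is high enough, and it may return fewer bytes than
 * requested.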
121  *
122  * Exported interfaces ---- input
123  * ==============================
124  *
125  * The current exported interfaces for gathering environmental noise
126  * from the devices are:
127  *
128  * 	void add_keyboard_randomness(unsigned char scancode);
129  * 	void add_mouse_randomness(__u32 mouse_data);
130  * 	void add_interrupt_randomness(int irq);
131  * 	void add_blkdev_randomness(int major);
132  *
133  * add_keyboard_randomness() uses the inter-keypress timing, as well
134  * as the scancode, as random inputs into the "entropy pool".
135  *
136  * add_mouse_randomness() uses the mouse interrupt timing, as well as
137  * the reported position of the mouse from the hardware.
138  *
139  * add_interrupt_randomness() uses the inter-interrupt timing as random
140  * inputs to the entropy pool.  Note that not all interrupts are good
141  * sources of randomness!  For example, the timer interrupt is not a
142  * good choice, because the periodicity of the interrupts is too
143  * regular, and hence predictable to an attacker.  Disk interrupts are
144  * a better source, since the timing of disk interrupts is more
145  * unpredictable.
146  *
147  * add_blkdev_randomness() uses the completion time of block requests.
148  *
149  * All of these routines try to estimate how many bits of randomness a
150  * particular randomness source provides.  They do this by keeping
151  * track of the first and second order deltas of the event timings.
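 *
 * As an illustration (the handler name and surrounding driver code are
 * invented for the example; only the calls exported by this file are
 * assumed), a driver whose interrupt timing should be sampled first
 * makes sure rand_initialize_irq(irq) has been called for its IRQ
 * (otherwise add_interrupt_randomness() is a no-op, as the code below
 * shows), and then feeds the pool from its handler:
 *
 *	static void example_intr(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		... service the hardware ...
 *		add_interrupt_randomness(irq);
 *	}
 *
 * This is cheap in interrupt context: add_interrupt_randomness() only
 * batches a timestamp and the IRQ number, and the heavier mixing is
 * deferred to a timer-queue task (see the batching code below).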
152  *
153  * Ensuring unpredictability at system startup
154  * ============================================
155  *
156  * When any operating system starts up, it will go through a sequence
157  * of actions that are fairly predictable by an adversary, especially
158  * if the start-up does not involve interaction with a human operator.
159  * This reduces the actual number of bits of unpredictability in the
160  * entropy pool below the value in entropy_count.  In order to
161  * counteract this effect, it helps to carry information in the
162  * entropy pool across shut-downs and start-ups.  To do this, put the
163  * following lines in an appropriate script which is run during the boot
164  * sequence:
165  *
166  *	echo "Initializing random number generator..."
167  *	random_seed=/var/run/random-seed
168  *	# Carry a random seed from start-up to start-up
169  *	# Load and then save the whole entropy pool
170  *	if [ -f $random_seed ]; then
171  *		cat $random_seed >/dev/urandom
172  *	else
173  *		touch $random_seed
174  *	fi
175  *	chmod 600 $random_seed
176  *	poolfile=/proc/sys/kernel/random/poolsize
177  *	[ -r $poolfile ] && bytes=`cat $poolfile` || bytes=512
178  *	dd if=/dev/urandom of=$random_seed count=1 bs=$bytes
179  *
180  * and the following lines in an appropriate script which is run as
181  * the system is shut down:
182  *
183  *	# Carry a random seed from shut-down to start-up
184  *	# Save the whole entropy pool
185  *	echo "Saving random seed..."
186  *	random_seed=/var/run/random-seed
187  *	touch $random_seed
188  *	chmod 600 $random_seed
189  *	poolfile=/proc/sys/kernel/random/poolsize
190  *	[ -r $poolfile ] && bytes=`cat $poolfile` || bytes=512
191  *	dd if=/dev/urandom of=$random_seed count=1 bs=$bytes
192  *
193  * For example, on most modern systems using the System V init
194  * scripts, such code fragments would be found in
195  * /etc/rc.d/init.d/random.  On older Linux systems, the correct script
196  * location might be in /etc/rc.d/rc.local or /etc/rc.d/rc.0.
197  *
198  * Effectively, these commands cause the contents of the entropy pool
199  * to be saved at shut-down time and reloaded into the entropy pool at
200  * start-up.  (The 'dd' added to the bootup script is to
201  * make sure that the saved seed file is different for every start-up,
202  * even if the system crashes without executing rc.0.)  Even with
203  * complete knowledge of the start-up activities, predicting the state
204  * of the entropy pool requires knowledge of the previous history of
205  * the system.
206  *
207  * Configuring the /dev/random driver under Linux
208  * ==============================================
209  *
210  * The /dev/random driver under Linux uses minor numbers 8 and 9 of
211  * the /dev/mem major number (#1).  So if your system does not have
212  * /dev/random and /dev/urandom created already, they can be created
213  * by using the commands:
214  *
215  * 	mknod /dev/random c 1 8
216  * 	mknod /dev/urandom c 1 9
217  *
218  * Acknowledgements:
219  * =================
220  *
221  * Ideas for constructing this random number generator were derived
222  * from Pretty Good Privacy's random number generator, and from private
223  * discussions with Phil Karn.  Colin Plumb provided a faster random
224  * number generator, which sped up the mixing function of the entropy
225  * pool, taken from PGPfone.  Dale Worley has also contributed many
226  * useful ideas and suggestions to improve this driver.
227  *
228  * Any flaws in the design are solely my responsibility, and should
229  * not be attributed to Phil, Colin, or any of the authors of PGP.
230  *
231  * The code for SHA transform was taken from Peter Gutmann's
232  * implementation, which has been placed in the public domain.
233  * The code for MD5 transform was taken from Colin Plumb's
234  * implementation, which has been placed in the public domain.
235  * The MD5 cryptographic checksum was devised by Ronald Rivest, and is
236  * documented in RFC 1321, "The MD5 Message Digest Algorithm".
237  *
238  * Further background information on this topic may be obtained from
239  * RFC 1750, "Randomness Recommendations for Security", by Donald
240  * Eastlake, Steve Crocker, and Jeff Schiller.
241  */
242 
243 #include <linux/utsname.h>
244 #include <linux/config.h>
245 #include <linux/module.h>
246 #include <linux/kernel.h>
247 #include <linux/major.h>
248 #include <linux/string.h>
249 #include <linux/fcntl.h>
250 #include <linux/slab.h>
251 #include <linux/random.h>
252 #include <linux/poll.h>
253 #include <linux/init.h>
254 #include <linux/interrupt.h>
255 #include <linux/spinlock.h>
256 
257 #include <asm/processor.h>
258 #include <asm/uaccess.h>
259 #include <asm/irq.h>
260 #include <asm/io.h>
261 
262 /*
263  * Configuration information
264  */
265 #define DEFAULT_POOL_SIZE 512
266 #define SECONDARY_POOL_SIZE 128
267 #define BATCH_ENTROPY_SIZE 256
268 #define USE_SHA
269 
270 /*
271  * The minimum number of bits of entropy before we wake up a read on
272  * /dev/random.  Should always be at least 8, or at least 1 byte.
273  */
274 static int random_read_wakeup_thresh = 8;
275 
276 /*
277  * If the entropy count falls under this number of bits, then we
278  * should wake up processes which are selecting or polling on write
279  * access to /dev/random.
280  */
281 static int random_write_wakeup_thresh = 128;
282 
283 /*
284  * A pool of size .poolwords is stirred with a primitive polynomial
285  * of degree .poolwords over GF(2).  The taps for various sizes are
286  * defined below.  They are chosen to be evenly spaced (minimum RMS
287  * distance from evenly spaced; the numbers in the comments are a
288  * scaled squared error sum) except for the last tap, which is 1 to
289  * get the twisting happening as fast as possible.
290  */
291 static struct poolinfo {
292 	int	poolwords;
293 	int	tap1, tap2, tap3, tap4, tap5;
294 } poolinfo_table[] = {
295 	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
296 	{ 2048,	1638,	1231,	819,	411,	1 },
297 
298 	/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
299 	{ 1024,	817,	615,	412,	204,	1 },
300 #if 0				/* Alternate polynomial */
301 	/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
302 	{ 1024,	819,	616,	410,	207,	2 },
303 #endif
304 
305 	/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
306 	{ 512,	411,	308,	208,	104,	1 },
307 #if 0				/* Alternates */
308 	/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
309 	{ 512,	409,	307,	206,	102,	2 },
310 	/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
311 	{ 512,	409,	309,	205,	103,	2 },
312 #endif
313 
314 	/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
315 	{ 256,	205,	155,	101,	52,	1 },
316 
317 	/* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
318 	{ 128,	103,	76,	51,	25,	1 },
319 #if 0	/* Alternate polynomial */
320 	/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
321 	{ 128,	103,	78,	51,	27,	2 },
322 #endif
323 
324 	/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
325 	{ 64,	52,	39,	26,	14,	1 },
326 
327 	/* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
328 	{ 32,	26,	20,	14,	7,	1 },
329 
330 	{ 0,	0,	0,	0,	0,	0 },
331 };
332 
333 #define POOLBITS	poolwords*32
334 #define POOLBYTES	poolwords*4
335 
336 /*
337  * For the purposes of better mixing, we use the CRC-32 polynomial as
338  * well to make a twisted Generalized Feedback Shift Register
339  *
340  * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
341  * Transactions on Modeling and Computer Simulation 2(3):179-194.
342  * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
343  * II.  ACM Transactions on Modeling and Computer Simulation 4:254-266)
344  *
345  * Thanks to Colin Plumb for suggesting this.
346  *
347  * We have not analyzed the resultant polynomial to prove it primitive;
348  * in fact it almost certainly isn't.  Nonetheless, the irreducible factors
349  * of a random large-degree polynomial over GF(2) are more than large enough
350  * that periodicity is not a concern.
351  *
352  * The input hash is much less sensitive than the output hash.  All
353  * that we want of it is that it be a good non-cryptographic hash;
354  * i.e. it not produce collisions when fed "random" data of the sort
355  * we expect to see.  As long as the pool state differs for different
356  * inputs, we have preserved the input entropy and done a good job.
357  * The fact that an intelligent attacker can construct inputs that
358  * will produce controlled alterations to the pool's state is not
359  * important because we don't consider such inputs to contribute any
360  * randomness.  The only property we need with respect to them is that
361  * the attacker can't increase his/her knowledge of the pool's state.
362  * Since all additions are reversible (knowing the final state and the
363  * input, you can reconstruct the initial state), if an attacker has
364  * any uncertainty about the initial state, he/she can only shuffle
365  * that uncertainty about, but never cause any collisions (which would
366  * decrease the uncertainty).
367  *
368  * The chosen system lets the state of the pool be (essentially) the input
369  * modulo the generator polynomial.  Now, for random primitive polynomials,
370  * this is a universal class of hash functions, meaning that the chance
371  * of a collision is limited by the attacker's knowledge of the generator
372  * polynomial, so if it is chosen at random, an attacker can never force
373  * a collision.  Here, we use a fixed polynomial, but we *can* assume that
374  * ###--> it is unknown to the processes generating the input entropy. <-###
375  * Because of this important property, this is a good, collision-resistant
376  * hash; hash collisions will occur no more often than chance.
377  */
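
/*
 * To make the mixing step above concrete, here is a stand-alone
 * restatement of the per-word twist performed by add_entropy_words()
 * below, written against a toy 32-word pool.  The pool size, the tap
 * offsets (taken from the 32-word row of poolinfo_table above) and the
 * names are purely illustrative:
 *
 *	#define TOY_WORDS 32
 *	static __u32 toy_pool[TOY_WORDS];
 *	static unsigned toy_ptr;
 *
 *	static void toy_mix(__u32 w)
 *	{
 *		static const __u32 twist[8] = {
 *			0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
 *			0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
 *		unsigned i = toy_ptr = (toy_ptr - 1) & (TOY_WORDS - 1);
 *
 *		w ^= toy_pool[(i + 26) & (TOY_WORDS - 1)];
 *		w ^= toy_pool[(i + 20) & (TOY_WORDS - 1)];
 *		w ^= toy_pool[(i + 14) & (TOY_WORDS - 1)];
 *		w ^= toy_pool[(i +  7) & (TOY_WORDS - 1)];
 *		w ^= toy_pool[(i +  1) & (TOY_WORDS - 1)];
 *		w ^= toy_pool[i];
 *		toy_pool[i] = (w >> 3) ^ twist[w & 7];
 *	}
 *
 * The real add_entropy_words() additionally rotates each incoming word
 * by a varying amount so that successive inputs land on different bit
 * positions of the pool words.
 */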
378 
379 /*
380  * Linux 2.2 compatibility
381  */
382 #ifndef DECLARE_WAITQUEUE
383 #define DECLARE_WAITQUEUE(WAIT, PTR)	struct wait_queue WAIT = { PTR, NULL }
384 #endif
385 #ifndef DECLARE_WAIT_QUEUE_HEAD
386 #define DECLARE_WAIT_QUEUE_HEAD(WAIT) struct wait_queue *WAIT
387 #endif
388 
389 /*
390  * Static global variables
391  */
392 static struct entropy_store *random_state; /* The default global store */
393 static struct entropy_store *sec_random_state; /* secondary store */
394 static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
395 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
396 
397 /*
398  * Forward procedure declarations
399  */
400 #ifdef CONFIG_SYSCTL
401 static void sysctl_init_random(struct entropy_store *random_state);
402 #endif
403 
404 /*****************************************************************
405  *
406  * Utility functions, with some ASM defined functions for speed
407  * purposes
408  *
409  *****************************************************************/
410 
411 /*
412  * Unfortunately, while the GCC optimizer for the i386 understands how
413  * to optimize a static rotate left of x bits, it doesn't know how to
414  * deal with a variable rotate of x bits.  So we use a bit of asm magic.
415  */
416 #if (!defined (__i386__))
417 static inline __u32 rotate_left(int i, __u32 word)
418 {
419 	return (word << i) | (word >> (32 - i));
420 
421 }
422 #else
423 static inline __u32 rotate_left(int i, __u32 word)
424 {
425 	__asm__("roll %%cl,%0"
426 		:"=r" (word)
427 		:"0" (word),"c" (i));
428 	return word;
429 }
430 #endif
431 
432 /*
433  * More asm magic....
434  *
435  * For entropy estimation, we need to do an integral base 2
436  * logarithm.
437  *
438  * Note the "12bits" suffix - this is used for numbers between
439  * 0 and 4095 only.  This allows a few shortcuts.
440  */
441 #if 0	/* Slow but clear version */
442 static inline __u32 int_ln_12bits(__u32 word)
443 {
444 	__u32 nbits = 0;
445 
446 	while (word >>= 1)
447 		nbits++;
448 	return nbits;
449 }
450 #else	/* Faster (more clever) version, courtesy Colin Plumb */
451 static inline __u32 int_ln_12bits(__u32 word)
452 {
453 	/* Smear msbit right to make an n-bit mask */
454 	word |= word >> 8;
455 	word |= word >> 4;
456 	word |= word >> 2;
457 	word |= word >> 1;
458 	/* Remove one bit to make this a logarithm */
459 	word >>= 1;
460 	/* Count the bits set in the word */
461 	word -= (word >> 1) & 0x555;
462 	word = (word & 0x333) + ((word >> 2) & 0x333);
463 	word += (word >> 4);
464 	word += (word >> 8);
465 	return word & 15;
466 }
467 #endif
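
/*
 * A few worked values for either version above: int_ln_12bits(1) == 0,
 * int_ln_12bits(12) == 3, int_ln_12bits(2048) == 11; i.e. it returns
 * the integer part of log2() of its (at most 12-bit) argument.
 */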
468 
469 #if 0
470 #define DEBUG_ENT(fmt, arg...) printk(KERN_DEBUG "random: " fmt, ## arg)
471 #else
472 #define DEBUG_ENT(fmt, arg...) do {} while (0)
473 #endif
474 
475 /**********************************************************************
476  *
477  * OS independent entropy store.   Here are the functions which handle
478  * storing entropy in an entropy pool.
479  *
480  **********************************************************************/
481 
482 struct entropy_store {
483 	unsigned	add_ptr;
484 	int		entropy_count;
485 	int		input_rotate;
486 	int		extract_count;
487 	struct poolinfo poolinfo;
488 	__u32		*pool;
489 };
490 
491 /*
492  * Initialize the entropy store.  The input argument is the size of
493  * the random pool.
494  *
495  * Returns a negative error if there is a problem.
496  */
497 static int create_entropy_store(int size, struct entropy_store **ret_bucket)
498 {
499 	struct	entropy_store	*r;
500 	struct	poolinfo	*p;
501 	int	poolwords;
502 
503 	poolwords = (size + 3) / 4; /* Convert bytes->words */
504 	/* The pool size must be a multiple of 16 32-bit words */
505 	poolwords = ((poolwords + 15) / 16) * 16;
506 
507 	for (p = poolinfo_table; p->poolwords; p++) {
508 		if (poolwords == p->poolwords)
509 			break;
510 	}
511 	if (p->poolwords == 0)
512 		return -EINVAL;
513 
514 	r = kmalloc(sizeof(struct entropy_store), GFP_KERNEL);
515 	if (!r)
516 		return -ENOMEM;
517 
518 	memset (r, 0, sizeof(struct entropy_store));
519 	r->poolinfo = *p;
520 
521 	r->pool = kmalloc(POOLBYTES, GFP_KERNEL);
522 	if (!r->pool) {
523 		kfree(r);
524 		return -ENOMEM;
525 	}
526 	memset(r->pool, 0, POOLBYTES);
527 	*ret_bucket = r;
528 	return 0;
529 }
530 
531 /* Clear the entropy pool and associated counters. */
532 static void clear_entropy_store(struct entropy_store *r)
533 {
534 	r->add_ptr = 0;
535 	r->entropy_count = 0;
536 	r->input_rotate = 0;
537 	r->extract_count = 0;
538 	memset(r->pool, 0, r->poolinfo.POOLBYTES);
539 }
540 
541 static void free_entropy_store(struct entropy_store *r)
542 {
543 	if (r->pool)
544 		kfree(r->pool);
545 	kfree(r);
546 }
547 
548 /*
549  * This function adds a byte into the entropy "pool".  It does not
550  * update the entropy estimate.  The caller should call
551  * credit_entropy_store if this is appropriate.
552  *
553  * The pool is stirred with a primitive polynomial of the appropriate
554  * degree, and then twisted.  We twist by three bits at a time because
555  * it's cheap to do so and helps slightly in the expected case where
556  * the entropy is concentrated in the low-order bits.
557  */
558 static void add_entropy_words(struct entropy_store *r, const __u32 *in,
559 			      int nwords)
560 {
561 	static __u32 const twist_table[8] = {
562 		         0, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
563 		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
564 	unsigned i;
565 	int new_rotate;
566 	int wordmask = r->poolinfo.poolwords - 1;
567 	__u32 w;
568 
569 	while (nwords--) {
570 		w = rotate_left(r->input_rotate, *in++);
571 		i = r->add_ptr = (r->add_ptr - 1) & wordmask;
572 		/*
573 		 * Normally, we add 7 bits of rotation to the pool.
574 		 * At the beginning of the pool, add an extra 7 bits
575 		 * rotation, so that successive passes spread the
576 		 * input bits across the pool evenly.
577 		 */
578 		new_rotate = r->input_rotate + 14;
579 		if (i)
580 			new_rotate = r->input_rotate + 7;
581 		r->input_rotate = new_rotate & 31;
582 
583 		/* XOR in the various taps */
584 		w ^= r->pool[(i + r->poolinfo.tap1) & wordmask];
585 		w ^= r->pool[(i + r->poolinfo.tap2) & wordmask];
586 		w ^= r->pool[(i + r->poolinfo.tap3) & wordmask];
587 		w ^= r->pool[(i + r->poolinfo.tap4) & wordmask];
588 		w ^= r->pool[(i + r->poolinfo.tap5) & wordmask];
589 		w ^= r->pool[i];
590 		r->pool[i] = (w >> 3) ^ twist_table[w & 7];
591 	}
592 }
593 
594 /*
595  * Credit (or debit) the entropy store with n bits of entropy
596  */
597 static void credit_entropy_store(struct entropy_store *r, int nbits)
598 {
599 	if (r->entropy_count + nbits < 0) {
600 		DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
601 			  r->entropy_count, nbits);
602 		r->entropy_count = 0;
603 	} else if (r->entropy_count + nbits > r->poolinfo.POOLBITS) {
604 		r->entropy_count = r->poolinfo.POOLBITS;
605 	} else {
606 		r->entropy_count += nbits;
607 		if (nbits)
608 			DEBUG_ENT("%s added %d bits, now %d\n",
609 				  r == sec_random_state ? "secondary" :
610 				  r == random_state ? "primary" : "unknown",
611 				  nbits, r->entropy_count);
612 	}
613 }
614 
615 /**********************************************************************
616  *
617  * Entropy batch input management
618  *
619  * We batch entropy to be added to avoid increasing interrupt latency
620  *
621  **********************************************************************/
622 
623 static __u32	*batch_entropy_pool;
624 static int	*batch_entropy_credit;
625 static int	batch_max;
626 static int	batch_head, batch_tail;
627 static struct tq_struct	batch_tqueue;
628 static void batch_entropy_process(void *private_);
629 
630 /* note: the size must be a power of 2 */
631 static int __init batch_entropy_init(int size, struct entropy_store *r)
632 {
633 	batch_entropy_pool = kmalloc(2*size*sizeof(__u32), GFP_KERNEL);
634 	if (!batch_entropy_pool)
635 		return -1;
636 	batch_entropy_credit =kmalloc(size*sizeof(int), GFP_KERNEL);
637 	if (!batch_entropy_credit) {
638 		kfree(batch_entropy_pool);
639 		return -1;
640 	}
641 	batch_head = batch_tail = 0;
642 	batch_max = size;
643 	batch_tqueue.routine = batch_entropy_process;
644 	batch_tqueue.data = r;
645 	return 0;
646 }
647 
648 /*
649  * Changes to the entropy data are put into a queue rather than being added to
650  * the entropy counts directly.  This is presumably to avoid doing heavy
651  * hashing calculations during an interrupt in add_timer_randomness().
652  * Instead, the entropy is only added to the pool once per timer tick.
653  */
654 void batch_entropy_store(u32 a, u32 b, int num)
655 {
656 	int	new;
657 
658 	if (!batch_max)
659 		return;
660 
661 	batch_entropy_pool[2*batch_head] = a;
662 	batch_entropy_pool[(2*batch_head) + 1] = b;
663 	batch_entropy_credit[batch_head] = num;
664 
665 	new = (batch_head+1) & (batch_max-1);
666 	if (new != batch_tail) {
667 		queue_task(&batch_tqueue, &tq_timer);
668 		batch_head = new;
669 	} else {
670 		DEBUG_ENT("batch entropy buffer full\n");
671 	}
672 }
673 
674 /*
675  * Flush out the accumulated entropy operations, adding entropy to the passed
676  * store (normally random_state).  If that store has enough entropy, alternate
677  * between randomizing the data of the primary and secondary stores.
678  */
679 static void batch_entropy_process(void *private_)
680 {
681 	struct entropy_store *r	= (struct entropy_store *) private_, *p;
682 	int max_entropy = r->poolinfo.POOLBITS;
683 
684 	if (!batch_max)
685 		return;
686 
687 	p = r;
688 	while (batch_head != batch_tail) {
689 		if (r->entropy_count >= max_entropy) {
690 			r = (r == sec_random_state) ?	random_state :
691 							sec_random_state;
692 			max_entropy = r->poolinfo.POOLBITS;
693 		}
694 		add_entropy_words(r, batch_entropy_pool + 2*batch_tail, 2);
695 		credit_entropy_store(r, batch_entropy_credit[batch_tail]);
696 		batch_tail = (batch_tail+1) & (batch_max-1);
697 	}
698 	if (p->entropy_count >= random_read_wakeup_thresh)
699 		wake_up_interruptible(&random_read_wait);
700 }
701 
702 /*********************************************************************
703  *
704  * Entropy input management
705  *
706  *********************************************************************/
707 
708 /* There is one of these per entropy source */
709 struct timer_rand_state {
710 	__u32		last_time;
711 	__s32		last_delta,last_delta2;
712 	int		dont_count_entropy:1;
713 };
714 
715 static struct timer_rand_state keyboard_timer_state;
716 static struct timer_rand_state mouse_timer_state;
717 static struct timer_rand_state extract_timer_state;
718 #ifndef CONFIG_ARCH_S390
719 static struct timer_rand_state *irq_timer_state[NR_IRQS];
720 #endif
721 static struct timer_rand_state *blkdev_timer_state[MAX_BLKDEV];
722 
723 /*
724  * This function adds entropy to the entropy "pool" by using timing
725  * delays.  It uses the timer_rand_state structure to make an estimate
726  * of how many bits of entropy this call has added to the pool.
727  *
728  * The number "num" is also added to the pool - it should somehow describe
729  * the type of event which just happened.  This is currently 0-255 for
730  * keyboard scan codes, and 256 upwards for interrupts.
731  * On the i386, this is assumed to be at most 16 bits, and the high bits
732  * are used for a high-resolution timer.
733  *
734  */
735 static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
736 {
737 	__u32		time;
738 	__s32		delta, delta2, delta3;
739 	int		entropy = 0;
740 
741 #if defined (__i386__)
742 	if (cpu_has_tsc) {
743 		__u32 high;
744 		rdtsc(time, high);
745 		num ^= high;
746 	} else {
747 		time = jiffies;
748 	}
749 #elif defined (__x86_64__)
750 	__u32 high;
751 	rdtsc(time, high);
752 	num ^= high;
753 #elif defined (__sparc_v9__)
754 	unsigned long tick = tick_ops->get_tick();
755 
756 	time = (unsigned int) tick;
757 	num ^= (tick >> 32UL);
758 #else
759 	time = jiffies;
760 #endif
761 
762 	/*
763 	 * Calculate number of bits of randomness we probably added.
764 	 * We take into account the first, second and third-order deltas
765 	 * in order to make our estimate.
766 	 */
767 	if (!state->dont_count_entropy) {
768 		delta = time - state->last_time;
769 		state->last_time = time;
770 
771 		delta2 = delta - state->last_delta;
772 		state->last_delta = delta;
773 
774 		delta3 = delta2 - state->last_delta2;
775 		state->last_delta2 = delta2;
776 
777 		if (delta < 0)
778 			delta = -delta;
779 		if (delta2 < 0)
780 			delta2 = -delta2;
781 		if (delta3 < 0)
782 			delta3 = -delta3;
783 		if (delta > delta2)
784 			delta = delta2;
785 		if (delta > delta3)
786 			delta = delta3;
787 
788 		/*
789 		 * delta is now minimum absolute delta.
790 		 * Round down by 1 bit on general principles,
791 		 * and limit entropy estimate to 12 bits.
792 		 */
793 		delta >>= 1;
794 		delta &= (1 << 12) - 1;
795 
796 		entropy = int_ln_12bits(delta);
797 	}
798 	batch_entropy_store(num, time, entropy);
799 }
800 
801 #ifndef CONFIG_ARCH_S390
802 void add_keyboard_randomness(unsigned char scancode)
803 {
804 	static unsigned char last_scancode;
805 	/* ignore autorepeat (multiple key down w/o key up) */
806 	if (scancode != last_scancode) {
807 		last_scancode = scancode;
808 		add_timer_randomness(&keyboard_timer_state, scancode);
809 	}
810 }
811 
812 void add_mouse_randomness(__u32 mouse_data)
813 {
814 	add_timer_randomness(&mouse_timer_state, mouse_data);
815 }
816 
817 void add_interrupt_randomness(int irq)
818 {
819 	if (irq >= NR_IRQS || irq_timer_state[irq] == 0)
820 		return;
821 
822 	add_timer_randomness(irq_timer_state[irq], 0x100+irq);
823 }
824 #endif
825 
826 void add_blkdev_randomness(int major)
827 {
828 	if (major >= MAX_BLKDEV)
829 		return;
830 
831 	if (blkdev_timer_state[major] == 0) {
832 		rand_initialize_blkdev(major, GFP_ATOMIC);
833 		if (blkdev_timer_state[major] == 0)
834 			return;
835 	}
836 
837 	add_timer_randomness(blkdev_timer_state[major], 0x200+major);
838 }
839 
840 /******************************************************************
841  *
842  * Hash function definition
843  *
844  *******************************************************************/
845 
846 /*
847  * This chunk of code defines a function
848  * void HASH_TRANSFORM(__u32 digest[HASH_BUFFER_SIZE + HASH_EXTRA_SIZE],
849  * 		__u32 const data[16])
850  *
851  * The function hashes the input data to produce a digest in the first
852  * HASH_BUFFER_SIZE words of the digest[] array, and uses HASH_EXTRA_SIZE
853  * more words for internal purposes.  (This buffer is exported so the
854  * caller can wipe it once rather than this code doing it each call,
855  * and tacking it onto the end of the digest[] array is the quick and
856  * dirty way of doing it.)
857  *
858  * It so happens that MD5 and SHA share most of the initial vector
859  * used to initialize the digest[] array before the first call:
860  * 1) 0x67452301
861  * 2) 0xefcdab89
862  * 3) 0x98badcfe
863  * 4) 0x10325476
864  * 5) 0xc3d2e1f0 (SHA only)
865  *
866  * For /dev/random purposes, the length of the data being hashed is
867  * fixed in length, so appending a bit count in the usual way is not
868  * cryptographically necessary.
869  */
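
/*
 * To make that contract concrete: a caller seeds digest[] with the
 * initialization vector listed above and then runs the transform over
 * the pool, 16 words at a time, chaining the digest.  A sketch (the
 * real user is extract_entropy() below):
 *
 *	__u32 digest[HASH_BUFFER_SIZE + HASH_EXTRA_SIZE];
 *
 *	digest[0] = 0x67452301;
 *	digest[1] = 0xefcdab89;
 *	digest[2] = 0x98badcfe;
 *	digest[3] = 0x10325476;
 *	digest[4] = 0xc3d2e1f0;		(SHA only)
 *	HASH_TRANSFORM(digest, pool);
 *	HASH_TRANSFORM(digest, pool + 16);
 *
 * and the result is left in digest[0..HASH_BUFFER_SIZE-1]; the words
 * past that are scratch space which the caller must wipe.
 */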
870 
871 #ifdef USE_SHA
872 
873 #define HASH_BUFFER_SIZE 5
874 #define HASH_EXTRA_SIZE 80
875 #define HASH_TRANSFORM SHATransform
876 
877 /* Various size/speed tradeoffs are available.  Choose 0..3. */
878 #define SHA_CODE_SIZE 0
879 
880 /*
881  * SHA transform algorithm, taken from code written by Peter Gutmann,
882  * and placed in the public domain.
883  */
884 
885 /* The SHA f()-functions.  */
886 
887 #define f1(x,y,z)   ( z ^ (x & (y^z)) )		/* Rounds  0-19: x ? y : z */
888 #define f2(x,y,z)   (x ^ y ^ z)			/* Rounds 20-39: XOR */
889 #define f3(x,y,z)   ( (x & y) + (z & (x ^ y)) )	/* Rounds 40-59: majority */
890 #define f4(x,y,z)   (x ^ y ^ z)			/* Rounds 60-79: XOR */
891 
892 /* The SHA Mysterious Constants */
893 
894 #define K1  0x5A827999L			/* Rounds  0-19: sqrt(2) * 2^30 */
895 #define K2  0x6ED9EBA1L			/* Rounds 20-39: sqrt(3) * 2^30 */
896 #define K3  0x8F1BBCDCL			/* Rounds 40-59: sqrt(5) * 2^30 */
897 #define K4  0xCA62C1D6L			/* Rounds 60-79: sqrt(10) * 2^30 */
898 
899 #define ROTL(n,X)  ( ( ( X ) << n ) | ( ( X ) >> ( 32 - n ) ) )
900 
901 #define subRound(a, b, c, d, e, f, k, data) \
902     ( e += ROTL( 5, a ) + f( b, c, d ) + k + data, b = ROTL( 30, b ) )
903 
904 
905 static void SHATransform(__u32 digest[85], __u32 const data[16])
906 {
907     __u32 A, B, C, D, E;     /* Local vars */
908     __u32 TEMP;
909     int	i;
910 #define W (digest + HASH_BUFFER_SIZE)	/* Expanded data array */
911 
912     /*
913      * Do the preliminary expansion of 16 to 80 words.  Doing it
914      * out-of-line like this is faster than doing it in-line on
915      * register-starved machines like the x86, and not really any
916      * slower on real processors.
917      */
918     memcpy(W, data, 16*sizeof(__u32));
919     for (i = 0; i < 64; i++) {
920 	    TEMP = W[i] ^ W[i+2] ^ W[i+8] ^ W[i+13];
921 	    W[i+16] = ROTL(1, TEMP);
922     }
923 
924     /* Set up first buffer and local data buffer */
925     A = digest[ 0 ];
926     B = digest[ 1 ];
927     C = digest[ 2 ];
928     D = digest[ 3 ];
929     E = digest[ 4 ];
930 
931     /* Heavy mangling, in 4 sub-rounds of 20 iterations each. */
932 #if SHA_CODE_SIZE == 0
933     /*
934      * Approximately 50% of the speed of the largest version, but
935      * takes up 1/16 the space.  Saves about 6k on an i386 kernel.
936      */
937     for (i = 0; i < 80; i++) {
938 	if (i < 40) {
939 	    if (i < 20)
940 		TEMP = f1(B, C, D) + K1;
941 	    else
942 		TEMP = f2(B, C, D) + K2;
943 	} else {
944 	    if (i < 60)
945 		TEMP = f3(B, C, D) + K3;
946 	    else
947 		TEMP = f4(B, C, D) + K4;
948 	}
949 	TEMP += ROTL(5, A) + E + W[i];
950 	E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
951     }
952 #elif SHA_CODE_SIZE == 1
953     for (i = 0; i < 20; i++) {
954 	TEMP = f1(B, C, D) + K1 + ROTL(5, A) + E + W[i];
955 	E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
956     }
957     for (; i < 40; i++) {
958 	TEMP = f2(B, C, D) + K2 + ROTL(5, A) + E + W[i];
959 	E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
960     }
961     for (; i < 60; i++) {
962 	TEMP = f3(B, C, D) + K3 + ROTL(5, A) + E + W[i];
963 	E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
964     }
965     for (; i < 80; i++) {
966 	TEMP = f4(B, C, D) + K4 + ROTL(5, A) + E + W[i];
967 	E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
968     }
969 #elif SHA_CODE_SIZE == 2
970     for (i = 0; i < 20; i += 5) {
971 	subRound( A, B, C, D, E, f1, K1, W[ i   ] );
972 	subRound( E, A, B, C, D, f1, K1, W[ i+1 ] );
973 	subRound( D, E, A, B, C, f1, K1, W[ i+2 ] );
974 	subRound( C, D, E, A, B, f1, K1, W[ i+3 ] );
975 	subRound( B, C, D, E, A, f1, K1, W[ i+4 ] );
976     }
977     for (; i < 40; i += 5) {
978 	subRound( A, B, C, D, E, f2, K2, W[ i   ] );
979 	subRound( E, A, B, C, D, f2, K2, W[ i+1 ] );
980 	subRound( D, E, A, B, C, f2, K2, W[ i+2 ] );
981 	subRound( C, D, E, A, B, f2, K2, W[ i+3 ] );
982 	subRound( B, C, D, E, A, f2, K2, W[ i+4 ] );
983     }
984     for (; i < 60; i += 5) {
985 	subRound( A, B, C, D, E, f3, K3, W[ i   ] );
986 	subRound( E, A, B, C, D, f3, K3, W[ i+1 ] );
987 	subRound( D, E, A, B, C, f3, K3, W[ i+2 ] );
988 	subRound( C, D, E, A, B, f3, K3, W[ i+3 ] );
989 	subRound( B, C, D, E, A, f3, K3, W[ i+4 ] );
990     }
991     for (; i < 80; i += 5) {
992 	subRound( A, B, C, D, E, f4, K4, W[ i   ] );
993 	subRound( E, A, B, C, D, f4, K4, W[ i+1 ] );
994 	subRound( D, E, A, B, C, f4, K4, W[ i+2 ] );
995 	subRound( C, D, E, A, B, f4, K4, W[ i+3 ] );
996 	subRound( B, C, D, E, A, f4, K4, W[ i+4 ] );
997     }
998 #elif SHA_CODE_SIZE == 3 /* Really large version */
999     subRound( A, B, C, D, E, f1, K1, W[  0 ] );
1000     subRound( E, A, B, C, D, f1, K1, W[  1 ] );
1001     subRound( D, E, A, B, C, f1, K1, W[  2 ] );
1002     subRound( C, D, E, A, B, f1, K1, W[  3 ] );
1003     subRound( B, C, D, E, A, f1, K1, W[  4 ] );
1004     subRound( A, B, C, D, E, f1, K1, W[  5 ] );
1005     subRound( E, A, B, C, D, f1, K1, W[  6 ] );
1006     subRound( D, E, A, B, C, f1, K1, W[  7 ] );
1007     subRound( C, D, E, A, B, f1, K1, W[  8 ] );
1008     subRound( B, C, D, E, A, f1, K1, W[  9 ] );
1009     subRound( A, B, C, D, E, f1, K1, W[ 10 ] );
1010     subRound( E, A, B, C, D, f1, K1, W[ 11 ] );
1011     subRound( D, E, A, B, C, f1, K1, W[ 12 ] );
1012     subRound( C, D, E, A, B, f1, K1, W[ 13 ] );
1013     subRound( B, C, D, E, A, f1, K1, W[ 14 ] );
1014     subRound( A, B, C, D, E, f1, K1, W[ 15 ] );
1015     subRound( E, A, B, C, D, f1, K1, W[ 16 ] );
1016     subRound( D, E, A, B, C, f1, K1, W[ 17 ] );
1017     subRound( C, D, E, A, B, f1, K1, W[ 18 ] );
1018     subRound( B, C, D, E, A, f1, K1, W[ 19 ] );
1019 
1020     subRound( A, B, C, D, E, f2, K2, W[ 20 ] );
1021     subRound( E, A, B, C, D, f2, K2, W[ 21 ] );
1022     subRound( D, E, A, B, C, f2, K2, W[ 22 ] );
1023     subRound( C, D, E, A, B, f2, K2, W[ 23 ] );
1024     subRound( B, C, D, E, A, f2, K2, W[ 24 ] );
1025     subRound( A, B, C, D, E, f2, K2, W[ 25 ] );
1026     subRound( E, A, B, C, D, f2, K2, W[ 26 ] );
1027     subRound( D, E, A, B, C, f2, K2, W[ 27 ] );
1028     subRound( C, D, E, A, B, f2, K2, W[ 28 ] );
1029     subRound( B, C, D, E, A, f2, K2, W[ 29 ] );
1030     subRound( A, B, C, D, E, f2, K2, W[ 30 ] );
1031     subRound( E, A, B, C, D, f2, K2, W[ 31 ] );
1032     subRound( D, E, A, B, C, f2, K2, W[ 32 ] );
1033     subRound( C, D, E, A, B, f2, K2, W[ 33 ] );
1034     subRound( B, C, D, E, A, f2, K2, W[ 34 ] );
1035     subRound( A, B, C, D, E, f2, K2, W[ 35 ] );
1036     subRound( E, A, B, C, D, f2, K2, W[ 36 ] );
1037     subRound( D, E, A, B, C, f2, K2, W[ 37 ] );
1038     subRound( C, D, E, A, B, f2, K2, W[ 38 ] );
1039     subRound( B, C, D, E, A, f2, K2, W[ 39 ] );
1040 
1041     subRound( A, B, C, D, E, f3, K3, W[ 40 ] );
1042     subRound( E, A, B, C, D, f3, K3, W[ 41 ] );
1043     subRound( D, E, A, B, C, f3, K3, W[ 42 ] );
1044     subRound( C, D, E, A, B, f3, K3, W[ 43 ] );
1045     subRound( B, C, D, E, A, f3, K3, W[ 44 ] );
1046     subRound( A, B, C, D, E, f3, K3, W[ 45 ] );
1047     subRound( E, A, B, C, D, f3, K3, W[ 46 ] );
1048     subRound( D, E, A, B, C, f3, K3, W[ 47 ] );
1049     subRound( C, D, E, A, B, f3, K3, W[ 48 ] );
1050     subRound( B, C, D, E, A, f3, K3, W[ 49 ] );
1051     subRound( A, B, C, D, E, f3, K3, W[ 50 ] );
1052     subRound( E, A, B, C, D, f3, K3, W[ 51 ] );
1053     subRound( D, E, A, B, C, f3, K3, W[ 52 ] );
1054     subRound( C, D, E, A, B, f3, K3, W[ 53 ] );
1055     subRound( B, C, D, E, A, f3, K3, W[ 54 ] );
1056     subRound( A, B, C, D, E, f3, K3, W[ 55 ] );
1057     subRound( E, A, B, C, D, f3, K3, W[ 56 ] );
1058     subRound( D, E, A, B, C, f3, K3, W[ 57 ] );
1059     subRound( C, D, E, A, B, f3, K3, W[ 58 ] );
1060     subRound( B, C, D, E, A, f3, K3, W[ 59 ] );
1061 
1062     subRound( A, B, C, D, E, f4, K4, W[ 60 ] );
1063     subRound( E, A, B, C, D, f4, K4, W[ 61 ] );
1064     subRound( D, E, A, B, C, f4, K4, W[ 62 ] );
1065     subRound( C, D, E, A, B, f4, K4, W[ 63 ] );
1066     subRound( B, C, D, E, A, f4, K4, W[ 64 ] );
1067     subRound( A, B, C, D, E, f4, K4, W[ 65 ] );
1068     subRound( E, A, B, C, D, f4, K4, W[ 66 ] );
1069     subRound( D, E, A, B, C, f4, K4, W[ 67 ] );
1070     subRound( C, D, E, A, B, f4, K4, W[ 68 ] );
1071     subRound( B, C, D, E, A, f4, K4, W[ 69 ] );
1072     subRound( A, B, C, D, E, f4, K4, W[ 70 ] );
1073     subRound( E, A, B, C, D, f4, K4, W[ 71 ] );
1074     subRound( D, E, A, B, C, f4, K4, W[ 72 ] );
1075     subRound( C, D, E, A, B, f4, K4, W[ 73 ] );
1076     subRound( B, C, D, E, A, f4, K4, W[ 74 ] );
1077     subRound( A, B, C, D, E, f4, K4, W[ 75 ] );
1078     subRound( E, A, B, C, D, f4, K4, W[ 76 ] );
1079     subRound( D, E, A, B, C, f4, K4, W[ 77 ] );
1080     subRound( C, D, E, A, B, f4, K4, W[ 78 ] );
1081     subRound( B, C, D, E, A, f4, K4, W[ 79 ] );
1082 #else
1083 #error Illegal SHA_CODE_SIZE
1084 #endif
1085 
1086     /* Build message digest */
1087     digest[ 0 ] += A;
1088     digest[ 1 ] += B;
1089     digest[ 2 ] += C;
1090     digest[ 3 ] += D;
1091     digest[ 4 ] += E;
1092 
1093 	/* W is wiped by the caller */
1094 #undef W
1095 }
1096 
1097 #undef ROTL
1098 #undef f1
1099 #undef f2
1100 #undef f3
1101 #undef f4
1102 #undef K1
1103 #undef K2
1104 #undef K3
1105 #undef K4
1106 #undef subRound
1107 
1108 #else /* !USE_SHA - Use MD5 */
1109 
1110 #define HASH_BUFFER_SIZE 4
1111 #define HASH_EXTRA_SIZE 0
1112 #define HASH_TRANSFORM MD5Transform
1113 
1114 /*
1115  * MD5 transform algorithm, taken from code written by Colin Plumb,
1116  * and put into the public domain
1117  */
1118 
1119 /* The four core functions - F1 is optimized somewhat */
1120 
1121 /* #define F1(x, y, z) (x & y | ~x & z) */
1122 #define F1(x, y, z) (z ^ (x & (y ^ z)))
1123 #define F2(x, y, z) F1(z, x, y)
1124 #define F3(x, y, z) (x ^ y ^ z)
1125 #define F4(x, y, z) (y ^ (x | ~z))
1126 
1127 /* This is the central step in the MD5 algorithm. */
1128 #define MD5STEP(f, w, x, y, z, data, s) \
1129 	( w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x )
1130 
1131 /*
1132  * The core of the MD5 algorithm, this alters an existing MD5 hash to
1133  * reflect the addition of 16 longwords of new data.  MD5Update blocks
1134  * the data and converts bytes into longwords for this routine.
1135  */
1136 static void MD5Transform(__u32 buf[HASH_BUFFER_SIZE], __u32 const in[16])
1137 {
1138 	__u32 a, b, c, d;
1139 
1140 	a = buf[0];
1141 	b = buf[1];
1142 	c = buf[2];
1143 	d = buf[3];
1144 
1145 	MD5STEP(F1, a, b, c, d, in[ 0]+0xd76aa478,  7);
1146 	MD5STEP(F1, d, a, b, c, in[ 1]+0xe8c7b756, 12);
1147 	MD5STEP(F1, c, d, a, b, in[ 2]+0x242070db, 17);
1148 	MD5STEP(F1, b, c, d, a, in[ 3]+0xc1bdceee, 22);
1149 	MD5STEP(F1, a, b, c, d, in[ 4]+0xf57c0faf,  7);
1150 	MD5STEP(F1, d, a, b, c, in[ 5]+0x4787c62a, 12);
1151 	MD5STEP(F1, c, d, a, b, in[ 6]+0xa8304613, 17);
1152 	MD5STEP(F1, b, c, d, a, in[ 7]+0xfd469501, 22);
1153 	MD5STEP(F1, a, b, c, d, in[ 8]+0x698098d8,  7);
1154 	MD5STEP(F1, d, a, b, c, in[ 9]+0x8b44f7af, 12);
1155 	MD5STEP(F1, c, d, a, b, in[10]+0xffff5bb1, 17);
1156 	MD5STEP(F1, b, c, d, a, in[11]+0x895cd7be, 22);
1157 	MD5STEP(F1, a, b, c, d, in[12]+0x6b901122,  7);
1158 	MD5STEP(F1, d, a, b, c, in[13]+0xfd987193, 12);
1159 	MD5STEP(F1, c, d, a, b, in[14]+0xa679438e, 17);
1160 	MD5STEP(F1, b, c, d, a, in[15]+0x49b40821, 22);
1161 
1162 	MD5STEP(F2, a, b, c, d, in[ 1]+0xf61e2562,  5);
1163 	MD5STEP(F2, d, a, b, c, in[ 6]+0xc040b340,  9);
1164 	MD5STEP(F2, c, d, a, b, in[11]+0x265e5a51, 14);
1165 	MD5STEP(F2, b, c, d, a, in[ 0]+0xe9b6c7aa, 20);
1166 	MD5STEP(F2, a, b, c, d, in[ 5]+0xd62f105d,  5);
1167 	MD5STEP(F2, d, a, b, c, in[10]+0x02441453,  9);
1168 	MD5STEP(F2, c, d, a, b, in[15]+0xd8a1e681, 14);
1169 	MD5STEP(F2, b, c, d, a, in[ 4]+0xe7d3fbc8, 20);
1170 	MD5STEP(F2, a, b, c, d, in[ 9]+0x21e1cde6,  5);
1171 	MD5STEP(F2, d, a, b, c, in[14]+0xc33707d6,  9);
1172 	MD5STEP(F2, c, d, a, b, in[ 3]+0xf4d50d87, 14);
1173 	MD5STEP(F2, b, c, d, a, in[ 8]+0x455a14ed, 20);
1174 	MD5STEP(F2, a, b, c, d, in[13]+0xa9e3e905,  5);
1175 	MD5STEP(F2, d, a, b, c, in[ 2]+0xfcefa3f8,  9);
1176 	MD5STEP(F2, c, d, a, b, in[ 7]+0x676f02d9, 14);
1177 	MD5STEP(F2, b, c, d, a, in[12]+0x8d2a4c8a, 20);
1178 
1179 	MD5STEP(F3, a, b, c, d, in[ 5]+0xfffa3942,  4);
1180 	MD5STEP(F3, d, a, b, c, in[ 8]+0x8771f681, 11);
1181 	MD5STEP(F3, c, d, a, b, in[11]+0x6d9d6122, 16);
1182 	MD5STEP(F3, b, c, d, a, in[14]+0xfde5380c, 23);
1183 	MD5STEP(F3, a, b, c, d, in[ 1]+0xa4beea44,  4);
1184 	MD5STEP(F3, d, a, b, c, in[ 4]+0x4bdecfa9, 11);
1185 	MD5STEP(F3, c, d, a, b, in[ 7]+0xf6bb4b60, 16);
1186 	MD5STEP(F3, b, c, d, a, in[10]+0xbebfbc70, 23);
1187 	MD5STEP(F3, a, b, c, d, in[13]+0x289b7ec6,  4);
1188 	MD5STEP(F3, d, a, b, c, in[ 0]+0xeaa127fa, 11);
1189 	MD5STEP(F3, c, d, a, b, in[ 3]+0xd4ef3085, 16);
1190 	MD5STEP(F3, b, c, d, a, in[ 6]+0x04881d05, 23);
1191 	MD5STEP(F3, a, b, c, d, in[ 9]+0xd9d4d039,  4);
1192 	MD5STEP(F3, d, a, b, c, in[12]+0xe6db99e5, 11);
1193 	MD5STEP(F3, c, d, a, b, in[15]+0x1fa27cf8, 16);
1194 	MD5STEP(F3, b, c, d, a, in[ 2]+0xc4ac5665, 23);
1195 
1196 	MD5STEP(F4, a, b, c, d, in[ 0]+0xf4292244,  6);
1197 	MD5STEP(F4, d, a, b, c, in[ 7]+0x432aff97, 10);
1198 	MD5STEP(F4, c, d, a, b, in[14]+0xab9423a7, 15);
1199 	MD5STEP(F4, b, c, d, a, in[ 5]+0xfc93a039, 21);
1200 	MD5STEP(F4, a, b, c, d, in[12]+0x655b59c3,  6);
1201 	MD5STEP(F4, d, a, b, c, in[ 3]+0x8f0ccc92, 10);
1202 	MD5STEP(F4, c, d, a, b, in[10]+0xffeff47d, 15);
1203 	MD5STEP(F4, b, c, d, a, in[ 1]+0x85845dd1, 21);
1204 	MD5STEP(F4, a, b, c, d, in[ 8]+0x6fa87e4f,  6);
1205 	MD5STEP(F4, d, a, b, c, in[15]+0xfe2ce6e0, 10);
1206 	MD5STEP(F4, c, d, a, b, in[ 6]+0xa3014314, 15);
1207 	MD5STEP(F4, b, c, d, a, in[13]+0x4e0811a1, 21);
1208 	MD5STEP(F4, a, b, c, d, in[ 4]+0xf7537e82,  6);
1209 	MD5STEP(F4, d, a, b, c, in[11]+0xbd3af235, 10);
1210 	MD5STEP(F4, c, d, a, b, in[ 2]+0x2ad7d2bb, 15);
1211 	MD5STEP(F4, b, c, d, a, in[ 9]+0xeb86d391, 21);
1212 
1213 	buf[0] += a;
1214 	buf[1] += b;
1215 	buf[2] += c;
1216 	buf[3] += d;
1217 }
1218 
1219 #undef F1
1220 #undef F2
1221 #undef F3
1222 #undef F4
1223 #undef MD5STEP
1224 
1225 #endif /* !USE_SHA */
1226 
1227 /*********************************************************************
1228  *
1229  * Entropy extraction routines
1230  *
1231  *********************************************************************/
1232 
1233 #define EXTRACT_ENTROPY_USER		1
1234 #define EXTRACT_ENTROPY_SECONDARY	2
1235 #define TMP_BUF_SIZE			(HASH_BUFFER_SIZE + HASH_EXTRA_SIZE)
1236 #define SEC_XFER_SIZE			(TMP_BUF_SIZE*4)
1237 
1238 static ssize_t extract_entropy(struct entropy_store *r, void * buf,
1239 			       size_t nbytes, int flags);
1240 
1241 /*
1242  * This utility inline function is responsible for transferring entropy
1243  * from the primary pool to the secondary extraction pool.  We pull
1244  * randomness under two conditions; one is if there isn't enough entropy
1245  * in the secondary pool.  The other is after we have extracted 1024 bytes,
1246  * at which point we do a "catastrophic reseeding".
1247  */
1248 static inline void xfer_secondary_pool(struct entropy_store *r,
1249 				       size_t nbytes, __u32 *tmp,
1250 				       size_t tmpsize)
1251 {
1252 	if (r->entropy_count < nbytes * 8 &&
1253 	    r->entropy_count < r->poolinfo.POOLBITS) {
1254 		int nwords = min_t(int,
1255 				   r->poolinfo.poolwords - r->entropy_count/32,
1256 				   tmpsize / 4);
1257 
1258 		DEBUG_ENT("xfer %d from primary to %s (have %d, need %d)\n",
1259 			  nwords * 32,
1260 			  r == sec_random_state ? "secondary" : "unknown",
1261 			  r->entropy_count, nbytes * 8);
1262 
1263 		extract_entropy(random_state, tmp, nwords * 4, 0);
1264 		add_entropy_words(r, tmp, nwords);
1265 		credit_entropy_store(r, nwords * 32);
1266 	}
1267 	if (r->extract_count > 1024) {
1268 		DEBUG_ENT("reseeding %s with %d from primary\n",
1269 			  r == sec_random_state ? "secondary" : "unknown",
1270 			  tmpsize * 8);
1271 		extract_entropy(random_state, tmp, tmpsize, 0);
1272 		add_entropy_words(r, tmp, tmpsize / 4);
1273 		r->extract_count = 0;
1274 	}
1275 }
1276 
1277 /*
1278  * This function extracts randomness from the "entropy pool", and
1279  * returns it in a buffer.  This function computes how many remaining
1280  * bits of entropy are left in the pool, but it does not restrict the
1281  * number of bytes that are actually obtained.  If the EXTRACT_ENTROPY_USER
1282  * flag is given, then the buf pointer is assumed to be in user space.
1283  *
1284  * If the EXTRACT_ENTROPY_SECONDARY flag is given, then we are actually
1285  * extracting entropy from the secondary pool, and can refill from the
1286  * primary pool if needed.
1287  *
1288  * Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
1289  */
1290 static ssize_t extract_entropy(struct entropy_store *r, void * buf,
1291 			       size_t nbytes, int flags)
1292 {
1293 	ssize_t ret, i;
1294 	__u32 tmp[TMP_BUF_SIZE];
1295 	__u32 x;
1296 
1297 	add_timer_randomness(&extract_timer_state, nbytes);
1298 
1299 	/* Redundant, but just in case... */
1300 	if (r->entropy_count > r->poolinfo.POOLBITS)
1301 		r->entropy_count = r->poolinfo.POOLBITS;
1302 
1303 	if (flags & EXTRACT_ENTROPY_SECONDARY)
1304 		xfer_secondary_pool(r, nbytes, tmp, sizeof(tmp));
1305 
1306 	DEBUG_ENT("%s has %d bits, want %d bits\n",
1307 		  r == sec_random_state ? "secondary" :
1308 		  r == random_state ? "primary" : "unknown",
1309 		  r->entropy_count, nbytes * 8);
1310 
1311 	if (r->entropy_count / 8 >= nbytes)
1312 		r->entropy_count -= nbytes*8;
1313 	else
1314 		r->entropy_count = 0;
1315 
1316 	if (r->entropy_count < random_write_wakeup_thresh)
1317 		wake_up_interruptible(&random_write_wait);
1318 
1319 	r->extract_count += nbytes;
1320 
1321 	ret = 0;
1322 	while (nbytes) {
1323 		/*
1324 		 * Check if we need to break out or reschedule....
1325 		 */
1326 		if ((flags & EXTRACT_ENTROPY_USER) && current->need_resched) {
1327 			if (signal_pending(current)) {
1328 				if (ret == 0)
1329 					ret = -ERESTARTSYS;
1330 				break;
1331 			}
1332 			schedule();
1333 		}
1334 
1335 		/* Hash the pool to get the output */
1336 		tmp[0] = 0x67452301;
1337 		tmp[1] = 0xefcdab89;
1338 		tmp[2] = 0x98badcfe;
1339 		tmp[3] = 0x10325476;
1340 #ifdef USE_SHA
1341 		tmp[4] = 0xc3d2e1f0;
1342 #endif
1343 		/*
1344 		 * As we hash the pool, we mix intermediate values of
1345 		 * the hash back into the pool.  This eliminates
1346 		 * backtracking attacks (where the attacker knows
1347 		 * the state of the pool plus the current outputs, and
1348 		 * attempts to find previous outputs), unless the hash
1349 		 * function can be inverted.
1350 		 */
1351 		for (i = 0, x = 0; i < r->poolinfo.poolwords; i += 16, x+=2) {
1352 			HASH_TRANSFORM(tmp, r->pool+i);
1353 			add_entropy_words(r, &tmp[x%HASH_BUFFER_SIZE], 1);
1354 		}
1355 
1356 		/*
1357 		 * In case the hash function has some recognizable
1358 		 * output pattern, we fold it in half.
1359 		 */
1360 		for (i = 0; i <  HASH_BUFFER_SIZE/2; i++)
1361 			tmp[i] ^= tmp[i + (HASH_BUFFER_SIZE+1)/2];
1362 #if HASH_BUFFER_SIZE & 1	/* There's a middle word to deal with */
1363 		x = tmp[HASH_BUFFER_SIZE/2];
1364 		x ^= (x >> 16);		/* Fold it in half */
1365 		((__u16 *)tmp)[HASH_BUFFER_SIZE-1] = (__u16)x;
1366 #endif
1367 
1368 		/* Copy data to destination buffer */
1369 		i = min(nbytes, HASH_BUFFER_SIZE*sizeof(__u32)/2);
1370 		if (flags & EXTRACT_ENTROPY_USER) {
1371 			i -= copy_to_user(buf, (__u8 const *)tmp, i);
1372 			if (!i) {
1373 				ret = -EFAULT;
1374 				break;
1375 			}
1376 		} else
1377 			memcpy(buf, (__u8 const *)tmp, i);
1378 		nbytes -= i;
1379 		buf += i;
1380 		ret += i;
1381 		add_timer_randomness(&extract_timer_state, nbytes);
1382 	}
1383 
1384 	/* Wipe data just returned from memory */
1385 	memset(tmp, 0, sizeof(tmp));
1386 
1387 	return ret;
1388 }
1389 
1390 /*
1391  * This function is the exported kernel interface.  It returns some
1392  * number of good random numbers, suitable for seeding TCP sequence
1393  * numbers, etc.
1394  */
1395 void get_random_bytes(void *buf, int nbytes)
1396 {
1397 	if (sec_random_state)
1398 		extract_entropy(sec_random_state, (char *) buf, nbytes,
1399 				EXTRACT_ENTROPY_SECONDARY);
1400 	else if (random_state)
1401 		extract_entropy(random_state, (char *) buf, nbytes, 0);
1402 	else
1403 		printk(KERN_NOTICE "get_random_bytes called before "
1404 				   "random driver initialization\n");
1405 }
1406 
1407 /*********************************************************************
1408  *
1409  * Functions to interface with Linux
1410  *
1411  *********************************************************************/
1412 
1413 /*
1414  * Initialize the random pool with standard stuff.
1415  *
1416  * NOTE: This is an OS-dependent function.
1417  */
1418 static void init_std_data(struct entropy_store *r)
1419 {
1420 	struct timeval 	tv;
1421 	__u32		words[2];
1422 	char 		*p;
1423 	int		i;
1424 
1425 	do_gettimeofday(&tv);
1426 	words[0] = tv.tv_sec;
1427 	words[1] = tv.tv_usec;
1428 	add_entropy_words(r, words, 2);
1429 
1430 	/*
1431 	 *	This doesn't lock system.utsname. However, we are generating
1432 	 *	entropy so a race with a name set here is fine.
1433 	 */
1434 	p = (char *) &system_utsname;
1435 	for (i = sizeof(system_utsname) / sizeof(words); i; i--) {
1436 		memcpy(words, p, sizeof(words));
1437 		add_entropy_words(r, words, sizeof(words)/4);
1438 		p += sizeof(words);
1439 	}
1440 }
1441 
1442 void __init rand_initialize(void)
1443 {
1444 	int i;
1445 
1446 	if (create_entropy_store(DEFAULT_POOL_SIZE, &random_state))
1447 		return;		/* Error, return */
1448 	if (batch_entropy_init(BATCH_ENTROPY_SIZE, random_state))
1449 		return;		/* Error, return */
1450 	if (create_entropy_store(SECONDARY_POOL_SIZE, &sec_random_state))
1451 		return;		/* Error, return */
1452 	clear_entropy_store(random_state);
1453 	clear_entropy_store(sec_random_state);
1454 	init_std_data(random_state);
1455 #ifdef CONFIG_SYSCTL
1456 	sysctl_init_random(random_state);
1457 #endif
1458 #ifndef CONFIG_ARCH_S390
1459 	for (i = 0; i < NR_IRQS; i++)
1460 		irq_timer_state[i] = NULL;
1461 #endif
1462 	for (i = 0; i < MAX_BLKDEV; i++)
1463 		blkdev_timer_state[i] = NULL;
1464 	memset(&keyboard_timer_state, 0, sizeof(struct timer_rand_state));
1465 	memset(&mouse_timer_state, 0, sizeof(struct timer_rand_state));
1466 	memset(&extract_timer_state, 0, sizeof(struct timer_rand_state));
1467 	extract_timer_state.dont_count_entropy = 1;
1468 }
1469 
1470 #ifndef CONFIG_ARCH_S390
1471 void rand_initialize_irq(int irq)
1472 {
1473 	struct timer_rand_state *state;
1474 
1475 	if (irq >= NR_IRQS || irq_timer_state[irq])
1476 		return;
1477 
1478 	/*
1479 	 * If kmalloc returns null, we just won't use that entropy
1480 	 * source.
1481 	 */
1482 	state = kmalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
1483 	if (state) {
1484 		memset(state, 0, sizeof(struct timer_rand_state));
1485 		irq_timer_state[irq] = state;
1486 	}
1487 }
1488 #endif
1489 
1490 void rand_initialize_blkdev(int major, int mode)
1491 {
1492 	struct timer_rand_state *state;
1493 
1494 	if (major >= MAX_BLKDEV || blkdev_timer_state[major])
1495 		return;
1496 
1497 	/*
1498 	 * If kmalloc returns null, we just won't use that entropy
1499 	 * source.
1500 	 */
1501 	state = kmalloc(sizeof(struct timer_rand_state), mode);
1502 	if (state) {
1503 		memset(state, 0, sizeof(struct timer_rand_state));
1504 		blkdev_timer_state[major] = state;
1505 	}
1506 }
1507 
1508 
1509 static ssize_t
1510 random_read(struct file * file, char * buf, size_t nbytes, loff_t *ppos)
1511 {
1512 	DECLARE_WAITQUEUE(wait, current);
1513 	ssize_t			n, retval = 0, count = 0;
1514 
1515 	if (nbytes == 0)
1516 		return 0;
1517 
1518 	add_wait_queue(&random_read_wait, &wait);
1519 	while (nbytes > 0) {
1520 		set_current_state(TASK_INTERRUPTIBLE);
1521 
1522 		n = nbytes;
1523 		if (n > SEC_XFER_SIZE)
1524 			n = SEC_XFER_SIZE;
1525 		if (n > random_state->entropy_count / 8)
1526 			n = random_state->entropy_count / 8;
1527 		if (n == 0) {
1528 			if (file->f_flags & O_NONBLOCK) {
1529 				retval = -EAGAIN;
1530 				break;
1531 			}
1532 			if (signal_pending(current)) {
1533 				retval = -ERESTARTSYS;
1534 				break;
1535 			}
1536 			schedule();
1537 			continue;
1538 		}
1539 		n = extract_entropy(sec_random_state, buf, n,
1540 				    EXTRACT_ENTROPY_USER |
1541 				    EXTRACT_ENTROPY_SECONDARY);
1542 		if (n < 0) {
1543 			retval = n;
1544 			break;
1545 		}
1546 		count += n;
1547 		buf += n;
1548 		nbytes -= n;
1549 		break;		/* This break makes the device work */
1550 				/* like a named pipe */
1551 	}
1552 	current->state = TASK_RUNNING;
1553 	remove_wait_queue(&random_read_wait, &wait);
1554 
1555 	/*
1556 	 * If we gave the user some bytes, update the access time.
1557 	 */
1558 	if (count != 0) {
1559 		UPDATE_ATIME(file->f_dentry->d_inode);
1560 	}
1561 
1562 	return (count ? count : retval);
1563 }
1564 
1565 static ssize_t
1566 urandom_read(struct file * file, char * buf,
1567 		      size_t nbytes, loff_t *ppos)
1568 {
1569 	return extract_entropy(sec_random_state, buf, nbytes,
1570 			       EXTRACT_ENTROPY_USER |
1571 			       EXTRACT_ENTROPY_SECONDARY);
1572 }
1573 
1574 static unsigned int
1575 random_poll(struct file *file, poll_table * wait)
1576 {
1577 	unsigned int mask;
1578 
1579 	poll_wait(file, &random_read_wait, wait);
1580 	poll_wait(file, &random_write_wait, wait);
1581 	mask = 0;
1582 	if (random_state->entropy_count >= random_read_wakeup_thresh)
1583 		mask |= POLLIN | POLLRDNORM;
1584 	if (random_state->entropy_count < random_write_wakeup_thresh)
1585 		mask |= POLLOUT | POLLWRNORM;
1586 	return mask;
1587 }
1588 
1589 static ssize_t
1590 random_write(struct file * file, const char * buffer,
1591 	     size_t count, loff_t *ppos)
1592 {
1593 	int		ret = 0;
1594 	size_t		bytes;
1595 	__u32 		buf[16];
1596 	const char 	*p = buffer;
1597 	size_t		c = count;
1598 
1599 	while (c > 0) {
1600 		bytes = min(c, sizeof(buf));
1601 
1602 		bytes -= copy_from_user(&buf, p, bytes);
1603 		if (!bytes) {
1604 			ret = -EFAULT;
1605 			break;
1606 		}
1607 		c -= bytes;
1608 		p += bytes;
1609 
1610 		add_entropy_words(random_state, buf, (bytes + 3) / 4);
1611 	}
1612 	if (p == buffer) {
1613 		return (ssize_t)ret;
1614 	} else {
1615 		file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
1616 		mark_inode_dirty(file->f_dentry->d_inode);
1617 		return (ssize_t)(p - buffer);
1618 	}
1619 }
1620 
1621 static int
1622 random_ioctl(struct inode * inode, struct file * file,
1623 	     unsigned int cmd, unsigned long arg)
1624 {
1625 	int *p, size, ent_count;
1626 	int retval;
1627 
1628 	switch (cmd) {
1629 	case RNDGETENTCNT:
1630 		ent_count = random_state->entropy_count;
1631 		if (put_user(ent_count, (int *) arg))
1632 			return -EFAULT;
1633 		return 0;
1634 	case RNDADDTOENTCNT:
1635 		if (!capable(CAP_SYS_ADMIN))
1636 			return -EPERM;
1637 		if (get_user(ent_count, (int *) arg))
1638 			return -EFAULT;
1639 		credit_entropy_store(random_state, ent_count);
1640 		/*
1641 		 * Wake up waiting processes if we have enough
1642 		 * entropy.
1643 		 */
1644 		if (random_state->entropy_count >= random_read_wakeup_thresh)
1645 			wake_up_interruptible(&random_read_wait);
1646 		return 0;
1647 	case RNDGETPOOL:
1648 		if (!capable(CAP_SYS_ADMIN))
1649 			return -EPERM;
1650 		p = (int *) arg;
1651 		ent_count = random_state->entropy_count;
1652 		if (put_user(ent_count, p++) ||
1653 		    get_user(size, p) ||
1654 		    put_user(random_state->poolinfo.poolwords, p++))
1655 			return -EFAULT;
1656 		if (size < 0)
1657 			return -EINVAL;
1658 		if (size > random_state->poolinfo.poolwords)
1659 			size = random_state->poolinfo.poolwords;
1660 		if (copy_to_user(p, random_state->pool, size * sizeof(__u32)))
1661 			return -EFAULT;
1662 		return 0;
1663 	case RNDADDENTROPY:
1664 		if (!capable(CAP_SYS_ADMIN))
1665 			return -EPERM;
1666 		p = (int *) arg;
1667 		if (get_user(ent_count, p++))
1668 			return -EFAULT;
1669 		if (ent_count < 0)
1670 			return -EINVAL;
1671 		if (get_user(size, p++))
1672 			return -EFAULT;
1673 		retval = random_write(file, (const char *) p,
1674 				      size, &file->f_pos);
1675 		if (retval < 0)
1676 			return retval;
1677 		credit_entropy_store(random_state, ent_count);
1678 		/*
1679 		 * Wake up waiting processes if we have enough
1680 		 * entropy.
1681 		 */
1682 		if (random_state->entropy_count >= random_read_wakeup_thresh)
1683 			wake_up_interruptible(&random_read_wait);
1684 		return 0;
1685 	case RNDZAPENTCNT:
1686 		if (!capable(CAP_SYS_ADMIN))
1687 			return -EPERM;
1688 		random_state->entropy_count = 0;
1689 		return 0;
1690 	case RNDCLEARPOOL:
1691 		/* Clear the entropy pool and associated counters. */
1692 		if (!capable(CAP_SYS_ADMIN))
1693 			return -EPERM;
1694 		clear_entropy_store(random_state);
1695 		init_std_data(random_state);
1696 		return 0;
1697 	default:
1698 		return -EINVAL;
1699 	}
1700 }
1701 
1702 struct file_operations random_fops = {
1703 	read:		random_read,
1704 	write:		random_write,
1705 	poll:		random_poll,
1706 	ioctl:		random_ioctl,
1707 };
1708 
1709 struct file_operations urandom_fops = {
1710 	read:		urandom_read,
1711 	write:		random_write,
1712 	ioctl:		random_ioctl,
1713 };
1714 
1715 /***************************************************************
1716  * Random UUID interface
1717  *
1718  * Used here for a Boot ID, but can be useful for other kernel
1719  * drivers.
1720  ***************************************************************/
1721 
1722 /*
1723  * Generate random UUID
1724  */
1725 void generate_random_uuid(unsigned char uuid_out[16])
1726 {
1727 	get_random_bytes(uuid_out, 16);
1728 	/* Set UUID version to 4 --- truly random generation */
1729 	uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40;
1730 	/* Set the UUID variant to DCE */
1731 	uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
1732 }
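/*
 * Illustrative check (hypothetical helper, example only): after the
 * masking above, a generated UUID always carries version 4 in the high
 * nibble of byte 6 and the DCE variant (binary 10) in the top two bits
 * of byte 8.
 */
#if 0	/* example only, not compiled */
static int example_uuid_looks_v4(const unsigned char uuid[16])
{
	return (uuid[6] & 0xF0) == 0x40 &&	/* version nibble is 4 */
	       (uuid[8] & 0xC0) == 0x80;	/* variant bits are 10xxxxxx */
}
#endif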
1733 
1734 /********************************************************************
1735  *
1736  * Sysctl interface
1737  *
1738  ********************************************************************/
1739 
1740 #ifdef CONFIG_SYSCTL
1741 
1742 #include <linux/sysctl.h>
1743 
1744 static int sysctl_poolsize;
1745 static int min_read_thresh, max_read_thresh;
1746 static int min_write_thresh, max_write_thresh;
1747 static char sysctl_bootid[16];
1748 
1749 /*
1750  * This function handles a request from the user to change the pool size
1751  * of the primary entropy store.
1752  */
1753 static int change_poolsize(int poolsize)
1754 {
1755 	struct entropy_store	*new_store, *old_store;
1756 	int			ret;
1757 
1758 	if ((ret = create_entropy_store(poolsize, &new_store)))
1759 		return ret;
1760 
1761 	add_entropy_words(new_store, random_state->pool,
1762 			  random_state->poolinfo.poolwords);
1763 	credit_entropy_store(new_store, random_state->entropy_count);
1764 
1765 	sysctl_init_random(new_store);
1766 	old_store = random_state;
1767 	random_state = batch_tqueue.data = new_store;
1768 	free_entropy_store(old_store);
1769 	return 0;
1770 }
1771 
1772 static int proc_do_poolsize(ctl_table *table, int write, struct file *filp,
1773 			    void *buffer, size_t *lenp)
1774 {
1775 	int	ret;
1776 
1777 	sysctl_poolsize = random_state->poolinfo.POOLBYTES;
1778 
1779 	ret = proc_dointvec(table, write, filp, buffer, lenp);
1780 	if (ret || !write ||
1781 	    (sysctl_poolsize == random_state->poolinfo.POOLBYTES))
1782 		return ret;
1783 
1784 	return change_poolsize(sysctl_poolsize);
1785 }
1786 
1787 static int poolsize_strategy(ctl_table *table, int *name, int nlen,
1788 			     void *oldval, size_t *oldlenp,
1789 			     void *newval, size_t newlen, void **context)
1790 {
1791 	unsigned int	len;
1792 
1793 	sysctl_poolsize = random_state->poolinfo.POOLBYTES;
1794 
1795 	/*
1796 	 * We only handle the write case, since the read case gets
1797 	 * handled by the default handler (and we don't care if the
1798 	 * write case happens twice; it's harmless).
1799 	 */
1800 	if (newval && newlen) {
1801 		len = newlen;
1802 		if (len > table->maxlen)
1803 			len = table->maxlen;
1804 		if (copy_from_user(table->data, newval, len))
1805 			return -EFAULT;
1806 	}
1807 
1808 	if (sysctl_poolsize != random_state->poolinfo.POOLBYTES)
1809 		return change_poolsize(sysctl_poolsize);
1810 
1811 	return 0;
1812 }
1813 
1814 /*
1815  * These functions are used to return both the bootid UUID and a random
1816  * UUID.  The difference is in whether table->data is NULL; if it is,
1817  * then a new UUID is generated and returned to the user.
1818  *
1819  * If the user accesses this via the proc interface, it will be returned
1820  * as an ASCII string in the standard UUID format.  If accesses via the
1821  * sysctl system call, it is returned as 16 bytes of binary data.
1822  */
1823 static int proc_do_uuid(ctl_table *table, int write, struct file *filp,
1824 			void *buffer, size_t *lenp)
1825 {
1826 	ctl_table	fake_table;
1827 	unsigned char	buf[64], tmp_uuid[16], *uuid;
1828 
1829 	uuid = table->data;
1830 	if (!uuid) {
1831 		uuid = tmp_uuid;
1832 		uuid[8] = 0;
1833 	}
1834 	if (uuid[8] == 0)
1835 		generate_random_uuid(uuid);
1836 
1837 	sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
1838 		"%02x%02x%02x%02x%02x%02x",
1839 		uuid[0],  uuid[1],  uuid[2],  uuid[3],
1840 		uuid[4],  uuid[5],  uuid[6],  uuid[7],
1841 		uuid[8],  uuid[9],  uuid[10], uuid[11],
1842 		uuid[12], uuid[13], uuid[14], uuid[15]);
1843 	fake_table.data = buf;
1844 	fake_table.maxlen = sizeof(buf);
1845 
1846 	return proc_dostring(&fake_table, write, filp, buffer, lenp);
1847 }
1848 
1849 static int uuid_strategy(ctl_table *table, int *name, int nlen,
1850 			 void *oldval, size_t *oldlenp,
1851 			 void *newval, size_t newlen, void **context)
1852 {
1853 	unsigned char	tmp_uuid[16], *uuid;
1854 	unsigned int	len;
1855 
1856 	if (!oldval || !oldlenp)
1857 		return 1;
1858 
1859 	uuid = table->data;
1860 	if (!uuid) {
1861 		uuid = tmp_uuid;
1862 		uuid[8] = 0;
1863 	}
1864 	if (uuid[8] == 0)
1865 		generate_random_uuid(uuid);
1866 
1867 	if (get_user(len, oldlenp))
1868 		return -EFAULT;
1869 	if (len) {
1870 		if (len > 16)
1871 			len = 16;
1872 		if (copy_to_user(oldval, uuid, len) ||
1873 		    put_user(len, oldlenp))
1874 			return -EFAULT;
1875 	}
1876 	return 1;
1877 }
1878 
1879 ctl_table random_table[] = {
1880 	{RANDOM_POOLSIZE, "poolsize",
1881 	 &sysctl_poolsize, sizeof(int), 0644, NULL,
1882 	 &proc_do_poolsize, &poolsize_strategy},
1883 	{RANDOM_ENTROPY_COUNT, "entropy_avail",
1884 	 NULL, sizeof(int), 0444, NULL,
1885 	 &proc_dointvec},
1886 	{RANDOM_READ_THRESH, "read_wakeup_threshold",
1887 	 &random_read_wakeup_thresh, sizeof(int), 0644, NULL,
1888 	 &proc_dointvec_minmax, &sysctl_intvec, 0,
1889 	 &min_read_thresh, &max_read_thresh},
1890 	{RANDOM_WRITE_THRESH, "write_wakeup_threshold",
1891 	 &random_write_wakeup_thresh, sizeof(int), 0644, NULL,
1892 	 &proc_dointvec_minmax, &sysctl_intvec, 0,
1893 	 &min_write_thresh, &max_write_thresh},
1894 	{RANDOM_BOOT_ID, "boot_id",
1895 	 &sysctl_bootid, 16, 0444, NULL,
1896 	 &proc_do_uuid, &uuid_strategy},
1897 	{RANDOM_UUID, "uuid",
1898 	 NULL, 16, 0444, NULL,
1899 	 &proc_do_uuid, &uuid_strategy},
1900 	{0}
1901 };
1902 
1903 static void sysctl_init_random(struct entropy_store *random_state)
1904 {
1905 	min_read_thresh = 8;
1906 	min_write_thresh = 0;
1907 	max_read_thresh = max_write_thresh = random_state->poolinfo.POOLBITS;
1908 	random_table[1].data = &random_state->entropy_count;
1909 }
1910 #endif 	/* CONFIG_SYSCTL */
1911 
1912 /********************************************************************
1913  *
1914  * Random functions for networking
1915  *
1916  ********************************************************************/
1917 
1918 /*
1919  * TCP initial sequence number picking.  This uses the random number
1920  * generator to pick an initial secret value.  This value is hashed
1921  * along with the TCP endpoint information to provide a unique
1922  * starting point for each pair of TCP endpoints.  This defeats
1923  * attacks which rely on guessing the initial TCP sequence number.
1924  * This algorithm was suggested by Steve Bellovin.
1925  *
1926  * Using a very strong hash was taking an appreciable amount of the total
1927  * TCP connection establishment time, so this is a weaker hash,
1928  * compensated for by changing the secret periodically.
1929  */
1930 
1931 /* F, G and H are basic MD4 functions: selection, majority, parity */
1932 #define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
1933 #define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
1934 #define H(x, y, z) ((x) ^ (y) ^ (z))
1935 
1936 /*
1937  * The generic round function.  The application is so specific that
1938  * we don't bother protecting all the arguments with parens, as is generally
1939  * good macro practice, in favor of extra legibility.
1940  * Rotation is separate from addition to prevent recomputation
1941  */
1942 #define ROUND(f, a, b, c, d, x, s)	\
1943 	(a += f(b, c, d) + x, a = (a << s) | (a >> (32-s)))
1944 #define K1 0
1945 #define K2 013240474631UL
1946 #define K3 015666365641UL
1947 
1948 /*
1949  * Basic cut-down MD4 transform.  Returns only 32 bits of result.
1950  */
1951 static __u32 halfMD4Transform (__u32 const buf[4], __u32 const in[8])
1952 {
1953 	__u32	a = buf[0], b = buf[1], c = buf[2], d = buf[3];
1954 
1955 	/* Round 1 */
1956 	ROUND(F, a, b, c, d, in[0] + K1,  3);
1957 	ROUND(F, d, a, b, c, in[1] + K1,  7);
1958 	ROUND(F, c, d, a, b, in[2] + K1, 11);
1959 	ROUND(F, b, c, d, a, in[3] + K1, 19);
1960 	ROUND(F, a, b, c, d, in[4] + K1,  3);
1961 	ROUND(F, d, a, b, c, in[5] + K1,  7);
1962 	ROUND(F, c, d, a, b, in[6] + K1, 11);
1963 	ROUND(F, b, c, d, a, in[7] + K1, 19);
1964 
1965 	/* Round 2 */
1966 	ROUND(G, a, b, c, d, in[1] + K2,  3);
1967 	ROUND(G, d, a, b, c, in[3] + K2,  5);
1968 	ROUND(G, c, d, a, b, in[5] + K2,  9);
1969 	ROUND(G, b, c, d, a, in[7] + K2, 13);
1970 	ROUND(G, a, b, c, d, in[0] + K2,  3);
1971 	ROUND(G, d, a, b, c, in[2] + K2,  5);
1972 	ROUND(G, c, d, a, b, in[4] + K2,  9);
1973 	ROUND(G, b, c, d, a, in[6] + K2, 13);
1974 
1975 	/* Round 3 */
1976 	ROUND(H, a, b, c, d, in[3] + K3,  3);
1977 	ROUND(H, d, a, b, c, in[7] + K3,  9);
1978 	ROUND(H, c, d, a, b, in[2] + K3, 11);
1979 	ROUND(H, b, c, d, a, in[6] + K3, 15);
1980 	ROUND(H, a, b, c, d, in[1] + K3,  3);
1981 	ROUND(H, d, a, b, c, in[5] + K3,  9);
1982 	ROUND(H, c, d, a, b, in[0] + K3, 11);
1983 	ROUND(H, b, c, d, a, in[4] + K3, 15);
1984 
1985 	return buf[1] + b;	/* "most hashed" word */
1986 	/* Alternative: return sum of all words? */
1987 }
1988 
1989 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1990 
1991 static __u32 twothirdsMD4Transform (__u32 const buf[4], __u32 const in[12])
1992 {
1993 	__u32	a = buf[0], b = buf[1], c = buf[2], d = buf[3];
1994 
1995 	/* Round 1 */
1996 	ROUND(F, a, b, c, d, in[ 0] + K1,  3);
1997 	ROUND(F, d, a, b, c, in[ 1] + K1,  7);
1998 	ROUND(F, c, d, a, b, in[ 2] + K1, 11);
1999 	ROUND(F, b, c, d, a, in[ 3] + K1, 19);
2000 	ROUND(F, a, b, c, d, in[ 4] + K1,  3);
2001 	ROUND(F, d, a, b, c, in[ 5] + K1,  7);
2002 	ROUND(F, c, d, a, b, in[ 6] + K1, 11);
2003 	ROUND(F, b, c, d, a, in[ 7] + K1, 19);
2004 	ROUND(F, a, b, c, d, in[ 8] + K1,  3);
2005 	ROUND(F, d, a, b, c, in[ 9] + K1,  7);
2006 	ROUND(F, c, d, a, b, in[10] + K1, 11);
2007 	ROUND(F, b, c, d, a, in[11] + K1, 19);
2008 
2009 	/* Round 2 */
2010 	ROUND(G, a, b, c, d, in[ 1] + K2,  3);
2011 	ROUND(G, d, a, b, c, in[ 3] + K2,  5);
2012 	ROUND(G, c, d, a, b, in[ 5] + K2,  9);
2013 	ROUND(G, b, c, d, a, in[ 7] + K2, 13);
2014 	ROUND(G, a, b, c, d, in[ 9] + K2,  3);
2015 	ROUND(G, d, a, b, c, in[11] + K2,  5);
2016 	ROUND(G, c, d, a, b, in[ 0] + K2,  9);
2017 	ROUND(G, b, c, d, a, in[ 2] + K2, 13);
2018 	ROUND(G, a, b, c, d, in[ 4] + K2,  3);
2019 	ROUND(G, d, a, b, c, in[ 6] + K2,  5);
2020 	ROUND(G, c, d, a, b, in[ 8] + K2,  9);
2021 	ROUND(G, b, c, d, a, in[10] + K2, 13);
2022 
2023 	/* Round 3 */
2024 	ROUND(H, a, b, c, d, in[ 3] + K3,  3);
2025 	ROUND(H, d, a, b, c, in[ 7] + K3,  9);
2026 	ROUND(H, c, d, a, b, in[11] + K3, 11);
2027 	ROUND(H, b, c, d, a, in[ 2] + K3, 15);
2028 	ROUND(H, a, b, c, d, in[ 6] + K3,  3);
2029 	ROUND(H, d, a, b, c, in[10] + K3,  9);
2030 	ROUND(H, c, d, a, b, in[ 1] + K3, 11);
2031 	ROUND(H, b, c, d, a, in[ 5] + K3, 15);
2032 	ROUND(H, a, b, c, d, in[ 9] + K3,  3);
2033 	ROUND(H, d, a, b, c, in[ 0] + K3,  9);
2034 	ROUND(H, c, d, a, b, in[ 4] + K3, 11);
2035 	ROUND(H, b, c, d, a, in[ 8] + K3, 15);
2036 
2037 	return buf[1] + b;	/* "most hashed" word */
2038 	/* Alternative: return sum of all words? */
2039 }
2040 #endif
2041 
2042 #undef ROUND
2043 #undef F
2044 #undef G
2045 #undef H
2046 #undef K1
2047 #undef K2
2048 #undef K3
2049 
2050 /* This should not be decreased so low that ISNs wrap too fast. */
2051 #define REKEY_INTERVAL	300
2052 /*
2053  * Bit layout of the tcp sequence numbers (before adding current time):
2054  * bit 24-31: increased after every key exchange
2055  * bit 0-23: hash(source,dest)
2056  *
2057  * The implementation is similar to the algorithm described
2058  * in the Appendix of RFC 1185, except that
2059  * - it uses a 1 MHz clock instead of a 250 kHz clock
2060  * - it performs a rekey every 5 minutes, which is equivalent
2061  * 	to a (source,dest) tuple dependent forward jump of the
2062  * 	clock by 0..2^(HASH_BITS+1)
2063  *
2064  * Thus the average ISN wraparound time is 68 minutes instead of
2065  * 4.55 hours.
2066  *
2067  * SMP cleanup and lock avoidance with poor man's RCU.
2068  * 			Manfred Spraul <manfred@colorfullife.com>
2069  *
2070  */
2071 #define COUNT_BITS	8
2072 #define COUNT_MASK	( (1<<COUNT_BITS)-1)
2073 #define HASH_BITS	24
2074 #define HASH_MASK	( (1<<HASH_BITS)-1 )
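/*
 * Sketch of the sequence-number layout described above (example only,
 * not used by the driver): the rekey counter occupies the top
 * COUNT_BITS bits, the endpoint hash the low HASH_BITS bits, and the
 * 1 MHz clock value is added on top.
 */
#if 0	/* example only, not compiled */
static __u32 example_isn_layout(__u32 shifted_count, __u32 endpoint_hash,
				__u32 usec_clock)
{
	/* shifted_count is already (counter & COUNT_MASK) << HASH_BITS,
	 * exactly as stored in struct keydata below. */
	return shifted_count + (endpoint_hash & HASH_MASK) + usec_clock;
}
#endif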
2075 
2076 static struct keydata {
2077 	time_t rekey_time;
2078 	__u32	count;		// already shifted to the final position
2079 	__u32	secret[12];
2080 } ____cacheline_aligned ip_keydata[2];
2081 
2082 static spinlock_t ip_lock = SPIN_LOCK_UNLOCKED;
2083 static unsigned int ip_cnt;
2084 
2085 static struct keydata *__check_and_rekey(time_t time)
2086 {
2087 	struct keydata *keyptr;
2088 	spin_lock_bh(&ip_lock);
2089 	keyptr = &ip_keydata[ip_cnt&1];
2090 	if (!keyptr->rekey_time || (time - keyptr->rekey_time) > REKEY_INTERVAL) {
2091 		keyptr = &ip_keydata[1^(ip_cnt&1)];
2092 		keyptr->rekey_time = time;
2093 		get_random_bytes(keyptr->secret, sizeof(keyptr->secret));
2094 		keyptr->count = (ip_cnt&COUNT_MASK)<<HASH_BITS;
2095 		mb();
2096 		ip_cnt++;
2097 	}
2098 	spin_unlock_bh(&ip_lock);
2099 	return keyptr;
2100 }
2101 
2102 static inline struct keydata *check_and_rekey(time_t time)
2103 {
2104 	struct keydata *keyptr = &ip_keydata[ip_cnt&1];
2105 
2106 	rmb();
2107 	if (!keyptr->rekey_time || (time - keyptr->rekey_time) > REKEY_INTERVAL) {
2108 		keyptr = __check_and_rekey(time);
2109 	}
2110 
2111 	return keyptr;
2112 }
2113 
2114 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2115 __u32 secure_tcpv6_sequence_number(__u32 *saddr, __u32 *daddr,
2116 				   __u16 sport, __u16 dport)
2117 {
2118 	struct timeval 	tv;
2119 	__u32		seq;
2120 	__u32		hash[12];
2121 	struct keydata *keyptr;
2122 
2123 	/* The procedure is the same as for IPv4, but addresses are longer.
2124 	 * Thus we must use twothirdsMD4Transform.
2125 	 */
2126 
2127 	do_gettimeofday(&tv);	/* We need the usecs below... */
2128 	keyptr = check_and_rekey(tv.tv_sec);
2129 
2130 	memcpy(hash, saddr, 16);
2131 	hash[4]=(sport << 16) + dport;
2132 	memcpy(&hash[5],keyptr->secret,sizeof(__u32)*7);
2133 
2134 	seq = twothirdsMD4Transform(daddr, hash) & HASH_MASK;
2135 	seq += keyptr->count;
2136 	seq += tv.tv_usec + tv.tv_sec*1000000;
2137 
2138 	return seq;
2139 }
2140 
2141 __u32 secure_ipv6_id(__u32 *daddr)
2142 {
2143 	struct keydata *keyptr;
2144 
2145 	keyptr = check_and_rekey(CURRENT_TIME);
2146 
2147 	return halfMD4Transform(daddr, keyptr->secret);
2148 }
2149 
2150 #endif
2151 
2152 
2153 __u32 secure_tcp_sequence_number(__u32 saddr, __u32 daddr,
2154 				 __u16 sport, __u16 dport)
2155 {
2156 	struct timeval 	tv;
2157 	__u32		seq;
2158 	__u32	hash[4];
2159 	struct keydata *keyptr;
2160 
2161 	/*
2162 	 * Pick a random secret every REKEY_INTERVAL seconds.
2163 	 */
2164 	do_gettimeofday(&tv);	/* We need the usecs below... */
2165 	keyptr = check_and_rekey(tv.tv_sec);
2166 
2167 	/*
2168 	 *  Pick a unique starting offset for each set of TCP connection endpoints
2169 	 *  (saddr, daddr, sport, dport).
2170 	 *  Note that the words are placed into the starting vector, which is
2171 	 *  then mixed with a partial MD4 over random data.
2172 	 */
2173 	hash[0]=saddr;
2174 	hash[1]=daddr;
2175 	hash[2]=(sport << 16) + dport;
2176 	hash[3]=keyptr->secret[11];
2177 
2178 	seq = halfMD4Transform(hash, keyptr->secret) & HASH_MASK;
2179 	seq += keyptr->count;
2180 	/*
2181 	 *	As close as possible to RFC 793, which
2182 	 *	suggests using a 250 kHz clock.
2183 	 *	Further reading shows this assumes 2 Mb/s networks.
2184 	 *	For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
2185 	 *	That's funny, Linux has one built in!  Use it!
2186 	 *	(Networks are faster now - should this be increased?)
2187 	 */
2188 	seq += tv.tv_usec + tv.tv_sec*1000000;
2189 #if 0
2190 	printk("init_seq(%lx, %lx, %d, %d) = %d\n",
2191 	       saddr, daddr, sport, dport, seq);
2192 #endif
2193 	return seq;
2194 }
2195 
2196 /*  The code below is shamelessly stolen from secure_tcp_sequence_number().
2197  *  All blames to Andrey V. Savochkin <saw@msu.ru>.
2198  */
2199 __u32 secure_ip_id(__u32 daddr)
2200 {
2201 	struct keydata *keyptr;
2202 	__u32 hash[4];
2203 
2204 	keyptr = check_and_rekey(CURRENT_TIME);
2205 
2206 	/*
2207 	 *  Pick a unique starting offset for each IP destination.
2208 	 *  The dest ip address is placed in the starting vector,
2209 	 *  which is then hashed with random data.
2210 	 */
2211 	hash[0] = daddr;
2212 	hash[1] = keyptr->secret[9];
2213 	hash[2] = keyptr->secret[10];
2214 	hash[3] = keyptr->secret[11];
2215 
2216 	return halfMD4Transform(hash, keyptr->secret);
2217 }
2218 
2219 #ifdef CONFIG_SYN_COOKIES
2220 /*
2221  * Secure SYN cookie computation. This is the algorithm worked out by
2222  * Dan Bernstein and Eric Schenk.
2223  *
2224  * For Linux I implement the 1 minute counter by looking at the jiffies clock.
2225  * The count is passed in as a parameter, so this code doesn't much care.
2226  */
2227 
2228 #define COOKIEBITS 24	/* Upper bits store count */
2229 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
2230 
2231 static int	syncookie_init;
2232 static __u32	syncookie_secret[2][16-3+HASH_BUFFER_SIZE];
2233 
2234 __u32 secure_tcp_syn_cookie(__u32 saddr, __u32 daddr, __u16 sport,
2235 		__u16 dport, __u32 sseq, __u32 count, __u32 data)
2236 {
2237 	__u32 	tmp[16 + HASH_BUFFER_SIZE + HASH_EXTRA_SIZE];
2238 	__u32	seq;
2239 
2240 	/*
2241 	 * Pick two random secrets the first time we need a cookie.
2242 	 */
2243 	if (syncookie_init == 0) {
2244 		get_random_bytes(syncookie_secret, sizeof(syncookie_secret));
2245 		syncookie_init = 1;
2246 	}
2247 
2248 	/*
2249 	 * Compute the secure sequence number.
2250 	 * The output should be:
2251    	 *   HASH(sec1,saddr,sport,daddr,dport,sec1) + sseq + (count * 2^24)
2252 	 *      + (HASH(sec2,saddr,sport,daddr,dport,count,sec2) % 2^24).
2253 	 * Where sseq is their sequence number and count increases every
2254 	 * minute by 1.
2255 	 * As an extra hack, we add a small "data" value that encodes the
2256 	 * MSS into the second hash value.
2257 	 */
2258 
2259 	memcpy(tmp+3, syncookie_secret[0], sizeof(syncookie_secret[0]));
2260 	tmp[0]=saddr;
2261 	tmp[1]=daddr;
2262 	tmp[2]=(sport << 16) + dport;
2263 	HASH_TRANSFORM(tmp+16, tmp);
2264 	seq = tmp[17] + sseq + (count << COOKIEBITS);
2265 
2266 	memcpy(tmp+3, syncookie_secret[1], sizeof(syncookie_secret[1]));
2267 	tmp[0]=saddr;
2268 	tmp[1]=daddr;
2269 	tmp[2]=(sport << 16) + dport;
2270 	tmp[3] = count;	/* minute counter */
2271 	HASH_TRANSFORM(tmp+16, tmp);
2272 
2273 	/* Add in the second hash and the data */
2274 	return seq + ((tmp[17] + data) & COOKIEMASK);
2275 }
2276 
2277 /*
2278  * This retrieves the small "data" value from the syncookie.
2279  * If the syncookie is bad, the data returned will be out of
2280  * range.  This must be checked by the caller.
2281  *
2282  * The count value used to generate the cookie must be within
2283  * "maxdiff" if the current (passed-in) "count".  The return value
2284  * is (__u32)-1 if this test fails.
2285  */
2286 __u32 check_tcp_syn_cookie(__u32 cookie, __u32 saddr, __u32 daddr, __u16 sport,
2287 		__u16 dport, __u32 sseq, __u32 count, __u32 maxdiff)
2288 {
2289 	__u32 	tmp[16 + HASH_BUFFER_SIZE + HASH_EXTRA_SIZE];
2290 	__u32	diff;
2291 
2292 	if (syncookie_init == 0)
2293 		return (__u32)-1;	/* Well, duh! */
2294 
2295 	/* Strip away the layers from the cookie */
2296 	memcpy(tmp+3, syncookie_secret[0], sizeof(syncookie_secret[0]));
2297 	tmp[0]=saddr;
2298 	tmp[1]=daddr;
2299 	tmp[2]=(sport << 16) + dport;
2300 	HASH_TRANSFORM(tmp+16, tmp);
2301 	cookie -= tmp[17] + sseq;
2302 	/* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
2303 
2304 	diff = (count - (cookie >> COOKIEBITS)) & ((__u32)-1 >> COOKIEBITS);
2305 	if (diff >= maxdiff)
2306 		return (__u32)-1;
2307 
2308 	memcpy(tmp+3, syncookie_secret[1], sizeof(syncookie_secret[1]));
2309 	tmp[0] = saddr;
2310 	tmp[1] = daddr;
2311 	tmp[2] = (sport << 16) + dport;
2312 	tmp[3] = count - diff;	/* minute counter */
2313 	HASH_TRANSFORM(tmp+16, tmp);
2314 
2315 	return (cookie - tmp[17]) & COOKIEMASK;	/* Leaving the data behind */
2316 }
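/*
 * Round-trip sketch (example only; the tolerance and the "data" value
 * are made up): a cookie produced by secure_tcp_syn_cookie() gives its
 * "data" back through check_tcp_syn_cookie(), provided the minute
 * counter has not advanced by more than "maxdiff" in the meantime.
 */
#if 0	/* example only, not compiled */
static __u32 example_cookie_roundtrip(__u32 saddr, __u32 daddr,
				      __u16 sport, __u16 dport, __u32 sseq)
{
	__u32 count = jiffies / (HZ * 60);	/* 1-minute counter */
	__u32 data = 2;				/* e.g. an MSS table index */
	__u32 cookie;

	cookie = secure_tcp_syn_cookie(saddr, daddr, sport, dport,
				       sseq, count, data);
	/* Later, when the peer's ACK arrives: returns "data", or
	 * (__u32)-1 if the cookie is stale or forged. */
	return check_tcp_syn_cookie(cookie, saddr, daddr, sport, dport,
				    sseq, count, 4 /* minutes of slack */);
}
#endif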
2317 #endif
2318 
2319 
2320 
2321 #ifndef CONFIG_ARCH_S390
2322 EXPORT_SYMBOL(add_keyboard_randomness);
2323 EXPORT_SYMBOL(add_mouse_randomness);
2324 EXPORT_SYMBOL(add_interrupt_randomness);
2325 #endif
2326 EXPORT_SYMBOL(add_blkdev_randomness);
2327 EXPORT_SYMBOL(batch_entropy_store);
2328 EXPORT_SYMBOL(generate_random_uuid);
2329 
2330