/*
	Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
	(c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>

	derived from

	Hardware driver for the AMD 768 Random Number Generator (RNG)
	(c) Copyright 2001 Red Hat Inc <alan@redhat.com>

	derived from

	Hardware driver for Intel i810 Random Number Generator (RNG)
	Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
	Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>

	Please read Documentation/hw_random.txt for details on use.

	----------------------------------------------------------
	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

 */


#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/miscdevice.h>
#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/delay.h>

#ifdef __i386__
#include <asm/msr.h>
#include <asm/cpufeature.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>


/*
 * core module and version information
 */
#define RNG_VERSION "1.0.0"
#define RNG_MODULE_NAME "hw_random"
#define RNG_DRIVER_NAME RNG_MODULE_NAME " hardware driver " RNG_VERSION
#define PFX RNG_MODULE_NAME ": "


/*
 * debugging macros
 */
#undef RNG_DEBUG /* define to enable copious debugging info */

#ifdef RNG_DEBUG
/* note: prints function name for you */
#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
#else
#define DPRINTK(fmt, args...)
#endif

#define RNG_NDEBUG /* define to disable lightweight runtime checks */
#ifdef RNG_NDEBUG
#define assert(expr)
#else
#define assert(expr) \
        if(!(expr)) {                                           \
        printk( "Assertion failed! %s,%s,%s,line=%d\n",         \
        #expr,__FILE__,__FUNCTION__,__LINE__);                  \
        }
#endif
#define RNG_MISCDEV_MINOR               183 /* official */

static int rng_dev_open (struct inode *inode, struct file *filp);
static ssize_t rng_dev_read (struct file *filp, char *buf, size_t size,
                             loff_t *offp);

static int __init intel_init (struct pci_dev *dev);
static void intel_cleanup(void);
static unsigned int intel_data_present (void);
static u32 intel_data_read (void);

static int __init amd_init (struct pci_dev *dev);
static void amd_cleanup(void);
static unsigned int amd_data_present (void);
static u32 amd_data_read (void);

static int __init via_init(struct pci_dev *dev);
static void via_cleanup(void);
static unsigned int via_data_present (void);
static u32 via_data_read (void);

struct rng_operations {
        int (*init) (struct pci_dev *dev);
        void (*cleanup) (void);
        unsigned int (*data_present) (void);
        u32 (*data_read) (void);
        unsigned int n_bytes;   /* number of bytes per ->data_read */
};
static struct rng_operations *rng_ops;

static struct file_operations rng_chrdev_ops = {
        .owner          = THIS_MODULE,
        .open           = rng_dev_open,
        .read           = rng_dev_read,
};


static struct miscdevice rng_miscdev = {
        RNG_MISCDEV_MINOR,
        RNG_MODULE_NAME,
        &rng_chrdev_ops,
};

enum {
        rng_hw_none,
        rng_hw_intel,
        rng_hw_amd,
        rng_hw_via,
};

static struct rng_operations rng_vendor_ops[] = {
        /* rng_hw_none */
        { },

        /* rng_hw_intel */
        { intel_init, intel_cleanup, intel_data_present,
          intel_data_read, 1 },

        /* rng_hw_amd */
        { amd_init, amd_cleanup, amd_data_present, amd_data_read, 4 },

        /* rng_hw_via */
        { via_init, via_cleanup, via_data_present, via_data_read, 1 },
};

/*
 * Data for PCI driver interface
 *
 * This data only exists for exporting the supported
 * PCI ids via MODULE_DEVICE_TABLE.  We do not actually
 * register a pci_driver, because someone else might one day
 * want to register another driver on the same PCI id.
 */
static struct pci_device_id rng_pci_tbl[] __initdata = {
        { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_amd },
        { 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_amd },

        { 0x8086, 0x2418, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
        { 0x8086, 0x2428, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
        { 0x8086, 0x2448, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
        { 0x8086, 0x244e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
        { 0x8086, 0x245e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },

        { 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE (pci, rng_pci_tbl);


/***********************************************************************
 *
 * Intel RNG operations
 *
 */

/*
 * RNG registers (offsets from rng_mem)
 */
#define INTEL_RNG_HW_STATUS             0
#define INTEL_RNG_PRESENT               0x40
#define INTEL_RNG_ENABLED               0x01
#define INTEL_RNG_STATUS                1
#define INTEL_RNG_DATA_PRESENT          0x01
#define INTEL_RNG_DATA                  2

/*
 * Magic address at which Intel PCI bridges locate the RNG
 */
#define INTEL_RNG_ADDR                  0xFFBC015F
#define INTEL_RNG_ADDR_LEN              3

/* token to our ioremap'd RNG register area */
static void *rng_mem;

static inline u8 intel_hwstatus (void)
{
        assert (rng_mem != NULL);
        return readb (rng_mem + INTEL_RNG_HW_STATUS);
}

static inline u8 intel_hwstatus_set (u8 hw_status)
{
        assert (rng_mem != NULL);
        writeb (hw_status, rng_mem + INTEL_RNG_HW_STATUS);
        return intel_hwstatus ();
}

static unsigned int intel_data_present (void)
{
        assert (rng_mem != NULL);

        return (readb (rng_mem + INTEL_RNG_STATUS) & INTEL_RNG_DATA_PRESENT) ?
                1 : 0;
}

static u32 intel_data_read (void)
{
        assert (rng_mem != NULL);

        return readb (rng_mem + INTEL_RNG_DATA);
}

static int __init intel_init (struct pci_dev *dev)
{
        int rc;
        u8 hw_status;

        DPRINTK ("ENTER\n");

        rng_mem = ioremap (INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN);
        if (rng_mem == NULL) {
                printk (KERN_ERR PFX "cannot ioremap RNG Memory\n");
                rc = -EBUSY;
                goto err_out;
        }

        /* Check for Intel 82802 */
        hw_status = intel_hwstatus ();
        if ((hw_status & INTEL_RNG_PRESENT) == 0) {
                printk (KERN_ERR PFX "RNG not detected\n");
                rc = -ENODEV;
                goto err_out_free_map;
        }

        /* turn RNG h/w on, if it's off */
        if ((hw_status & INTEL_RNG_ENABLED) == 0)
                hw_status = intel_hwstatus_set (hw_status | INTEL_RNG_ENABLED);
        if ((hw_status & INTEL_RNG_ENABLED) == 0) {
                printk (KERN_ERR PFX "cannot enable RNG, aborting\n");
                rc = -EIO;
                goto err_out_free_map;
        }

        DPRINTK ("EXIT, returning 0\n");
        return 0;

err_out_free_map:
        iounmap (rng_mem);
        rng_mem = NULL;
err_out:
        DPRINTK ("EXIT, returning %d\n", rc);
        return rc;
}

static void intel_cleanup(void)
{
        u8 hw_status;

        hw_status = intel_hwstatus ();
        if (hw_status & INTEL_RNG_ENABLED)
                intel_hwstatus_set (hw_status & ~INTEL_RNG_ENABLED);
        else
                printk(KERN_WARNING PFX "unusual: RNG already disabled\n");
        iounmap(rng_mem);
        rng_mem = NULL;
}

/***********************************************************************
 *
 * AMD RNG operations
 *
 */

static u32 pmbase;                      /* PMxx I/O base */
static struct pci_dev *amd_dev;

static unsigned int amd_data_present (void)
{
        return inl(pmbase + 0xF4) & 1;
}


static u32 amd_data_read (void)
{
        return inl(pmbase + 0xF0);
}

static int __init amd_init (struct pci_dev *dev)
{
        int rc;
        u8 rnen;

        DPRINTK ("ENTER\n");

        pci_read_config_dword(dev, 0x58, &pmbase);

        pmbase &= 0x0000FF00;

        if (pmbase == 0) {
                printk (KERN_ERR PFX "power management base not set\n");
                rc = -EIO;
                goto err_out;
        }

        pci_read_config_byte(dev, 0x40, &rnen);
        rnen |= (1 << 7);       /* RNG on */
        pci_write_config_byte(dev, 0x40, rnen);

        pci_read_config_byte(dev, 0x41, &rnen);
        rnen |= (1 << 7);       /* PMIO enable */
        pci_write_config_byte(dev, 0x41, rnen);

        printk(KERN_INFO PFX "AMD768 system management I/O registers at 0x%X.\n",
               pmbase);

        amd_dev = dev;

        DPRINTK ("EXIT, returning 0\n");
        return 0;

err_out:
        DPRINTK ("EXIT, returning %d\n", rc);
        return rc;
}

static void amd_cleanup(void)
{
        u8 rnen;

        pci_read_config_byte(amd_dev, 0x40, &rnen);
        rnen &= ~(1 << 7);      /* RNG off */
        pci_write_config_byte(amd_dev, 0x40, rnen);

        /* FIXME: twiddle pmio, also? */
}

/***********************************************************************
 *
 * VIA RNG operations
 *
 */

enum {
        VIA_STRFILT_CNT_SHIFT   = 16,
        VIA_STRFILT_FAIL        = (1 << 15),
        VIA_STRFILT_ENABLE      = (1 << 14),
        VIA_RAWBITS_ENABLE      = (1 << 13),
        VIA_RNG_ENABLE          = (1 << 6),
        VIA_XSTORE_CNT_MASK     = 0x0F,

        VIA_RNG_CHUNK_8         = 0x00, /* 64 rand bits, 64 stored bits */
        VIA_RNG_CHUNK_4         = 0x01, /* 32 rand bits, 32 stored bits */
        VIA_RNG_CHUNK_4_MASK    = 0xFFFFFFFF,
        VIA_RNG_CHUNK_2         = 0x02, /* 16 rand bits, 32 stored bits */
        VIA_RNG_CHUNK_2_MASK    = 0xFFFF,
        VIA_RNG_CHUNK_1         = 0x03, /* 8 rand bits, 32 stored bits */
        VIA_RNG_CHUNK_1_MASK    = 0xFF,
};
static u32 via_rng_datum;

/*
 * Investigate using the 'rep' prefix to obtain 32 bits of random data
 * in one insn.  The upside is potentially better performance.  The
 * downside is that the instruction is no longer atomic.  Because of
 * that, just like the familiar issues with /dev/random itself, the
 * worst case of a 'rep xstore' could pause a cpu for an unreasonably
 * long time.  In practice, this condition would likely only occur
 * when the hardware is failing.  (or so we hope :))
 * A compiled-out sketch of this idea follows xstore() below.
 *
 * Another possible performance boost may come from simply buffering
 * until we have 4 bytes, thus returning a u32 at a time,
 * instead of the current u8-at-a-time.
 */

static inline u32 xstore(u32 *addr, u32 edx_in)
{
        u32 eax_out;

        asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
                :"=m"(*addr), "=a"(eax_out)
                :"D"(addr), "d"(edx_in));

        return eax_out;
}
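
/*
 * Untested illustration of the 'rep xstore' idea described above: VIA
 * documents that a REP prefix fills ECX bytes at ES:EDI in a single
 * instruction.  The name rep_xstore(), the constraint list, and the
 * interrupt/partial-completion behaviour here are assumptions that
 * would need checking against the PadLock programming guide, so the
 * whole sketch is compiled out and is not wired into rng_vendor_ops[].
 */
#if 0
static inline u32 rep_xstore(u32 *addr, u32 count, u32 edx_in)
{
        u32 eax_out;

        asm volatile(".byte 0xF3,0x0F,0xA7,0xC0 /* rep xstore %%edi */"
                     : "=a" (eax_out), "+c" (count), "+D" (addr)
                     : "d" (edx_in)
                     : "memory");

        return eax_out;         /* copy of MSR_VIA_RNG, as with xstore() */
}
#endif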

static unsigned int via_data_present(void)
{
        u32 bytes_out;

        /* We choose the recommended 1-byte-per-instruction RNG rate,
         * for greater randomness at the expense of speed.  Larger
         * values of 2, 4, or 8 bytes per instruction yield greater
         * speed at lesser randomness.
         *
         * If you change this to another VIA_RNG_CHUNK_n, you must also
         * change the ->n_bytes value for rng_hw_via in the
         * rng_vendor_ops[] table (see the note after this function).
         * VIA_RNG_CHUNK_8 requires further code changes.
         *
         * A copy of MSR_VIA_RNG is placed in eax_out when xstore
         * completes.
         */
        via_rng_datum = 0;      /* paranoia, not really necessary */
        bytes_out = xstore(&via_rng_datum, VIA_RNG_CHUNK_1) & VIA_XSTORE_CNT_MASK;
        if (bytes_out == 0)
                return 0;

        return 1;
}
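
/*
 * For illustration only: if via_data_present() above were switched to
 * VIA_RNG_CHUNK_4, so that each xstore delivered a full 32 random bits,
 * the rng_hw_via entry in rng_vendor_ops[] would have to advertise
 * 4 bytes per ->data_read, mirroring the AMD entry:
 *
 *      { via_init, via_cleanup, via_data_present, via_data_read, 4 },
 *
 * VIA_RNG_CHUNK_8 would need more than that, since the u32-based
 * ->data_read interface cannot carry 64 bits at a time.
 */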

static u32 via_data_read(void)
{
        return via_rng_datum;
}

static int __init via_init(struct pci_dev *dev)
{
        u32 lo, hi, old_lo;

        /* Control the RNG via MSR.  Tread lightly and pay very close
         * attention to values written, as the reserved fields are
         * documented to be "undefined and unpredictable"; but the
         * documentation does not say to write them as zero, so the
         * conservative guess here is to restore the values we find
         * in the register.
         */
        rdmsr(MSR_VIA_RNG, lo, hi);

        old_lo = lo;
        lo &= ~(0x7f << VIA_STRFILT_CNT_SHIFT);
        lo &= ~VIA_XSTORE_CNT_MASK;
        lo &= ~(VIA_STRFILT_ENABLE | VIA_STRFILT_FAIL | VIA_RAWBITS_ENABLE);
        lo |= VIA_RNG_ENABLE;

        if (lo != old_lo)
                wrmsr(MSR_VIA_RNG, lo, hi);

        /* perhaps-unnecessary sanity check; remove after testing if
           unneeded */
        rdmsr(MSR_VIA_RNG, lo, hi);
        if ((lo & VIA_RNG_ENABLE) == 0) {
                printk(KERN_ERR PFX "cannot enable VIA C3 RNG, aborting\n");
                return -ENODEV;
        }

        return 0;
}

static void via_cleanup(void)
{
        u32 lo, hi;

        rdmsr(MSR_VIA_RNG, lo, hi);
        lo &= ~VIA_RNG_ENABLE;
        wrmsr(MSR_VIA_RNG, lo, hi);
}


/***********************************************************************
 *
 * /dev/hwrandom character device handling (major 10, minor 183)
 *
 */

static int rng_dev_open (struct inode *inode, struct file *filp)
{
        /* enforce read-only access to this chrdev */
        if ((filp->f_mode & FMODE_READ) == 0)
                return -EINVAL;
        if (filp->f_mode & FMODE_WRITE)
                return -EINVAL;

        return 0;
}


static ssize_t rng_dev_read (struct file *filp, char *buf, size_t size,
                             loff_t *offp)
{
        static spinlock_t rng_lock = SPIN_LOCK_UNLOCKED;
        unsigned int have_data;
        u32 data = 0;
        ssize_t ret = 0;

        while (size) {
                spin_lock(&rng_lock);

                have_data = 0;
                if (rng_ops->data_present()) {
                        data = rng_ops->data_read();
                        have_data = rng_ops->n_bytes;
                }

                spin_unlock (&rng_lock);

                while (have_data && size) {
                        if (put_user((u8)data, buf++)) {
                                ret = ret ? : -EFAULT;
                                break;
                        }
                        size--;
                        ret++;
                        have_data--;
                        data >>= 8;
                }

                if (filp->f_flags & O_NONBLOCK)
                        return ret ? : -EAGAIN;

                if (need_resched()) {
                        current->state = TASK_INTERRUPTIBLE;
                        schedule_timeout(1);
                } else
                        udelay(200);    /* FIXME: We could poll for 250us? */

                if (signal_pending (current))
                        return ret ? : -ERESTARTSYS;
        }
        return ret;
}



/*
 * rng_init_one - look for and attempt to init a single RNG
 */
static int __init rng_init_one (struct pci_dev *dev)
{
        int rc;

        DPRINTK ("ENTER\n");

        assert(rng_ops != NULL);

        rc = rng_ops->init(dev);
        if (rc)
                goto err_out;

        rc = misc_register (&rng_miscdev);
        if (rc) {
                printk (KERN_ERR PFX "misc device register failed\n");
                goto err_out_cleanup_hw;
        }

        DPRINTK ("EXIT, returning 0\n");
        return 0;

err_out_cleanup_hw:
        rng_ops->cleanup();
err_out:
        DPRINTK ("EXIT, returning %d\n", rc);
        return rc;
}



MODULE_AUTHOR("The Linux Kernel team");
MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");


/*
 * rng_init - initialize RNG module
 */
static int __init rng_init (void)
{
        int rc;
        struct pci_dev *pdev = NULL;
        const struct pci_device_id *ent;

        DPRINTK ("ENTER\n");

        /* Probe for Intel, AMD RNGs */
        while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
                ent = pci_match_device (rng_pci_tbl, pdev);
                if (ent) {
                        rng_ops = &rng_vendor_ops[ent->driver_data];
                        goto match;
                }
        }

#ifdef __i386__
        /* Probe for VIA RNG */
        if (cpu_has_xstore) {
                rng_ops = &rng_vendor_ops[rng_hw_via];
                pdev = NULL;
                goto match;
        }
#endif

        DPRINTK ("EXIT, returning -ENODEV\n");
        return -ENODEV;

match:
        rc = rng_init_one (pdev);
        if (rc)
                return rc;

        printk (KERN_INFO RNG_DRIVER_NAME " loaded\n");

        DPRINTK ("EXIT, returning 0\n");
        return 0;
}


/*
 * rng_cleanup - shutdown RNG module
 */
static void __exit rng_cleanup (void)
{
        DPRINTK ("ENTER\n");

        misc_deregister (&rng_miscdev);

        if (rng_ops->cleanup)
                rng_ops->cleanup();

        DPRINTK ("EXIT\n");
}


module_init (rng_init);
module_exit (rng_cleanup);