/*
 * Copyright (C) 2005, 2006 IBM Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.2, revision 1.0.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include "tpm.h"

enum tis_access {
        TPM_ACCESS_VALID = 0x80,
        TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
        TPM_ACCESS_REQUEST_PENDING = 0x04,
        TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
        TPM_STS_VALID = 0x80,
        TPM_STS_COMMAND_READY = 0x40,
        TPM_STS_GO = 0x20,
        TPM_STS_DATA_AVAIL = 0x10,
        TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
        TPM_GLOBAL_INT_ENABLE = 0x80000000,
        TPM_INTF_BURST_COUNT_STATIC = 0x100,
        TPM_INTF_CMD_READY_INT = 0x080,
        TPM_INTF_INT_EDGE_FALLING = 0x040,
        TPM_INTF_INT_EDGE_RISING = 0x020,
        TPM_INTF_INT_LEVEL_LOW = 0x010,
        TPM_INTF_INT_LEVEL_HIGH = 0x008,
        TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
        TPM_INTF_STS_VALID_INT = 0x002,
        TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
        TIS_MEM_BASE = 0xFED40000,
        TIS_MEM_LEN = 0x5000,
        TIS_SHORT_TIMEOUT = 750,        /* ms */
        TIS_LONG_TIMEOUT = 2000,        /* 2 sec */
};

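/*
 * Register offsets within the TIS MMIO window; locality l occupies the
 * 4 KiB page at offset (l << 12), so each macro selects the register for
 * the requested locality.
 */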
#define TPM_ACCESS(l)                   (0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l)               (0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l)               (0x000C | ((l) << 12))
#define TPM_INT_STATUS(l)               (0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l)                (0x0014 | ((l) << 12))
#define TPM_STS(l)                      (0x0018 | ((l) << 12))
#define TPM_DATA_FIFO(l)                (0x0024 | ((l) << 12))

#define TPM_DID_VID(l)                  (0x0F00 | ((l) << 12))
#define TPM_RID(l)                      (0x0F04 | ((l) << 12))

static LIST_HEAD(tis_chips);
static DEFINE_MUTEX(tis_lock);

#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
{
        struct acpi_device *acpi = pnp_acpi_device(dev);
        struct acpi_hardware_id *id;

        list_for_each_entry(id, &acpi->pnp.ids, list) {
                if (!strcmp("INTC0102", id->id))
                        return 1;
        }

        return 0;
}
#else
static inline int is_itpm(struct pnp_dev *dev)
{
        return 0;
}
#endif

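/*
 * Returns l if locality l is currently active (ACCESS register valid and
 * activeLocality set) and caches it in chip->vendor.locality; returns -1
 * otherwise.
 */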
static int check_locality(struct tpm_chip *chip, int l)
{
        if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
             (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
            (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
                return chip->vendor.locality = l;

        return -1;
}

static void release_locality(struct tpm_chip *chip, int l, int force)
{
        if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
                      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
            (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
                iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
                         chip->vendor.iobase + TPM_ACCESS(l));
}

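/*
 * Requests use of locality l and waits up to timeout_a for the TPM to
 * grant it, either sleeping on int_queue (interrupt mode) or polling.
 * Returns l on success, -1 on timeout.
 */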
static int request_locality(struct tpm_chip *chip, int l)
{
        unsigned long stop, timeout;
        long rc;

        if (check_locality(chip, l) >= 0)
                return l;

        iowrite8(TPM_ACCESS_REQUEST_USE,
                 chip->vendor.iobase + TPM_ACCESS(l));

        stop = jiffies + chip->vendor.timeout_a;

        if (chip->vendor.irq) {
again:
                timeout = stop - jiffies;
                if ((long)timeout <= 0)
                        return -1;
                rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
                                                      (check_locality
                                                       (chip, l) >= 0),
                                                      timeout);
                if (rc > 0)
                        return l;
                if (rc == -ERESTARTSYS && freezing(current)) {
                        clear_thread_flag(TIF_SIGPENDING);
                        goto again;
                }
        } else {
                /* wait for burstcount */
                do {
                        if (check_locality(chip, l) >= 0)
                                return l;
                        msleep(TPM_TIMEOUT);
                } while (time_before(jiffies, stop));
        }
        return -1;
}

static u8 tpm_tis_status(struct tpm_chip *chip)
{
        return ioread8(chip->vendor.iobase +
                       TPM_STS(chip->vendor.locality));
}

static void tpm_tis_ready(struct tpm_chip *chip)
{
        /* this causes the current command to be aborted */
        iowrite8(TPM_STS_COMMAND_READY,
                 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}

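/*
 * Reads the 16-bit burstCount field of the STS register, i.e. how many
 * bytes the FIFO can transfer without further handshaking, polling until
 * it is non-zero or timeout_d expires (-EBUSY on timeout).
 */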
static int get_burstcount(struct tpm_chip *chip)
{
        unsigned long stop;
        int burstcnt;

        /* wait for burstcount */
        /* which timeout value, spec has 2 answers (c & d) */
        stop = jiffies + chip->vendor.timeout_d;
        do {
                burstcnt = ioread8(chip->vendor.iobase +
                                   TPM_STS(chip->vendor.locality) + 1);
                burstcnt += ioread8(chip->vendor.iobase +
                                    TPM_STS(chip->vendor.locality) +
                                    2) << 8;
                if (burstcnt)
                        return burstcnt;
                msleep(TPM_TIMEOUT);
        } while (time_before(jiffies, stop));
        return -EBUSY;
}

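/*
 * Drains up to count bytes from the data FIFO into buf, waiting for
 * dataAvail/stsValid before each burst; returns the number of bytes
 * actually read, which may be short if the TPM stops providing data.
 */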
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0, burstcnt;
        while (size < count &&
               wait_for_tpm_stat(chip,
                                 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
                                 chip->vendor.timeout_c,
                                 &chip->vendor.read_queue)
               == 0) {
                burstcnt = get_burstcount(chip);
                for (; burstcnt > 0 && size < count; burstcnt--)
                        buf[size++] = ioread8(chip->vendor.iobase +
                                              TPM_DATA_FIFO(chip->vendor.
                                                            locality));
        }
        return size;
}

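/*
 * Receives a complete response: reads the 10-byte header, takes the total
 * length from the big-endian paramsize field at offset 2, reads the
 * remainder, and fails with -EIO if the TPM still reports data available.
 */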
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0;
        int expected, status;

        if (count < TPM_HEADER_SIZE) {
                size = -EIO;
                goto out;
        }

        /* read first 10 bytes, including tag, paramsize, and result */
        if ((size =
             recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
                dev_err(chip->dev, "Unable to read header\n");
                goto out;
        }

        expected = be32_to_cpu(*(__be32 *) (buf + 2));
        if (expected > count) {
                size = -EIO;
                goto out;
        }

        if ((size +=
             recv_data(chip, &buf[TPM_HEADER_SIZE],
                       expected - TPM_HEADER_SIZE)) < expected) {
                dev_err(chip->dev, "Unable to read remainder of result\n");
                size = -ETIME;
                goto out;
        }

        wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                          &chip->vendor.int_queue);
        status = tpm_tis_status(chip);
        if (status & TPM_STS_DATA_AVAIL) {      /* retry? */
                dev_err(chip->dev, "Error left over data\n");
                size = -EIO;
                goto out;
        }

out:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return size;
}

static bool itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
        int rc, status, burstcnt;
        size_t count = 0;

        if (request_locality(chip, 0) < 0)
                return -EBUSY;

        status = tpm_tis_status(chip);
        if ((status & TPM_STS_COMMAND_READY) == 0) {
                tpm_tis_ready(chip);
                if (wait_for_tpm_stat
                    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
                     &chip->vendor.int_queue) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }

        while (count < len - 1) {
                burstcnt = get_burstcount(chip);
                for (; burstcnt > 0 && count < len - 1; burstcnt--) {
                        iowrite8(buf[count], chip->vendor.iobase +
                                 TPM_DATA_FIFO(chip->vendor.locality));
                        count++;
                }

                wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                                  &chip->vendor.int_queue);
                status = tpm_tis_status(chip);
                if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
                        rc = -EIO;
                        goto out_err;
                }
        }

        /* write last byte */
        iowrite8(buf[count],
                 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
        wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                          &chip->vendor.int_queue);
        status = tpm_tis_status(chip);
        if ((status & TPM_STS_DATA_EXPECT) != 0) {
                rc = -EIO;
                goto out_err;
        }

        return 0;

out_err:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return rc;
}

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
        int rc;
        u32 ordinal;

        rc = tpm_tis_send_data(chip, buf, len);
        if (rc < 0)
                return rc;

        /* go and do it */
        iowrite8(TPM_STS_GO,
                 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

        if (chip->vendor.irq) {
                ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
                if (wait_for_tpm_stat
                    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
                     tpm_calc_ordinal_duration(chip, ordinal),
                     &chip->vendor.read_queue) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }
        return len;
out_err:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return rc;
}

/*
 * Early probing for iTPM with STS_DATA_EXPECT flaw.
 * Try sending command without itpm flag set and if that
 * fails, repeat with itpm flag set.
 */
static int probe_itpm(struct tpm_chip *chip)
{
        int rc = 0;
        u8 cmd_getticks[] = {
                0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
                0x00, 0x00, 0x00, 0xf1
        };
        size_t len = sizeof(cmd_getticks);
        bool rem_itpm = itpm;
        u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0));

        /* probe only iTPMs */
        if (vendor != TPM_VID_INTEL)
                return 0;

        itpm = 0;

        rc = tpm_tis_send_data(chip, cmd_getticks, len);
        if (rc == 0)
                goto out;

        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);

        itpm = 1;

        rc = tpm_tis_send_data(chip, cmd_getticks, len);
        if (rc == 0) {
                dev_info(chip->dev, "Detected an iTPM.\n");
                rc = 1;
        } else
                rc = -EFAULT;

out:
        itpm = rem_itpm;
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);

        return rc;
}

static const struct file_operations tis_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .open = tpm_open,
        .read = tpm_read,
        .write = tpm_write,
        .release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
                   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);

static struct attribute *tis_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_enabled.attr,
        &dev_attr_active.attr,
        &dev_attr_owned.attr,
        &dev_attr_temp_deactivated.attr,
        &dev_attr_caps.attr,
        &dev_attr_cancel.attr,
        &dev_attr_durations.attr,
        &dev_attr_timeouts.attr, NULL,
};

static struct attribute_group tis_attr_grp = {
        .attrs = tis_attrs
};

static struct tpm_vendor_specific tpm_tis = {
        .status = tpm_tis_status,
        .recv = tpm_tis_recv,
        .send = tpm_tis_send,
        .cancel = tpm_tis_ready,
        .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_canceled = TPM_STS_COMMAND_READY,
        .attr_group = &tis_attr_grp,
        .miscdev = {
                    .fops = &tis_ops,},
};

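/*
 * Probe-time interrupt handler: records the irq that actually fired in
 * chip->vendor.probed_irq and acknowledges it, letting tpm_tis_init
 * discover a working interrupt vector.
 */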
static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;

        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));

        if (interrupt == 0)
                return IRQ_NONE;

        chip->vendor.probed_irq = irq;

        /* Clear interrupts handled with TPM_EOI */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}

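/*
 * Runtime interrupt handler: wakes the wait queue matching the asserted
 * cause (data available, locality change, command ready, stsValid), then
 * acknowledges the interrupt; the trailing read of TPM_INT_STATUS is
 * intended to flush the posted acknowledge write.
 */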
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;
        int i;

        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));

        if (interrupt == 0)
                return IRQ_NONE;

        if (interrupt & TPM_INTF_DATA_AVAIL_INT)
                wake_up_interruptible(&chip->vendor.read_queue);
        if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
                for (i = 0; i < 5; i++)
                        if (check_locality(chip, i) >= 0)
                                break;
        if (interrupt &
            (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
             TPM_INTF_CMD_READY_INT))
                wake_up_interruptible(&chip->vendor.int_queue);

        /* Clear interrupts handled with TPM_EOI */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}

static bool interrupts = 1;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");

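/*
 * Maps the TIS register window, claims locality 0, logs the device ID and
 * interface capabilities, fetches timeouts and runs the self test, then
 * configures interrupts using either the caller-supplied vector or one
 * discovered by probing with tis_int_probe.
 */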
static int tpm_tis_init(struct device *dev, resource_size_t start,
                        resource_size_t len, unsigned int irq)
{
        u32 vendor, intfcaps, intmask;
        int rc, i, irq_s, irq_e, probe;
        struct tpm_chip *chip;

        if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
                return -ENODEV;

        chip->vendor.iobase = ioremap(start, len);
        if (!chip->vendor.iobase) {
                rc = -EIO;
                goto out_err;
        }

        /* Default timeouts */
        chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
        chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
        chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
        chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

        if (request_locality(chip, 0) != 0) {
                rc = -ENODEV;
                goto out_err;
        }

        vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

        dev_info(dev,
                 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
                 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

        if (!itpm) {
                probe = probe_itpm(chip);
                if (probe < 0) {
                        rc = -ENODEV;
                        goto out_err;
                }
                itpm = (probe == 0) ? 0 : 1;
        }

        if (itpm)
                dev_info(dev, "Intel iTPM workaround enabled\n");


        /* Figure out the capabilities */
        intfcaps =
            ioread32(chip->vendor.iobase +
                     TPM_INTF_CAPS(chip->vendor.locality));
        dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
                intfcaps);
        if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
                dev_dbg(dev, "\tBurst Count Static\n");
        if (intfcaps & TPM_INTF_CMD_READY_INT)
                dev_dbg(dev, "\tCommand Ready Int Support\n");
        if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
                dev_dbg(dev, "\tInterrupt Edge Falling\n");
        if (intfcaps & TPM_INTF_INT_EDGE_RISING)
                dev_dbg(dev, "\tInterrupt Edge Rising\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
                dev_dbg(dev, "\tInterrupt Level Low\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
                dev_dbg(dev, "\tInterrupt Level High\n");
        if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
                dev_dbg(dev, "\tLocality Change Int Support\n");
        if (intfcaps & TPM_INTF_STS_VALID_INT)
                dev_dbg(dev, "\tSts Valid Int Support\n");
        if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
                dev_dbg(dev, "\tData Avail Int Support\n");

        /* get the timeouts before testing for irqs */
        if (tpm_get_timeouts(chip)) {
                dev_err(dev, "Could not get TPM timeouts and durations\n");
                rc = -ENODEV;
                goto out_err;
        }

        if (tpm_do_selftest(chip)) {
                dev_err(dev, "TPM self test failed\n");
                rc = -ENODEV;
                goto out_err;
        }

        /* INTERRUPT Setup */
        init_waitqueue_head(&chip->vendor.read_queue);
        init_waitqueue_head(&chip->vendor.int_queue);

        intmask =
            ioread32(chip->vendor.iobase +
                     TPM_INT_ENABLE(chip->vendor.locality));

        intmask |= TPM_INTF_CMD_READY_INT
            | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
            | TPM_INTF_STS_VALID_INT;

        iowrite32(intmask,
                  chip->vendor.iobase +
                  TPM_INT_ENABLE(chip->vendor.locality));
        if (interrupts)
                chip->vendor.irq = irq;
        if (interrupts && !chip->vendor.irq) {
                irq_s =
                    ioread8(chip->vendor.iobase +
                            TPM_INT_VECTOR(chip->vendor.locality));
                if (irq_s) {
                        irq_e = irq_s;
                } else {
                        irq_s = 3;
                        irq_e = 15;
                }

                for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
                        iowrite8(i, chip->vendor.iobase +
                                 TPM_INT_VECTOR(chip->vendor.locality));
                        if (request_irq
                            (i, tis_int_probe, IRQF_SHARED,
                             chip->vendor.miscdev.name, chip) != 0) {
                                dev_info(chip->dev,
                                         "Unable to request irq: %d for probe\n",
                                         i);
                                continue;
                        }

                        /* Clear all existing */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* Turn on */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));

                        chip->vendor.probed_irq = 0;

                        /* Generate Interrupts */
                        tpm_gen_interrupt(chip);

                        chip->vendor.irq = chip->vendor.probed_irq;

                        /* free_irq will call into tis_int_probe;
                           clear all irqs we haven't seen while doing
                           tpm_gen_interrupt */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* Turn off */
                        iowrite32(intmask,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                        free_irq(i, chip);
                }
        }
        if (chip->vendor.irq) {
                iowrite8(chip->vendor.irq,
                         chip->vendor.iobase +
                         TPM_INT_VECTOR(chip->vendor.locality));
                if (request_irq
                    (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
                     chip->vendor.miscdev.name, chip) != 0) {
                        dev_info(chip->dev,
                                 "Unable to request irq: %d for use\n",
                                 chip->vendor.irq);
                        chip->vendor.irq = 0;
                } else {
                        /* Clear all existing */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* Turn on */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                }
        }

        INIT_LIST_HEAD(&chip->vendor.list);
        mutex_lock(&tis_lock);
        list_add(&chip->vendor.list, &tis_chips);
        mutex_unlock(&tis_lock);


        return 0;
out_err:
        if (chip->vendor.iobase)
                iounmap(chip->vendor.iobase);
        tpm_remove_hardware(chip->dev);
        return rc;
}

static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
{
        u32 intmask;

        /* reenable interrupts that device may have lost or
           BIOS/firmware may have disabled */
        iowrite8(chip->vendor.irq, chip->vendor.iobase +
                 TPM_INT_VECTOR(chip->vendor.locality));

        intmask =
            ioread32(chip->vendor.iobase +
                     TPM_INT_ENABLE(chip->vendor.locality));

        intmask |= TPM_INTF_CMD_READY_INT
            | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
            | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;

        iowrite32(intmask,
                  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}


#ifdef CONFIG_PNP
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
                                      const struct pnp_device_id *pnp_id)
{
        resource_size_t start, len;
        unsigned int irq = 0;

        start = pnp_mem_start(pnp_dev, 0);
        len = pnp_mem_len(pnp_dev, 0);

        if (pnp_irq_valid(pnp_dev, 0))
                irq = pnp_irq(pnp_dev, 0);
        else
                interrupts = 0;

        if (is_itpm(pnp_dev))
                itpm = 1;

        return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}

static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
        return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
        struct tpm_chip *chip = pnp_get_drvdata(dev);
        int ret;

        if (chip->vendor.irq)
                tpm_tis_reenable_interrupts(chip);

        ret = tpm_pm_resume(&dev->dev);
        if (!ret)
                tpm_do_selftest(chip);

        return ret;
}

static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
        {"PNP0C31", 0},         /* TPM */
        {"ATM1200", 0},         /* Atmel */
        {"IFX0102", 0},         /* Infineon */
        {"BCM0101", 0},         /* Broadcom */
        {"BCM0102", 0},         /* Broadcom */
        {"NSC1200", 0},         /* National */
        {"ICO0102", 0},         /* Intel */
        /* Add new here */
        {"", 0},                /* User Specified */
        {"", 0}                 /* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
        struct tpm_chip *chip = pnp_get_drvdata(dev);

        tpm_dev_vendor_release(chip);

        kfree(chip);
}


static struct pnp_driver tis_pnp_driver = {
        .name = "tpm_tis",
        .id_table = tpm_pnp_tbl,
        .probe = tpm_tis_pnp_init,
        .suspend = tpm_tis_pnp_suspend,
        .resume = tpm_tis_pnp_resume,
        .remove = tpm_tis_pnp_remove,
};

#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
                    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif
static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
{
        return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_resume(struct platform_device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(&dev->dev);

        if (chip->vendor.irq)
                tpm_tis_reenable_interrupts(chip);

        return tpm_pm_resume(&dev->dev);
}
static struct platform_driver tis_drv = {
        .driver = {
                   .name = "tpm_tis",
                   .owner = THIS_MODULE,
                   },
        .suspend = tpm_tis_suspend,
        .resume = tpm_tis_resume,
};

static struct platform_device *pdev;

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
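/*
 * Module init: by default the driver binds through PNP/ACPI enumeration;
 * with force=1 it registers a platform device instead and drives the TIS
 * window at the fixed legacy address TIS_MEM_BASE.
 */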
static int __init init_tis(void)
{
        int rc;
#ifdef CONFIG_PNP
        if (!force)
                return pnp_register_driver(&tis_pnp_driver);
#endif

        rc = platform_driver_register(&tis_drv);
        if (rc < 0)
                return rc;
        pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);
        rc = tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0);
        if (rc != 0) {
                platform_device_unregister(pdev);
                platform_driver_unregister(&tis_drv);
        }
        return rc;
}

static void __exit cleanup_tis(void)
{
        struct tpm_vendor_specific *i, *j;
        struct tpm_chip *chip;
        mutex_lock(&tis_lock);
        list_for_each_entry_safe(i, j, &tis_chips, list) {
                chip = to_tpm_chip(i);
                tpm_remove_hardware(chip->dev);
                iowrite32(~TPM_GLOBAL_INT_ENABLE &
                          ioread32(chip->vendor.iobase +
                                   TPM_INT_ENABLE(chip->vendor.
                                                  locality)),
                          chip->vendor.iobase +
                          TPM_INT_ENABLE(chip->vendor.locality));
                release_locality(chip, chip->vendor.locality, 1);
                if (chip->vendor.irq)
                        free_irq(chip->vendor.irq, chip);
                iounmap(i->iobase);
                list_del(&i->list);
        }
        mutex_unlock(&tis_lock);
#ifdef CONFIG_PNP
        if (!force) {
                pnp_unregister_driver(&tis_pnp_driver);
                return;
        }
#endif
        platform_device_unregister(pdev);
        platform_driver_unregister(&tis_drv);
}

module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");