// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Processor device driver
 *
 * Copyright (C) 2013,2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "psp-dev.h"

#define MSIX_VECTORS 2

struct sp_pci {
	int msix_count;
	struct msix_entry msix_entry[MSIX_VECTORS];
};
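
/* Tracks the SP device currently designated as the PSP master */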
static struct sp_device *sp_dev_master;

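/*
 * attribute_show() generates a sysfs show function for one PSP security
 * attribute; each function reports a single capability bit as "0" or "1".
 */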
#define attribute_show(name, def)						\
static ssize_t name##_show(struct device *d, struct device_attribute *attr,	\
			   char *buf)						\
{										\
	struct sp_device *sp = dev_get_drvdata(d);				\
	struct psp_device *psp = sp->psp_data;					\
	int bit = PSP_SECURITY_##def << PSP_CAPABILITY_PSP_SECURITY_OFFSET;	\
	return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0);		\
}

attribute_show(fused_part, FUSED_PART)
static DEVICE_ATTR_RO(fused_part);
attribute_show(debug_lock_on, DEBUG_LOCK_ON)
static DEVICE_ATTR_RO(debug_lock_on);
attribute_show(tsme_status, TSME_STATUS)
static DEVICE_ATTR_RO(tsme_status);
attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS)
static DEVICE_ATTR_RO(anti_rollback_status);
attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED)
static DEVICE_ATTR_RO(rpmc_production_enabled);
attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE)
static DEVICE_ATTR_RO(rpmc_spirom_available);
attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE)
static DEVICE_ATTR_RO(hsp_tpm_available);
attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED)
static DEVICE_ATTR_RO(rom_armor_enforced);

static struct attribute *psp_attrs[] = {
	&dev_attr_fused_part.attr,
	&dev_attr_debug_lock_on.attr,
	&dev_attr_tsme_status.attr,
	&dev_attr_anti_rollback_status.attr,
	&dev_attr_rpmc_production_enabled.attr,
	&dev_attr_rpmc_spirom_available.attr,
	&dev_attr_hsp_tpm_available.attr,
	&dev_attr_rom_armor_enforced.attr,
	NULL
};

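/*
 * The security attributes are only made visible when the PSP advertises
 * the security reporting capability.
 */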
static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct sp_device *sp = dev_get_drvdata(dev);
	struct psp_device *psp = sp->psp_data;

	if (psp && (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING))
		return 0444;

	return 0;
}

static struct attribute_group psp_attr_group = {
	.attrs = psp_attrs,
	.is_visible = psp_security_is_visible,
};

static const struct attribute_group *psp_groups[] = {
	&psp_attr_group,
	NULL,
};

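/*
 * Enable MSI-X vectors: vector 0 is used for the PSP and, when a second
 * vector is granted, vector 1 is used for the CCP; otherwise both share
 * vector 0.
 */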
static int sp_get_msix_irqs(struct sp_device *sp)
{
	struct sp_pci *sp_pci = sp->dev_specific;
	struct device *dev = sp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int v, ret;

	for (v = 0; v < ARRAY_SIZE(sp_pci->msix_entry); v++)
		sp_pci->msix_entry[v].entry = v;

	ret = pci_enable_msix_range(pdev, sp_pci->msix_entry, 1, v);
	if (ret < 0)
		return ret;

	sp_pci->msix_count = ret;
	sp->use_tasklet = true;

	sp->psp_irq = sp_pci->msix_entry[0].vector;
	sp->ccp_irq = (sp_pci->msix_count > 1) ? sp_pci->msix_entry[1].vector
					       : sp_pci->msix_entry[0].vector;
	return 0;
}

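/* Fall back to a single MSI interrupt shared by the PSP and the CCP */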
static int sp_get_msi_irq(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		return ret;

	sp->ccp_irq = pdev->irq;
	sp->psp_irq = pdev->irq;

	return 0;
}

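/* Acquire device interrupts, preferring MSI-X and falling back to MSI */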
static int sp_get_irqs(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	int ret;

	ret = sp_get_msix_irqs(sp);
	if (!ret)
		return 0;

	/* Couldn't get MSI-X vectors, try MSI */
	dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
	ret = sp_get_msi_irq(sp);
	if (!ret)
		return 0;

	/* Couldn't get MSI interrupt */
	dev_notice(dev, "could not enable MSI (%d)\n", ret);

	return ret;
}

static void sp_free_irqs(struct sp_device *sp)
{
	struct sp_pci *sp_pci = sp->dev_specific;
	struct device *dev = sp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (sp_pci->msix_count)
		pci_disable_msix(pdev);
	else if (sp->psp_irq)
		pci_disable_msi(pdev);

	sp->ccp_irq = 0;
	sp->psp_irq = 0;
}

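/*
 * A newly probed device takes over as master if it precedes the current
 * master in PCI bus/device/function order.
 */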
static bool sp_pci_is_master(struct sp_device *sp)
{
	struct device *dev_cur, *dev_new;
	struct pci_dev *pdev_cur, *pdev_new;

	dev_new = sp->dev;
	dev_cur = sp_dev_master->dev;

	pdev_new = to_pci_dev(dev_new);
	pdev_cur = to_pci_dev(dev_cur);

	if (pdev_new->bus->number < pdev_cur->bus->number)
		return true;

	if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn))
		return true;

	if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn))
		return true;

	return false;
}

static void psp_set_master(struct sp_device *sp)
{
	if (!sp_dev_master) {
		sp_dev_master = sp;
		return;
	}

	if (sp_pci_is_master(sp))
		sp_dev_master = sp;
}

static struct sp_device *psp_get_master(void)
{
	return sp_dev_master;
}

static void psp_clear_master(struct sp_device *sp)
{
	if (sp == sp_dev_master) {
		sp_dev_master = NULL;
		dev_dbg(sp->dev, "Cleared sp_dev_master\n");
	}
}

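/*
 * Set up a newly probed SP PCI device: map its registers, acquire
 * interrupts, configure the DMA mask (48-bit with a 32-bit fallback) and
 * hand the device off to the common sp_init() path.
 */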
static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sp_device *sp;
	struct sp_pci *sp_pci;
	struct device *dev = &pdev->dev;
	void __iomem * const *iomap_table;
	int bar_mask;
	int ret;

	ret = -ENOMEM;
	sp = sp_alloc_struct(dev);
	if (!sp)
		goto e_err;

	sp_pci = devm_kzalloc(dev, sizeof(*sp_pci), GFP_KERNEL);
	if (!sp_pci)
		goto e_err;

	sp->dev_specific = sp_pci;
	sp->dev_vdata = (struct sp_dev_vdata *)id->driver_data;
	if (!sp->dev_vdata) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
		goto e_err;
	}

	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	ret = pcim_iomap_regions(pdev, bar_mask, "ccp");
	if (ret) {
		dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
		goto e_err;
	}

	iomap_table = pcim_iomap_table(pdev);
	if (!iomap_table) {
		dev_err(dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto e_err;
	}

	sp->io_map = iomap_table[sp->dev_vdata->bar];
	if (!sp->io_map) {
		dev_err(dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto e_err;
	}

	ret = sp_get_irqs(sp);
	if (ret)
		goto e_err;

	pci_set_master(pdev);
	sp->set_psp_master_device = psp_set_master;
	sp->get_psp_master_device = psp_get_master;
	sp->clear_psp_master_device = psp_clear_master;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
				ret);
			goto free_irqs;
		}
	}

	dev_set_drvdata(dev, sp);

	ret = sp_init(sp);
	if (ret)
		goto free_irqs;

	return 0;

free_irqs:
	sp_free_irqs(sp);
e_err:
	dev_notice(dev, "initialization failed\n");
	return ret;
}

static void sp_pci_shutdown(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct sp_device *sp = dev_get_drvdata(dev);

	if (!sp)
		return;

	sp_destroy(sp);
}

static void sp_pci_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct sp_device *sp = dev_get_drvdata(dev);

	if (!sp)
		return;

	sp_destroy(sp);

	sp_free_irqs(sp);
}

static int __maybe_unused sp_pci_suspend(struct device *dev)
{
	struct sp_device *sp = dev_get_drvdata(dev);

	return sp_suspend(sp);
}

static int __maybe_unused sp_pci_resume(struct device *dev)
{
	struct sp_device *sp = dev_get_drvdata(dev);

	return sp_resume(sp);
}

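/*
 * Register offsets for the SEV and TEE mailboxes differ between PSP
 * generations; each psp_vdata entry below describes one register layout.
 */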
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
static const struct sev_vdata sevv1 = {
	.cmdresp_reg = 0x10580,
	.cmdbuff_addr_lo_reg = 0x105e0,
	.cmdbuff_addr_hi_reg = 0x105e4,
};

static const struct sev_vdata sevv2 = {
	.cmdresp_reg = 0x10980,
	.cmdbuff_addr_lo_reg = 0x109e0,
	.cmdbuff_addr_hi_reg = 0x109e4,
};

static const struct tee_vdata teev1 = {
	.cmdresp_reg = 0x10544,
	.cmdbuff_addr_lo_reg = 0x10548,
	.cmdbuff_addr_hi_reg = 0x1054c,
	.ring_wptr_reg = 0x10550,
	.ring_rptr_reg = 0x10554,
};

static const struct psp_vdata pspv1 = {
	.sev = &sevv1,
	.feature_reg = 0x105fc,
	.inten_reg = 0x10610,
	.intsts_reg = 0x10614,
};

static const struct psp_vdata pspv2 = {
	.sev = &sevv2,
	.feature_reg = 0x109fc,
	.inten_reg = 0x10690,
	.intsts_reg = 0x10694,
};

static const struct psp_vdata pspv3 = {
	.tee = &teev1,
	.feature_reg = 0x109fc,
	.inten_reg = 0x10690,
	.intsts_reg = 0x10694,
};
#endif

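/*
 * Per-device version data; entries are referenced by index from the
 * driver_data field of sp_pci_table below.
 */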
static const struct sp_dev_vdata dev_vdata[] = {
	{	/* 0 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv3,
#endif
	},
	{	/* 1 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv5a,
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
		.psp_vdata = &pspv1,
#endif
	},
	{	/* 2 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv5b,
#endif
	},
	{	/* 3 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv5a,
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
		.psp_vdata = &pspv2,
#endif
	},
	{	/* 4 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv5a,
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
		.psp_vdata = &pspv3,
#endif
	},
	{	/* 5 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
		.psp_vdata = &pspv2,
#endif
	},
};
static const struct pci_device_id sp_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
	{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
	{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
	{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
	{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sp_pci_table);

static SIMPLE_DEV_PM_OPS(sp_pci_pm_ops, sp_pci_suspend, sp_pci_resume);

static struct pci_driver sp_pci_driver = {
	.name = "ccp",
	.id_table = sp_pci_table,
	.probe = sp_pci_probe,
	.remove = sp_pci_remove,
	.shutdown = sp_pci_shutdown,
	.driver.pm = &sp_pci_pm_ops,
	.dev_groups = psp_groups,
};

int sp_pci_init(void)
{
	return pci_register_driver(&sp_pci_driver);
}

void sp_pci_exit(void)
{
	pci_unregister_driver(&sp_pci_driver);
}
